#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>

#define SH_ETH_DEF_MSG_ENABLE \

#if defined(CONFIG_CPU_SUBTYPE_SH7734) || \
	defined(CONFIG_CPU_SUBTYPE_SH7763) || \
	defined(CONFIG_ARCH_R8A7740)
		pr_warn("PHY interface mode was not set up. Set to MII.\n");
#if defined(CONFIG_CPU_SUBTYPE_SH7724) || defined(CONFIG_ARCH_R8A7779)
#define SH_ETH_RESET_DEFAULT	1

static void sh_eth_set_rate(struct net_device *ndev)

#if defined(CONFIG_ARCH_R8A7779)
	switch (mdp->speed) {
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~bits, ECMR);
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | bits, ECMR);

	.set_rate	= sh_eth_set_rate,

	.rpadir_value	= 0x00020000,

#elif defined(CONFIG_CPU_SUBTYPE_SH7757)
#define SH_ETH_HAS_BOTH_MODULES	1
#define SH_ETH_HAS_TSU	1
static int sh_eth_check_reset(struct net_device *ndev);

static void sh_eth_set_duplex(struct net_device *ndev)

static void sh_eth_set_rate(struct net_device *ndev)
	switch (mdp->speed) {
		sh_eth_write(ndev, 0, RTRATE);
		sh_eth_write(ndev, 1, RTRATE);

	.set_rate	= sh_eth_set_rate,
	.rmcr_value	= 0x00000001,
	.rpadir_value	= 2 << 16,

#define SH_GIGA_ETH_BASE	0xfee00000
#define GIGA_MALR(port)		(SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c8)
#define GIGA_MAHR(port)		(SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c0)
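
/* Save both ports' MAC address registers in the GIGA block before the
 * chip reset and write them back afterwards (see the two loops below).
 */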
static void sh_eth_chip_reset_giga(struct net_device *ndev)
	unsigned long mahr[2], malr[2];

	for (i = 0; i < 2; i++) {
		malr[i] = ioread32((void *)GIGA_MALR(i));
		mahr[i] = ioread32((void *)GIGA_MAHR(i));

	for (i = 0; i < 2; i++) {
		iowrite32(malr[i], (void *)GIGA_MALR(i));
		iowrite32(mahr[i], (void *)GIGA_MAHR(i));
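
/* GETHER software reset: kick the reset bits in EDSR, wait for the
 * controller to complete (sh_eth_check_reset polls EDMR), then clear the
 * Tx/Rx descriptor list and FIFO address registers.
 */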
static int sh_eth_reset(struct net_device *ndev)
	if (sh_eth_is_gether(mdp)) {
		sh_eth_write(ndev, 0x03, EDSR);

		ret = sh_eth_check_reset(ndev);

		sh_eth_write(ndev, 0x0, TDLAR);
		sh_eth_write(ndev, 0x0, TDFAR);
		sh_eth_write(ndev, 0x0, TDFXR);
		sh_eth_write(ndev, 0x0, TDFFR);
		sh_eth_write(ndev, 0x0, RDLAR);
		sh_eth_write(ndev, 0x0, RDFAR);
		sh_eth_write(ndev, 0x0, RDFXR);
		sh_eth_write(ndev, 0x0, RDFFR);
static void sh_eth_set_duplex_giga(struct net_device *ndev)

static void sh_eth_set_rate_giga(struct net_device *ndev)
	switch (mdp->speed) {
		sh_eth_write(ndev, 0x00000000, GECMR);
		sh_eth_write(ndev, 0x00000010, GECMR);
		sh_eth_write(ndev, 0x00000020, GECMR);

	.set_duplex	= sh_eth_set_duplex_giga,
	.set_rate	= sh_eth_set_rate_giga,
	.fdr_value	= 0x0000072f,
	.rmcr_value	= 0x00000001,
	.rpadir_value	= 2 << 16,

	if (sh_eth_is_gether(mdp))
		return &sh_eth_my_cpu_data_giga;
	return &sh_eth_my_cpu_data;
#elif defined(CONFIG_CPU_SUBTYPE_SH7734) || defined(CONFIG_CPU_SUBTYPE_SH7763)
#define SH_ETH_HAS_TSU	1
static int sh_eth_check_reset(struct net_device *ndev);
static void sh_eth_reset_hw_crc(struct net_device *ndev);

static void sh_eth_chip_reset(struct net_device *ndev)

static void sh_eth_set_duplex(struct net_device *ndev)

static void sh_eth_set_rate(struct net_device *ndev)
	switch (mdp->speed) {
		sh_eth_write(ndev, GECMR_10, GECMR);
		sh_eth_write(ndev, GECMR_100, GECMR);
		sh_eth_write(ndev, GECMR_1000, GECMR);

	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate,
#if defined(CONFIG_CPU_SUBTYPE_SH7734)

static int sh_eth_reset(struct net_device *ndev)
	sh_eth_write(ndev, EDSR_ENALL, EDSR);

	ret = sh_eth_check_reset(ndev);

	sh_eth_write(ndev, 0x0, TDLAR);
	sh_eth_write(ndev, 0x0, TDFAR);
	sh_eth_write(ndev, 0x0, TDFXR);
	sh_eth_write(ndev, 0x0, TDFFR);
	sh_eth_write(ndev, 0x0, RDLAR);
	sh_eth_write(ndev, 0x0, RDFAR);
	sh_eth_write(ndev, 0x0, RDFXR);
	sh_eth_write(ndev, 0x0, RDFFR);

	sh_eth_reset_hw_crc(ndev);

	sh_eth_select_mii(ndev);
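
/* Clear CSMR so any hardware CRC/checksum state is reset on controllers
 * whose cpu_data advertises hw_crc.
 */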
static void sh_eth_reset_hw_crc(struct net_device *ndev)
	if (sh_eth_my_cpu_data.hw_crc)
		sh_eth_write(ndev, 0x0, CSMR);
#elif defined(CONFIG_ARCH_R8A7740)
#define SH_ETH_HAS_TSU	1
static int sh_eth_check_reset(struct net_device *ndev);

static void sh_eth_chip_reset(struct net_device *ndev)
	sh_eth_select_mii(ndev);

static int sh_eth_reset(struct net_device *ndev)
	sh_eth_write(ndev, EDSR_ENALL, EDSR);

	ret = sh_eth_check_reset(ndev);

	sh_eth_write(ndev, 0x0, TDLAR);
	sh_eth_write(ndev, 0x0, TDFAR);
	sh_eth_write(ndev, 0x0, TDFXR);
	sh_eth_write(ndev, 0x0, TDFFR);
	sh_eth_write(ndev, 0x0, RDLAR);
	sh_eth_write(ndev, 0x0, RDFAR);
	sh_eth_write(ndev, 0x0, RDFXR);
	sh_eth_write(ndev, 0x0, RDFFR);

static void sh_eth_set_duplex(struct net_device *ndev)

static void sh_eth_set_rate(struct net_device *ndev)
	switch (mdp->speed) {
		sh_eth_write(ndev, GECMR_10, GECMR);
		sh_eth_write(ndev, GECMR_100, GECMR);
		sh_eth_write(ndev, GECMR_1000, GECMR);

	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate,

#elif defined(CONFIG_CPU_SUBTYPE_SH7619)
#define SH_ETH_RESET_DEFAULT	1
#elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712)
#define SH_ETH_RESET_DEFAULT	1
#define SH_ETH_HAS_TSU	1

		DEFAULT_FIFO_F_D_RFD;

#if defined(SH_ETH_RESET_DEFAULT)

static int sh_eth_reset(struct net_device *ndev)

static int sh_eth_check_reset(struct net_device *ndev)
		if (!(sh_eth_read(ndev, EDMR) & 0x3))

#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
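/* On SH4/SH-Mobile, reserve enough headroom that the Rx buffer data
 * pointer ends up aligned to SH4_SKB_RX_ALIGN; a simpler variant of the
 * helper follows for the other CPUs.
 */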
static void sh_eth_set_receive_align(struct sk_buff *skb)
	reserve = SH4_SKB_RX_ALIGN - ((u32)skb->data & (SH4_SKB_RX_ALIGN - 1));
		skb_reserve(skb, reserve);

static void sh_eth_set_receive_align(struct sk_buff *skb)

static void update_mac_address(struct net_device *ndev)
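
/* Use the MAC address supplied by platform data when it is non-zero;
 * otherwise read the address back from the MAHR/MALR registers.
 */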
static void read_mac_address(struct net_device *ndev, unsigned char *mac)
	if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) {
		ndev->dev_addr[1] = (sh_eth_read(ndev, MAHR) >> 16) & 0xFF;
		ndev->dev_addr[2] = (sh_eth_read(ndev, MAHR) >> 8) & 0xFF;
		ndev->dev_addr[4] = (sh_eth_read(ndev, MALR) >> 8) & 0xFF;

static unsigned long sh_eth_get_edtrr_trns(struct sh_eth_private *mdp)
	if (sh_eth_is_gether(mdp))
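
/* Bit-banged MDIO: the helpers below toggle the MDC/MDIO pins and are
 * plugged into the mdiobb_ops structure (bb_ops) used by the MDIO bus.
 */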
static int bb_read(void *addr, u32 msk)

static void sh_set_mdio(struct mdiobb_ctrl *ctrl, int bit)

static void sh_mdc_ctrl(struct mdiobb_ctrl *ctrl, int bit)

	.set_mdc	= sh_mdc_ctrl,
	.set_mdio_dir	= sh_mmd_ctrl,
	.set_mdio_data	= sh_set_mdio,
	.get_mdio_data	= sh_get_mdio,

static void sh_eth_ring_free(struct net_device *ndev)
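
/* Build the Rx/Tx descriptor rings: allocate an skb per Rx descriptor,
 * align its data area, and hand the ring bases to the controller.
 */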
static void sh_eth_ring_format(struct net_device *ndev)
	int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
	int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;

		skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz);
		sh_eth_set_receive_align(skb);

	if (sh_eth_is_gether(mdp))

	if (sh_eth_is_gether(mdp))

static int sh_eth_ring_init(struct net_device *ndev)
	int rx_ringsize, tx_ringsize, ret = 0;

			(((ndev->mtu + 26 + 7) & ~7) + 2 + 16));

		dev_err(&ndev->dev, "Cannot allocate Rx skb\n");

		dev_err(&ndev->dev, "Cannot allocate Tx skb\n");

		dev_err(&ndev->dev, "Cannot allocate Rx Ring (size %d bytes)\n",

		dev_err(&ndev->dev, "Cannot allocate Tx Ring (size %d bytes)\n",

	sh_eth_ring_free(ndev);
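
/* Device bring-up: soft-reset the MAC, rebuild the descriptor rings, then
 * program the DMA/FIFO parameters, ECMR mode bits and interrupt masks
 * (EESIPR/ECSIPR) before starting the Tx queue.
 */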
	ret = sh_eth_reset(ndev);

	sh_eth_ring_format(ndev);
		sh_eth_write(ndev, mdp->cd->rpadir_value, RPADIR);

	sh_eth_write(ndev, 0, EESIPR);

#if defined(__LITTLE_ENDIAN)
	if (mdp->cd->hw_swap)

		sh_eth_write(ndev, 0, EDMR);

	sh_eth_write(ndev, mdp->cd->fdr_value, FDR);
	sh_eth_write(ndev, 0, TFTR);

	sh_eth_write(ndev, mdp->cd->rmcr_value, RMCR);

		sh_eth_write(ndev, 0x800, BCULR);

	sh_eth_write(ndev, mdp->cd->fcftr_value, FCFTR);

	if (!mdp->cd->no_trimd)
		sh_eth_write(ndev, 0, TRIMD);

	sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR);
	sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);

	sh_eth_write(ndev, val, ECMR);

	if (mdp->cd->set_rate)
		mdp->cd->set_rate(ndev);

	sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR);

	sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR);

	update_mac_address(ndev);

	if (mdp->cd->tpauser)

	netif_start_queue(ndev);

static int sh_eth_txfree(struct net_device *ndev)
		ndev->stats.tx_packets++;
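
/* Rx completion: walk the Rx ring, fold the per-descriptor status bits
 * into the netdev error counters, pass good frames to the stack and
 * refill the ring with newly allocated skbs.
 */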
static int sh_eth_rx(struct net_device *ndev, u32 intr_status)
	rxdesc = &mdp->rx_ring[entry];

#if defined(CONFIG_ARCH_R8A7740)
			ndev->stats.rx_length_errors++;

			ndev->stats.rx_errors++;
				ndev->stats.rx_crc_errors++;
				ndev->stats.rx_frame_errors++;
				ndev->stats.rx_length_errors++;
				ndev->stats.rx_length_errors++;
				ndev->stats.rx_missed_errors++;
				ndev->stats.rx_over_errors++;
			if (!mdp->cd->hw_swap)
			if (mdp->cd->rpadir)
			ndev->stats.rx_packets++;
		rxdesc = &mdp->rx_ring[entry];

			skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz);
			sh_eth_set_receive_align(skb);
			skb_checksum_none_assert(skb);

			(sh_eth_read(ndev, RDFAR) - sh_eth_read(ndev, RDLAR)) >> 4;
		sh_eth_write(ndev, EDRRR_R, EDRRR);

static void sh_eth_rcv_snd_disable(struct net_device *ndev)
	sh_eth_write(ndev, sh_eth_read(ndev, ECMR) &

static void sh_eth_rcv_snd_enable(struct net_device *ndev)
	sh_eth_write(ndev, sh_eth_read(ndev, ECMR) |
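
/* Error/link interrupt handling: read and acknowledge ECSR, follow link
 * changes via PSR, bump the error counters matching the EESR status bits,
 * and restart transmission if EDTRR has lost its transmit-request value.
 */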
static void sh_eth_error(struct net_device *ndev, int intr_status)
		felic_stat = sh_eth_read(ndev, ECSR);
		sh_eth_write(ndev, felic_stat, ECSR);
			ndev->stats.tx_carrier_errors++;

				link_stat = (sh_eth_read(ndev, PSR));
					link_stat = ~link_stat;
				sh_eth_rcv_snd_disable(ndev);
				sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) &
				sh_eth_write(ndev, sh_eth_read(ndev, ECSR),
				sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) |
				sh_eth_rcv_snd_enable(ndev);

		ndev->stats.tx_aborted_errors++;

		ndev->stats.rx_frame_errors++;

		ndev->stats.tx_fifo_errors++;
			dev_err(&ndev->dev, "Transmit Descriptor Empty\n");

		ndev->stats.tx_fifo_errors++;
			dev_err(&ndev->dev, "Transmit FIFO Underflow\n");
		ndev->stats.rx_over_errors++;
			dev_err(&ndev->dev, "Receive Descriptor Empty\n");

		ndev->stats.rx_fifo_errors++;
			dev_err(&ndev->dev, "Receive FIFO Overflow\n");

	if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
		ndev->stats.tx_fifo_errors++;

	mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE;
	if (mdp->cd->no_ade)
	if (intr_status & mask) {
		u32 edtrr = sh_eth_read(ndev, EDTRR);

		dev_err(&ndev->dev, "TX error. status=%8.8x cur_tx=%8.8x ",
			intr_status, mdp->cur_tx);
		dev_err(&ndev->dev, "dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",

		sh_eth_txfree(ndev);

		if (edtrr ^ sh_eth_get_edtrr_trns(mdp)) {
			sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);

		netif_wake_queue(ndev);
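
/* Interrupt handler: read EESR under the private spinlock, write the
 * handled bits back to acknowledge them, then dispatch to the Rx, Tx
 * completion and error paths.
 */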
static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
	u32 intr_status = 0;

	spin_lock(&mdp->lock);

	intr_status = sh_eth_read(ndev, EESR);
	sh_eth_write(ndev, intr_status, EESR);

		sh_eth_rx(ndev, intr_status);

		sh_eth_txfree(ndev);
		netif_wake_queue(ndev);

		sh_eth_error(ndev, intr_status);

	spin_unlock(&mdp->lock);
static void sh_eth_adjust_link(struct net_device *ndev)
			if (mdp->cd->set_duplex)
				mdp->cd->set_duplex(ndev);

			if (mdp->cd->set_rate)
				mdp->cd->set_rate(ndev);

	} else if (mdp->link) {

static int sh_eth_phy_init(struct net_device *ndev)
	phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link,

	if (IS_ERR(phydev)) {
		return PTR_ERR(phydev);

	dev_info(&ndev->dev, "attached phy %i to driver %s\n",
		 phydev->addr, phydev->drv->name);
static int sh_eth_phy_start(struct net_device *ndev)
	ret = sh_eth_phy_init(ndev);

static int sh_eth_get_settings(struct net_device *ndev,
	unsigned long flags;

	spin_unlock_irqrestore(&mdp->lock, flags);

static int sh_eth_set_settings(struct net_device *ndev,
	unsigned long flags;

	sh_eth_rcv_snd_disable(ndev);

	if (mdp->cd->set_duplex)
		mdp->cd->set_duplex(ndev);

	sh_eth_rcv_snd_enable(ndev);

	spin_unlock_irqrestore(&mdp->lock, flags);

static int sh_eth_nway_reset(struct net_device *ndev)
	unsigned long flags;

	spin_unlock_irqrestore(&mdp->lock, flags);

	"rx_current", "tx_current",
	"rx_dirty", "tx_dirty",

#define SH_ETH_STATS_LEN  ARRAY_SIZE(sh_eth_gstrings_stats)

static int sh_eth_get_sset_count(struct net_device *netdev, int sset)

static void sh_eth_get_ethtool_stats(struct net_device *ndev,

	switch (stringset) {
		memcpy(data, *sh_eth_gstrings_stats,
		       sizeof(sh_eth_gstrings_stats));

static void sh_eth_get_ringparam(struct net_device *ndev,
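
/* Changing the ring sizes means stopping Rx/Tx, freeing the current rings
 * and DMA buffers, re-initialising them with the new sizes, and restarting
 * the device if it was running.
 */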
static int sh_eth_set_ringparam(struct net_device *ndev,

	if (netif_running(ndev)) {
		netif_tx_disable(ndev);

		sh_eth_write(ndev, 0x0000, EESIPR);

		sh_eth_write(ndev, 0, EDTRR);
		sh_eth_write(ndev, 0, EDRRR);

		sh_eth_ring_free(ndev);

		sh_eth_free_dma_buffer(mdp);

	ret = sh_eth_ring_init(ndev);
		dev_err(&ndev->dev, "%s: sh_eth_ring_init failed.\n", __func__);

	ret = sh_eth_dev_init(ndev, false);
		dev_err(&ndev->dev, "%s: sh_eth_dev_init failed.\n", __func__);

	if (netif_running(ndev)) {
		sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);

		sh_eth_write(ndev, EDRRR_R, EDRRR);
		netif_wake_queue(ndev);
static const struct ethtool_ops sh_eth_ethtool_ops = {
	.get_settings		= sh_eth_get_settings,
	.set_settings		= sh_eth_set_settings,
	.nway_reset		= sh_eth_nway_reset,
	.get_msglevel		= sh_eth_get_msglevel,
	.set_msglevel		= sh_eth_set_msglevel,
	.get_strings		= sh_eth_get_strings,
	.get_ethtool_stats	= sh_eth_get_ethtool_stats,
	.get_sset_count		= sh_eth_get_sset_count,
	.get_ringparam		= sh_eth_get_ringparam,
	.set_ringparam		= sh_eth_set_ringparam,
static int sh_eth_open(struct net_device *ndev)
	pm_runtime_get_sync(&mdp->pdev->dev);

#if defined(CONFIG_CPU_SUBTYPE_SH7763) || \
	defined(CONFIG_CPU_SUBTYPE_SH7764) || \
	defined(CONFIG_CPU_SUBTYPE_SH7757)
		dev_err(&ndev->dev, "Cannot assign IRQ number\n");
	ret = sh_eth_ring_init(ndev);

	ret = sh_eth_dev_init(ndev, true);

	ret = sh_eth_phy_start(ndev);

	pm_runtime_put_sync(&mdp->pdev->dev);
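
/* Tx watchdog timeout: stop the queue, log the EESR status, invalidate the
 * Rx descriptors (poison address 0xBADF00D0) and re-initialise the device.
 */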
static void sh_eth_tx_timeout(struct net_device *ndev)
	netif_stop_queue(ndev);

	dev_err(&ndev->dev, "%s: transmit timed out, status %8.8x, resetting...\n",
		ndev->name, (int)sh_eth_read(ndev, EESR));

	ndev->stats.tx_errors++;

		rxdesc->addr = 0xBADF00D0;

	sh_eth_dev_init(ndev, true);
	unsigned long flags;

	if (!sh_eth_txfree(ndev)) {
		netif_stop_queue(ndev);
		spin_unlock_irqrestore(&mdp->lock, flags);

	spin_unlock_irqrestore(&mdp->lock, flags);

	if (!mdp->cd->hw_swap)

	if (!(sh_eth_read(ndev, EDTRR) & sh_eth_get_edtrr_trns(mdp)))
		sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);
static int sh_eth_close(struct net_device *ndev)
	netif_stop_queue(ndev);

	sh_eth_write(ndev, 0x0000, EESIPR);

	sh_eth_write(ndev, 0, EDTRR);
	sh_eth_write(ndev, 0, EDRRR);

	sh_eth_ring_free(ndev);

	sh_eth_free_dma_buffer(mdp);

	pm_runtime_put_sync(&mdp->pdev->dev);
	pm_runtime_get_sync(&mdp->pdev->dev);

	ndev->stats.tx_dropped += sh_eth_read(ndev, TROCR);
	sh_eth_write(ndev, 0, TROCR);
	ndev->stats.collisions += sh_eth_read(ndev, CDCR);
	sh_eth_write(ndev, 0, CDCR);
	ndev->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR);
	sh_eth_write(ndev, 0, LCCR);
	if (sh_eth_is_gether(mdp)) {
		ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR);
		sh_eth_write(ndev, 0, CERCR);
		ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR);
		sh_eth_write(ndev, 0, CEECR);
	ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR);
	sh_eth_write(ndev, 0, CNDCR);

	pm_runtime_put_sync(&mdp->pdev->dev);

	return &ndev->stats;

	if (!netif_running(ndev))

#if defined(SH_ETH_HAS_TSU)
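
/* TSU address table helpers: entries of 8 bytes starting at TSU_ADRH0 hold
 * the MAC addresses used for filtering, TSU_TEN flags which entries are
 * valid, and the per-port POST registers select the port an entry serves.
 */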
static void *sh_eth_tsu_get_post_reg_offset(struct sh_eth_private *mdp,
	return sh_eth_tsu_get_offset(mdp, TSU_POST1) + (entry / 8 * 4);

static u32 sh_eth_tsu_get_post_mask(int entry)
	return 0x0f << (28 - ((entry % 8) * 4));

	return (0x08 >> (mdp->port << 1)) << (28 - ((entry % 8) * 4));

static void sh_eth_tsu_enable_cam_entry_post(struct net_device *ndev,
	reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry);
	iowrite32(tmp | sh_eth_tsu_get_post_bit(mdp, entry), reg_offset);

static bool sh_eth_tsu_disable_cam_entry_post(struct net_device *ndev,
	u32 post_mask, ref_mask, tmp;

	reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry);
	post_mask = sh_eth_tsu_get_post_mask(entry);
	ref_mask = sh_eth_tsu_get_post_bit(mdp, entry) & ~post_mask;

	iowrite32(tmp & ~post_mask, reg_offset);

	return tmp & ref_mask;
static int sh_eth_tsu_busy(struct net_device *ndev)
			dev_err(&ndev->dev, "%s: timeout\n", __func__);

static int sh_eth_tsu_write_entry(struct net_device *ndev, void *reg,
	val = addr[0] << 24 | addr[1] << 16 | addr[2] << 8 | addr[3];
	if (sh_eth_tsu_busy(ndev) < 0)

	val = addr[4] << 8 | addr[5];
	if (sh_eth_tsu_busy(ndev) < 0)

static void sh_eth_tsu_read_entry(void *reg, u8 *addr)
	addr[0] = (val >> 24) & 0xff;
	addr[1] = (val >> 16) & 0xff;
	addr[2] = (val >> 8) & 0xff;
	addr[3] = val & 0xff;
	addr[4] = (val >> 8) & 0xff;
	addr[5] = val & 0xff;
static int sh_eth_tsu_find_entry(struct net_device *ndev, const u8 *addr)
	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);

		sh_eth_tsu_read_entry(reg_offset, c_addr);

static int sh_eth_tsu_find_empty(struct net_device *ndev)
	memset(blank, 0, sizeof(blank));
	entry = sh_eth_tsu_find_entry(ndev, blank);

static int sh_eth_tsu_disable_cam_entry_table(struct net_device *ndev,
	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);

	sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) &
			 ~(1 << (31 - entry)), TSU_TEN);

	memset(blank, 0, sizeof(blank));
	ret = sh_eth_tsu_write_entry(ndev, reg_offset + entry * 8, blank);
static int sh_eth_tsu_add_entry(struct net_device *ndev, const u8 *addr)
	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);

	i = sh_eth_tsu_find_entry(ndev, addr);
		i = sh_eth_tsu_find_empty(ndev);
		ret = sh_eth_tsu_write_entry(ndev, reg_offset + i * 8, addr);

		sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) |

	sh_eth_tsu_enable_cam_entry_post(ndev, i);

static int sh_eth_tsu_del_entry(struct net_device *ndev, const u8 *addr)
	i = sh_eth_tsu_find_entry(ndev, addr);

	if (sh_eth_tsu_disable_cam_entry_post(ndev, i))

	ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);

static int sh_eth_tsu_purge_all(struct net_device *ndev)
		if (sh_eth_tsu_disable_cam_entry_post(ndev, i))

		ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);

static void sh_eth_tsu_purge_mcast(struct net_device *ndev)
	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);

		sh_eth_tsu_read_entry(reg_offset, addr);
		if (is_multicast_ether_addr(addr))
			sh_eth_tsu_del_entry(ndev, addr);
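
/* Rx filter configuration: set the ECMR promiscuous/multicast bits and
 * mirror the interface's multicast list into the TSU address table,
 * falling back to accepting all multicast when an entry cannot be added.
 */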
static void sh_eth_set_multicast_list(struct net_device *ndev)
	unsigned long flags;

		sh_eth_tsu_purge_mcast(ndev);

		sh_eth_tsu_purge_mcast(ndev);

		sh_eth_tsu_purge_all(ndev);

	} else if (mdp->cd->tsu) {
			if (mcast_all && is_multicast_ether_addr(ha->addr))
			if (sh_eth_tsu_add_entry(ndev, ha->addr) < 0) {
				sh_eth_tsu_purge_mcast(ndev);

	sh_eth_write(ndev, ecmr_bits, ECMR);

	spin_unlock_irqrestore(&mdp->lock, flags);

	int vtag_reg_index = sh_eth_get_vtag_index(mdp);

		sh_eth_tsu_write(mdp, 0, vtag_reg_index);

static int sh_eth_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
	int vtag_reg_index = sh_eth_get_vtag_index(mdp);

	sh_eth_tsu_write(mdp, 0, vtag_reg_index);

	sh_eth_tsu_write(mdp, 0, TSU_FCM);

	if (sh_eth_is_gether(mdp)) {
		sh_eth_tsu_write(mdp, 0, TSU_FWSR);

	sh_eth_tsu_write(mdp, 0, TSU_TEN);

static int sh_mdio_release(struct net_device *ndev)
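
/* Register a bit-banged MDIO bus for the PHY: allocate the bitbang
 * controller, hook it up to bb_ops, and name the bus after the platform
 * device and its id.
 */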
static int sh_mdio_init(struct net_device *ndev, int id,
	bitbang->ctrl.ops = &bb_ops;

		goto out_free_bitbang;

	mdp->mii_bus->name = "sh_mii";
		 mdp->pdev->name, id);
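
/* Pick the register offset table matching the controller generation:
 * gigabit (GETHER), fast SH4, or fast SH3/SH2 layouts.
 */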
static const u16 *sh_eth_get_register_offset(int register_type)
	switch (register_type) {
		reg_offset = sh_eth_offset_gigabit;
		reg_offset = sh_eth_offset_fast_sh4;
		reg_offset = sh_eth_offset_fast_sh3_sh2;

	.ndo_open		= sh_eth_open,
	.ndo_stop		= sh_eth_close,
	.ndo_start_xmit		= sh_eth_start_xmit,
	.ndo_get_stats		= sh_eth_get_stats,
#if defined(SH_ETH_HAS_TSU)
	.ndo_set_rx_mode	= sh_eth_set_multicast_list,
	.ndo_vlan_rx_add_vid	= sh_eth_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= sh_eth_vlan_rx_kill_vid,
	.ndo_tx_timeout		= sh_eth_tx_timeout,
	.ndo_do_ioctl		= sh_eth_do_ioctl,

	mdp = netdev_priv(ndev);

	pm_runtime_resume(&pdev->dev);

#if defined(SH_ETH_HAS_BOTH_MODULES)
	mdp->cd = sh_eth_get_cpu_data(mdp);
	mdp->cd = &sh_eth_my_cpu_data;
	sh_eth_set_default_cpu_data(mdp->cd);

	read_mac_address(ndev, pd->mac_addr);
			dev_err(&pdev->dev, "TSU resource not found\n");
					 resource_size(rtsu));
		mdp->port = devno % 2;

		if (mdp->cd->chip_reset)
			mdp->cd->chip_reset(ndev);

			sh_eth_tsu_init(mdp);

	ret = sh_mdio_init(ndev, pdev->id, pd);
		goto out_unregister;

	pr_info("Base address at 0x%x, %pM, IRQ %d.\n",

	platform_set_drvdata(pdev, ndev);

	if (mdp && mdp->addr)

	struct net_device *ndev = platform_get_drvdata(pdev);

	sh_mdio_release(ndev);

	pm_runtime_disable(&pdev->dev);

	platform_set_drvdata(pdev, NULL);

static int sh_eth_runtime_nop(struct device *dev)

static struct dev_pm_ops sh_eth_dev_pm_ops = {
	.runtime_suspend = sh_eth_runtime_nop,
	.runtime_resume = sh_eth_runtime_nop,

	.probe = sh_eth_drv_probe,
	.remove = sh_eth_drv_remove,
	.pm = &sh_eth_dev_pm_ops,