#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/mii.h>

#define TX_TIMEOUT (1*HZ)
static int gfar_change_mtu(struct net_device *dev, int new_mtu);

#ifdef CONFIG_NET_POLL_CONTROLLER
static void gfar_set_mac_for_addr(struct net_device *dev, int num,
				  const u8 *addr);
		gfar_init_rxbdp(rx_queue, rxbdp,

			netdev_err(ndev, "Can't allocate RX buffers\n");
			goto err_rxalloc_fail;

		gfar_new_rxbdp(rx_queue, rxbdp, skb);

	free_skb_resources(priv);
static int gfar_alloc_skb_resources(struct net_device *ndev)

			   "Could not allocate buffer descriptors!\n");

			  "Could not allocate tx_skbuff\n");

			  "Could not allocate rx_skbuff\n");

	if (gfar_init_bds(ndev))

	free_skb_resources(priv);
static void gfar_init_tx_rx_base(struct gfar_private *priv)

		gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);

		gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);

static void gfar_init_mac(struct net_device *ndev)

	gfar_init_tx_rx_base(priv);

	gfar_clear_exact_match(ndev);

	gfar_write(&regs->rctrl, rctrl);

	gfar_write(&regs->tctrl, tctrl);

	gfar_write(&regs->attreli, attrs);

	gfar_write(&regs->attr, attrs);

		rx_packets += priv->rx_queue[i]->stats.rx_packets;

		tx_packets += priv->tx_queue[i]->stats.tx_packets;
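	/*
	 * The two sums above fold the per-queue software counters into
	 * the aggregate numbers exposed through the .ndo_get_stats hook
	 * installed below.
	 */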
	.ndo_open = gfar_enet_open,
	.ndo_start_xmit = gfar_start_xmit,
	.ndo_stop = gfar_close,
	.ndo_change_mtu = gfar_change_mtu,
	.ndo_set_rx_mode = gfar_set_multi,
	.ndo_tx_timeout = gfar_timeout,
	.ndo_do_ioctl = gfar_ioctl,
	.ndo_get_stats = gfar_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = gfar_netpoll,
		spin_lock(&priv->rx_queue[i]->rxlock);

		spin_lock(&priv->tx_queue[i]->txlock);

		spin_unlock(&priv->rx_queue[i]->rxlock);

		spin_unlock(&priv->tx_queue[i]->txlock);
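/*
 * A frame control block (FCB) is prepended to frame data whenever VLAN
 * acceleration or checksum offload is in use, so gfar_uses_fcb() below
 * also feeds the frame-size math in gfar_change_mtu().
 */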
static inline int gfar_uses_fcb(struct gfar_private *priv)

	return gfar_is_vlan_on(priv) ||
	for (i = 0; i < priv->num_grps; i++)
		napi_disable(&priv->gfargrp[i].napi);

	for (i = 0; i < priv->num_grps; i++)
		napi_enable(&priv->gfargrp[i].napi);
static int gfar_parse_group(struct device_node *np,
			    struct gfar_private *priv, const char *model)
	const u32 *stash_len;
	const u32 *stash_idx;
	unsigned int num_tx_qs, num_rx_qs;
	u32 *tx_queues, *rx_queues;

	num_tx_qs = tx_queues ? *tx_queues : 1;

		pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",

		pr_err("Cannot do alloc_etherdev, aborting\n");

	num_rx_qs = rx_queues ? *rx_queues : 1;
		pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",

		pr_err("Cannot do alloc_etherdev, aborting\n");

	*pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);

	priv = netdev_priv(dev);
	priv->node = ofdev->dev.of_node;

	netif_set_real_num_rx_queues(dev, num_rx_qs);

	INIT_LIST_HEAD(&priv->rx_list.list);
			err = gfar_parse_group(child, priv, model);

		err = gfar_parse_group(np, priv, model);

		goto tx_alloc_failed;

		goto rx_alloc_failed;

	if (stash_len || stash_idx)

	if (ctype && !strcmp(ctype, "rgmii-id"))

	free_rx_pointers(priv);

	free_tx_pointers(priv);

	unmap_group_regs(priv);
static int gfar_hwtstamp_ioctl(struct net_device *netdev,
			       struct ifreq *ifr, int cmd)

	switch (config.rx_filter) {

	if (!netif_running(dev))

		return gfar_hwtstamp_ioctl(dev, rq, cmd);
static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs)

	unsigned int new_bit_map = 0x0;
	int mask = 0x1 << (max_qs - 1), i;

	for (i = 0; i < max_qs; i++) {

			new_bit_map = new_bit_map + (1 << i);
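	/*
	 * Worked example for reverse_bitmap(): the mask starts at bit
	 * (max_qs - 1) and walks down while i counts up, so bit
	 * (max_qs - 1 - n) of bit_map lands on bit n of the result;
	 * e.g. reverse_bitmap(0x1, 4) == 0x8.
	 */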
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

static void gfar_init_filer_table(struct gfar_private *priv)

	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);

	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);

	for (i = 0; i < rqfar; i++) {

		gfar_write_filer(priv, i, rqfcr, rqfpr);
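	/*
	 * RX filer setup sketch: cluster_entry_per_class() lays down the
	 * match rules for one protocol class per call (IPv6, then IPv4
	 * above), and the final loop rewrites every remaining filer slot
	 * below rqfar with the default rqfcr/rqfpr rule.
	 */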
static void gfar_detect_errata(struct gfar_private *priv)

	unsigned int mod = (svr >> 16) & 0xfff6;
	unsigned int rev = svr & 0xffff;

	if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) ||
	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))

	if ((pvr == 0x80850010 && mod == 0x80b0) ||
	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))

	if ((pvr == 0x80850010 && mod == 0x80b0) ||
	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))

	if ((pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020) ||
	    (pvr == 0x80210020 && mod == 0x8030 && rev == 0x0020))
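	/*
	 * SVR/PVR decoding used above: the upper halfword of the System
	 * Version Register identifies the part and the lower halfword the
	 * silicon revision; masking mod with 0xfff6 drops the bit that
	 * marks the security-engine "E" variants, so a part and its "E"
	 * sibling select the same errata workarounds.
	 */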
	dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",

	int err = 0, i, grp_idx = 0;

	err = gfar_of_init(ofdev, &dev);

	priv = netdev_priv(dev);

	priv->node = ofdev->dev.of_node;

	gfar_detect_errata(priv);
	gfar_write(&regs->maccfg1, tempval);

	gfar_write(&regs->maccfg2, tempval);

	for (i = 0; i < priv->num_grps; i++)

		baddr = &regs->isrg0;
		for (i = 0; i < priv->num_grps; i++) {

			gfar_write(baddr, isrg);

	for (i = 0; i < priv->num_grps; i++) {

	for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) {
		priv->gfargrp[grp_idx].num_rx_queues = 0x0;

				priv->gfargrp[grp_idx].num_rx_queues++;

		priv->gfargrp[grp_idx].num_tx_queues = 0x0;

				priv->gfargrp[grp_idx].num_tx_queues++;
		pr_err("%s: Cannot register net device, aborting\n",
		       dev->name);

	for (i = 0; i < priv->num_grps; i++) {

				dev->name, "_g", '0' + i, "_tx");

				dev->name, "_g", '0' + i, "_rx");

				dev->name, "_g", '0' + i, "_er");
	gfar_init_filer_table(priv);

	netdev_info(dev, "mac: %pM\n", dev->dev_addr);

	netdev_info(dev, "Running with NAPI enabled\n");

		netdev_info(dev, "RX BD ring size for Q[%d]: %d\n",
			    i, priv->rx_queue[i]->rx_ring_size);

		netdev_info(dev, "TX BD ring size for Q[%d]: %d\n",
			    i, priv->tx_queue[i]->tx_ring_size);
	unmap_group_regs(priv);
	free_tx_pointers(priv);
	free_rx_pointers(priv);

	unmap_group_regs(priv);
static int gfar_suspend(struct device *dev)

	unsigned long flags;

	int magic_packet = priv->wol_en &&

	if (netif_running(ndev)) {

		gfar_halt_nodisable(ndev);

		tempval = gfar_read(&regs->maccfg1);

		gfar_write(&regs->maccfg1, tempval);

			tempval = gfar_read(&regs->maccfg2);

			gfar_write(&regs->maccfg2, tempval);
static int gfar_resume(struct device *dev)

	unsigned long flags;

	int magic_packet = priv->wol_en &&

	if (!netif_running(ndev)) {

	if (!magic_packet && priv->phydev)

	tempval = gfar_read(&regs->maccfg2);

	gfar_write(&regs->maccfg2, tempval);

static int gfar_restore(struct device *dev)

	if (!netif_running(ndev)) {

	gfar_init_bds(ndev);

	gfar_set_mac_address(ndev);
	gfar_init_mac(ndev);
	.resume = gfar_resume,
	.freeze = gfar_suspend,
	.thaw = gfar_resume,
	.restore = gfar_restore,

#define GFAR_PM_OPS (&gfar_pm_ops)

#define GFAR_PM_OPS NULL
	struct gfar __iomem *regs = priv->gfargrp[0].regs;

	ecntrl = gfar_read(&regs->ecntrl);

	uint gigabit_support =

	interface = gfar_get_interface(dev);

		dev_err(&dev->dev, "could not attach to PHY\n");

		gfar_configure_serdes(dev);
static void gfar_configure_serdes(struct net_device *dev)

		dev_warn(&dev->dev, "error: SGMII mode requires that the "
			 "device tree specify a tbi-handle\n");

		dev_err(&dev->dev, "error: Could not get TBI device\n");
	for (i = 0; i < priv->num_grps; i++) {

	gfar_write(&regs->igaddr0, 0);
	gfar_write(&regs->igaddr1, 0);
	gfar_write(&regs->igaddr2, 0);
	gfar_write(&regs->igaddr3, 0);
	gfar_write(&regs->igaddr4, 0);
	gfar_write(&regs->igaddr5, 0);
	gfar_write(&regs->igaddr6, 0);
	gfar_write(&regs->igaddr7, 0);

	gfar_write(&regs->gaddr0, 0);
	gfar_write(&regs->gaddr1, 0);
	gfar_write(&regs->gaddr2, 0);
	gfar_write(&regs->gaddr3, 0);
	gfar_write(&regs->gaddr4, 0);
	gfar_write(&regs->gaddr5, 0);
	gfar_write(&regs->gaddr6, 0);
	gfar_write(&regs->gaddr7, 0);

		gfar_write(&regs->rmon.cam1, 0xffffffff);
		gfar_write(&regs->rmon.cam2, 0xffffffff);
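/*
 * __gfar_is_rx_idle() below reads an internal status register and treats
 * the receiver as idle when its two 16-bit halves agree;
 * gfar_halt_nodisable() uses it as a fallback while waiting for a
 * graceful RX stop to complete.
 */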
static int __gfar_is_rx_idle(struct gfar_private *priv)

	if ((res & 0xffff) == (res >> 16))

static void gfar_halt_nodisable(struct net_device *dev)

	for (i = 0; i < priv->num_grps; i++) {

	tempval = gfar_read(&regs->dmactrl);

	gfar_write(&regs->dmactrl, tempval);

	ret = spin_event_timeout(((gfar_read(&regs->ievent) &

		ret = __gfar_is_rx_idle(priv);
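	/*
	 * Halt sequence sketch: graceful-stop bits are set in DMACTRL,
	 * then spin_event_timeout() polls IEVENT for the graceful-stop-
	 * complete events, falling back to __gfar_is_rx_idle() on parts
	 * where the RX completion event may never assert.
	 */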
	struct gfar __iomem *regs = priv->gfargrp[0].regs;

	gfar_halt_nodisable(dev);

	tempval = gfar_read(&regs->maccfg1);

	gfar_write(&regs->maccfg1, tempval);

	unsigned long flags;

	for (i = 0; i < priv->num_grps; i++)
		free_grp_irqs(&priv->gfargrp[i]);

	for (i = 0; i < priv->num_grps; i++)

	free_skb_resources(priv);

	struct txbd8 *txbdp;

			for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;

	struct rxbd8 *rxbdp;
static void free_skb_resources(struct gfar_private *priv)

		txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex);

			free_skb_tx_queue(tx_queue);
		netdev_tx_reset_queue(txq);

			free_skb_rx_queue(rx_queue);

			  priv->tx_queue[0]->tx_bd_dma_base);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;

	tempval = gfar_read(&regs->maccfg1);

	gfar_write(&regs->maccfg1, tempval);

	tempval = gfar_read(&regs->dmactrl);

	gfar_write(&regs->dmactrl, tempval);

	tempval = gfar_read(&regs->dmactrl);

	gfar_write(&regs->dmactrl, tempval);

	for (i = 0; i < priv->num_grps; i++) {

	struct gfar __iomem *regs = priv->gfargrp[0].regs;

		gfar_write(&regs->txic, 0);

		gfar_write(&regs->rxic, 0);

		baddr = &regs->txic0;

			gfar_write(baddr + i, 0);

				gfar_write(baddr + i, priv->tx_queue[i]->txic);

		baddr = &regs->rxic0;

			gfar_write(baddr + i, 0);

				gfar_write(baddr + i, priv->rx_queue[i]->rxic);
	for (i = 0; i < priv->num_grps; i++) {

	err = gfar_alloc_skb_resources(ndev);

	gfar_init_mac(ndev);

	for (i = 0; i < priv->num_grps; i++) {
		err = register_grp_irqs(&priv->gfargrp[i]);

			for (j = 0; j < i; j++)
				free_grp_irqs(&priv->gfargrp[j]);

	free_skb_resources(priv);

static int gfar_enet_open(struct net_device *dev)

	gfar_set_mac_address(dev);

	err = init_phy(dev);

	netif_tx_start_all_queues(dev);
static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
				    int fcb_length)

		fcb->phcs = udp_hdr(skb)->check;

		fcb->phcs = tcp_hdr(skb)->check;

	fcb->l3os = (u16)(skb_network_offset(skb) - fcb_length);
	fcb->l4os = skb_network_header_len(skb);
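	/*
	 * FCB offsets worked example: for an untagged IPv4/TCP frame the
	 * network header sits ETH_HLEN (14) bytes past the FCB, so
	 * l3os = 14 and l4os = 20 (the IPv4 header length); phcs is seeded
	 * with the stack's pseudo-header checksum taken from the TCP/UDP
	 * check field above.
	 */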
static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
				      struct txbd8 *base, int ring_size)

	struct txbd8 *new_bd = bdp + stride;

	return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
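/*
 * Ring-wrap example: with ring_size = 256 and bdp at base + 255, a stride
 * of 2 yields base + 257, which skip_txbd() wraps to base + 1; next_txbd()
 * below is just skip_txbd() with a stride of one.
 */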
static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
				      int ring_size)

	return skip_txbd(bdp, 1, base, ring_size);
	struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;

	int i, rq = 0, do_tstamp = 0;

	unsigned long flags;

	txq = netdev_get_tx_queue(dev, rq);

	regs = tx_queue->grp->regs;

	    (skb_headroom(skb) < fcb_length)) {

			dev->stats.tx_errors++;

			skb_set_owner_w(skb_new, skb->sk);

	nr_frags = skb_shinfo(skb)->nr_frags;

		nr_txbds = nr_frags + 2;

		nr_txbds = nr_frags + 1;

		netif_tx_stop_queue(txq);
		dev->stats.tx_fifo_errors++;

	tx_queue->stats.tx_bytes += skb->len;
	tx_queue->stats.tx_packets++;

	txbdp = txbdp_start = tx_queue->cur_tx;

		txbdp_tstamp = txbdp = next_txbd(txbdp, base,

	if (nr_frags == 0) {

		for (i = 0; i < nr_frags; i++) {

			txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);

			length = skb_shinfo(skb)->frags[i].size;

			lstatus = txbdp->lstatus | length |

			if (i == nr_frags - 1)

			bufaddr = skb_frag_dma_map(&priv->ofdev->dev,
						   &skb_shinfo(skb)->frags[i],

		lstatus = txbdp_start->lstatus;

		fcb = gfar_add_fcb(skb);

			     ((unsigned long)fcb % 0x20) > 0x18)) {

			gfar_tx_checksum(skb, fcb, fcb_length);

		fcb = gfar_add_fcb(skb);

		fcb = gfar_add_fcb(skb);

		txbdp_tstamp->bufPtr = txbdp_start->bufPtr + fcb_length;

			       (skb_headlen(skb) - fcb_length);

	netdev_tx_sent_queue(txq, skb->len);

	txbdp_start->lstatus = lstatus;

		netif_tx_stop_queue(txq);

		dev->stats.tx_fifo_errors++;

	spin_unlock_irqrestore(&tx_queue->txlock, flags);
static int gfar_close(struct net_device *dev)

	netif_tx_stop_all_queues(dev);

static int gfar_set_mac_address(struct net_device *dev)

	gfar_set_mac_for_addr(dev, 0, dev->dev_addr);

	tempval = gfar_read(&regs->rctrl);

	gfar_write(&regs->rctrl, tempval);
	unsigned long flags;

	tempval = gfar_read(&regs->tctrl);

	gfar_write(&regs->tctrl, tempval);

	tempval = gfar_read(&regs->tctrl);

	gfar_write(&regs->tctrl, tempval);

	tempval = gfar_read(&regs->rctrl);

	gfar_write(&regs->rctrl, tempval);

	tempval = gfar_read(&regs->rctrl);

	gfar_write(&regs->rctrl, tempval);

	gfar_change_mtu(dev, dev->mtu);
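/*
 * Frame-size math for gfar_change_mtu() below: the wire frame starts at
 * new_mtu + ETH_HLEN, grows by the VLAN header when tagging is on and by
 * the FCB length when gfar_uses_fcb() is true, and MACCFG2's huge-frame
 * handling is updated to match the resulting buffer size.
 */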
static int gfar_change_mtu(struct net_device *dev, int new_mtu)

	int tempsize, tempval;

	int frame_size = new_mtu + ETH_HLEN;

	if (gfar_is_vlan_on(priv))

		netif_err(priv, drv, dev, "Invalid MTU setting\n");

	if (gfar_uses_fcb(priv))

	if ((oldsize != tempsize) && (dev->flags & IFF_UP))

	tempval = gfar_read(&regs->maccfg2);

	gfar_write(&regs->maccfg2, tempval);

	if ((oldsize != tempsize) && (dev->flags & IFF_UP))
	netif_tx_stop_all_queues(dev);

	netif_tx_start_all_queues(dev);

	netif_tx_schedule_all(dev);

static void gfar_timeout(struct net_device *dev)

	dev->stats.tx_errors++;

static void gfar_align_skb(struct sk_buff *skb)
	int frags = 0, nr_txbds = 0;

	int tqi = tx_queue->qindex;
	unsigned int bytes_sent = 0;

	txq = netdev_get_tx_queue(dev, tqi);

	while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
		unsigned long flags;

		frags = skb_shinfo(skb)->nr_frags;

			nr_txbds = frags + 2;

			nr_txbds = frags + 1;

		lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);

		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
			next = next_txbd(bdp, base, tx_ring_size);

		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {

			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
			shhwtstamps.hwtstamp = ns_to_ktime(*ns);

		bdp = next_txbd(bdp, base, tx_ring_size);

		for (i = 0; i < frags; i++) {

			bdp = next_txbd(bdp, base, tx_ring_size);

		bytes_sent += skb->len;

		skb_dirtytx = (skb_dirtytx + 1) &

		spin_unlock_irqrestore(&tx_queue->txlock, flags);

	if (netif_tx_queue_stopped(txq) && tx_queue->num_txbdfree)
		netif_wake_subqueue(dev, tqi);

	netdev_tx_completed_queue(txq, howmany, bytes_sent);
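	/*
	 * netdev_tx_completed_queue() closes the byte-queue-limits loop
	 * opened by netdev_tx_sent_queue() in the xmit path; the core uses
	 * the (packets, bytes) completion counts to bound how much data may
	 * sit in the ring, keeping it busy without building up latency.
	 */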
static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp)

	unsigned long flags;

	if (napi_schedule_prep(&gfargrp->napi)) {

	spin_unlock_irqrestore(&gfargrp->grplock, flags);

static irqreturn_t gfar_transmit(int irq, void *grp_id)

	gfar_init_rxbdp(rx_queue, bdp, buf);

	gfar_align_skb(skb);

	return gfar_alloc_skb(dev);
static inline void count_errors(unsigned short status, struct net_device *dev)

static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)

	skb_checksum_none_assert(skb);

	skb_record_rx_queue(skb, fcb->rq);

		memset(shhwtstamps, 0, sizeof(*shhwtstamps));
		shhwtstamps->hwtstamp = ns_to_ktime(*ns);

		gfar_rx_checksum(skb, fcb);

		__vlan_hwaccel_put_tag(skb, fcb->vlctl);
	struct rxbd8 *bdp, *base;

	amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0);

			count_errors(bdp->status, dev);

				rx_queue->stats.rx_packets++;

				skb_record_rx_queue(skb, rx_queue->qindex);
				gfar_process_frame(dev, skb, amount_pull,
						   &rx_queue->grp->napi);

				netif_warn(priv, rx_err, dev, "Missing skb!\n");
				rx_queue->stats.rx_dropped++;

		gfar_new_rxbdp(rx_queue, bdp, newskb);
	int rx_cleaned = 0, budget_per_queue = 0, rx_cleaned_per_queue = 0;
	int tx_cleaned = 0, i, left_over_budget = budget;
	unsigned long serviced_queues = 0;

	while (num_queues && left_over_budget) {
		budget_per_queue = left_over_budget / num_queues;
		left_over_budget = 0;

			tx_cleaned += gfar_clean_tx_ring(tx_queue);
			rx_cleaned_per_queue =

			rx_cleaned += rx_cleaned_per_queue;
			if (rx_cleaned_per_queue < budget_per_queue) {
				left_over_budget = left_over_budget +
						   (budget_per_queue -
						    rx_cleaned_per_queue);
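			/*
			 * Budget redistribution: each pass splits the
			 * unspent NAPI budget evenly over the remaining
			 * queues; a queue that cleans less than its share
			 * returns the difference to left_over_budget for
			 * the next pass.
			 */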
	if (rx_cleaned < budget) {
#ifdef CONFIG_NET_POLL_CONTROLLER

static void gfar_netpoll(struct net_device *dev)

		for (i = 0; i < priv->num_grps; i++) {

			gfar_interrupt(priv->gfargrp[i].interruptTransmit,

		for (i = 0; i < priv->num_grps; i++) {

			gfar_interrupt(priv->gfargrp[i].interruptTransmit,

static irqreturn_t gfar_interrupt(int irq, void *grp_id)

		gfar_transmit(irq, grp_id);

		gfar_error(irq, grp_id);
static void adjust_link(struct net_device *dev)

	unsigned long flags;

		switch (phydev->speed) {

				   "Ack! Speed (%d) is not 10/100/1000!\n",

		gfar_write(&regs->maccfg2, tempval);
		gfar_write(&regs->ecntrl, ecntrl);
static void gfar_set_multi(struct net_device *dev)

		tempval = gfar_read(&regs->rctrl);

		gfar_write(&regs->rctrl, tempval);

		tempval = gfar_read(&regs->rctrl);

		gfar_write(&regs->rctrl, tempval);

		gfar_write(&regs->igaddr0, 0xffffffff);
		gfar_write(&regs->igaddr1, 0xffffffff);
		gfar_write(&regs->igaddr2, 0xffffffff);
		gfar_write(&regs->igaddr3, 0xffffffff);
		gfar_write(&regs->igaddr4, 0xffffffff);
		gfar_write(&regs->igaddr5, 0xffffffff);
		gfar_write(&regs->igaddr6, 0xffffffff);
		gfar_write(&regs->igaddr7, 0xffffffff);
		gfar_write(&regs->gaddr0, 0xffffffff);
		gfar_write(&regs->gaddr1, 0xffffffff);
		gfar_write(&regs->gaddr2, 0xffffffff);
		gfar_write(&regs->gaddr3, 0xffffffff);
		gfar_write(&regs->gaddr4, 0xffffffff);
		gfar_write(&regs->gaddr5, 0xffffffff);
		gfar_write(&regs->gaddr6, 0xffffffff);
		gfar_write(&regs->gaddr7, 0xffffffff);
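		/*
		 * Writing all-ones to every group (gaddr) and individual
		 * (igaddr) hash register makes any hashed address match,
		 * i.e. all multicast frames are received.
		 */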
		gfar_write(&regs->igaddr0, 0x0);
		gfar_write(&regs->igaddr1, 0x0);
		gfar_write(&regs->igaddr2, 0x0);
		gfar_write(&regs->igaddr3, 0x0);
		gfar_write(&regs->igaddr4, 0x0);
		gfar_write(&regs->igaddr5, 0x0);
		gfar_write(&regs->igaddr6, 0x0);
		gfar_write(&regs->igaddr7, 0x0);
		gfar_write(&regs->gaddr0, 0x0);
		gfar_write(&regs->gaddr1, 0x0);
		gfar_write(&regs->gaddr2, 0x0);
		gfar_write(&regs->gaddr3, 0x0);
		gfar_write(&regs->gaddr4, 0x0);
		gfar_write(&regs->gaddr5, 0x0);
		gfar_write(&regs->gaddr6, 0x0);
		gfar_write(&regs->gaddr7, 0x0);

		gfar_clear_exact_match(dev);

				gfar_set_mac_for_addr(dev, idx, ha->addr);

				gfar_set_hash_for_addr(dev, ha->addr);
static void gfar_clear_exact_match(struct net_device *dev)

	static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};

		gfar_set_mac_for_addr(dev, idx, zero_arr);
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)

	u8 whichbit = (result >> (32 - width)) & 0x1f;
	u8 whichreg = result >> (32 - width + 5);

	tempval = gfar_read(priv->hash_regs[whichreg]);

	gfar_write(priv->hash_regs[whichreg], tempval);
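/*
 * Hash indexing worked example: the top `width` bits of the 32-bit result
 * index the table; of those, the upper three pick one of eight hash
 * registers and the lower five pick a bit in it. With width = 8 and a
 * result whose top byte is 0xd3, whichreg = 6 and whichbit = 19.
 */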
static void gfar_set_mac_for_addr(struct net_device *dev, int num,
				  const u8 *addr)

	for (idx = 0; idx < ETH_ALEN; idx++)
		tmpbuf[ETH_ALEN - 1 - idx] = addr[idx];
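	/*
	 * The exact-match registers take the address byte-reversed, so
	 * e.g. 00:04:9f:01:02:03 is staged in tmpbuf as 03 02 01 9f 04 00
	 * before the two 32-bit writes below.
	 */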
	gfar_write(macptr, *((u32 *) (tmpbuf)));

	tempval = *((u32 *) (tmpbuf + 4));

	gfar_write(macptr + 1, tempval);
static irqreturn_t gfar_error(int irq, void *grp_id)

		events &= ~IEVENT_MAG;

		  "error interrupt (ievent=0x%08x imask=0x%08x)\n",
		  events, gfar_read(&regs->imask));

		dev->stats.tx_errors++;

			dev->stats.tx_window_errors++;

			dev->stats.tx_aborted_errors++;

			unsigned long flags;

				  "TX FIFO underrun, packet dropped\n");
			dev->stats.tx_dropped++;

		netif_dbg(priv, tx_err, dev, "Transmit Error\n");

		dev->stats.rx_errors++;

		netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n",
			  gfar_read(&regs->rstat));

		dev->stats.rx_errors++;

		netif_dbg(priv, rx_err, dev, "babbling RX error\n");

		netif_dbg(priv, rx_err, dev, "bus error\n");

		netif_dbg(priv, tx_err, dev, "babbling TX error\n");
		.compatible = "gianfar",

		.compatible = "fsl,etsec2",

		.name = "fsl-gianfar",

	.of_match_table = gfar_match,

	.probe = gfar_probe,
	.remove = gfar_remove,