Linux Kernel  3.7.1
gianfar.c
1 /* drivers/net/ethernet/freescale/gianfar.c
2  *
3  * Gianfar Ethernet Driver
4  * This driver is designed for the non-CPM ethernet controllers
5  * on the 85xx and 83xx family of integrated processors
6  * Based on 8260_io/fcc_enet.c
7  *
8  * Author: Andy Fleming
9  * Maintainer: Kumar Gala
10  * Modifier: Sandeep Gopalpet <[email protected]>
11  *
12  * Copyright 2002-2009, 2011 Freescale Semiconductor, Inc.
13  * Copyright 2007 MontaVista Software, Inc.
14  *
15  * This program is free software; you can redistribute it and/or modify it
16  * under the terms of the GNU General Public License as published by the
17  * Free Software Foundation; either version 2 of the License, or (at your
18  * option) any later version.
19  *
20  * Gianfar: AKA Lambda Draconis, "Dragon"
21  * RA 11 31 24.2
22  * Dec +69 19 52
23  * V 3.84
24  * B-V +1.62
25  *
26  * Theory of operation
27  *
28  * The driver is initialized through of_device. Configuration information
29  * is therefore conveyed through an OF-style device tree.
30  *
31  * The Gianfar Ethernet Controller uses a ring of buffer
32  * descriptors. The beginning is indicated by a register
33  * pointing to the physical address of the start of the ring.
34  * The end is determined by a "wrap" bit being set in the
35  * last descriptor of the ring.
36  *
37  * When a packet is received, the RXF bit in the
38  * IEVENT register is set, triggering an interrupt when the
39  * corresponding bit in the IMASK register is also set (if
40  * interrupt coalescing is active, then the interrupt may not
41  * happen immediately, but will wait until either a set number
42  * of frames or amount of time have passed). In NAPI, the
43  * interrupt handler will signal there is work to be done, and
44  * exit. This method will start at the last known empty
45  * descriptor, and process every subsequent descriptor until there
46  * are none left with data (NAPI will stop after a set number of
47  * packets to give time to other tasks, but will eventually
48  * process all the packets). The data arrives inside a
49  * pre-allocated skb, and so after the skb is passed up to the
50  * stack, a new skb must be allocated, and the address field in
51  * the buffer descriptor must be updated to indicate this new
52  * skb.
53  *
54  * When the kernel requests that a packet be transmitted, the
55  * driver starts where it left off last time, and points the
56  * descriptor at the buffer which was passed in. The driver
57  * then informs the DMA engine that there are packets ready to
58  * be transmitted. Once the controller is finished transmitting
59  * the packet, an interrupt may be triggered (under the same
60  * conditions as for reception, but depending on the TXF bit).
61  * The driver then cleans up the buffer.
62  */
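/* Editor's note -- an illustrative sketch, not part of the driver: the RX
 * half of the descriptor-ring scheme described above, in simplified form.
 * The rxbd8 fields and the RXBD_EMPTY/RXBD_WRAP flags are the driver's own;
 * hand_skb_to_stack() and attach_new_skb() are hypothetical stand-ins for
 * the real gfar_clean_rx_ring()/gfar_new_skb() logic.
 *
 *	struct rxbd8 *bdp = rx_queue->cur_rx;
 *
 *	while (!(bdp->status & RXBD_EMPTY)) {
 *		hand_skb_to_stack(bdp);            // pass the filled skb up
 *		attach_new_skb(bdp);               // point bufPtr at a fresh skb
 *		bdp->status &= RXBD_WRAP;          // keep only the wrap bit
 *		bdp->status |= RXBD_EMPTY;         // hand the BD back to hardware
 *		bdp = (bdp->status & RXBD_WRAP) ?  // wrap at the end of the ring
 *			rx_queue->rx_bd_base : bdp + 1;
 *	}
 */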
63 
64 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
65 #define DEBUG
66 
67 #include <linux/kernel.h>
68 #include <linux/string.h>
69 #include <linux/errno.h>
70 #include <linux/unistd.h>
71 #include <linux/slab.h>
72 #include <linux/interrupt.h>
73 #include <linux/init.h>
74 #include <linux/delay.h>
75 #include <linux/netdevice.h>
76 #include <linux/etherdevice.h>
77 #include <linux/skbuff.h>
78 #include <linux/if_vlan.h>
79 #include <linux/spinlock.h>
80 #include <linux/mm.h>
81 #include <linux/of_mdio.h>
82 #include <linux/of_platform.h>
83 #include <linux/ip.h>
84 #include <linux/tcp.h>
85 #include <linux/udp.h>
86 #include <linux/in.h>
87 #include <linux/net_tstamp.h>
88 
89 #include <asm/io.h>
90 #include <asm/reg.h>
91 #include <asm/irq.h>
92 #include <asm/uaccess.h>
93 #include <linux/module.h>
94 #include <linux/dma-mapping.h>
95 #include <linux/crc32.h>
96 #include <linux/mii.h>
97 #include <linux/phy.h>
98 #include <linux/phy_fixed.h>
99 #include <linux/of.h>
100 #include <linux/of_net.h>
101 
102 #include "gianfar.h"
103 
104 #define TX_TIMEOUT (1*HZ)
105 
106 const char gfar_driver_version[] = "1.3";
107 
108 static int gfar_enet_open(struct net_device *dev);
109 static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
110 static void gfar_reset_task(struct work_struct *work);
111 static void gfar_timeout(struct net_device *dev);
112 static int gfar_close(struct net_device *dev);
113 struct sk_buff *gfar_new_skb(struct net_device *dev);
114 static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
115  struct sk_buff *skb);
116 static int gfar_set_mac_address(struct net_device *dev);
117 static int gfar_change_mtu(struct net_device *dev, int new_mtu);
118 static irqreturn_t gfar_error(int irq, void *dev_id);
119 static irqreturn_t gfar_transmit(int irq, void *dev_id);
120 static irqreturn_t gfar_interrupt(int irq, void *dev_id);
121 static void adjust_link(struct net_device *dev);
122 static void init_registers(struct net_device *dev);
123 static int init_phy(struct net_device *dev);
124 static int gfar_probe(struct platform_device *ofdev);
125 static int gfar_remove(struct platform_device *ofdev);
126 static void free_skb_resources(struct gfar_private *priv);
127 static void gfar_set_multi(struct net_device *dev);
128 static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
129 static void gfar_configure_serdes(struct net_device *dev);
130 static int gfar_poll(struct napi_struct *napi, int budget);
131 #ifdef CONFIG_NET_POLL_CONTROLLER
132 static void gfar_netpoll(struct net_device *dev);
133 #endif
134 int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
135 static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
136 static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
137  int amount_pull, struct napi_struct *napi);
138 void gfar_halt(struct net_device *dev);
139 static void gfar_halt_nodisable(struct net_device *dev);
140 void gfar_start(struct net_device *dev);
141 static void gfar_clear_exact_match(struct net_device *dev);
142 static void gfar_set_mac_for_addr(struct net_device *dev, int num,
143  const u8 *addr);
144 static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
145 
146 MODULE_AUTHOR("Freescale Semiconductor, Inc");
147 MODULE_DESCRIPTION("Gianfar Ethernet Driver");
148 MODULE_LICENSE("GPL");
149 
150 static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
151  dma_addr_t buf)
152 {
153  u32 lstatus;
154 
155  bdp->bufPtr = buf;
156 
157  lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
158  if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
159  lstatus |= BD_LFLAG(RXBD_WRAP);
160 
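 /* Editor's note: eieio() orders the bufPtr store above ahead of the
  * lstatus store below, so the controller never sees an EMPTY descriptor
  * whose buffer pointer has not yet reached memory.
  */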
161  eieio();
162 
163  bdp->lstatus = lstatus;
164 }
165 
166 static int gfar_init_bds(struct net_device *ndev)
167 {
168  struct gfar_private *priv = netdev_priv(ndev);
169  struct gfar_priv_tx_q *tx_queue = NULL;
170  struct gfar_priv_rx_q *rx_queue = NULL;
171  struct txbd8 *txbdp;
172  struct rxbd8 *rxbdp;
173  int i, j;
174 
175  for (i = 0; i < priv->num_tx_queues; i++) {
176  tx_queue = priv->tx_queue[i];
177  /* Initialize some variables in our dev structure */
178  tx_queue->num_txbdfree = tx_queue->tx_ring_size;
179  tx_queue->dirty_tx = tx_queue->tx_bd_base;
180  tx_queue->cur_tx = tx_queue->tx_bd_base;
181  tx_queue->skb_curtx = 0;
182  tx_queue->skb_dirtytx = 0;
183 
184  /* Initialize Transmit Descriptor Ring */
185  txbdp = tx_queue->tx_bd_base;
186  for (j = 0; j < tx_queue->tx_ring_size; j++) {
187  txbdp->lstatus = 0;
188  txbdp->bufPtr = 0;
189  txbdp++;
190  }
191 
192  /* Set the last descriptor in the ring to indicate wrap */
193  txbdp--;
194  txbdp->status |= TXBD_WRAP;
195  }
196 
197  for (i = 0; i < priv->num_rx_queues; i++) {
198  rx_queue = priv->rx_queue[i];
199  rx_queue->cur_rx = rx_queue->rx_bd_base;
200  rx_queue->skb_currx = 0;
201  rxbdp = rx_queue->rx_bd_base;
202 
203  for (j = 0; j < rx_queue->rx_ring_size; j++) {
204  struct sk_buff *skb = rx_queue->rx_skbuff[j];
205 
206  if (skb) {
207  gfar_init_rxbdp(rx_queue, rxbdp,
208  rxbdp->bufPtr);
209  } else {
210  skb = gfar_new_skb(ndev);
211  if (!skb) {
212  netdev_err(ndev, "Can't allocate RX buffers\n");
213  goto err_rxalloc_fail;
214  }
215  rx_queue->rx_skbuff[j] = skb;
216 
217  gfar_new_rxbdp(rx_queue, rxbdp, skb);
218  }
219 
220  rxbdp++;
221  }
222 
223  }
224 
225  return 0;
226 
227 err_rxalloc_fail:
228  free_skb_resources(priv);
229  return -ENOMEM;
230 }
231 
232 static int gfar_alloc_skb_resources(struct net_device *ndev)
233 {
234  void *vaddr;
235  dma_addr_t addr;
236  int i, j, k;
237  struct gfar_private *priv = netdev_priv(ndev);
238  struct device *dev = &priv->ofdev->dev;
239  struct gfar_priv_tx_q *tx_queue = NULL;
240  struct gfar_priv_rx_q *rx_queue = NULL;
241 
242  priv->total_tx_ring_size = 0;
243  for (i = 0; i < priv->num_tx_queues; i++)
244  priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;
245 
246  priv->total_rx_ring_size = 0;
247  for (i = 0; i < priv->num_rx_queues; i++)
248  priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;
249 
250  /* Allocate memory for the buffer descriptors */
251  vaddr = dma_alloc_coherent(dev,
252  sizeof(struct txbd8) * priv->total_tx_ring_size +
253  sizeof(struct rxbd8) * priv->total_rx_ring_size,
254  &addr, GFP_KERNEL);
255  if (!vaddr) {
256  netif_err(priv, ifup, ndev,
257  "Could not allocate buffer descriptors!\n");
258  return -ENOMEM;
259  }
260 
261  for (i = 0; i < priv->num_tx_queues; i++) {
262  tx_queue = priv->tx_queue[i];
263  tx_queue->tx_bd_base = vaddr;
264  tx_queue->tx_bd_dma_base = addr;
265  tx_queue->dev = ndev;
266  /* enet DMA only understands physical addresses */
267  addr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
268  vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
269  }
270 
271  /* Start the rx descriptor ring where the tx ring leaves off */
272  for (i = 0; i < priv->num_rx_queues; i++) {
273  rx_queue = priv->rx_queue[i];
274  rx_queue->rx_bd_base = vaddr;
275  rx_queue->rx_bd_dma_base = addr;
276  rx_queue->dev = ndev;
277  addr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
278  vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
279  }
280 
281  /* Setup the skbuff rings */
282  for (i = 0; i < priv->num_tx_queues; i++) {
283  tx_queue = priv->tx_queue[i];
284  tx_queue->tx_skbuff = kmalloc(sizeof(*tx_queue->tx_skbuff) *
285  tx_queue->tx_ring_size,
286  GFP_KERNEL);
287  if (!tx_queue->tx_skbuff) {
288  netif_err(priv, ifup, ndev,
289  "Could not allocate tx_skbuff\n");
290  goto cleanup;
291  }
292 
293  for (k = 0; k < tx_queue->tx_ring_size; k++)
294  tx_queue->tx_skbuff[k] = NULL;
295  }
296 
297  for (i = 0; i < priv->num_rx_queues; i++) {
298  rx_queue = priv->rx_queue[i];
299  rx_queue->rx_skbuff = kmalloc(sizeof(*rx_queue->rx_skbuff) *
300  rx_queue->rx_ring_size,
301  GFP_KERNEL);
302 
303  if (!rx_queue->rx_skbuff) {
304  netif_err(priv, ifup, ndev,
305  "Could not allocate rx_skbuff\n");
306  goto cleanup;
307  }
308 
309  for (j = 0; j < rx_queue->rx_ring_size; j++)
310  rx_queue->rx_skbuff[j] = NULL;
311  }
312 
313  if (gfar_init_bds(ndev))
314  goto cleanup;
315 
316  return 0;
317 
318 cleanup:
319  free_skb_resources(priv);
320  return -ENOMEM;
321 }
322 
323 static void gfar_init_tx_rx_base(struct gfar_private *priv)
324 {
325  struct gfar __iomem *regs = priv->gfargrp[0].regs;
326  u32 __iomem *baddr;
327  int i;
328 
329  baddr = &regs->tbase0;
330  for (i = 0; i < priv->num_tx_queues; i++) {
331  gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
332  baddr += 2;
333  }
334 
335  baddr = &regs->rbase0;
336  for (i = 0; i < priv->num_rx_queues; i++) {
337  gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
338  baddr += 2;
339  }
340 }
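/* Editor's note: tbase0/rbase0 are the first of the per-queue base-address
 * registers; the "baddr += 2" stride (baddr is a u32 pointer) implies each
 * queue's register sits 8 bytes after the previous one -- inferred from the
 * code above rather than from the reference manual.
 */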
341 
342 static void gfar_init_mac(struct net_device *ndev)
343 {
344  struct gfar_private *priv = netdev_priv(ndev);
345  struct gfar __iomem *regs = priv->gfargrp[0].regs;
346  u32 rctrl = 0;
347  u32 tctrl = 0;
348  u32 attrs = 0;
349 
350  /* write the tx/rx base registers */
351  gfar_init_tx_rx_base(priv);
352 
353  /* Configure the coalescing support */
354  gfar_configure_coalescing(priv, 0xFF, 0xFF);
355 
356  if (priv->rx_filer_enable) {
357  rctrl |= RCTRL_FILREN;
358  /* Program the RIR0 reg with the required distribution */
359  gfar_write(&regs->rir0, DEFAULT_RIR0);
360  }
361 
362  if (ndev->features & NETIF_F_RXCSUM)
363  rctrl |= RCTRL_CHECKSUMMING;
364 
365  if (priv->extended_hash) {
366  rctrl |= RCTRL_EXTHASH;
367 
368  gfar_clear_exact_match(ndev);
369  rctrl |= RCTRL_EMEN;
370  }
371 
372  if (priv->padding) {
373  rctrl &= ~RCTRL_PAL_MASK;
374  rctrl |= RCTRL_PADDING(priv->padding);
375  }
376 
377  /* Insert receive time stamps into padding alignment bytes */
378  if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) {
379  rctrl &= ~RCTRL_PAL_MASK;
380  rctrl |= RCTRL_PADDING(8);
381  priv->padding = 8;
382  }
383 
384  /* Enable HW time stamping if requested from user space */
385  if (priv->hwts_rx_en)
386  rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;
387 
388  if (ndev->features & NETIF_F_HW_VLAN_RX)
389  rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
390 
391  /* Init rctrl based on our settings */
392  gfar_write(&regs->rctrl, rctrl);
393 
394  if (ndev->features & NETIF_F_IP_CSUM)
395  tctrl |= TCTRL_INIT_CSUM;
396 
397  if (priv->prio_sched_en)
398  tctrl |= TCTRL_TXSCHED_PRIO;
399  else {
400  tctrl |= TCTRL_TXSCHED_WRRS;
401  gfar_write(&regs->tr03wt, DEFAULT_WRRS_WEIGHT);
402  gfar_write(&regs->tr47wt, DEFAULT_WRRS_WEIGHT);
403  }
404 
405  gfar_write(&regs->tctrl, tctrl);
406 
407  /* Set the extraction length and index */
408  attrs = ATTRELI_EL(priv->rx_stash_size) |
409  ATTRELI_EI(priv->rx_stash_index);
410 
411  gfar_write(&regs->attreli, attrs);
412 
413  /* Start with defaults, and add stashing or locking
414  * depending on the appropriate variables
415  */
416  attrs = ATTR_INIT_SETTINGS;
417 
418  if (priv->bd_stash_en)
419  attrs |= ATTR_BDSTASH;
420 
421  if (priv->rx_stash_size != 0)
422  attrs |= ATTR_BUFSTASH;
423 
424  gfar_write(&regs->attr, attrs);
425 
426  gfar_write(&regs->fifo_tx_thr, priv->fifo_threshold);
427  gfar_write(&regs->fifo_tx_starve, priv->fifo_starve);
428  gfar_write(&regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);
429 }
430 
431 static struct net_device_stats *gfar_get_stats(struct net_device *dev)
432 {
433  struct gfar_private *priv = netdev_priv(dev);
434  unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
435  unsigned long tx_packets = 0, tx_bytes = 0;
436  int i;
437 
438  for (i = 0; i < priv->num_rx_queues; i++) {
439  rx_packets += priv->rx_queue[i]->stats.rx_packets;
440  rx_bytes += priv->rx_queue[i]->stats.rx_bytes;
441  rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
442  }
443 
444  dev->stats.rx_packets = rx_packets;
445  dev->stats.rx_bytes = rx_bytes;
446  dev->stats.rx_dropped = rx_dropped;
447 
448  for (i = 0; i < priv->num_tx_queues; i++) {
449  tx_bytes += priv->tx_queue[i]->stats.tx_bytes;
450  tx_packets += priv->tx_queue[i]->stats.tx_packets;
451  }
452 
453  dev->stats.tx_bytes = tx_bytes;
454  dev->stats.tx_packets = tx_packets;
455 
456  return &dev->stats;
457 }
458 
459 static const struct net_device_ops gfar_netdev_ops = {
460  .ndo_open = gfar_enet_open,
461  .ndo_start_xmit = gfar_start_xmit,
462  .ndo_stop = gfar_close,
463  .ndo_change_mtu = gfar_change_mtu,
464  .ndo_set_features = gfar_set_features,
465  .ndo_set_rx_mode = gfar_set_multi,
466  .ndo_tx_timeout = gfar_timeout,
467  .ndo_do_ioctl = gfar_ioctl,
468  .ndo_get_stats = gfar_get_stats,
469  .ndo_set_mac_address = eth_mac_addr,
470  .ndo_validate_addr = eth_validate_addr,
471 #ifdef CONFIG_NET_POLL_CONTROLLER
472  .ndo_poll_controller = gfar_netpoll,
473 #endif
474 };
475 
476 void lock_rx_qs(struct gfar_private *priv)
477 {
478  int i;
479 
480  for (i = 0; i < priv->num_rx_queues; i++)
481  spin_lock(&priv->rx_queue[i]->rxlock);
482 }
483 
484 void lock_tx_qs(struct gfar_private *priv)
485 {
486  int i;
487 
488  for (i = 0; i < priv->num_tx_queues; i++)
489  spin_lock(&priv->tx_queue[i]->txlock);
490 }
491 
492 void unlock_rx_qs(struct gfar_private *priv)
493 {
494  int i;
495 
496  for (i = 0; i < priv->num_rx_queues; i++)
497  spin_unlock(&priv->rx_queue[i]->rxlock);
498 }
499 
500 void unlock_tx_qs(struct gfar_private *priv)
501 {
502  int i;
503 
504  for (i = 0; i < priv->num_tx_queues; i++)
505  spin_unlock(&priv->tx_queue[i]->txlock);
506 }
507 
508 static bool gfar_is_vlan_on(struct gfar_private *priv)
509 {
510  return (priv->ndev->features & NETIF_F_HW_VLAN_RX) ||
511  (priv->ndev->features & NETIF_F_HW_VLAN_TX);
512 }
513 
514 /* Returns 1 if incoming frames use an FCB */
515 static inline int gfar_uses_fcb(struct gfar_private *priv)
516 {
517  return gfar_is_vlan_on(priv) ||
518  (priv->ndev->features & NETIF_F_RXCSUM) ||
519  (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER);
520 }
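/* Editor's note: the FCB is the 8-byte frame control block (GMAC_FCB_LEN)
 * the controller prepends to received frames to carry checksum, VLAN and
 * timestamp information; gfar_process_frame() strips it again via its
 * amount_pull argument.
 */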
521 
522 static void free_tx_pointers(struct gfar_private *priv)
523 {
524  int i;
525 
526  for (i = 0; i < priv->num_tx_queues; i++)
527  kfree(priv->tx_queue[i]);
528 }
529 
530 static void free_rx_pointers(struct gfar_private *priv)
531 {
532  int i;
533 
534  for (i = 0; i < priv->num_rx_queues; i++)
535  kfree(priv->rx_queue[i]);
536 }
537 
538 static void unmap_group_regs(struct gfar_private *priv)
539 {
540  int i;
541 
542  for (i = 0; i < MAXGROUPS; i++)
543  if (priv->gfargrp[i].regs)
544  iounmap(priv->gfargrp[i].regs);
545 }
546 
547 static void disable_napi(struct gfar_private *priv)
548 {
549  int i;
550 
551  for (i = 0; i < priv->num_grps; i++)
552  napi_disable(&priv->gfargrp[i].napi);
553 }
554 
555 static void enable_napi(struct gfar_private *priv)
556 {
557  int i;
558 
559  for (i = 0; i < priv->num_grps; i++)
560  napi_enable(&priv->gfargrp[i].napi);
561 }
562 
563 static int gfar_parse_group(struct device_node *np,
564  struct gfar_private *priv, const char *model)
565 {
566  u32 *queue_mask;
567 
568  priv->gfargrp[priv->num_grps].regs = of_iomap(np, 0);
569  if (!priv->gfargrp[priv->num_grps].regs)
570  return -ENOMEM;
571 
572  priv->gfargrp[priv->num_grps].interruptTransmit =
573  irq_of_parse_and_map(np, 0);
574 
575  /* If we aren't the FEC we have multiple interrupts */
576  if (model && strcasecmp(model, "FEC")) {
577  priv->gfargrp[priv->num_grps].interruptReceive =
578  irq_of_parse_and_map(np, 1);
579  priv->gfargrp[priv->num_grps].interruptError =
580  irq_of_parse_and_map(np,2);
581  if (priv->gfargrp[priv->num_grps].interruptTransmit == NO_IRQ ||
582  priv->gfargrp[priv->num_grps].interruptReceive == NO_IRQ ||
583  priv->gfargrp[priv->num_grps].interruptError == NO_IRQ)
584  return -EINVAL;
585  }
586 
587  priv->gfargrp[priv->num_grps].grp_id = priv->num_grps;
588  priv->gfargrp[priv->num_grps].priv = priv;
589  spin_lock_init(&priv->gfargrp[priv->num_grps].grplock);
590  if (priv->mode == MQ_MG_MODE) {
591  queue_mask = (u32 *)of_get_property(np, "fsl,rx-bit-map", NULL);
592  priv->gfargrp[priv->num_grps].rx_bit_map = queue_mask ?
593  *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
594  queue_mask = (u32 *)of_get_property(np, "fsl,tx-bit-map", NULL);
595  priv->gfargrp[priv->num_grps].tx_bit_map = queue_mask ?
596  *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
597  } else {
598  priv->gfargrp[priv->num_grps].rx_bit_map = 0xFF;
599  priv->gfargrp[priv->num_grps].tx_bit_map = 0xFF;
600  }
601  priv->num_grps++;
602 
603  return 0;
604 }
605 
606 static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
607 {
608  const char *model;
609  const char *ctype;
610  const void *mac_addr;
611  int err = 0, i;
612  struct net_device *dev = NULL;
613  struct gfar_private *priv = NULL;
614  struct device_node *np = ofdev->dev.of_node;
615  struct device_node *child = NULL;
616  const u32 *stash;
617  const u32 *stash_len;
618  const u32 *stash_idx;
619  unsigned int num_tx_qs, num_rx_qs;
620  u32 *tx_queues, *rx_queues;
621 
622  if (!np || !of_device_is_available(np))
623  return -ENODEV;
624 
625  /* parse the num of tx and rx queues */
626  tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL);
627  num_tx_qs = tx_queues ? *tx_queues : 1;
628 
629  if (num_tx_qs > MAX_TX_QS) {
630  pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
631  num_tx_qs, MAX_TX_QS);
632  pr_err("Cannot do alloc_etherdev, aborting\n");
633  return -EINVAL;
634  }
635 
636  rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL);
637  num_rx_qs = rx_queues ? *rx_queues : 1;
638 
639  if (num_rx_qs > MAX_RX_QS) {
640  pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
641  num_rx_qs, MAX_RX_QS);
642  pr_err("Cannot do alloc_etherdev, aborting\n");
643  return -EINVAL;
644  }
645 
646  *pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
647  dev = *pdev;
648  if (NULL == dev)
649  return -ENOMEM;
650 
651  priv = netdev_priv(dev);
652  priv->node = ofdev->dev.of_node;
653  priv->ndev = dev;
654 
655  priv->num_tx_queues = num_tx_qs;
656  netif_set_real_num_rx_queues(dev, num_rx_qs);
657  priv->num_rx_queues = num_rx_qs;
658  priv->num_grps = 0x0;
659 
660  /* Init Rx queue filer rule set linked list */
661  INIT_LIST_HEAD(&priv->rx_list.list);
662  priv->rx_list.count = 0;
663  mutex_init(&priv->rx_queue_access);
664 
665  model = of_get_property(np, "model", NULL);
666 
667  for (i = 0; i < MAXGROUPS; i++)
668  priv->gfargrp[i].regs = NULL;
669 
670  /* Parse and initialize group specific information */
671  if (of_device_is_compatible(np, "fsl,etsec2")) {
672  priv->mode = MQ_MG_MODE;
673  for_each_child_of_node(np, child) {
674  err = gfar_parse_group(child, priv, model);
675  if (err)
676  goto err_grp_init;
677  }
678  } else {
679  priv->mode = SQ_SG_MODE;
680  err = gfar_parse_group(np, priv, model);
681  if (err)
682  goto err_grp_init;
683  }
684 
685  for (i = 0; i < priv->num_tx_queues; i++)
686  priv->tx_queue[i] = NULL;
687  for (i = 0; i < priv->num_rx_queues; i++)
688  priv->rx_queue[i] = NULL;
689 
690  for (i = 0; i < priv->num_tx_queues; i++) {
691  priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
692  GFP_KERNEL);
693  if (!priv->tx_queue[i]) {
694  err = -ENOMEM;
695  goto tx_alloc_failed;
696  }
697  priv->tx_queue[i]->tx_skbuff = NULL;
698  priv->tx_queue[i]->qindex = i;
699  priv->tx_queue[i]->dev = dev;
700  spin_lock_init(&(priv->tx_queue[i]->txlock));
701  }
702 
703  for (i = 0; i < priv->num_rx_queues; i++) {
704  priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
705  GFP_KERNEL);
706  if (!priv->rx_queue[i]) {
707  err = -ENOMEM;
708  goto rx_alloc_failed;
709  }
710  priv->rx_queue[i]->rx_skbuff = NULL;
711  priv->rx_queue[i]->qindex = i;
712  priv->rx_queue[i]->dev = dev;
713  spin_lock_init(&(priv->rx_queue[i]->rxlock));
714  }
715 
716 
717  stash = of_get_property(np, "bd-stash", NULL);
718 
719  if (stash) {
720  priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
721  priv->bd_stash_en = 1;
722  }
723 
724  stash_len = of_get_property(np, "rx-stash-len", NULL);
725 
726  if (stash_len)
727  priv->rx_stash_size = *stash_len;
728 
729  stash_idx = of_get_property(np, "rx-stash-idx", NULL);
730 
731  if (stash_idx)
732  priv->rx_stash_index = *stash_idx;
733 
734  if (stash_len || stash_idx)
735  priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;
736 
737  mac_addr = of_get_mac_address(np);
738 
739  if (mac_addr)
740  memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
741 
742  if (model && !strcasecmp(model, "TSEC"))
747 
748  if (model && !strcasecmp(model, "eTSEC"))
759 
760  ctype = of_get_property(np, "phy-connection-type", NULL);
761 
762  /* We only care about rgmii-id. The rest are autodetected */
763  if (ctype && !strcmp(ctype, "rgmii-id"))
764  priv->interface = PHY_INTERFACE_MODE_RGMII_ID;
765  else
766  priv->interface = PHY_INTERFACE_MODE_MII;
767 
768  if (of_get_property(np, "fsl,magic-packet", NULL))
769  priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;
770 
771  priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
772 
773  /* Find the TBI PHY. If it's not there, we don't support SGMII */
774  priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);
775 
776  return 0;
777 
778 rx_alloc_failed:
779  free_rx_pointers(priv);
780 tx_alloc_failed:
781  free_tx_pointers(priv);
782 err_grp_init:
783  unmap_group_regs(priv);
784  free_netdev(dev);
785  return err;
786 }
787 
788 static int gfar_hwtstamp_ioctl(struct net_device *netdev,
789  struct ifreq *ifr, int cmd)
790 {
791  struct hwtstamp_config config;
792  struct gfar_private *priv = netdev_priv(netdev);
793 
794  if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
795  return -EFAULT;
796 
797  /* reserved for future extensions */
798  if (config.flags)
799  return -EINVAL;
800 
801  switch (config.tx_type) {
802  case HWTSTAMP_TX_OFF:
803  priv->hwts_tx_en = 0;
804  break;
805  case HWTSTAMP_TX_ON:
806  if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
807  return -ERANGE;
808  priv->hwts_tx_en = 1;
809  break;
810  default:
811  return -ERANGE;
812  }
813 
814  switch (config.rx_filter) {
815  case HWTSTAMP_FILTER_NONE:
816  if (priv->hwts_rx_en) {
817  stop_gfar(netdev);
818  priv->hwts_rx_en = 0;
819  startup_gfar(netdev);
820  }
821  break;
822  default:
823  if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
824  return -ERANGE;
825  if (!priv->hwts_rx_en) {
826  stop_gfar(netdev);
827  priv->hwts_rx_en = 1;
828  startup_gfar(netdev);
829  }
830  config.rx_filter = HWTSTAMP_FILTER_ALL;
831  break;
832  }
833 
834  return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
835  -EFAULT : 0;
836 }
837 
838 /* Ioctl MII Interface */
839 static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
840 {
841  struct gfar_private *priv = netdev_priv(dev);
842 
843  if (!netif_running(dev))
844  return -EINVAL;
845 
846  if (cmd == SIOCSHWTSTAMP)
847  return gfar_hwtstamp_ioctl(dev, rq, cmd);
848 
849  if (!priv->phydev)
850  return -ENODEV;
851 
852  return phy_mii_ioctl(priv->phydev, rq, cmd);
853 }
854 
855 static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs)
856 {
857  unsigned int new_bit_map = 0x0;
858  int mask = 0x1 << (max_qs - 1), i;
859 
860  for (i = 0; i < max_qs; i++) {
861  if (bit_map & mask)
862  new_bit_map = new_bit_map + (1 << i);
863  mask = mask >> 0x1;
864  }
865  return new_bit_map;
866 }
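/* Worked example (editor's note): reverse_bitmap() mirrors the low max_qs
 * bits, so with max_qs = 8, 0x80 -> 0x01, 0x01 -> 0x80 and 0x81 -> 0x81.
 */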
867 
868 static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
869  u32 class)
870 {
871  u32 rqfpr = FPR_FILER_MASK;
872  u32 rqfcr = 0x0;
873 
874  rqfar--;
876  priv->ftp_rqfpr[rqfar] = rqfpr;
877  priv->ftp_rqfcr[rqfar] = rqfcr;
878  gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
879 
880  rqfar--;
881  rqfcr = RQFCR_CMP_NOMATCH;
882  priv->ftp_rqfpr[rqfar] = rqfpr;
883  priv->ftp_rqfcr[rqfar] = rqfcr;
884  gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
885 
886  rqfar--;
888  rqfpr = class;
889  priv->ftp_rqfcr[rqfar] = rqfcr;
890  priv->ftp_rqfpr[rqfar] = rqfpr;
891  gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
892 
893  rqfar--;
895  rqfpr = class;
896  priv->ftp_rqfcr[rqfar] = rqfcr;
897  priv->ftp_rqfpr[rqfar] = rqfpr;
898  gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
899 
900  return rqfar;
901 }
902 
903 static void gfar_init_filer_table(struct gfar_private *priv)
904 {
905  int i = 0x0;
906  u32 rqfar = MAX_FILER_IDX;
907  u32 rqfcr = 0x0;
908  u32 rqfpr = FPR_FILER_MASK;
909 
910  /* Default rule */
911  rqfcr = RQFCR_CMP_MATCH;
912  priv->ftp_rqfcr[rqfar] = rqfcr;
913  priv->ftp_rqfpr[rqfar] = rqfpr;
914  gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
915 
916  rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
917  rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
918  rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
919  rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
920  rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
921  rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);
922 
923  /* cur_filer_idx indicates the first non-masked rule */
924  priv->cur_filer_idx = rqfar;
925 
926  /* Rest are masked rules */
927  rqfcr = RQFCR_CMP_NOMATCH;
928  for (i = 0; i < rqfar; i++) {
929  priv->ftp_rqfcr[i] = rqfcr;
930  priv->ftp_rqfpr[i] = rqfpr;
931  gfar_write_filer(priv, i, rqfcr, rqfpr);
932  }
933 }
934 
935 static void gfar_detect_errata(struct gfar_private *priv)
936 {
937  struct device *dev = &priv->ofdev->dev;
938  unsigned int pvr = mfspr(SPRN_PVR);
939  unsigned int svr = mfspr(SPRN_SVR);
940  unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */
941  unsigned int rev = svr & 0xffff;
942 
943  /* MPC8313 Rev 2.0 and higher; All MPC837x */
944  if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) ||
945  (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
946  priv->errata |= GFAR_ERRATA_74;
947 
948  /* MPC8313 and MPC837x all rev */
949  if ((pvr == 0x80850010 && mod == 0x80b0) ||
950  (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
951  priv->errata |= GFAR_ERRATA_76;
952 
953  /* MPC8313 and MPC837x all rev */
954  if ((pvr == 0x80850010 && mod == 0x80b0) ||
955  (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
956  priv->errata |= GFAR_ERRATA_A002;
957 
958  /* MPC8313 Rev < 2.0, MPC8548 rev 2.0 */
959  if ((pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020) ||
960  (pvr == 0x80210020 && mod == 0x8030 && rev == 0x0020))
961  priv->errata |= GFAR_ERRATA_12;
962 
963  if (priv->errata)
964  dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
965  priv->errata);
966 }
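/* Worked example (editor's note): an SVR of 0x80b00021 gives
 * mod = (0x80b0 & 0xfff6) = 0x80b0 and rev = 0x0021, so a part whose PVR is
 * 0x80850010 would satisfy the "rev >= 0x0020" test for GFAR_ERRATA_74 above;
 * the 0xfff6 mask drops the bits that distinguish the "E" variants, as the
 * comment on the mod calculation notes.
 */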
967 
968 /* Set up the ethernet device structure, private data,
969  * and anything else we need before we start
970  */
971 static int gfar_probe(struct platform_device *ofdev)
972 {
973  u32 tempval;
974  struct net_device *dev = NULL;
975  struct gfar_private *priv = NULL;
976  struct gfar __iomem *regs = NULL;
977  int err = 0, i, grp_idx = 0;
978  u32 rstat = 0, tstat = 0, rqueue = 0, tqueue = 0;
979  u32 isrg = 0;
980  u32 __iomem *baddr;
981 
982  err = gfar_of_init(ofdev, &dev);
983 
984  if (err)
985  return err;
986 
987  priv = netdev_priv(dev);
988  priv->ndev = dev;
989  priv->ofdev = ofdev;
990  priv->node = ofdev->dev.of_node;
991  SET_NETDEV_DEV(dev, &ofdev->dev);
992 
993  spin_lock_init(&priv->bflock);
994  INIT_WORK(&priv->reset_task, gfar_reset_task);
995 
996  dev_set_drvdata(&ofdev->dev, priv);
997  regs = priv->gfargrp[0].regs;
998 
999  gfar_detect_errata(priv);
1000 
1001  /* Stop the DMA engine now, in case it was running before
1002  * (The firmware could have used it, and left it running).
1003  */
1004  gfar_halt(dev);
1005 
1006  /* Reset MAC layer */
1007  gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);
1008 
1009  /* We need to delay at least 3 TX clocks */
1010  udelay(2);
1011 
1012  tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
1013  gfar_write(&regs->maccfg1, tempval);
1014 
1015  /* Initialize MACCFG2. */
1016  tempval = MACCFG2_INIT_SETTINGS;
1017  if (gfar_has_errata(priv, GFAR_ERRATA_74))
1018  tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;
1019  gfar_write(&regs->maccfg2, tempval);
1020 
1021  /* Initialize ECNTRL */
1022  gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);
1023 
1024  /* Set the dev->base_addr to the gfar reg region */
1025  dev->base_addr = (unsigned long) regs;
1026 
1027  SET_NETDEV_DEV(dev, &ofdev->dev);
1028 
1029  /* Fill in the dev structure */
1030  dev->watchdog_timeo = TX_TIMEOUT;
1031  dev->mtu = 1500;
1032  dev->netdev_ops = &gfar_netdev_ops;
1033  dev->ethtool_ops = &gfar_ethtool_ops;
1034 
1035  /* Register for napi ...We are registering NAPI for each grp */
1036  for (i = 0; i < priv->num_grps; i++)
1037  netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll,
1038  GFAR_DEV_WEIGHT);
1039 
1040  if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
1041  dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
1042  NETIF_F_RXCSUM;
1043  dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
1044  NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
1045  }
1046 
1047  if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
1048  dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
1049  dev->features |= NETIF_F_HW_VLAN_RX;
1050  }
1051 
1052  if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
1053  priv->extended_hash = 1;
1054  priv->hash_width = 9;
1055 
1056  priv->hash_regs[0] = &regs->igaddr0;
1057  priv->hash_regs[1] = &regs->igaddr1;
1058  priv->hash_regs[2] = &regs->igaddr2;
1059  priv->hash_regs[3] = &regs->igaddr3;
1060  priv->hash_regs[4] = &regs->igaddr4;
1061  priv->hash_regs[5] = &regs->igaddr5;
1062  priv->hash_regs[6] = &regs->igaddr6;
1063  priv->hash_regs[7] = &regs->igaddr7;
1064  priv->hash_regs[8] = &regs->gaddr0;
1065  priv->hash_regs[9] = &regs->gaddr1;
1066  priv->hash_regs[10] = &regs->gaddr2;
1067  priv->hash_regs[11] = &regs->gaddr3;
1068  priv->hash_regs[12] = &regs->gaddr4;
1069  priv->hash_regs[13] = &regs->gaddr5;
1070  priv->hash_regs[14] = &regs->gaddr6;
1071  priv->hash_regs[15] = &regs->gaddr7;
1072 
1073  } else {
1074  priv->extended_hash = 0;
1075  priv->hash_width = 8;
1076 
1077  priv->hash_regs[0] = &regs->gaddr0;
1078  priv->hash_regs[1] = &regs->gaddr1;
1079  priv->hash_regs[2] = &regs->gaddr2;
1080  priv->hash_regs[3] = &regs->gaddr3;
1081  priv->hash_regs[4] = &regs->gaddr4;
1082  priv->hash_regs[5] = &regs->gaddr5;
1083  priv->hash_regs[6] = &regs->gaddr6;
1084  priv->hash_regs[7] = &regs->gaddr7;
1085  }
1086 
1087  if (priv->device_flags & FSL_GIANFAR_DEV_HAS_PADDING)
1088  priv->padding = DEFAULT_PADDING;
1089  else
1090  priv->padding = 0;
1091 
1092  if (dev->features & NETIF_F_IP_CSUM ||
1093  priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
1094  dev->needed_headroom = GMAC_FCB_LEN;
1095 
1096  /* Program the isrg regs only if number of grps > 1 */
1097  if (priv->num_grps > 1) {
1098  baddr = &regs->isrg0;
1099  for (i = 0; i < priv->num_grps; i++) {
1100  isrg |= (priv->gfargrp[i].rx_bit_map << ISRG_SHIFT_RX);
1101  isrg |= (priv->gfargrp[i].tx_bit_map << ISRG_SHIFT_TX);
1102  gfar_write(baddr, isrg);
1103  baddr++;
1104  isrg = 0x0;
1105  }
1106  }
1107 
1108  /* Need to reverse the bit maps as bit_map's MSB is q0
1109  * but, for_each_set_bit parses from right to left, which
1110  * basically reverses the queue numbers
1111  */
1112  for (i = 0; i < priv->num_grps; i++) {
1113  priv->gfargrp[i].tx_bit_map =
1114  reverse_bitmap(priv->gfargrp[i].tx_bit_map, MAX_TX_QS);
1115  priv->gfargrp[i].rx_bit_map =
1116  reverse_bitmap(priv->gfargrp[i].rx_bit_map, MAX_RX_QS);
1117  }
1118 
1119  /* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
1120  * also assign queues to groups
1121  */
1122  for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) {
1123  priv->gfargrp[grp_idx].num_rx_queues = 0x0;
1124 
1125  for_each_set_bit(i, &priv->gfargrp[grp_idx].rx_bit_map,
1126  priv->num_rx_queues) {
1127  priv->gfargrp[grp_idx].num_rx_queues++;
1128  priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx];
1129  rstat = rstat | (RSTAT_CLEAR_RHALT >> i);
1130  rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
1131  }
1132  priv->gfargrp[grp_idx].num_tx_queues = 0x0;
1133 
1134  for_each_set_bit(i, &priv->gfargrp[grp_idx].tx_bit_map,
1135  priv->num_tx_queues) {
1136  priv->gfargrp[grp_idx].num_tx_queues++;
1137  priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx];
1138  tstat = tstat | (TSTAT_CLEAR_THALT >> i);
1139  tqueue = tqueue | (TQUEUE_EN0 >> i);
1140  }
1141  priv->gfargrp[grp_idx].rstat = rstat;
1142  priv->gfargrp[grp_idx].tstat = tstat;
1143  rstat = tstat = 0;
1144  }
1145 
1146  gfar_write(&regs->rqueue, rqueue);
1147  gfar_write(&regs->tqueue, tqueue);
1148 
1148 
1149  priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
1150 
1151  /* Initializing some of the rx/tx queue level parameters */
1152  for (i = 0; i < priv->num_tx_queues; i++) {
1153  priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
1154  priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
1155  priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
1156  priv->tx_queue[i]->txic = DEFAULT_TXIC;
1157  }
1158 
1159  for (i = 0; i < priv->num_rx_queues; i++) {
1160  priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
1161  priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
1162  priv->rx_queue[i]->rxic = DEFAULT_RXIC;
1163  }
1164 
1165  /* always enable rx filer */
1166  priv->rx_filer_enable = 1;
1167  /* Enable most messages by default */
1168  priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1;
1169  /* use priority h/w tx queue scheduling for single queue devices */
1170  if (priv->num_tx_queues == 1)
1171  priv->prio_sched_en = 1;
1172 
1173  /* Carrier starts down, phylib will bring it up */
1174  netif_carrier_off(dev);
1175 
1176  err = register_netdev(dev);
1177 
1178  if (err) {
1179  pr_err("%s: Cannot register net device, aborting\n", dev->name);
1180  goto register_fail;
1181  }
1182 
1183  device_init_wakeup(&dev->dev,
1184  priv->device_flags &
1185  FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
1186 
1187  /* fill out IRQ number and name fields */
1188  for (i = 0; i < priv->num_grps; i++) {
1189  if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
1190  sprintf(priv->gfargrp[i].int_name_tx, "%s%s%c%s",
1191  dev->name, "_g", '0' + i, "_tx");
1192  sprintf(priv->gfargrp[i].int_name_rx, "%s%s%c%s",
1193  dev->name, "_g", '0' + i, "_rx");
1194  sprintf(priv->gfargrp[i].int_name_er, "%s%s%c%s",
1195  dev->name, "_g", '0' + i, "_er");
1196  } else
1197  strcpy(priv->gfargrp[i].int_name_tx, dev->name);
1198  }
1199 
1200  /* Initialize the filer table */
1201  gfar_init_filer_table(priv);
1202 
1203  /* Create all the sysfs files */
1204  gfar_init_sysfs(dev);
1205 
1206  /* Print out the device info */
1207  netdev_info(dev, "mac: %pM\n", dev->dev_addr);
1208 
1209  /* Even more device info helps when determining which kernel
1210  * provided which set of benchmarks.
1211  */
1212  netdev_info(dev, "Running with NAPI enabled\n");
1213  for (i = 0; i < priv->num_rx_queues; i++)
1214  netdev_info(dev, "RX BD ring size for Q[%d]: %d\n",
1215  i, priv->rx_queue[i]->rx_ring_size);
1216  for (i = 0; i < priv->num_tx_queues; i++)
1217  netdev_info(dev, "TX BD ring size for Q[%d]: %d\n",
1218  i, priv->tx_queue[i]->tx_ring_size);
1219 
1220  return 0;
1221 
1222 register_fail:
1223  unmap_group_regs(priv);
1224  free_tx_pointers(priv);
1225  free_rx_pointers(priv);
1226  if (priv->phy_node)
1227  of_node_put(priv->phy_node);
1228  if (priv->tbi_node)
1229  of_node_put(priv->tbi_node);
1230  free_netdev(dev);
1231  return err;
1232 }
1233 
1234 static int gfar_remove(struct platform_device *ofdev)
1235 {
1236  struct gfar_private *priv = dev_get_drvdata(&ofdev->dev);
1237 
1238  if (priv->phy_node)
1239  of_node_put(priv->phy_node);
1240  if (priv->tbi_node)
1241  of_node_put(priv->tbi_node);
1242 
1243  dev_set_drvdata(&ofdev->dev, NULL);
1244 
1245  unregister_netdev(priv->ndev);
1246  unmap_group_regs(priv);
1247  free_netdev(priv->ndev);
1248 
1249  return 0;
1250 }
1251 
1252 #ifdef CONFIG_PM
1253 
1254 static int gfar_suspend(struct device *dev)
1255 {
1256  struct gfar_private *priv = dev_get_drvdata(dev);
1257  struct net_device *ndev = priv->ndev;
1258  struct gfar __iomem *regs = priv->gfargrp[0].regs;
1259  unsigned long flags;
1260  u32 tempval;
1261 
1262  int magic_packet = priv->wol_en &&
1263  (priv->device_flags &
1264  FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
1265 
1266  netif_device_detach(ndev);
1267 
1268  if (netif_running(ndev)) {
1269 
1270  local_irq_save(flags);
1271  lock_tx_qs(priv);
1272  lock_rx_qs(priv);
1273 
1274  gfar_halt_nodisable(ndev);
1275 
1276  /* Disable Tx, and Rx if wake-on-LAN is disabled. */
1277  tempval = gfar_read(&regs->maccfg1);
1278 
1279  tempval &= ~MACCFG1_TX_EN;
1280 
1281  if (!magic_packet)
1282  tempval &= ~MACCFG1_RX_EN;
1283 
1284  gfar_write(&regs->maccfg1, tempval);
1285 
1286  unlock_rx_qs(priv);
1287  unlock_tx_qs(priv);
1288  local_irq_restore(flags);
1289 
1290  disable_napi(priv);
1291 
1292  if (magic_packet) {
1293  /* Enable interrupt on Magic Packet */
1294  gfar_write(&regs->imask, IMASK_MAG);
1295 
1296  /* Enable Magic Packet mode */
1297  tempval = gfar_read(&regs->maccfg2);
1298  tempval |= MACCFG2_MPEN;
1299  gfar_write(&regs->maccfg2, tempval);
1300  } else {
1301  phy_stop(priv->phydev);
1302  }
1303  }
1304 
1305  return 0;
1306 }
1307 
1308 static int gfar_resume(struct device *dev)
1309 {
1310  struct gfar_private *priv = dev_get_drvdata(dev);
1311  struct net_device *ndev = priv->ndev;
1312  struct gfar __iomem *regs = priv->gfargrp[0].regs;
1313  unsigned long flags;
1314  u32 tempval;
1315  int magic_packet = priv->wol_en &&
1316  (priv->device_flags &
1317  FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
1318 
1319  if (!netif_running(ndev)) {
1320  netif_device_attach(ndev);
1321  return 0;
1322  }
1323 
1324  if (!magic_packet && priv->phydev)
1325  phy_start(priv->phydev);
1326 
1327  /* Disable Magic Packet mode, in case something
1328  * else woke us up.
1329  */
1330  local_irq_save(flags);
1331  lock_tx_qs(priv);
1332  lock_rx_qs(priv);
1333 
1334  tempval = gfar_read(&regs->maccfg2);
1335  tempval &= ~MACCFG2_MPEN;
1336  gfar_write(&regs->maccfg2, tempval);
1337 
1338  gfar_start(ndev);
1339 
1340  unlock_rx_qs(priv);
1341  unlock_tx_qs(priv);
1342  local_irq_restore(flags);
1343 
1344  netif_device_attach(ndev);
1345 
1346  enable_napi(priv);
1347 
1348  return 0;
1349 }
1350 
1351 static int gfar_restore(struct device *dev)
1352 {
1353  struct gfar_private *priv = dev_get_drvdata(dev);
1354  struct net_device *ndev = priv->ndev;
1355 
1356  if (!netif_running(ndev)) {
1357  netif_device_attach(ndev);
1358 
1359  return 0;
1360  }
1361 
1362  gfar_init_bds(ndev);
1363  init_registers(ndev);
1364  gfar_set_mac_address(ndev);
1365  gfar_init_mac(ndev);
1366  gfar_start(ndev);
1367 
1368  priv->oldlink = 0;
1369  priv->oldspeed = 0;
1370  priv->oldduplex = -1;
1371 
1372  if (priv->phydev)
1373  phy_start(priv->phydev);
1374 
1375  netif_device_attach(ndev);
1376  enable_napi(priv);
1377 
1378  return 0;
1379 }
1380 
1381 static struct dev_pm_ops gfar_pm_ops = {
1382  .suspend = gfar_suspend,
1383  .resume = gfar_resume,
1384  .freeze = gfar_suspend,
1385  .thaw = gfar_resume,
1386  .restore = gfar_restore,
1387 };
1388 
1389 #define GFAR_PM_OPS (&gfar_pm_ops)
1390 
1391 #else
1392 
1393 #define GFAR_PM_OPS NULL
1394 
1395 #endif
1396 
1397 /* Reads the controller's registers to determine what interface
1398  * connects it to the PHY.
1399  */
1400 static phy_interface_t gfar_get_interface(struct net_device *dev)
1401 {
1402  struct gfar_private *priv = netdev_priv(dev);
1403  struct gfar __iomem *regs = priv->gfargrp[0].regs;
1404  u32 ecntrl;
1405 
1406  ecntrl = gfar_read(&regs->ecntrl);
1407 
1408  if (ecntrl & ECNTRL_SGMII_MODE)
1409  return PHY_INTERFACE_MODE_SGMII;
1410 
1411  if (ecntrl & ECNTRL_TBI_MODE) {
1412  if (ecntrl & ECNTRL_REDUCED_MODE)
1413  return PHY_INTERFACE_MODE_RTBI;
1414  else
1415  return PHY_INTERFACE_MODE_TBI;
1416  }
1417 
1418  if (ecntrl & ECNTRL_REDUCED_MODE) {
1419  if (ecntrl & ECNTRL_REDUCED_MII_MODE) {
1420  return PHY_INTERFACE_MODE_RMII;
1421  }
1422  else {
1423  phy_interface_t interface = priv->interface;
1424 
1425  /* This isn't autodetected right now, so it must
1426  * be set by the device tree or platform code.
1427  */
1428  if (interface == PHY_INTERFACE_MODE_RGMII_ID)
1429  return PHY_INTERFACE_MODE_RGMII_ID;
1430 
1431  return PHY_INTERFACE_MODE_RGMII;
1432  }
1433  }
1434 
1435  if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
1436  return PHY_INTERFACE_MODE_GMII;
1437 
1438  return PHY_INTERFACE_MODE_MII;
1439 }
1440 
1441 
1442 /* Initializes driver's PHY state, and attaches to the PHY.
1443  * Returns 0 on success.
1444  */
1445 static int init_phy(struct net_device *dev)
1446 {
1447  struct gfar_private *priv = netdev_priv(dev);
1448  uint gigabit_support =
1449  priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
1450  SUPPORTED_1000baseT_Full : 0;
1451  phy_interface_t interface;
1452 
1453  priv->oldlink = 0;
1454  priv->oldspeed = 0;
1455  priv->oldduplex = -1;
1456 
1457  interface = gfar_get_interface(dev);
1458 
1459  priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
1460  interface);
1461  if (!priv->phydev)
1462  priv->phydev = of_phy_connect_fixed_link(dev, &adjust_link,
1463  interface);
1464  if (!priv->phydev) {
1465  dev_err(&dev->dev, "could not attach to PHY\n");
1466  return -ENODEV;
1467  }
1468 
1469  if (interface == PHY_INTERFACE_MODE_SGMII)
1470  gfar_configure_serdes(dev);
1471 
1472  /* Remove any features not supported by the controller */
1473  priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
1474  priv->phydev->advertising = priv->phydev->supported;
1475 
1476  return 0;
1477 }
1478 
1479 /* Initialize TBI PHY interface for communicating with the
1480  * SERDES lynx PHY on the chip. We communicate with this PHY
1481  * through the MDIO bus on each controller, treating it as a
1482  * "normal" PHY at the address found in the TBIPA register. We assume
1483  * that the TBIPA register is valid. Either the MDIO bus code will set
1484  * it to a value that doesn't conflict with other PHYs on the bus, or the
1485  * value doesn't matter, as there are no other PHYs on the bus.
1486  */
1487 static void gfar_configure_serdes(struct net_device *dev)
1488 {
1489  struct gfar_private *priv = netdev_priv(dev);
1490  struct phy_device *tbiphy;
1491 
1492  if (!priv->tbi_node) {
1493  dev_warn(&dev->dev, "error: SGMII mode requires that the "
1494  "device tree specify a tbi-handle\n");
1495  return;
1496  }
1497 
1498  tbiphy = of_phy_find_device(priv->tbi_node);
1499  if (!tbiphy) {
1500  dev_err(&dev->dev, "error: Could not get TBI device\n");
1501  return;
1502  }
1503 
1504  /* If the link is already up, we must already be ok, and don't need to
1505  * configure and reset the TBI<->SerDes link. Maybe U-Boot configured
1506  * everything for us? Resetting it takes the link down and requires
1507  * several seconds for it to come back.
1508  */
1509  if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS)
1510  return;
1511 
1512  /* Single clk mode, mii mode off (for serdes communication) */
1513  phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);
1514 
1515  phy_write(tbiphy, MII_ADVERTISE,
1516  ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
1517  ADVERTISE_1000XPSE_ASYM);
1518 
1519  phy_write(tbiphy, MII_BMCR,
1520  BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX |
1521  BMCR_SPEED1000);
1522 }
1523 
1524 static void init_registers(struct net_device *dev)
1525 {
1526  struct gfar_private *priv = netdev_priv(dev);
1527  struct gfar __iomem *regs = NULL;
1528  int i;
1529 
1530  for (i = 0; i < priv->num_grps; i++) {
1531  regs = priv->gfargrp[i].regs;
1532  /* Clear IEVENT */
1533  gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
1534 
1535  /* Initialize IMASK */
1536  gfar_write(&regs->imask, IMASK_INIT_CLEAR);
1537  }
1538 
1539  regs = priv->gfargrp[0].regs;
1540  /* Init hash registers to zero */
1541  gfar_write(&regs->igaddr0, 0);
1542  gfar_write(&regs->igaddr1, 0);
1543  gfar_write(&regs->igaddr2, 0);
1544  gfar_write(&regs->igaddr3, 0);
1545  gfar_write(&regs->igaddr4, 0);
1546  gfar_write(&regs->igaddr5, 0);
1547  gfar_write(&regs->igaddr6, 0);
1548  gfar_write(&regs->igaddr7, 0);
1549 
1550  gfar_write(&regs->gaddr0, 0);
1551  gfar_write(&regs->gaddr1, 0);
1552  gfar_write(&regs->gaddr2, 0);
1553  gfar_write(&regs->gaddr3, 0);
1554  gfar_write(&regs->gaddr4, 0);
1555  gfar_write(&regs->gaddr5, 0);
1556  gfar_write(&regs->gaddr6, 0);
1557  gfar_write(&regs->gaddr7, 0);
1558 
1559  /* Zero out the rmon mib registers if it has them */
1560  if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
1561  memset_io(&(regs->rmon), 0, sizeof (struct rmon_mib));
1562 
1563  /* Mask off the CAM interrupts */
1564  gfar_write(&regs->rmon.cam1, 0xffffffff);
1565  gfar_write(&regs->rmon.cam2, 0xffffffff);
1566  }
1567 
1568  /* Initialize the max receive buffer length */
1569  gfar_write(&regs->mrblr, priv->rx_buffer_size);
1570 
1571  /* Initialize the Minimum Frame Length Register */
1572  gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
1573 }
1574 
1575 static int __gfar_is_rx_idle(struct gfar_private *priv)
1576 {
1577  u32 res;
1578 
1579  /* Normally TSEC should not hang on GRS commands, so we should
1580  * actually wait for IEVENT_GRSC flag.
1581  */
1582  if (likely(!gfar_has_errata(priv, GFAR_ERRATA_A002)))
1583  return 0;
1584 
1585  /* Read the eTSEC register at offset 0xD1C. If bits 7-14 are
1586  * the same as bits 23-30, the eTSEC Rx is assumed to be idle
1587  * and the Rx can be safely reset.
1588  */
1589  res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c);
1590  res &= 0x7f807f80;
1591  if ((res & 0xffff) == (res >> 16))
1592  return 1;
1593 
1594  return 0;
1595 }
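/* Editor's note on the check above: the 0x7f807f80 mask keeps one 8-bit
 * field in the upper halfword and the corresponding field in the lower
 * halfword (the "bits 7-14" and "bits 23-30" of the comment), and
 * "(res & 0xffff) == (res >> 16)" then simply tests that the two fields
 * are equal.
 */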
1596 
1597 /* Halt the receive and transmit queues */
1598 static void gfar_halt_nodisable(struct net_device *dev)
1599 {
1600  struct gfar_private *priv = netdev_priv(dev);
1601  struct gfar __iomem *regs = NULL;
1602  u32 tempval;
1603  int i;
1604 
1605  for (i = 0; i < priv->num_grps; i++) {
1606  regs = priv->gfargrp[i].regs;
1607  /* Mask all interrupts */
1608  gfar_write(&regs->imask, IMASK_INIT_CLEAR);
1609 
1610  /* Clear all interrupts */
1611  gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
1612  }
1613 
1614  regs = priv->gfargrp[0].regs;
1615  /* Stop the DMA, and wait for it to stop */
1616  tempval = gfar_read(&regs->dmactrl);
1617  if ((tempval & (DMACTRL_GRS | DMACTRL_GTS)) !=
1618  (DMACTRL_GRS | DMACTRL_GTS)) {
1619  int ret;
1620 
1621  tempval |= (DMACTRL_GRS | DMACTRL_GTS);
1622  gfar_write(&regs->dmactrl, tempval);
1623 
1624  do {
1625  ret = spin_event_timeout(((gfar_read(&regs->ievent) &
1626  (IEVENT_GRSC | IEVENT_GTSC)) ==
1627  (IEVENT_GRSC | IEVENT_GTSC)), 1000000, 0);
1628  if (!ret && !(gfar_read(&regs->ievent) & IEVENT_GRSC))
1629  ret = __gfar_is_rx_idle(priv);
1630  } while (!ret);
1631  }
1632 }
1633 
1634 /* Halt the receive and transmit queues */
1635 void gfar_halt(struct net_device *dev)
1636 {
1637  struct gfar_private *priv = netdev_priv(dev);
1638  struct gfar __iomem *regs = priv->gfargrp[0].regs;
1639  u32 tempval;
1640 
1641  gfar_halt_nodisable(dev);
1642 
1643  /* Disable Rx and Tx */
1644  tempval = gfar_read(&regs->maccfg1);
1645  tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
1646  gfar_write(&regs->maccfg1, tempval);
1647 }
1648 
1649 static void free_grp_irqs(struct gfar_priv_grp *grp)
1650 {
1651  free_irq(grp->interruptError, grp);
1652  free_irq(grp->interruptTransmit, grp);
1653  free_irq(grp->interruptReceive, grp);
1654 }
1655 
1656 void stop_gfar(struct net_device *dev)
1657 {
1658  struct gfar_private *priv = netdev_priv(dev);
1659  unsigned long flags;
1660  int i;
1661 
1662  phy_stop(priv->phydev);
1663 
1664 
1665  /* Lock it down */
1666  local_irq_save(flags);
1667  lock_tx_qs(priv);
1668  lock_rx_qs(priv);
1669 
1670  gfar_halt(dev);
1671 
1672  unlock_rx_qs(priv);
1673  unlock_tx_qs(priv);
1674  local_irq_restore(flags);
1675 
1676  /* Free the IRQs */
1677  if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
1678  for (i = 0; i < priv->num_grps; i++)
1679  free_grp_irqs(&priv->gfargrp[i]);
1680  } else {
1681  for (i = 0; i < priv->num_grps; i++)
1682  free_irq(priv->gfargrp[i].interruptTransmit,
1683  &priv->gfargrp[i]);
1684  }
1685 
1686  free_skb_resources(priv);
1687 }
1688 
1689 static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
1690 {
1691  struct txbd8 *txbdp;
1692  struct gfar_private *priv = netdev_priv(tx_queue->dev);
1693  int i, j;
1694 
1695  txbdp = tx_queue->tx_bd_base;
1696 
1697  for (i = 0; i < tx_queue->tx_ring_size; i++) {
1698  if (!tx_queue->tx_skbuff[i])
1699  continue;
1700 
1701  dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr,
1702  txbdp->length, DMA_TO_DEVICE);
1703  txbdp->lstatus = 0;
1704  for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
1705  j++) {
1706  txbdp++;
1707  dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr,
1708  txbdp->length, DMA_TO_DEVICE);
1709  }
1710  txbdp++;
1711  dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
1712  tx_queue->tx_skbuff[i] = NULL;
1713  }
1714  kfree(tx_queue->tx_skbuff);
1715 }
1716 
1717 static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
1718 {
1719  struct rxbd8 *rxbdp;
1720  struct gfar_private *priv = netdev_priv(rx_queue->dev);
1721  int i;
1722 
1723  rxbdp = rx_queue->rx_bd_base;
1724 
1725  for (i = 0; i < rx_queue->rx_ring_size; i++) {
1726  if (rx_queue->rx_skbuff[i]) {
1727  dma_unmap_single(&priv->ofdev->dev,
1728  rxbdp->bufPtr, priv->rx_buffer_size,
1729  DMA_FROM_DEVICE);
1730  dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
1731  rx_queue->rx_skbuff[i] = NULL;
1732  }
1733  rxbdp->lstatus = 0;
1734  rxbdp->bufPtr = 0;
1735  rxbdp++;
1736  }
1737  kfree(rx_queue->rx_skbuff);
1738 }
1739 
1740 /* If there are any tx skbs or rx skbs still around, free them.
1741  * Then free tx_skbuff and rx_skbuff
1742  */
1743 static void free_skb_resources(struct gfar_private *priv)
1744 {
1745  struct gfar_priv_tx_q *tx_queue = NULL;
1746  struct gfar_priv_rx_q *rx_queue = NULL;
1747  int i;
1748 
1749  /* Go through all the buffer descriptors and free their data buffers */
1750  for (i = 0; i < priv->num_tx_queues; i++) {
1751  struct netdev_queue *txq;
1752 
1753  tx_queue = priv->tx_queue[i];
1754  txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex);
1755  if (tx_queue->tx_skbuff)
1756  free_skb_tx_queue(tx_queue);
1757  netdev_tx_reset_queue(txq);
1758  }
1759 
1760  for (i = 0; i < priv->num_rx_queues; i++) {
1761  rx_queue = priv->rx_queue[i];
1762  if (rx_queue->rx_skbuff)
1763  free_skb_rx_queue(rx_queue);
1764  }
1765 
1766  dma_free_coherent(&priv->ofdev->dev,
1767  sizeof(struct txbd8) * priv->total_tx_ring_size +
1768  sizeof(struct rxbd8) * priv->total_rx_ring_size,
1769  priv->tx_queue[0]->tx_bd_base,
1770  priv->tx_queue[0]->tx_bd_dma_base);
1771 }
1772 
1773 void gfar_start(struct net_device *dev)
1774 {
1775  struct gfar_private *priv = netdev_priv(dev);
1776  struct gfar __iomem *regs = priv->gfargrp[0].regs;
1777  u32 tempval;
1778  int i = 0;
1779 
1780  /* Enable Rx and Tx in MACCFG1 */
1781  tempval = gfar_read(&regs->maccfg1);
1782  tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
1783  gfar_write(&regs->maccfg1, tempval);
1784 
1785  /* Initialize DMACTRL to have WWR and WOP */
1786  tempval = gfar_read(&regs->dmactrl);
1787  tempval |= DMACTRL_INIT_SETTINGS;
1788  gfar_write(&regs->dmactrl, tempval);
1789 
1790  /* Make sure we aren't stopped */
1791  tempval = gfar_read(&regs->dmactrl);
1792  tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
1793  gfar_write(&regs->dmactrl, tempval);
1794 
1795  for (i = 0; i < priv->num_grps; i++) {
1796  regs = priv->gfargrp[i].regs;
1797  /* Clear THLT/RHLT, so that the DMA starts polling now */
1798  gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
1799  gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
1800  /* Unmask the interrupts we look for */
1801  gfar_write(&regs->imask, IMASK_DEFAULT);
1802  }
1803 
1804  dev->trans_start = jiffies; /* prevent tx timeout */
1805 }
1806 
1807 void gfar_configure_coalescing(struct gfar_private *priv,
1808  unsigned long tx_mask, unsigned long rx_mask)
1809 {
1810  struct gfar __iomem *regs = priv->gfargrp[0].regs;
1811  u32 __iomem *baddr;
1812  int i = 0;
1813 
1814  /* Backward compatible case ---- even if we enable
1815  * multiple queues, there's only a single reg to program
1816  */
1817  gfar_write(&regs->txic, 0);
1818  if (likely(priv->tx_queue[0]->txcoalescing))
1819  gfar_write(&regs->txic, priv->tx_queue[0]->txic);
1820 
1821  gfar_write(&regs->rxic, 0);
1822  if (unlikely(priv->rx_queue[0]->rxcoalescing))
1823  gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
1824 
1825  if (priv->mode == MQ_MG_MODE) {
1826  baddr = &regs->txic0;
1827  for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
1828  gfar_write(baddr + i, 0);
1829  if (likely(priv->tx_queue[i]->txcoalescing))
1830  gfar_write(baddr + i, priv->tx_queue[i]->txic);
1831  }
1832 
1833  baddr = &regs->rxic0;
1834  for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
1835  gfar_write(baddr + i, 0);
1836  if (likely(priv->rx_queue[i]->rxcoalescing))
1837  gfar_write(baddr + i, priv->rx_queue[i]->rxic);
1838  }
1839  }
1840 }
1841 
1842 static int register_grp_irqs(struct gfar_priv_grp *grp)
1843 {
1844  struct gfar_private *priv = grp->priv;
1845  struct net_device *dev = priv->ndev;
1846  int err;
1847 
1848  /* If the device has multiple interrupts, register for
1849  * them. Otherwise, only register for the one
1850  */
1851  if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
1852  /* Install our interrupt handlers for Error,
1853  * Transmit, and Receive
1854  */
1855  if ((err = request_irq(grp->interruptError, gfar_error,
1856  0, grp->int_name_er, grp)) < 0) {
1857  netif_err(priv, intr, dev, "Can't get IRQ %d\n",
1858  grp->interruptError);
1859 
1860  goto err_irq_fail;
1861  }
1862 
1863  if ((err = request_irq(grp->interruptTransmit, gfar_transmit,
1864  0, grp->int_name_tx, grp)) < 0) {
1865  netif_err(priv, intr, dev, "Can't get IRQ %d\n",
1866  grp->interruptTransmit);
1867  goto tx_irq_fail;
1868  }
1869 
1870  if ((err = request_irq(grp->interruptReceive, gfar_receive,
1871  0, grp->int_name_rx, grp)) < 0) {
1872  netif_err(priv, intr, dev, "Can't get IRQ %d\n",
1873  grp->interruptReceive);
1874  goto rx_irq_fail;
1875  }
1876  } else {
1877  if ((err = request_irq(grp->interruptTransmit, gfar_interrupt,
1878  0, grp->int_name_tx, grp)) < 0) {
1879  netif_err(priv, intr, dev, "Can't get IRQ %d\n",
1880  grp->interruptTransmit);
1881  goto err_irq_fail;
1882  }
1883  }
1884 
1885  return 0;
1886 
1887 rx_irq_fail:
1888  free_irq(grp->interruptTransmit, grp);
1889 tx_irq_fail:
1890  free_irq(grp->interruptError, grp);
1891 err_irq_fail:
1892  return err;
1893 
1894 }
1895 
1896 /* Bring the controller up and running */
1897 int startup_gfar(struct net_device *ndev)
1898 {
1899  struct gfar_private *priv = netdev_priv(ndev);
1900  struct gfar __iomem *regs = NULL;
1901  int err, i, j;
1902 
1903  for (i = 0; i < priv->num_grps; i++) {
1904  regs= priv->gfargrp[i].regs;
1905  gfar_write(&regs->imask, IMASK_INIT_CLEAR);
1906  }
1907 
1908  regs= priv->gfargrp[0].regs;
1909  err = gfar_alloc_skb_resources(ndev);
1910  if (err)
1911  return err;
1912 
1913  gfar_init_mac(ndev);
1914 
1915  for (i = 0; i < priv->num_grps; i++) {
1916  err = register_grp_irqs(&priv->gfargrp[i]);
1917  if (err) {
1918  for (j = 0; j < i; j++)
1919  free_grp_irqs(&priv->gfargrp[j]);
1920  goto irq_fail;
1921  }
1922  }
1923 
1924  /* Start the controller */
1925  gfar_start(ndev);
1926 
1927  phy_start(priv->phydev);
1928 
1929  gfar_configure_coalescing(priv, 0xFF, 0xFF);
1930 
1931  return 0;
1932 
1933 irq_fail:
1934  free_skb_resources(priv);
1935  return err;
1936 }
1937 
1938 /* Called when something needs to use the ethernet device
1939  * Returns 0 for success.
1940  */
1941 static int gfar_enet_open(struct net_device *dev)
1942 {
1943  struct gfar_private *priv = netdev_priv(dev);
1944  int err;
1945 
1946  enable_napi(priv);
1947 
1948  /* Initialize a bunch of registers */
1949  init_registers(dev);
1950 
1951  gfar_set_mac_address(dev);
1952 
1953  err = init_phy(dev);
1954 
1955  if (err) {
1956  disable_napi(priv);
1957  return err;
1958  }
1959 
1960  err = startup_gfar(dev);
1961  if (err) {
1962  disable_napi(priv);
1963  return err;
1964  }
1965 
1966  netif_tx_start_all_queues(dev);
1967 
1968  device_set_wakeup_enable(&dev->dev, priv->wol_en);
1969 
1970  return err;
1971 }
1972 
1973 static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
1974 {
1975  struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN);
1976 
1977  memset(fcb, 0, GMAC_FCB_LEN);
1978 
1979  return fcb;
1980 }
1981 
1982 static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
1983  int fcb_length)
1984 {
1985  /* If we're here, it's an IP packet with a TCP or UDP
1986  * payload. We set it to checksum, using a pseudo-header
1987  * we provide
1988  */
1989  u8 flags = TXFCB_DEFAULT;
1990 
1991  /* Tell the controller what the protocol is
1992  * And provide the already calculated phcs
1993  */
1994  if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
1995  flags |= TXFCB_UDP;
1996  fcb->phcs = udp_hdr(skb)->check;
1997  } else
1998  fcb->phcs = tcp_hdr(skb)->check;
1999 
2000  /* l3os is the distance between the start of the
2001  * frame (skb->data) and the start of the IP hdr.
2002  * l4os is the distance between the start of the
2003  * l3 hdr and the l4 hdr
2004  */
2005  fcb->l3os = (u16)(skb_network_offset(skb) - fcb_length);
2006  fcb->l4os = skb_network_header_len(skb);
2007 
2008  fcb->flags = flags;
2009 }
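Editor's example (not part of gianfar.c): for a plain Ethernet + IPv4 + TCP frame with the 8-byte FCB already pushed in front and no TxPAL, the offsets computed above come out to l3os = 14 and l4os = 20. The standalone sketch below reproduces that arithmetic; the header lengths are assumptions for illustration.

/* Userspace sketch of the l3os/l4os arithmetic in gfar_tx_checksum(). */
#include <stdio.h>

int main(void)
{
	int fcb_len  = 8;	/* assumed GMAC_FCB_LEN, no TxPAL */
	int eth_hlen = 14;	/* Ethernet header */
	int ip_hlen  = 20;	/* IPv4 header without options */

	/* skb->data points at the FCB, so the network header sits
	 * fcb_len + eth_hlen bytes in; l3os is measured past the FCB.
	 */
	int network_offset = fcb_len + eth_hlen;
	int l3os = network_offset - fcb_len;	/* 14 */
	int l4os = ip_hlen;			/* 20 */

	printf("l3os=%d l4os=%d\n", l3os, l4os);
	return 0;
}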
2010 
2011 static inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
2012 {
2013  fcb->flags |= TXFCB_VLN;
2014  fcb->vlctl = vlan_tx_tag_get(skb);
2015 }
2016 
2017 static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
2018  struct txbd8 *base, int ring_size)
2019 {
2020  struct txbd8 *new_bd = bdp + stride;
2021 
2022  return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
2023 }
2024 
2025 static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
2026  int ring_size)
2027 {
2028  return skip_txbd(bdp, 1, base, ring_size);
2029 }
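Editor's example (not part of gianfar.c): skip_txbd() and next_txbd() step through the descriptor ring with a wrap at the end. The same arithmetic with plain indices, in a standalone sketch (ring size and positions are illustrative):

/* Userspace sketch of the ring-wrap arithmetic in skip_txbd(). */
#include <stdio.h>

static int skip_index(int idx, int stride, int ring_size)
{
	int next = idx + stride;

	return (next >= ring_size) ? next - ring_size : next;
}

int main(void)
{
	int ring_size = 8;

	printf("%d\n", skip_index(6, 1, ring_size));	/* 7 */
	printf("%d\n", skip_index(7, 1, ring_size));	/* wraps to 0 */
	printf("%d\n", skip_index(6, 3, ring_size));	/* wraps to 1 */
	return 0;
}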
2030 
2031 /* This is called by the kernel when a frame is ready for transmission.
2032  * It is pointed to by the dev->hard_start_xmit function pointer
2033  */
2034 static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
2035 {
2036  struct gfar_private *priv = netdev_priv(dev);
2037  struct gfar_priv_tx_q *tx_queue = NULL;
2038  struct netdev_queue *txq;
2039  struct gfar __iomem *regs = NULL;
2040  struct txfcb *fcb = NULL;
2041  struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
2042  u32 lstatus;
2043  int i, rq = 0, do_tstamp = 0;
2044  u32 bufaddr;
2045  unsigned long flags;
2046  unsigned int nr_frags, nr_txbds, length, fcb_length = GMAC_FCB_LEN;
2047 
2048  /* TOE=1 frames larger than 2500 bytes may see excess delays
2049  * before start of transmission.
2050  */
2051  if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_76) &&
2052  skb->ip_summed == CHECKSUM_PARTIAL &&
2053  skb->len > 2500)) {
2054  int ret;
2055 
2056  ret = skb_checksum_help(skb);
2057  if (ret)
2058  return ret;
2059  }
2060 
2061  rq = skb->queue_mapping;
2062  tx_queue = priv->tx_queue[rq];
2063  txq = netdev_get_tx_queue(dev, rq);
2064  base = tx_queue->tx_bd_base;
2065  regs = tx_queue->grp->regs;
2066 
2067  /* check if time stamp should be generated */
2068  if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
2069  priv->hwts_tx_en)) {
2070  do_tstamp = 1;
2071  fcb_length = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
2072  }
2073 
2074  /* make space for additional header when fcb is needed */
2075  if (((skb->ip_summed == CHECKSUM_PARTIAL) ||
2076  vlan_tx_tag_present(skb) ||
2077  unlikely(do_tstamp)) &&
2078  (skb_headroom(skb) < fcb_length)) {
2079  struct sk_buff *skb_new;
2080 
2081  skb_new = skb_realloc_headroom(skb, fcb_length);
2082  if (!skb_new) {
2083  dev->stats.tx_errors++;
2084  kfree_skb(skb);
2085  return NETDEV_TX_OK;
2086  }
2087 
2088  if (skb->sk)
2089  skb_set_owner_w(skb_new, skb->sk);
2090  consume_skb(skb);
2091  skb = skb_new;
2092  }
2093 
2094  /* total number of fragments in the SKB */
2095  nr_frags = skb_shinfo(skb)->nr_frags;
2096 
2097  /* calculate the required number of TxBDs for this skb */
2098  if (unlikely(do_tstamp))
2099  nr_txbds = nr_frags + 2;
2100  else
2101  nr_txbds = nr_frags + 1;
2102 
2103  /* check if there is space to queue this packet */
2104  if (nr_txbds > tx_queue->num_txbdfree) {
2105  /* no space, stop the queue */
2106  netif_tx_stop_queue(txq);
2107  dev->stats.tx_fifo_errors++;
2108  return NETDEV_TX_BUSY;
2109  }
2110 
2111  /* Update transmit stats */
2112  tx_queue->stats.tx_bytes += skb->len;
2113  tx_queue->stats.tx_packets++;
2114 
2115  txbdp = txbdp_start = tx_queue->cur_tx;
2116  lstatus = txbdp->lstatus;
2117 
2118  /* Time stamp insertion requires one additional TxBD */
2119  if (unlikely(do_tstamp))
2120  txbdp_tstamp = txbdp = next_txbd(txbdp, base,
2121  tx_queue->tx_ring_size);
2122 
2123  if (nr_frags == 0) {
2124  if (unlikely(do_tstamp))
2125  txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_LAST |
2126  TXBD_INTERRUPT);
2127  else
2128  lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
2129  } else {
2130  /* Place the fragment addresses and lengths into the TxBDs */
2131  for (i = 0; i < nr_frags; i++) {
2132  /* Point at the next BD, wrapping as needed */
2133  txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
2134 
2135  length = skb_shinfo(skb)->frags[i].size;
2136 
2137  lstatus = txbdp->lstatus | length |
2138  BD_LFLAG(TXBD_READY);
2139 
2140  /* Handle the last BD specially */
2141  if (i == nr_frags - 1)
2142  lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
2143 
2144  bufaddr = skb_frag_dma_map(&priv->ofdev->dev,
2145  &skb_shinfo(skb)->frags[i],
2146  0,
2147  length,
2148  DMA_TO_DEVICE);
2149 
2150  /* set the TxBD length and buffer pointer */
2151  txbdp->bufPtr = bufaddr;
2152  txbdp->lstatus = lstatus;
2153  }
2154 
2155  lstatus = txbdp_start->lstatus;
2156  }
2157 
2158  /* Add TxPAL between FCB and frame if required */
2159  if (unlikely(do_tstamp)) {
2160  skb_push(skb, GMAC_TXPAL_LEN);
2161  memset(skb->data, 0, GMAC_TXPAL_LEN);
2162  }
2163 
2164  /* Set up checksumming */
2165  if (CHECKSUM_PARTIAL == skb->ip_summed) {
2166  fcb = gfar_add_fcb(skb);
2167  /* as specified by errata */
2168  if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_12) &&
2169  ((unsigned long)fcb % 0x20) > 0x18)) {
2170  __skb_pull(skb, GMAC_FCB_LEN);
2171  skb_checksum_help(skb);
2172  } else {
2173  lstatus |= BD_LFLAG(TXBD_TOE);
2174  gfar_tx_checksum(skb, fcb, fcb_length);
2175  }
2176  }
2177 
2178  if (vlan_tx_tag_present(skb)) {
2179  if (unlikely(NULL == fcb)) {
2180  fcb = gfar_add_fcb(skb);
2181  lstatus |= BD_LFLAG(TXBD_TOE);
2182  }
2183 
2184  gfar_tx_vlan(skb, fcb);
2185  }
2186 
2187  /* Setup tx hardware time stamping if requested */
2188  if (unlikely(do_tstamp)) {
2189  skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2190  if (fcb == NULL)
2191  fcb = gfar_add_fcb(skb);
2192  fcb->ptp = 1;
2193  lstatus |= BD_LFLAG(TXBD_TOE);
2194  }
2195 
2196  txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data,
2197  skb_headlen(skb), DMA_TO_DEVICE);
2198 
2199  /* If time stamping is requested one additional TxBD must be set up. The
2200  * first TxBD points to the FCB and must have a data length of
2201  * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
2202  * the full frame length.
2203  */
2204  if (unlikely(do_tstamp)) {
2205  txbdp_tstamp->bufPtr = txbdp_start->bufPtr + fcb_length;
2206  txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) |
2207  (skb_headlen(skb) - fcb_length);
2208  lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
2209  } else {
2210  lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
2211  }
2212 
2213  netdev_tx_sent_queue(txq, skb->len);
2214 
2215  /* We can work in parallel with gfar_clean_tx_ring(), except
2216  * when modifying num_txbdfree. Note that we didn't grab the lock
2217  * when we were reading the num_txbdfree and checking for available
2218  * space, that's because outside of this function it can only grow,
2219  * and once we've got needed space, it cannot suddenly disappear.
2220  *
2221  * The lock also protects us from gfar_error(), which can modify
2222  * regs->tstat and thus retrigger the transfers, which is why we
2223  * also must grab the lock before setting ready bit for the first
2224  * to be transmitted BD.
2225  */
2226  spin_lock_irqsave(&tx_queue->txlock, flags);
2227 
2228  /* The powerpc-specific eieio() is used, as wmb() has too strong
2229  * semantics (it requires synchronization between cacheable and
2230  * uncacheable mappings, which eieio doesn't provide and which we
2231  * don't need), thus requiring a more expensive sync instruction. At
2232  * some point, the set of architecture-independent barrier functions
2233  * should be expanded to include weaker barriers.
2234  */
2235  eieio();
2236 
2237  txbdp_start->lstatus = lstatus;
2238 
2239  eieio(); /* force lstatus write before tx_skbuff */
2240 
2241  tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
2242 
2243  /* Update the current skb pointer to the next entry we will use
2244  * (wrapping if necessary)
2245  */
2246  tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
2247  TX_RING_MOD_MASK(tx_queue->tx_ring_size);
2248 
2249  tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
2250 
2251  /* reduce TxBD free count */
2252  tx_queue->num_txbdfree -= (nr_txbds);
2253 
2254  /* If the next BD still needs to be cleaned up, then the bds
2255  * are full. We need to tell the kernel to stop sending us stuff.
2256  */
2257  if (!tx_queue->num_txbdfree) {
2258  netif_tx_stop_queue(txq);
2259 
2260  dev->stats.tx_fifo_errors++;
2261  }
2262 
2263  /* Tell the DMA to go go go */
2264  gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);
2265 
2266  /* Unlock the tx queue */
2267  spin_unlock_irqrestore(&tx_queue->txlock, flags);
2268 
2269  return NETDEV_TX_OK;
2270 }
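Editor's example (not part of gianfar.c): the descriptor accounting above reduces to one TxBD for the linear part, one per page fragment, and one extra when a hardware timestamp is requested; if that exceeds num_txbdfree the queue is stopped. A standalone sketch with made-up counts:

/* Userspace sketch of the TxBD accounting in gfar_start_xmit(). */
#include <stdio.h>

static int txbds_needed(int nr_frags, int want_tstamp)
{
	return nr_frags + 1 + (want_tstamp ? 1 : 0);
}

int main(void)
{
	int num_txbdfree = 4;			/* illustrative free-BD count */
	int needed = txbds_needed(3, 1);	/* 3 frags + linear + tstamp = 5 */

	if (needed > num_txbdfree)
		printf("stop the queue: need %d BDs, only %d free\n",
		       needed, num_txbdfree);
	return 0;
}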
2271 
2272 /* Stops the kernel queue, and halts the controller */
2273 static int gfar_close(struct net_device *dev)
2274 {
2275  struct gfar_private *priv = netdev_priv(dev);
2276 
2277  disable_napi(priv);
2278 
2279  cancel_work_sync(&priv->reset_task);
2280  stop_gfar(dev);
2281 
2282  /* Disconnect from the PHY */
2283  phy_disconnect(priv->phydev);
2284  priv->phydev = NULL;
2285 
2286  netif_tx_stop_all_queues(dev);
2287 
2288  return 0;
2289 }
2290 
2291 /* Changes the mac address if the controller is not running. */
2292 static int gfar_set_mac_address(struct net_device *dev)
2293 {
2294  gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
2295 
2296  return 0;
2297 }
2298 
2299 /* Check if rx parser should be activated */
2300 void gfar_check_rx_parser_mode(struct gfar_private *priv)
2301 {
2302  struct gfar __iomem *regs;
2303  u32 tempval;
2304 
2305  regs = priv->gfargrp[0].regs;
2306 
2307  tempval = gfar_read(&regs->rctrl);
2308  /* If parse is no longer required, then disable parser */
2309  if (tempval & RCTRL_REQ_PARSER)
2310  tempval |= RCTRL_PRSDEP_INIT;
2311  else
2312  tempval &= ~RCTRL_PRSDEP_INIT;
2313  gfar_write(&regs->rctrl, tempval);
2314 }
2315 
2316 /* Enables and disables VLAN insertion/extraction */
2317 void gfar_vlan_mode(struct net_device *dev, netdev_features_t features)
2318 {
2319  struct gfar_private *priv = netdev_priv(dev);
2320  struct gfar __iomem *regs = NULL;
2321  unsigned long flags;
2322  u32 tempval;
2323 
2324  regs = priv->gfargrp[0].regs;
2325  local_irq_save(flags);
2326  lock_rx_qs(priv);
2327 
2328  if (features & NETIF_F_HW_VLAN_TX) {
2329  /* Enable VLAN tag insertion */
2330  tempval = gfar_read(&regs->tctrl);
2331  tempval |= TCTRL_VLINS;
2332  gfar_write(&regs->tctrl, tempval);
2333  } else {
2334  /* Disable VLAN tag insertion */
2335  tempval = gfar_read(&regs->tctrl);
2336  tempval &= ~TCTRL_VLINS;
2337  gfar_write(&regs->tctrl, tempval);
2338  }
2339 
2340  if (features & NETIF_F_HW_VLAN_RX) {
2341  /* Enable VLAN tag extraction */
2342  tempval = gfar_read(&regs->rctrl);
2343  tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT);
2344  gfar_write(&regs->rctrl, tempval);
2345  } else {
2346  /* Disable VLAN tag extraction */
2347  tempval = gfar_read(&regs->rctrl);
2348  tempval &= ~RCTRL_VLEX;
2349  gfar_write(&regs->rctrl, tempval);
2350 
2351  gfar_check_rx_parser_mode(priv);
2352  }
2353 
2354  gfar_change_mtu(dev, dev->mtu);
2355 
2356  unlock_rx_qs(priv);
2357  local_irq_restore(flags);
2358 }
2359 
2360 static int gfar_change_mtu(struct net_device *dev, int new_mtu)
2361 {
2362  int tempsize, tempval;
2363  struct gfar_private *priv = netdev_priv(dev);
2364  struct gfar __iomem *regs = priv->gfargrp[0].regs;
2365  int oldsize = priv->rx_buffer_size;
2366  int frame_size = new_mtu + ETH_HLEN;
2367 
2368  if (gfar_is_vlan_on(priv))
2369  frame_size += VLAN_HLEN;
2370 
2371  if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
2372  netif_err(priv, drv, dev, "Invalid MTU setting\n");
2373  return -EINVAL;
2374  }
2375 
2376  if (gfar_uses_fcb(priv))
2377  frame_size += GMAC_FCB_LEN;
2378 
2379  frame_size += priv->padding;
2380 
2381  tempsize = (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
2382  INCREMENTAL_BUFFER_SIZE;
2383 
2384  /* Only stop and start the controller if it isn't already
2385  * stopped, and we changed something
2386  */
2387  if ((oldsize != tempsize) && (dev->flags & IFF_UP))
2388  stop_gfar(dev);
2389 
2390  priv->rx_buffer_size = tempsize;
2391 
2392  dev->mtu = new_mtu;
2393 
2394  gfar_write(&regs->mrblr, priv->rx_buffer_size);
2395  gfar_write(&regs->maxfrm, priv->rx_buffer_size);
2396 
2397  /* If the mtu is larger than the max size for standard
2398  * ethernet frames (ie, a jumbo frame), then set maccfg2
2399  * to allow huge frames, and to check the length
2400  */
2401  tempval = gfar_read(&regs->maccfg2);
2402 
2403  if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE ||
2404  gfar_has_errata(priv, GFAR_ERRATA_74))
2405  tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
2406  else
2407  tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
2408 
2409  gfar_write(&regs->maccfg2, tempval);
2410 
2411  if ((oldsize != tempsize) && (dev->flags & IFF_UP))
2412  startup_gfar(dev);
2413 
2414  return 0;
2415 }
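Editor's example (not part of gianfar.c): the rx buffer size above is the frame size rounded down to an INCREMENTAL_BUFFER_SIZE boundary plus one full increment, so the buffer always ends past the frame. A standalone sketch, assuming a 512-byte increment for illustration:

/* Userspace sketch of the buffer sizing in gfar_change_mtu(). */
#include <stdio.h>

int main(void)
{
	int incr = 512;		/* assumed INCREMENTAL_BUFFER_SIZE */
	int frame_size = 1536;	/* MTU 1500 plus headers, illustrative */
	int tempsize = (frame_size & ~(incr - 1)) + incr;

	printf("frame_size=%d -> rx_buffer_size=%d\n", frame_size, tempsize);
	/* prints: frame_size=1536 -> rx_buffer_size=2048 */
	return 0;
}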
2416 
2417 /* gfar_reset_task gets scheduled when a packet has not been
2418  * transmitted after a set amount of time.
2419  * For now, assume that clearing out all the structures, and
2420  * starting over will fix the problem.
2421  */
2422 static void gfar_reset_task(struct work_struct *work)
2423 {
2424  struct gfar_private *priv = container_of(work, struct gfar_private,
2425  reset_task);
2426  struct net_device *dev = priv->ndev;
2427 
2428  if (dev->flags & IFF_UP) {
2429  netif_tx_stop_all_queues(dev);
2430  stop_gfar(dev);
2431  startup_gfar(dev);
2432  netif_tx_start_all_queues(dev);
2433  }
2434 
2435  netif_tx_schedule_all(dev);
2436 }
2437 
2438 static void gfar_timeout(struct net_device *dev)
2439 {
2440  struct gfar_private *priv = netdev_priv(dev);
2441 
2442  dev->stats.tx_errors++;
2443  schedule_work(&priv->reset_task);
2444 }
2445 
2446 static void gfar_align_skb(struct sk_buff *skb)
2447 {
2448  /* We need the data buffer to be aligned properly. We will reserve
2449  * as many bytes as needed to align the data properly
2450  */
2451  skb_reserve(skb, RXBUF_ALIGNMENT -
2452  (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1)));
2453 }
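Editor's example (not part of gianfar.c): gfar_align_skb() reserves exactly enough headroom to move skb->data to the next RXBUF_ALIGNMENT boundary; an already-aligned buffer still gets a full alignment's worth reserved. A standalone sketch, assuming a 64-byte alignment for illustration:

/* Userspace sketch of the reserve amount computed by gfar_align_skb(). */
#include <stdio.h>
#include <stdint.h>

static unsigned long reserve_for(uintptr_t data, unsigned long align)
{
	return align - (data & (align - 1));
}

int main(void)
{
	unsigned long align = 64;	/* assumed RXBUF_ALIGNMENT */

	printf("%lu\n", reserve_for(0x1010, align));	/* 48 */
	printf("%lu\n", reserve_for(0x1040, align));	/* 64: already aligned */
	return 0;
}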
2454 
2455 /* Interrupt Handler for Transmit complete */
2456 static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
2457 {
2458  struct net_device *dev = tx_queue->dev;
2459  struct netdev_queue *txq;
2460  struct gfar_private *priv = netdev_priv(dev);
2461  struct gfar_priv_rx_q *rx_queue = NULL;
2462  struct txbd8 *bdp, *next = NULL;
2463  struct txbd8 *lbdp = NULL;
2464  struct txbd8 *base = tx_queue->tx_bd_base;
2465  struct sk_buff *skb;
2466  int skb_dirtytx;
2467  int tx_ring_size = tx_queue->tx_ring_size;
2468  int frags = 0, nr_txbds = 0;
2469  int i;
2470  int howmany = 0;
2471  int tqi = tx_queue->qindex;
2472  unsigned int bytes_sent = 0;
2473  u32 lstatus;
2474  size_t buflen;
2475 
2476  rx_queue = priv->rx_queue[tqi];
2477  txq = netdev_get_tx_queue(dev, tqi);
2478  bdp = tx_queue->dirty_tx;
2479  skb_dirtytx = tx_queue->skb_dirtytx;
2480 
2481  while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
2482  unsigned long flags;
2483 
2484  frags = skb_shinfo(skb)->nr_frags;
2485 
2486  /* When time stamping, one additional TxBD must be freed.
2487  * Also, we need to dma_unmap_single() the TxPAL.
2488  */
2489  if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
2490  nr_txbds = frags + 2;
2491  else
2492  nr_txbds = frags + 1;
2493 
2494  lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);
2495 
2496  lstatus = lbdp->lstatus;
2497 
2498  /* Only clean completed frames */
2499  if ((lstatus & BD_LFLAG(TXBD_READY)) &&
2500  (lstatus & BD_LENGTH_MASK))
2501  break;
2502 
2503  if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
2504  next = next_txbd(bdp, base, tx_ring_size);
2505  buflen = next->length + GMAC_FCB_LEN + GMAC_TXPAL_LEN;
2506  } else
2507  buflen = bdp->length;
2508 
2509  dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
2510  buflen, DMA_TO_DEVICE);
2511 
2512  if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
2513  struct skb_shared_hwtstamps shhwtstamps;
2514  u64 *ns = (u64*) (((u32)skb->data + 0x10) & ~0x7);
2515 
2516  memset(&shhwtstamps, 0, sizeof(shhwtstamps));
2517  shhwtstamps.hwtstamp = ns_to_ktime(*ns);
2518  skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN);
2519  skb_tstamp_tx(skb, &shhwtstamps);
2520  bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
2521  bdp = next;
2522  }
2523 
2524  bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
2525  bdp = next_txbd(bdp, base, tx_ring_size);
2526 
2527  for (i = 0; i < frags; i++) {
2528  dma_unmap_page(&priv->ofdev->dev, bdp->bufPtr,
2529  bdp->length, DMA_TO_DEVICE);
2530  bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
2531  bdp = next_txbd(bdp, base, tx_ring_size);
2532  }
2533 
2534  bytes_sent += skb->len;
2535 
2536  dev_kfree_skb_any(skb);
2537 
2538  tx_queue->tx_skbuff[skb_dirtytx] = NULL;
2539 
2540  skb_dirtytx = (skb_dirtytx + 1) &
2541  TX_RING_MOD_MASK(tx_ring_size);
2542 
2543  howmany++;
2544  spin_lock_irqsave(&tx_queue->txlock, flags);
2545  tx_queue->num_txbdfree += nr_txbds;
2546  spin_unlock_irqrestore(&tx_queue->txlock, flags);
2547  }
2548 
2549  /* If we freed a buffer, we can restart transmission, if necessary */
2550  if (netif_tx_queue_stopped(txq) && tx_queue->num_txbdfree)
2551  netif_wake_subqueue(dev, tqi);
2552 
2553  /* Update dirty indicators */
2554  tx_queue->skb_dirtytx = skb_dirtytx;
2555  tx_queue->dirty_tx = bdp;
2556 
2557  netdev_tx_completed_queue(txq, howmany, bytes_sent);
2558 
2559  return howmany;
2560 }
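Editor's example (not part of gianfar.c): the timestamp lookup above ((skb->data + 0x10) & ~0x7) steps 16 bytes into the buffer and rounds down to an 8-byte boundary before reading the 64-bit nanosecond value. A standalone sketch of just that address arithmetic, with a made-up buffer address:

/* Userspace sketch of the timestamp address arithmetic in gfar_clean_tx_ring(). */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uintptr_t data = 0x1004;	/* illustrative buffer address */
	uintptr_t ts   = (data + 0x10) & ~(uintptr_t)0x7;

	printf("data=%#lx timestamp at %#lx\n",
	       (unsigned long)data, (unsigned long)ts);
	/* 0x1004 + 0x10 = 0x1014, rounded down to 0x1010 */
	return 0;
}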
2561 
2562 static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp)
2563 {
2564  unsigned long flags;
2565 
2566  spin_lock_irqsave(&gfargrp->grplock, flags);
2567  if (napi_schedule_prep(&gfargrp->napi)) {
2568  gfar_write(&gfargrp->regs->imask, IMASK_RTX_DISABLED);
2569  __napi_schedule(&gfargrp->napi);
2570  } else {
2571  /* Clear IEVENT, so interrupts aren't called again
2572  * because of the packets that have already arrived.
2573  */
2574  gfar_write(&gfargrp->regs->ievent, IEVENT_RTX_MASK);
2575  }
2576  spin_unlock_irqrestore(&gfargrp->grplock, flags);
2577 
2578 }
2579 
2580 /* Interrupt Handler for Transmit complete */
2581 static irqreturn_t gfar_transmit(int irq, void *grp_id)
2582 {
2583  gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
2584  return IRQ_HANDLED;
2585 }
2586 
2587 static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
2588  struct sk_buff *skb)
2589 {
2590  struct net_device *dev = rx_queue->dev;
2591  struct gfar_private *priv = netdev_priv(dev);
2592  dma_addr_t buf;
2593 
2594  buf = dma_map_single(&priv->ofdev->dev, skb->data,
2595  priv->rx_buffer_size, DMA_FROM_DEVICE);
2596  gfar_init_rxbdp(rx_queue, bdp, buf);
2597 }
2598 
2599 static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
2600 {
2601  struct gfar_private *priv = netdev_priv(dev);
2602  struct sk_buff *skb;
2603 
2604  skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT);
2605  if (!skb)
2606  return NULL;
2607 
2608  gfar_align_skb(skb);
2609 
2610  return skb;
2611 }
2612 
2613 struct sk_buff *gfar_new_skb(struct net_device *dev)
2614 {
2615  return gfar_alloc_skb(dev);
2616 }
2617 
2618 static inline void count_errors(unsigned short status, struct net_device *dev)
2619 {
2620  struct gfar_private *priv = netdev_priv(dev);
2621  struct net_device_stats *stats = &dev->stats;
2622  struct gfar_extra_stats *estats = &priv->extra_stats;
2623 
2624  /* If the packet was truncated, none of the other errors matter */
2625  if (status & RXBD_TRUNCATED) {
2626  stats->rx_length_errors++;
2627 
2628  estats->rx_trunc++;
2629 
2630  return;
2631  }
2632  /* Count the errors, if there were any */
2633  if (status & (RXBD_LARGE | RXBD_SHORT)) {
2634  stats->rx_length_errors++;
2635 
2636  if (status & RXBD_LARGE)
2637  estats->rx_large++;
2638  else
2639  estats->rx_short++;
2640  }
2641  if (status & RXBD_NONOCTET) {
2642  stats->rx_frame_errors++;
2643  estats->rx_nonoctet++;
2644  }
2645  if (status & RXBD_CRCERR) {
2646  estats->rx_crcerr++;
2647  stats->rx_crc_errors++;
2648  }
2649  if (status & RXBD_OVERRUN) {
2650  estats->rx_overrun++;
2651  stats->rx_crc_errors++;
2652  }
2653 }
2654 
2655 irqreturn_t gfar_receive(int irq, void *grp_id)
2656 {
2657  gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
2658  return IRQ_HANDLED;
2659 }
2660 
2661 static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
2662 {
2663  /* If valid headers were found, and valid sums
2664  * were verified, then we tell the kernel that no
2665  * checksumming is necessary. Otherwise, the stack must verify it.
2666  */
2667  if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
2668  skb->ip_summed = CHECKSUM_UNNECESSARY;
2669  else
2670  skb_checksum_none_assert(skb);
2671 }
2672 
2673 
2674 /* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
2675 static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
2676  int amount_pull, struct napi_struct *napi)
2677 {
2678  struct gfar_private *priv = netdev_priv(dev);
2679  struct rxfcb *fcb = NULL;
2680 
2681  gro_result_t ret;
2682 
2683  /* The FCB, if present, sits at the beginning of the frame */
2684  fcb = (struct rxfcb *)skb->data;
2685 
2686  /* Remove the FCB from the skb
2687  * Remove the padded bytes, if there are any
2688  */
2689  if (amount_pull) {
2690  skb_record_rx_queue(skb, fcb->rq);
2691  skb_pull(skb, amount_pull);
2692  }
2693 
2694  /* Get receive timestamp from the skb */
2695  if (priv->hwts_rx_en) {
2696  struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
2697  u64 *ns = (u64 *) skb->data;
2698 
2699  memset(shhwtstamps, 0, sizeof(*shhwtstamps));
2700  shhwtstamps->hwtstamp = ns_to_ktime(*ns);
2701  }
2702 
2703  if (priv->padding)
2704  skb_pull(skb, priv->padding);
2705 
2706  if (dev->features & NETIF_F_RXCSUM)
2707  gfar_rx_checksum(skb, fcb);
2708 
2709  /* Tell the skb what kind of packet this is */
2710  skb->protocol = eth_type_trans(skb, dev);
2711 
2712  /* We need to check for NETIF_F_HW_VLAN_RX here.
2713  * Even if vlan rx accel is disabled, on some chips
2714  * RXFCB_VLN is pseudo-randomly set.
2715  */
2716  if (dev->features & NETIF_F_HW_VLAN_RX &&
2717  fcb->flags & RXFCB_VLN)
2718  __vlan_hwaccel_put_tag(skb, fcb->vlctl);
2719 
2720  /* Send the packet up the stack */
2721  ret = napi_gro_receive(napi, skb);
2722 
2723  if (GRO_DROP == ret)
2724  priv->extra_stats.kernel_dropped++;
2725 
2726  return 0;
2727 }
2728 
2729 /* gfar_clean_rx_ring() -- Processes each frame in the rx ring
2730  * until the budget/quota has been reached. Returns the number
2731  * of frames handled
2732  */
2733 int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
2734 {
2735  struct net_device *dev = rx_queue->dev;
2736  struct rxbd8 *bdp, *base;
2737  struct sk_buff *skb;
2738  int pkt_len;
2739  int amount_pull;
2740  int howmany = 0;
2741  struct gfar_private *priv = netdev_priv(dev);
2742 
2743  /* Get the first full descriptor */
2744  bdp = rx_queue->cur_rx;
2745  base = rx_queue->rx_bd_base;
2746 
2747  amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0);
2748 
2749  while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
2750  struct sk_buff *newskb;
2751 
2752  rmb();
2753 
2754  /* Add another skb for the future */
2755  newskb = gfar_new_skb(dev);
2756 
2757  skb = rx_queue->rx_skbuff[rx_queue->skb_currx];
2758 
2759  dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
2760  priv->rx_buffer_size, DMA_FROM_DEVICE);
2761 
2762  if (unlikely(!(bdp->status & RXBD_ERR) &&
2763  bdp->length > priv->rx_buffer_size))
2764  bdp->status = RXBD_LARGE;
2765 
2766  /* We drop the frame if we failed to allocate a new buffer */
2767  if (unlikely(!newskb || !(bdp->status & RXBD_LAST) ||
2768  bdp->status & RXBD_ERR)) {
2769  count_errors(bdp->status, dev);
2770 
2771  if (unlikely(!newskb))
2772  newskb = skb;
2773  else if (skb)
2774  dev_kfree_skb(skb);
2775  } else {
2776  /* Increment the number of packets */
2777  rx_queue->stats.rx_packets++;
2778  howmany++;
2779 
2780  if (likely(skb)) {
2781  pkt_len = bdp->length - ETH_FCS_LEN;
2782  /* Remove the FCS from the packet length */
2783  skb_put(skb, pkt_len);
2784  rx_queue->stats.rx_bytes += pkt_len;
2785  skb_record_rx_queue(skb, rx_queue->qindex);
2786  gfar_process_frame(dev, skb, amount_pull,
2787  &rx_queue->grp->napi);
2788 
2789  } else {
2790  netif_warn(priv, rx_err, dev, "Missing skb!\n");
2791  rx_queue->stats.rx_dropped++;
2792  priv->extra_stats.rx_skbmissing++;
2793  }
2794 
2795  }
2796 
2797  rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;
2798 
2799  /* Setup the new bdp */
2800  gfar_new_rxbdp(rx_queue, bdp, newskb);
2801 
2802  /* Update to the next pointer */
2803  bdp = next_bd(bdp, base, rx_queue->rx_ring_size);
2804 
2805  /* update to point at the next skb */
2806  rx_queue->skb_currx = (rx_queue->skb_currx + 1) &
2807  RX_RING_MOD_MASK(rx_queue->rx_ring_size);
2808  }
2809 
2810  /* Update the current rxbd pointer to be the next one */
2811  rx_queue->cur_rx = bdp;
2812 
2813  return howmany;
2814 }
2815 
2816 static int gfar_poll(struct napi_struct *napi, int budget)
2817 {
2818  struct gfar_priv_grp *gfargrp =
2819  container_of(napi, struct gfar_priv_grp, napi);
2820  struct gfar_private *priv = gfargrp->priv;
2821  struct gfar __iomem *regs = gfargrp->regs;
2822  struct gfar_priv_tx_q *tx_queue = NULL;
2823  struct gfar_priv_rx_q *rx_queue = NULL;
2824  int rx_cleaned = 0, budget_per_queue = 0, rx_cleaned_per_queue = 0;
2825  int tx_cleaned = 0, i, left_over_budget = budget;
2826  unsigned long serviced_queues = 0;
2827  int num_queues = 0;
2828 
2829  num_queues = gfargrp->num_rx_queues;
2830  budget_per_queue = budget/num_queues;
2831 
2832  /* Clear IEVENT, so interrupts aren't called again
2833  * because of the packets that have already arrived
2834  */
2835  gfar_write(&regs->ievent, IEVENT_RTX_MASK);
2836 
2837  while (num_queues && left_over_budget) {
2838  budget_per_queue = left_over_budget/num_queues;
2839  left_over_budget = 0;
2840 
2841  for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
2842  if (test_bit(i, &serviced_queues))
2843  continue;
2844  rx_queue = priv->rx_queue[i];
2845  tx_queue = priv->tx_queue[rx_queue->qindex];
2846 
2847  tx_cleaned += gfar_clean_tx_ring(tx_queue);
2848  rx_cleaned_per_queue =
2849  gfar_clean_rx_ring(rx_queue, budget_per_queue);
2850  rx_cleaned += rx_cleaned_per_queue;
2851  if (rx_cleaned_per_queue < budget_per_queue) {
2852  left_over_budget = left_over_budget +
2853  (budget_per_queue -
2854  rx_cleaned_per_queue);
2855  set_bit(i, &serviced_queues);
2856  num_queues--;
2857  }
2858  }
2859  }
2860 
2861  if (tx_cleaned)
2862  return budget;
2863 
2864  if (rx_cleaned < budget) {
2865  napi_complete(napi);
2866 
2867  /* Clear the halt bit in RSTAT */
2868  gfar_write(&regs->rstat, gfargrp->rstat);
2869 
2870  gfar_write(&regs->imask, IMASK_DEFAULT);
2871 
2872  /* If we are coalescing interrupts, update the timer
2873  * Otherwise, clear it
2874  */
2875  gfar_configure_coalescing(priv, gfargrp->rx_bit_map,
2876  gfargrp->tx_bit_map);
2877  }
2878 
2879  return rx_cleaned;
2880 }
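Editor's example (not part of gianfar.c): gfar_poll() splits the NAPI budget evenly across the group's rx queues and pools whatever a queue leaves unused so it can be handed to the queues that still have work. A standalone sketch of that redistribution loop, with made-up per-queue workloads:

/* Userspace sketch of the budget redistribution loop in gfar_poll(). */
#include <stdio.h>

int main(void)
{
	int budget = 64, num_queues = 2;
	int work[2] = { 10, 100 };	/* frames pending per queue, illustrative */
	int left_over = budget;

	while (num_queues && left_over) {
		int per_queue = left_over / num_queues;
		int i;

		left_over = 0;
		for (i = 0; i < 2; i++) {
			int done;

			if (work[i] < 0)
				continue;	/* queue already serviced */
			done = work[i] < per_queue ? work[i] : per_queue;
			work[i] -= done;
			if (done < per_queue) {
				left_over += per_queue - done;
				work[i] = -1;	/* mark as serviced */
				num_queues--;
			}
			printf("queue %d cleaned %d frames\n", i, done);
		}
	}
	return 0;
}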
2881 
2882 #ifdef CONFIG_NET_POLL_CONTROLLER
2883 /* Polling 'interrupt' - used by things like netconsole to send skbs
2884  * without having to re-enable interrupts. It's not called while
2885  * the interrupt routine is executing.
2886  */
2887 static void gfar_netpoll(struct net_device *dev)
2888 {
2889  struct gfar_private *priv = netdev_priv(dev);
2890  int i;
2891 
2892  /* If the device has multiple interrupts, run tx/rx */
2893  if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
2894  for (i = 0; i < priv->num_grps; i++) {
2895  disable_irq(priv->gfargrp[i].interruptTransmit);
2896  disable_irq(priv->gfargrp[i].interruptReceive);
2897  disable_irq(priv->gfargrp[i].interruptError);
2898  gfar_interrupt(priv->gfargrp[i].interruptTransmit,
2899  &priv->gfargrp[i]);
2900  enable_irq(priv->gfargrp[i].interruptError);
2901  enable_irq(priv->gfargrp[i].interruptReceive);
2902  enable_irq(priv->gfargrp[i].interruptTransmit);
2903  }
2904  } else {
2905  for (i = 0; i < priv->num_grps; i++) {
2906  disable_irq(priv->gfargrp[i].interruptTransmit);
2907  gfar_interrupt(priv->gfargrp[i].interruptTransmit,
2908  &priv->gfargrp[i]);
2909  enable_irq(priv->gfargrp[i].interruptTransmit);
2910  }
2911  }
2912 }
2913 #endif
2914 
2915 /* The interrupt handler for devices with one interrupt */
2916 static irqreturn_t gfar_interrupt(int irq, void *grp_id)
2917 {
2918  struct gfar_priv_grp *gfargrp = grp_id;
2919 
2920  /* Save ievent for future reference */
2921  u32 events = gfar_read(&gfargrp->regs->ievent);
2922 
2923  /* Check for reception */
2924  if (events & IEVENT_RX_MASK)
2925  gfar_receive(irq, grp_id);
2926 
2927  /* Check for transmit completion */
2928  if (events & IEVENT_TX_MASK)
2929  gfar_transmit(irq, grp_id);
2930 
2931  /* Check for errors */
2932  if (events & IEVENT_ERR_MASK)
2933  gfar_error(irq, grp_id);
2934 
2935  return IRQ_HANDLED;
2936 }
2937 
2938 /* Called every time the controller might need to be made
2939  * aware of new link state. The PHY code conveys this
2940  * information through variables in the phydev structure, and this
2941  * function converts those variables into the appropriate
2942  * register values, and can bring down the device if needed.
2943  */
2944 static void adjust_link(struct net_device *dev)
2945 {
2946  struct gfar_private *priv = netdev_priv(dev);
2947  struct gfar __iomem *regs = priv->gfargrp[0].regs;
2948  unsigned long flags;
2949  struct phy_device *phydev = priv->phydev;
2950  int new_state = 0;
2951 
2952  local_irq_save(flags);
2953  lock_tx_qs(priv);
2954 
2955  if (phydev->link) {
2956  u32 tempval = gfar_read(&regs->maccfg2);
2957  u32 ecntrl = gfar_read(&regs->ecntrl);
2958 
2959  /* Now we make sure that we can be in full duplex mode.
2960  * If not, we operate in half-duplex mode.
2961  */
2962  if (phydev->duplex != priv->oldduplex) {
2963  new_state = 1;
2964  if (!(phydev->duplex))
2965  tempval &= ~(MACCFG2_FULL_DUPLEX);
2966  else
2967  tempval |= MACCFG2_FULL_DUPLEX;
2968 
2969  priv->oldduplex = phydev->duplex;
2970  }
2971 
2972  if (phydev->speed != priv->oldspeed) {
2973  new_state = 1;
2974  switch (phydev->speed) {
2975  case 1000:
2976  tempval =
2977  ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
2978 
2979  ecntrl &= ~(ECNTRL_R100);
2980  break;
2981  case 100:
2982  case 10:
2983  tempval =
2984  ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
2985 
2986  /* Reduced mode distinguishes
2987  * between 10 and 100
2988  */
2989  if (phydev->speed == SPEED_100)
2990  ecntrl |= ECNTRL_R100;
2991  else
2992  ecntrl &= ~(ECNTRL_R100);
2993  break;
2994  default:
2995  netif_warn(priv, link, dev,
2996  "Ack! Speed (%d) is not 10/100/1000!\n",
2997  phydev->speed);
2998  break;
2999  }
3000 
3001  priv->oldspeed = phydev->speed;
3002  }
3003 
3004  gfar_write(&regs->maccfg2, tempval);
3005  gfar_write(&regs->ecntrl, ecntrl);
3006 
3007  if (!priv->oldlink) {
3008  new_state = 1;
3009  priv->oldlink = 1;
3010  }
3011  } else if (priv->oldlink) {
3012  new_state = 1;
3013  priv->oldlink = 0;
3014  priv->oldspeed = 0;
3015  priv->oldduplex = -1;
3016  }
3017 
3018  if (new_state && netif_msg_link(priv))
3019  phy_print_status(phydev);
3020  unlock_tx_qs(priv);
3021  local_irq_restore(flags);
3022 }
3023 
3024 /* Update the hash table based on the current list of multicast
3025  * addresses we subscribe to. Also, change the promiscuity of
3026  * the device based on the flags (this function is called
3027  * whenever dev->flags is changed)
3028  */
3029 static void gfar_set_multi(struct net_device *dev)
3030 {
3031  struct netdev_hw_addr *ha;
3032  struct gfar_private *priv = netdev_priv(dev);
3033  struct gfar __iomem *regs = priv->gfargrp[0].regs;
3034  u32 tempval;
3035 
3036  if (dev->flags & IFF_PROMISC) {
3037  /* Set RCTRL to PROM */
3038  tempval = gfar_read(&regs->rctrl);
3039  tempval |= RCTRL_PROM;
3040  gfar_write(&regs->rctrl, tempval);
3041  } else {
3042  /* Set RCTRL to not PROM */
3043  tempval = gfar_read(&regs->rctrl);
3044  tempval &= ~(RCTRL_PROM);
3045  gfar_write(&regs->rctrl, tempval);
3046  }
3047 
3048  if (dev->flags & IFF_ALLMULTI) {
3049  /* Set the hash to rx all multicast frames */
3050  gfar_write(&regs->igaddr0, 0xffffffff);
3051  gfar_write(&regs->igaddr1, 0xffffffff);
3052  gfar_write(&regs->igaddr2, 0xffffffff);
3053  gfar_write(&regs->igaddr3, 0xffffffff);
3054  gfar_write(&regs->igaddr4, 0xffffffff);
3055  gfar_write(&regs->igaddr5, 0xffffffff);
3056  gfar_write(&regs->igaddr6, 0xffffffff);
3057  gfar_write(&regs->igaddr7, 0xffffffff);
3058  gfar_write(&regs->gaddr0, 0xffffffff);
3059  gfar_write(&regs->gaddr1, 0xffffffff);
3060  gfar_write(&regs->gaddr2, 0xffffffff);
3061  gfar_write(&regs->gaddr3, 0xffffffff);
3062  gfar_write(&regs->gaddr4, 0xffffffff);
3063  gfar_write(&regs->gaddr5, 0xffffffff);
3064  gfar_write(&regs->gaddr6, 0xffffffff);
3065  gfar_write(&regs->gaddr7, 0xffffffff);
3066  } else {
3067  int em_num;
3068  int idx;
3069 
3070  /* zero out the hash */
3071  gfar_write(&regs->igaddr0, 0x0);
3072  gfar_write(&regs->igaddr1, 0x0);
3073  gfar_write(&regs->igaddr2, 0x0);
3074  gfar_write(&regs->igaddr3, 0x0);
3075  gfar_write(&regs->igaddr4, 0x0);
3076  gfar_write(&regs->igaddr5, 0x0);
3077  gfar_write(&regs->igaddr6, 0x0);
3078  gfar_write(&regs->igaddr7, 0x0);
3079  gfar_write(&regs->gaddr0, 0x0);
3080  gfar_write(&regs->gaddr1, 0x0);
3081  gfar_write(&regs->gaddr2, 0x0);
3082  gfar_write(&regs->gaddr3, 0x0);
3083  gfar_write(&regs->gaddr4, 0x0);
3084  gfar_write(&regs->gaddr5, 0x0);
3085  gfar_write(&regs->gaddr6, 0x0);
3086  gfar_write(&regs->gaddr7, 0x0);
3087 
3088  /* If we have extended hash tables, we need to
3089  * clear the exact match registers to prepare for
3090  * setting them
3091  */
3092  if (priv->extended_hash) {
3093  em_num = GFAR_EM_NUM + 1;
3094  gfar_clear_exact_match(dev);
3095  idx = 1;
3096  } else {
3097  idx = 0;
3098  em_num = 0;
3099  }
3100 
3101  if (netdev_mc_empty(dev))
3102  return;
3103 
3104  /* Parse the list, and set the appropriate bits */
3105  netdev_for_each_mc_addr(ha, dev) {
3106  if (idx < em_num) {
3107  gfar_set_mac_for_addr(dev, idx, ha->addr);
3108  idx++;
3109  } else
3110  gfar_set_hash_for_addr(dev, ha->addr);
3111  }
3112  }
3113 }
3114 
3115 
3116 /* Clears each of the exact match registers to zero, so they
3117  * don't interfere with normal reception
3118  */
3119 static void gfar_clear_exact_match(struct net_device *dev)
3120 {
3121  int idx;
3122  static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
3123 
3124  for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
3125  gfar_set_mac_for_addr(dev, idx, zero_arr);
3126 }
3127 
3128 /* Set the appropriate hash bit for the given addr */
3129 /* The algorithm works like so:
3130  * 1) Take the Destination Address (ie the multicast address), and
3131  * do a CRC on it (little endian), and reverse the bits of the
3132  * result.
3133  * 2) Use the 8 most significant bits as a hash into a 256-entry
3134  * table. The table is controlled through 8 32-bit registers:
3135  * gaddr0-7. gaddr0's MSB is entry 0, and gaddr7's LSB is
3136  * entry 255. This means that the 3 most significant bits of the
3137  * hash index indicate which gaddr register to use, and the 5 other bits
3138  * indicate which bit (assuming an IBM numbering scheme, which
3139  * for PowerPC (tm) is usually the case) in the register holds
3140  * the entry.
3141  */
3142 static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
3143 {
3144  u32 tempval;
3145  struct gfar_private *priv = netdev_priv(dev);
3146  u32 result = ether_crc(ETH_ALEN, addr);
3147  int width = priv->hash_width;
3148  u8 whichbit = (result >> (32 - width)) & 0x1f;
3149  u8 whichreg = result >> (32 - width + 5);
3150  u32 value = (1 << (31-whichbit));
3151 
3152  tempval = gfar_read(priv->hash_regs[whichreg]);
3153  tempval |= value;
3154  gfar_write(priv->hash_regs[whichreg], tempval);
3155 }
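Editor's example (not part of gianfar.c): with an 8-bit hash width, the top three CRC bits pick one of the eight gaddr registers and the next five bits pick the bit inside it. A standalone sketch of that index arithmetic; the CRC value is made up, since the driver derives it with ether_crc():

/* Userspace sketch of the register/bit selection in gfar_set_hash_for_addr(). */
#include <stdio.h>

int main(void)
{
	unsigned int result = 0xA7000000u;	/* pretend CRC of the address */
	int width = 8;				/* assumed 256-entry hash table */
	unsigned int whichbit = (result >> (32 - width)) & 0x1f;
	unsigned int whichreg = result >> (32 - width + 5);
	unsigned int value = 1u << (31 - whichbit);

	printf("gaddr%u |= 0x%08x (bit %u)\n", whichreg, value, whichbit);
	/* prints: gaddr5 |= 0x01000000 (bit 7) */
	return 0;
}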
3156 
3157 
3158 /* There are multiple MAC Address register pairs on some controllers
3159  * This function sets the numth pair to a given address
3160  */
3161 static void gfar_set_mac_for_addr(struct net_device *dev, int num,
3162  const u8 *addr)
3163 {
3164  struct gfar_private *priv = netdev_priv(dev);
3165  struct gfar __iomem *regs = priv->gfargrp[0].regs;
3166  int idx;
3167  char tmpbuf[ETH_ALEN];
3168  u32 tempval;
3169  u32 __iomem *macptr = &regs->macstnaddr1;
3170 
3171  macptr += num*2;
3172 
3173  /* Now copy it into the mac registers backwards, cuz
3174  * little endian is silly
3175  */
3176  for (idx = 0; idx < ETH_ALEN; idx++)
3177  tmpbuf[ETH_ALEN - 1 - idx] = addr[idx];
3178 
3179  gfar_write(macptr, *((u32 *) (tmpbuf)));
3180 
3181  tempval = *((u32 *) (tmpbuf + 4));
3182 
3183  gfar_write(macptr+1, tempval);
3184 }
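Editor's example (not part of gianfar.c): gfar_set_mac_for_addr() stores the address back-to-front before the two 32-bit register writes pick the buffer up. A standalone sketch of just the byte reversal, using an example MAC address:

/* Userspace sketch of the byte reversal in gfar_set_mac_for_addr(). */
#include <stdio.h>

int main(void)
{
	unsigned char addr[6] = { 0x00, 0x04, 0x9f, 0x01, 0x02, 0x03 };
	unsigned char tmpbuf[6];
	int idx;

	for (idx = 0; idx < 6; idx++)
		tmpbuf[6 - 1 - idx] = addr[idx];

	for (idx = 0; idx < 6; idx++)
		printf("%02x ", tmpbuf[idx]);
	printf("\n");	/* prints: 03 02 01 9f 04 00 */
	return 0;
}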
3185 
3186 /* GFAR error interrupt handler */
3187 static irqreturn_t gfar_error(int irq, void *grp_id)
3188 {
3189  struct gfar_priv_grp *gfargrp = grp_id;
3190  struct gfar __iomem *regs = gfargrp->regs;
3191  struct gfar_private *priv = gfargrp->priv;
3192  struct net_device *dev = priv->ndev;
3193 
3194  /* Save ievent for future reference */
3195  u32 events = gfar_read(&regs->ievent);
3196 
3197  /* Clear IEVENT */
3198  gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);
3199 
3200  /* Magic Packet is not an error. */
3201  if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
3202  (events & IEVENT_MAG))
3203  events &= ~IEVENT_MAG;
3204 
3205  /* Hmm... */
3206  if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
3207  netdev_dbg(dev,
3208  "error interrupt (ievent=0x%08x imask=0x%08x)\n",
3209  events, gfar_read(&regs->imask));
3210 
3211  /* Update the error counters */
3212  if (events & IEVENT_TXE) {
3213  dev->stats.tx_errors++;
3214 
3215  if (events & IEVENT_LC)
3216  dev->stats.tx_window_errors++;
3217  if (events & IEVENT_CRL)
3218  dev->stats.tx_aborted_errors++;
3219  if (events & IEVENT_XFUN) {
3220  unsigned long flags;
3221 
3222  netif_dbg(priv, tx_err, dev,
3223  "TX FIFO underrun, packet dropped\n");
3224  dev->stats.tx_dropped++;
3225  priv->extra_stats.tx_underrun++;
3226 
3227  local_irq_save(flags);
3228  lock_tx_qs(priv);
3229 
3230  /* Reactivate the Tx Queues */
3231  gfar_write(&regs->tstat, gfargrp->tstat);
3232 
3233  unlock_tx_qs(priv);
3234  local_irq_restore(flags);
3235  }
3236  netif_dbg(priv, tx_err, dev, "Transmit Error\n");
3237  }
3238  if (events & IEVENT_BSY) {
3239  dev->stats.rx_errors++;
3240  priv->extra_stats.rx_bsy++;
3241 
3242  gfar_receive(irq, grp_id);
3243 
3244  netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n",
3245  gfar_read(&regs->rstat));
3246  }
3247  if (events & IEVENT_BABR) {
3248  dev->stats.rx_errors++;
3249  priv->extra_stats.rx_babr++;
3250 
3251  netif_dbg(priv, rx_err, dev, "babbling RX error\n");
3252  }
3253  if (events & IEVENT_EBERR) {
3254  priv->extra_stats.eberr++;
3255  netif_dbg(priv, rx_err, dev, "bus error\n");
3256  }
3257  if (events & IEVENT_RXC)
3258  netif_dbg(priv, rx_status, dev, "control frame\n");
3259 
3260  if (events & IEVENT_BABT) {
3261  priv->extra_stats.tx_babt++;
3262  netif_dbg(priv, tx_err, dev, "babbling TX error\n");
3263  }
3264  return IRQ_HANDLED;
3265 }
3266 
3267 static struct of_device_id gfar_match[] =
3268 {
3269  {
3270  .type = "network",
3271  .compatible = "gianfar",
3272  },
3273  {
3274  .compatible = "fsl,etsec2",
3275  },
3276  {},
3277 };
3278 MODULE_DEVICE_TABLE(of, gfar_match);
3279 
3280 /* Structure for a device driver */
3281 static struct platform_driver gfar_driver = {
3282  .driver = {
3283  .name = "fsl-gianfar",
3284  .owner = THIS_MODULE,
3285  .pm = GFAR_PM_OPS,
3286  .of_match_table = gfar_match,
3287  },
3288  .probe = gfar_probe,
3289  .remove = gfar_remove,
3290 };
3291 
3292 module_platform_driver(gfar_driver);