Linux Kernel 3.7.1
drivers/net/ethernet/ibm/emac/core.c
1 /*
2  * drivers/net/ethernet/ibm/emac/core.c
3  *
4  * Driver for PowerPC 4xx on-chip ethernet controller.
5  *
6  * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
8  *
9  * Based on the arch/ppc version of the driver:
10  *
11  * Copyright (c) 2004, 2005 Zultys Technologies.
12  * Eugene Surovegin <[email protected]> or <[email protected]>
13  *
14  * Based on original work by
15  * Matt Porter <[email protected]>
16  * (c) 2003 Benjamin Herrenschmidt <[email protected]>
17  * Armin Kuster <[email protected]>
18  * Johnnie Peters <[email protected]>
19  *
20  * This program is free software; you can redistribute it and/or modify it
21  * under the terms of the GNU General Public License as published by the
22  * Free Software Foundation; either version 2 of the License, or (at your
23  * option) any later version.
24  *
25  */
26 
27 #include <linux/module.h>
28 #include <linux/sched.h>
29 #include <linux/string.h>
30 #include <linux/errno.h>
31 #include <linux/delay.h>
32 #include <linux/types.h>
33 #include <linux/pci.h>
34 #include <linux/etherdevice.h>
35 #include <linux/skbuff.h>
36 #include <linux/crc32.h>
37 #include <linux/ethtool.h>
38 #include <linux/mii.h>
39 #include <linux/bitops.h>
40 #include <linux/workqueue.h>
41 #include <linux/of.h>
42 #include <linux/of_net.h>
43 #include <linux/slab.h>
44 
45 #include <asm/processor.h>
46 #include <asm/io.h>
47 #include <asm/dma.h>
48 #include <asm/uaccess.h>
49 #include <asm/dcr.h>
50 #include <asm/dcr-regs.h>
51 
52 #include "core.h"
53 
54 /*
55  * Lack of dma_unmap_???? calls is intentional.
56  *
57  * API-correct usage requires additional support state information to be
58  * maintained for every RX and TX buffer descriptor (BD). Unfortunately, due to
59  * EMAC design (e.g. TX buffer passed from network stack can be split into
60  * several BDs, dma_map_single/dma_map_page can be used to map particular BD),
61  * maintaining such information will add additional overhead.
62  * Current DMA API implementation for 4xx processors only ensures cache coherency
63  * and dma_unmap_???? routines are empty and are likely to stay this way.
64  * I decided to omit dma_unmap_??? calls because I don't want to add additional
65  * complexity just for the sake of following some abstract API, when it doesn't
66  * add any real benefit to the driver. I understand that this decision may be
67  * controversial, but I really tried to make code API-correct and efficient
68  * at the same time and didn't come up with code I liked :(. --ebs
69  */
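/* Illustration (editor's sketch, not part of the driver): API-correct
 * unmapping would require each BD to carry its own mapping state, e.g.
 *
 *	struct emac_bd_map {		// hypothetical bookkeeping
 *		dma_addr_t addr;	// handle from dma_map_single/page
 *		size_t len;		// mapped length
 *		bool is_page;		// selects dma_unmap_single vs _page
 *	};
 *
 * with a matching dma_unmap_single()/dma_unmap_page() call on every TX
 * completion and RX refill path. On 4xx both unmap routines are no-ops,
 * which is the efficiency argument made above.
 */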
70 
71 #define DRV_NAME "emac"
72 #define DRV_VERSION "3.54"
73 #define DRV_DESC "PPC 4xx OCP EMAC driver"
74 
77  ("Eugene Surovegin <[email protected]> or <[email protected]>");
78 MODULE_LICENSE("GPL");
79 
80 /*
81  * PPC64 doesn't (yet) have a cacheable_memcpy
82  */
83 #ifdef CONFIG_PPC64
84 #define cacheable_memcpy(d,s,n) memcpy((d),(s),(n))
85 #endif
86 
87 /* minimum number of free TX descriptors required to wake up TX process */
88 #define EMAC_TX_WAKEUP_THRESH (NUM_TX_BUFF / 4)
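/* E.g. with the default NUM_TX_BUFF of 64 (CONFIG_IBM_EMAC_TXB) the queue
 * is re-woken once fewer than 16 TX descriptors remain outstanding (see
 * emac_poll_tx() below).
 */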
89 
90 /* If packet size is less than this number, we allocate a small skb and copy
91  * the packet contents into it instead of just sending the original big skb up
92  */
93 #define EMAC_RX_COPY_THRESH CONFIG_IBM_EMAC_RX_COPY_THRESHOLD
94 
95 /* Since multiple EMACs share MDIO lines in various ways, we need
96  * to avoid re-using the same PHY ID in cases where the arch didn't
97  * set up precise phy_map entries
98  *
99  * XXX This is something that needs to be reworked as we can have multiple
100  * EMAC "sets" (multiple ASICs containing several EMACs) though we can
101  * probably require in that case to have explicit PHY IDs in the device-tree
102  */
103 static u32 busy_phy_map;
104 static DEFINE_MUTEX(emac_phy_map_lock);
105 
106 /* This is the wait queue used to wait on any event related to probe, that
107  * is discovery of MALs, other EMACs, ZMII/RGMIIs, etc...
108  */
109 static DECLARE_WAIT_QUEUE_HEAD(emac_probe_wait);
110 
111 /* Having stable interface names is a doomed idea. However, it would be nice
112  * if we didn't have completely random interface names at boot too :-) It's
113  * just a matter of making everybody's life easier. Since we are doing
114  * threaded probing, it's a bit harder though. The base idea here is that
115  * we make up a list of all emacs in the device-tree before we register the
116  * driver. Every emac will then wait for the previous one in the list to
117  * initialize before itself. We should also keep that list ordered by
118  * cell_index.
119  * That list is only 4 entries long, meaning that additional EMACs don't
120  * get ordering guarantees unless EMAC_BOOT_LIST_SIZE is increased.
121  */
122 
123 #define EMAC_BOOT_LIST_SIZE 4
124 static struct device_node *emac_boot_list[EMAC_BOOT_LIST_SIZE];
125 
126 /* How long should I wait for dependent devices ? */
127 #define EMAC_PROBE_DEP_TIMEOUT (HZ * 5)
128 
129 /* I don't want to litter the system log with timeout errors
130  * when we have a brain-damaged PHY.
131  */
132 static inline void emac_report_timeout_error(struct emac_instance *dev,
133  const char *error)
134 {
135  if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX |
136  EMAC_FTR_460EX_PHY_CLK_FIX |
137  EMAC_FTR_440EP_PHY_CLK_FIX))
138  DBG(dev, "%s" NL, error);
139  else if (net_ratelimit())
140  printk(KERN_ERR "%s: %s\n", dev->ofdev->dev.of_node->full_name,
141  error);
142 }
143 
144 /* EMAC PHY clock workaround:
145  * 440EP/440GR has a saner SDR0_MFR register implementation than 440GX,
146  * which allows controlling each EMAC clock
147  */
148 static inline void emac_rx_clk_tx(struct emac_instance *dev)
149 {
150 #ifdef CONFIG_PPC_DCR_NATIVE
151  if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
152  dcri_clrset(SDR0, SDR0_MFR,
153  0, SDR0_MFR_ECS >> dev->cell_index);
154 #endif
155 }
156 
157 static inline void emac_rx_clk_default(struct emac_instance *dev)
158 {
159 #ifdef CONFIG_PPC_DCR_NATIVE
160  if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
161  dcri_clrset(SDR0, SDR0_MFR,
162  SDR0_MFR_ECS >> dev->cell_index, 0);
163 #endif
164 }
165 
166 /* PHY polling intervals */
167 #define PHY_POLL_LINK_ON HZ
168 #define PHY_POLL_LINK_OFF (HZ / 5)
169 
170 /* Graceful stop timeouts in us.
171  * We should allow up to 1 frame time (full-duplex, ignoring collisions)
172  */
173 #define STOP_TIMEOUT_10 1230
174 #define STOP_TIMEOUT_100 124
175 #define STOP_TIMEOUT_1000 13
176 #define STOP_TIMEOUT_1000_JUMBO 73
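/* Arithmetic behind the values above (editorial note): a maximum-length
 * 1518-byte frame is 12144 bit times, i.e. ~1215 us at 10 Mb/s, ~122 us
 * at 100 Mb/s and ~13 us at 1 Gb/s; the jumbo figure allows for roughly
 * a 9000-byte frame (9000 * 8 / 1000 Mb/s = 72 us).
 */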
177 
178 static unsigned char default_mcast_addr[] = {
179  0x01, 0x80, 0xC2, 0x00, 0x00, 0x01
180 };
181 
182 /* Please, keep in sync with struct ibm_emac_stats/ibm_emac_error_stats */
183 static const char emac_stats_keys[EMAC_ETHTOOL_STATS_COUNT][ETH_GSTRING_LEN] = {
184  "rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum",
185  "tx_packets_csum", "tx_undo", "rx_dropped_stack", "rx_dropped_oom",
186  "rx_dropped_error", "rx_dropped_resize", "rx_dropped_mtu",
187  "rx_stopped", "rx_bd_errors", "rx_bd_overrun", "rx_bd_bad_packet",
188  "rx_bd_runt_packet", "rx_bd_short_event", "rx_bd_alignment_error",
189  "rx_bd_bad_fcs", "rx_bd_packet_too_long", "rx_bd_out_of_range",
190  "rx_bd_in_range", "rx_parity", "rx_fifo_overrun", "rx_overrun",
191  "rx_bad_packet", "rx_runt_packet", "rx_short_event",
192  "rx_alignment_error", "rx_bad_fcs", "rx_packet_too_long",
193  "rx_out_of_range", "rx_in_range", "tx_dropped", "tx_bd_errors",
194  "tx_bd_bad_fcs", "tx_bd_carrier_loss", "tx_bd_excessive_deferral",
195  "tx_bd_excessive_collisions", "tx_bd_late_collision",
196  "tx_bd_multple_collisions", "tx_bd_single_collision",
197  "tx_bd_underrun", "tx_bd_sqe", "tx_parity", "tx_underrun", "tx_sqe",
198  "tx_errors"
199 };
200 
201 static irqreturn_t emac_irq(int irq, void *dev_instance);
202 static void emac_clean_tx_ring(struct emac_instance *dev);
203 static void __emac_set_multicast_list(struct emac_instance *dev);
204 
205 static inline int emac_phy_supports_gige(int phy_mode)
206 {
207  return phy_mode == PHY_MODE_GMII ||
208  phy_mode == PHY_MODE_RGMII ||
209  phy_mode == PHY_MODE_SGMII ||
210  phy_mode == PHY_MODE_TBI ||
211  phy_mode == PHY_MODE_RTBI;
212 }
213 
214 static inline int emac_phy_gpcs(int phy_mode)
215 {
216  return phy_mode == PHY_MODE_SGMII ||
217  phy_mode == PHY_MODE_TBI ||
218  phy_mode == PHY_MODE_RTBI;
219 }
220 
221 static inline void emac_tx_enable(struct emac_instance *dev)
222 {
223  struct emac_regs __iomem *p = dev->emacp;
224  u32 r;
225 
226  DBG(dev, "tx_enable" NL);
227 
228  r = in_be32(&p->mr0);
229  if (!(r & EMAC_MR0_TXE))
230  out_be32(&p->mr0, r | EMAC_MR0_TXE);
231 }
232 
233 static void emac_tx_disable(struct emac_instance *dev)
234 {
235  struct emac_regs __iomem *p = dev->emacp;
236  u32 r;
237 
238  DBG(dev, "tx_disable" NL);
239 
240  r = in_be32(&p->mr0);
241  if (r & EMAC_MR0_TXE) {
242  int n = dev->stop_timeout;
243  out_be32(&p->mr0, r & ~EMAC_MR0_TXE);
244  while (!(in_be32(&p->mr0) & EMAC_MR0_TXI) && n) {
245  udelay(1);
246  --n;
247  }
248  if (unlikely(!n))
249  emac_report_timeout_error(dev, "TX disable timeout");
250  }
251 }
252 
253 static void emac_rx_enable(struct emac_instance *dev)
254 {
255  struct emac_regs __iomem *p = dev->emacp;
256  u32 r;
257 
258  if (unlikely(test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags)))
259  goto out;
260 
261  DBG(dev, "rx_enable" NL);
262 
263  r = in_be32(&p->mr0);
264  if (!(r & EMAC_MR0_RXE)) {
265  if (unlikely(!(r & EMAC_MR0_RXI))) {
266  /* Wait if previous async disable is still in progress */
267  int n = dev->stop_timeout;
268  while (!(r = in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
269  udelay(1);
270  --n;
271  }
272  if (unlikely(!n))
273  emac_report_timeout_error(dev,
274  "RX disable timeout");
275  }
276  out_be32(&p->mr0, r | EMAC_MR0_RXE);
277  }
278  out:
279  ;
280 }
281 
282 static void emac_rx_disable(struct emac_instance *dev)
283 {
284  struct emac_regs __iomem *p = dev->emacp;
285  u32 r;
286 
287  DBG(dev, "rx_disable" NL);
288 
289  r = in_be32(&p->mr0);
290  if (r & EMAC_MR0_RXE) {
291  int n = dev->stop_timeout;
292  out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
293  while (!(in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
294  udelay(1);
295  --n;
296  }
297  if (unlikely(!n))
298  emac_report_timeout_error(dev, "RX disable timeout");
299  }
300 }
301 
302 static inline void emac_netif_stop(struct emac_instance *dev)
303 {
304  netif_tx_lock_bh(dev->ndev);
305  netif_addr_lock(dev->ndev);
306  dev->no_mcast = 1;
307  netif_addr_unlock(dev->ndev);
308  netif_tx_unlock_bh(dev->ndev);
309  dev->ndev->trans_start = jiffies; /* prevent tx timeout */
310  mal_poll_disable(dev->mal, &dev->commac);
311  netif_tx_disable(dev->ndev);
312 }
313 
314 static inline void emac_netif_start(struct emac_instance *dev)
315 {
316  netif_tx_lock_bh(dev->ndev);
317  netif_addr_lock(dev->ndev);
318  dev->no_mcast = 0;
319  if (dev->mcast_pending && netif_running(dev->ndev))
320  __emac_set_multicast_list(dev);
321  netif_addr_unlock(dev->ndev);
322  netif_tx_unlock_bh(dev->ndev);
323 
324  netif_wake_queue(dev->ndev);
325 
326  /* NOTE: unconditional netif_wake_queue is only appropriate
327  * so long as all callers are assured to have free tx slots
328  * (taken from tg3... though the case where that is wrong is
329  * not terribly harmful)
330  */
331  mal_poll_enable(dev->mal, &dev->commac);
332 }
333 
334 static inline void emac_rx_disable_async(struct emac_instance *dev)
335 {
336  struct emac_regs __iomem *p = dev->emacp;
337  u32 r;
338 
339  DBG(dev, "rx_disable_async" NL);
340 
341  r = in_be32(&p->mr0);
342  if (r & EMAC_MR0_RXE)
343  out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
344 }
345 
346 static int emac_reset(struct emac_instance *dev)
347 {
348  struct emac_regs __iomem *p = dev->emacp;
349  int n = 20;
350 
351  DBG(dev, "reset" NL);
352 
353  if (!dev->reset_failed) {
354  /* 40x erratum suggests stopping RX channel before reset,
355  * we stop TX as well
356  */
357  emac_rx_disable(dev);
358  emac_tx_disable(dev);
359  }
360 
361 #ifdef CONFIG_PPC_DCR_NATIVE
362  /* Enable internal clock source */
363  if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX))
364  dcri_clrset(SDR0, SDR0_ETH_CFG,
365  0, SDR0_ETH_CFG_ECS << dev->cell_index);
366 #endif
367 
368  out_be32(&p->mr0, EMAC_MR0_SRST);
369  while ((in_be32(&p->mr0) & EMAC_MR0_SRST) && n)
370  --n;
371 
372 #ifdef CONFIG_PPC_DCR_NATIVE
373  /* Enable external clock source */
374  if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX))
375  dcri_clrset(SDR0, SDR0_ETH_CFG,
376  SDR0_ETH_CFG_ECS << dev->cell_index, 0);
377 #endif
378 
379  if (n) {
380  dev->reset_failed = 0;
381  return 0;
382  } else {
383  emac_report_timeout_error(dev, "reset timeout");
384  dev->reset_failed = 1;
385  return -ETIMEDOUT;
386  }
387 }
388 
389 static void emac_hash_mc(struct emac_instance *dev)
390 {
391  const int regs = EMAC_XAHT_REGS(dev);
392  u32 *gaht_base = emac_gaht_base(dev);
393  u32 gaht_temp[regs];
394  struct netdev_hw_addr *ha;
395  int i;
396 
397  DBG(dev, "hash_mc %d" NL, netdev_mc_count(dev->ndev));
398 
399  memset(gaht_temp, 0, sizeof (gaht_temp));
400 
401  netdev_for_each_mc_addr(ha, dev->ndev) {
402  int slot, reg, mask;
403  DBG2(dev, "mc %pM" NL, ha->addr);
404 
405  slot = EMAC_XAHT_CRC_TO_SLOT(dev,
406  ether_crc(ETH_ALEN, ha->addr));
407  reg = EMAC_XAHT_SLOT_TO_REG(dev, slot);
408  mask = EMAC_XAHT_SLOT_TO_MASK(dev, slot);
409 
410  gaht_temp[reg] |= mask;
411  }
412 
413  for (i = 0; i < regs; i++)
414  out_be32(gaht_base + i, gaht_temp[i]);
415 }
416 
417 static inline u32 emac_iff2rmr(struct net_device *ndev)
418 {
419  struct emac_instance *dev = netdev_priv(ndev);
420  u32 r;
421 
421
422  r = EMAC_RMR_SP | EMAC_RMR_SFCS | EMAC_RMR_IAE | EMAC_RMR_BAE;
423
424  if (emac_has_feature(dev, EMAC_FTR_EMAC4))
425  r |= EMAC4_RMR_BASE;
426  else
427  r |= EMAC_RMR_BASE;
428 
429  if (ndev->flags & IFF_PROMISC)
430  r |= EMAC_RMR_PME;
431  else if (ndev->flags & IFF_ALLMULTI ||
432  (netdev_mc_count(ndev) > EMAC_XAHT_SLOTS(dev)))
433  r |= EMAC_RMR_PMME;
434  else if (!netdev_mc_empty(ndev))
435  r |= EMAC_RMR_MAE;
436 
437  if (emac_has_feature(dev, EMAC_APM821XX_REQ_JUMBO_FRAME_SIZE)) {
438  r &= ~EMAC4_RMR_MJS_MASK;
439  r |= EMAC4_RMR_MJS(ndev->mtu);
440  }
441 
442  return r;
443 }
444 
445 static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
446 {
447  u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC_MR1_TR0_MULT;
448
449  DBG2(dev, "__emac_calc_base_mr1" NL);
450 
451  switch(tx_size) {
452  case 2048:
453  ret |= EMAC_MR1_TFS_2K;
454  break;
455  default:
456  printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
457  dev->ndev->name, tx_size);
458  }
459 
460  switch(rx_size) {
461  case 16384:
462  ret |= EMAC_MR1_RFS_16K;
463  break;
464  case 4096:
465  ret |= EMAC_MR1_RFS_4K;
466  break;
467  default:
468  printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
469  dev->ndev->name, rx_size);
470  }
471 
472  return ret;
473 }
474 
475 static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
476 {
477  u32 ret = EMAC4_MR1_VLE | EMAC4_MR1_IST | EMAC4_MR1_TR |
478  EMAC4_MR1_OBCI(dev->opb_bus_freq / 1000000);
479 
480  DBG2(dev, "__emac4_calc_base_mr1" NL);
481 
482  switch(tx_size) {
483  case 16384:
484  ret |= EMAC4_MR1_TFS_16K;
485  break;
486  case 4096:
487  ret |= EMAC4_MR1_TFS_4K;
488  break;
489  case 2048:
490  ret |= EMAC4_MR1_TFS_2K;
491  break;
492  default:
493  printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
494  dev->ndev->name, tx_size);
495  }
496 
497  switch(rx_size) {
498  case 16384:
499  ret |= EMAC4_MR1_RFS_16K;
500  break;
501  case 4096:
502  ret |= EMAC4_MR1_RFS_4K;
503  break;
504  case 2048:
505  ret |= EMAC4_MR1_RFS_2K;
506  break;
507  default:
508  printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
509  dev->ndev->name, rx_size);
510  }
511 
512  return ret;
513 }
514 
515 static u32 emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
516 {
517  return emac_has_feature(dev, EMAC_FTR_EMAC4) ?
518  __emac4_calc_base_mr1(dev, tx_size, rx_size) :
519  __emac_calc_base_mr1(dev, tx_size, rx_size);
520 }
521 
522 static inline u32 emac_calc_trtr(struct emac_instance *dev, unsigned int size)
523 {
524  if (emac_has_feature(dev, EMAC_FTR_EMAC4))
525  return ((size >> 6) - 1) << EMAC_TRTR_SHIFT_EMAC4;
526  else
527  return ((size >> 6) - 1) << EMAC_TRTR_SHIFT;
528 }
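/* Worked example (editorial): TRTR holds the threshold in 64-byte units
 * minus one, so a request of tx_size / 2 = 1024 bytes is encoded as
 * (1024 >> 6) - 1 = 15 before being shifted into the EMAC- or
 * EMAC4-specific field position.
 */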
529 
530 static inline u32 emac_calc_rwmr(struct emac_instance *dev,
531  unsigned int low, unsigned int high)
532 {
533  if (emac_has_feature(dev, EMAC_FTR_EMAC4))
534  return (low << 22) | ( (high & 0x3ff) << 6);
535  else
536  return (low << 23) | ( (high & 0x1ff) << 7);
537 }
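/* Worked example (editorial): on EMAC4 the low watermark lives at bit 22
 * and the 10-bit high watermark at bits 6..15, so
 * emac_calc_rwmr(dev, 32, 64) = (32 << 22) | (64 << 6) = 0x08001000.
 */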
538 
539 static int emac_configure(struct emac_instance *dev)
540 {
541  struct emac_regs __iomem *p = dev->emacp;
542  struct net_device *ndev = dev->ndev;
543  int tx_size, rx_size, link = netif_carrier_ok(dev->ndev);
544  u32 r, mr1 = 0;
545 
546  DBG(dev, "configure" NL);
547 
548  if (!link) {
549  out_be32(&p->mr1, in_be32(&p->mr1)
551  udelay(100);
552  } else if (emac_reset(dev) < 0)
553  return -ETIMEDOUT;
554 
555  if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
556  tah_reset(dev->tah_dev);
557 
558  DBG(dev, " link = %d duplex = %d, pause = %d, asym_pause = %d\n",
559  link, dev->phy.duplex, dev->phy.pause, dev->phy.asym_pause);
560 
561  /* Default fifo sizes */
562  tx_size = dev->tx_fifo_size;
563  rx_size = dev->rx_fifo_size;
564 
565  /* No link, force loopback */
566  if (!link)
567  mr1 = EMAC_MR1_FDE | EMAC_MR1_ILE;
568 
569  /* Check for full duplex */
570  else if (dev->phy.duplex == DUPLEX_FULL)
571  mr1 |= EMAC_MR1_FDE | EMAC_MR1_MWSW_001;
572
573  /* Adjust fifo sizes, mr1 and timeouts based on link speed */
574  dev->stop_timeout = STOP_TIMEOUT_10;
575  switch (dev->phy.speed) {
576  case SPEED_1000:
577  if (emac_phy_gpcs(dev->phy.mode)) {
578  mr1 |= EMAC_MR1_MF_1000GPCS | EMAC_MR1_MF_IPPA(
579  (dev->phy.gpcs_address != 0xffffffff) ?
580  dev->phy.gpcs_address : dev->phy.address);
581 
582  /* Put some arbitrary OUI, Manuf & Rev IDs so we can
583  * identify this GPCS PHY later.
584  */
585  out_be32(&p->u1.emac4.ipcr, 0xdeadbeef);
586  } else
587  mr1 |= EMAC_MR1_MF_1000;
588 
589  /* Extended fifo sizes */
590  tx_size = dev->tx_fifo_size_gige;
591  rx_size = dev->rx_fifo_size_gige;
592 
593  if (dev->ndev->mtu > ETH_DATA_LEN) {
594  if (emac_has_feature(dev, EMAC_FTR_EMAC4))
595  mr1 |= EMAC4_MR1_JPSM;
596  else
597  mr1 |= EMAC_MR1_JPSM;
598  dev->stop_timeout = STOP_TIMEOUT_1000_JUMBO;
599  } else
600  dev->stop_timeout = STOP_TIMEOUT_1000;
601  break;
602  case SPEED_100:
603  mr1 |= EMAC_MR1_MF_100;
604  dev->stop_timeout = STOP_TIMEOUT_100;
605  break;
606  default: /* make gcc happy */
607  break;
608  }
609 
610  if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
611  rgmii_set_speed(dev->rgmii_dev, dev->rgmii_port,
612  dev->phy.speed);
613  if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
614  zmii_set_speed(dev->zmii_dev, dev->zmii_port, dev->phy.speed);
615 
616  /* An erratum on 40x forces us to NOT use integrated flow control,
617  * let's hope it works on 44x ;)
618  */
619  if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x) &&
620  dev->phy.duplex == DUPLEX_FULL) {
621  if (dev->phy.pause)
622  mr1 |= EMAC_MR1_EIFC | EMAC_MR1_APP;
623  else if (dev->phy.asym_pause)
624  mr1 |= EMAC_MR1_APP;
625  }
626 
627  /* Add base settings & fifo sizes & program MR1 */
628  mr1 |= emac_calc_base_mr1(dev, tx_size, rx_size);
629  out_be32(&p->mr1, mr1);
630 
631  /* Set individual MAC address */
632  out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
633  out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
634  (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
635  ndev->dev_addr[5]);
636 
637  /* VLAN Tag Protocol ID */
638  out_be32(&p->vtpid, 0x8100);
639 
640  /* Receive mode register */
641  r = emac_iff2rmr(ndev);
642  if (r & EMAC_RMR_MAE)
643  emac_hash_mc(dev);
644  out_be32(&p->rmr, r);
645 
646  /* FIFOs thresholds */
647  if (emac_has_feature(dev, EMAC_FTR_EMAC4))
648  r = EMAC4_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
649  tx_size / 2 / dev->fifo_entry_size);
650  else
651  r = EMAC_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
652  tx_size / 2 / dev->fifo_entry_size);
653  out_be32(&p->tmr1, r);
654  out_be32(&p->trtr, emac_calc_trtr(dev, tx_size / 2));
655 
656  /* A PAUSE frame is sent when the RX FIFO reaches its high-water mark;
657  there should still be enough space in the FIFO to allow our link
658  partner time to process this frame and also time to send a PAUSE
659  frame itself.
660 
661  Here is the worst case scenario for the RX FIFO "headroom"
662  (from "The Switch Book") (100Mbps, without preamble, inter-frame gap):
663 
664  1) One maximum-length frame on TX 1522 bytes
665  2) One PAUSE frame time 64 bytes
666  3) PAUSE frame decode time allowance 64 bytes
667  4) One maximum-length frame on RX 1522 bytes
668  5) Round-trip propagation delay of the link (100Mb) 15 bytes
669  ----------
670  3187 bytes
671 
672  I chose to set the high-water mark to RX_FIFO_SIZE / 4 (1024 bytes)
673  and the low-water mark to RX_FIFO_SIZE / 8 (512 bytes)
674  */
675  r = emac_calc_rwmr(dev, rx_size / 8 / dev->fifo_entry_size,
676  rx_size / 4 / dev->fifo_entry_size);
677  out_be32(&p->rwmr, r);
678 
679  /* Set PAUSE timer to the maximum */
680  out_be32(&p->ptr, 0xffff);
681 
682  /* IRQ sources */
683  r = EMAC_ISR_OVR | EMAC_ISR_BP | EMAC_ISR_SE |
684  EMAC_ISR_ALE | EMAC_ISR_BFCS | EMAC_ISR_PTLE | EMAC_ISR_ORE |
685  EMAC_ISR_IRE | EMAC_ISR_TE;
686  if (emac_has_feature(dev, EMAC_FTR_EMAC4))
687  r |= EMAC4_ISR_TXPE | EMAC4_ISR_RXPE /* | EMAC4_ISR_TXUE |
688  EMAC4_ISR_RXOE | */;
689  out_be32(&p->iser, r);
690 
691  /* We need to take GPCS PHY out of isolate mode after EMAC reset */
692  if (emac_phy_gpcs(dev->phy.mode)) {
693  if (dev->phy.gpcs_address != 0xffffffff)
694  emac_mii_reset_gpcs(&dev->phy);
695  else
696  emac_mii_reset_phy(&dev->phy);
697  }
698 
699  return 0;
700 }
701 
702 static void emac_reinitialize(struct emac_instance *dev)
703 {
704  DBG(dev, "reinitialize" NL);
705 
706  emac_netif_stop(dev);
707  if (!emac_configure(dev)) {
708  emac_tx_enable(dev);
709  emac_rx_enable(dev);
710  }
711  emac_netif_start(dev);
712 }
713 
714 static void emac_full_tx_reset(struct emac_instance *dev)
715 {
716  DBG(dev, "full_tx_reset" NL);
717 
718  emac_tx_disable(dev);
719  mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
720  emac_clean_tx_ring(dev);
721  dev->tx_cnt = dev->tx_slot = dev->ack_slot = 0;
722 
723  emac_configure(dev);
724
725  mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
726  emac_tx_enable(dev);
727  emac_rx_enable(dev);
728 }
729 
730 static void emac_reset_work(struct work_struct *work)
731 {
732  struct emac_instance *dev = container_of(work, struct emac_instance, reset_work);
733 
734  DBG(dev, "reset_work" NL);
735 
736  mutex_lock(&dev->link_lock);
737  if (dev->opened) {
738  emac_netif_stop(dev);
739  emac_full_tx_reset(dev);
740  emac_netif_start(dev);
741  }
742  mutex_unlock(&dev->link_lock);
743 }
744 
745 static void emac_tx_timeout(struct net_device *ndev)
746 {
747  struct emac_instance *dev = netdev_priv(ndev);
748 
749  DBG(dev, "tx_timeout" NL);
750 
751  schedule_work(&dev->reset_work);
752 }
753 
754 
755 static inline int emac_phy_done(struct emac_instance *dev, u32 stacr)
756 {
757  int done = !!(stacr & EMAC_STACR_OC);
758 
759  if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
760  done = !done;
761 
762  return done;
763 };
764 
765 static int __emac_mdio_read(struct emac_instance *dev, u8 id, u8 reg)
766 {
767  struct emac_regs __iomem *p = dev->emacp;
768  u32 r = 0;
769  int n, err = -ETIMEDOUT;
770 
771  mutex_lock(&dev->mdio_lock);
772 
773  DBG2(dev, "mdio_read(%02x,%02x)" NL, id, reg);
774 
775  /* Enable proper MDIO port */
776  if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
777  zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
778  if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
779  rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);
780 
781  /* Wait for management interface to become idle */
782  n = 20;
783  while (!emac_phy_done(dev, in_be32(&p->stacr))) {
784  udelay(1);
785  if (!--n) {
786  DBG2(dev, " -> timeout wait idle\n");
787  goto bail;
788  }
789  }
790 
791  /* Issue read command */
792  if (emac_has_feature(dev, EMAC_FTR_EMAC4))
793  r = EMAC4_STACR_BASE(dev->opb_bus_freq);
794  else
795  r = EMAC_STACR_BASE(dev->opb_bus_freq);
796  if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
797  r |= EMAC_STACR_OC;
798  if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
799  r |= EMACX_STACR_STAC_READ;
800  else
801  r |= EMAC_STACR_STAC_READ;
802  r |= (reg & EMAC_STACR_PRA_MASK)
803  | ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT);
804  out_be32(&p->stacr, r);
805 
806  /* Wait for read to complete */
807  n = 200;
808  while (!emac_phy_done(dev, (r = in_be32(&p->stacr)))) {
809  udelay(1);
810  if (!--n) {
811  DBG2(dev, " -> timeout wait complete\n");
812  goto bail;
813  }
814  }
815 
816  if (unlikely(r & EMAC_STACR_PHYE)) {
817  DBG(dev, "mdio_read(%02x, %02x) failed" NL, id, reg);
818  err = -EREMOTEIO;
819  goto bail;
820  }
821 
822  r = ((r >> EMAC_STACR_PHYD_SHIFT) & EMAC_STACR_PHYD_MASK);
823
824  DBG2(dev, "mdio_read -> %04x" NL, r);
825  err = 0;
826  bail:
827  if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
828  rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
829  if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
830  zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
831  mutex_unlock(&dev->mdio_lock);
832 
833  return err == 0 ? r : err;
834 }
835 
836 static void __emac_mdio_write(struct emac_instance *dev, u8 id, u8 reg,
837  u16 val)
838 {
839  struct emac_regs __iomem *p = dev->emacp;
840  u32 r = 0;
841  int n, err = -ETIMEDOUT;
842 
843  mutex_lock(&dev->mdio_lock);
844 
845  DBG2(dev, "mdio_write(%02x,%02x,%04x)" NL, id, reg, val);
846 
847  /* Enable proper MDIO port */
848  if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
849  zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
850  if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
851  rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);
852 
853  /* Wait for management interface to be idle */
854  n = 20;
855  while (!emac_phy_done(dev, in_be32(&p->stacr))) {
856  udelay(1);
857  if (!--n) {
858  DBG2(dev, " -> timeout wait idle\n");
859  goto bail;
860  }
861  }
862 
863  /* Issue write command */
864  if (emac_has_feature(dev, EMAC_FTR_EMAC4))
865  r = EMAC4_STACR_BASE(dev->opb_bus_freq);
866  else
867  r = EMAC_STACR_BASE(dev->opb_bus_freq);
868  if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
869  r |= EMAC_STACR_OC;
870  if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
871  r |= EMACX_STACR_STAC_WRITE;
872  else
873  r |= EMAC_STACR_STAC_WRITE;
874  r |= (reg & EMAC_STACR_PRA_MASK) |
875  ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT) |
876  (val << EMAC_STACR_PHYD_SHIFT);
877  out_be32(&p->stacr, r);
878 
879  /* Wait for write to complete */
880  n = 200;
881  while (!emac_phy_done(dev, in_be32(&p->stacr))) {
882  udelay(1);
883  if (!--n) {
884  DBG2(dev, " -> timeout wait complete\n");
885  goto bail;
886  }
887  }
888  err = 0;
889  bail:
890  if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
891  rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
892  if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
893  zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
894  mutex_unlock(&dev->mdio_lock);
895 }
896 
897 static int emac_mdio_read(struct net_device *ndev, int id, int reg)
898 {
899  struct emac_instance *dev = netdev_priv(ndev);
900  int res;
901 
902  res = __emac_mdio_read((dev->mdio_instance &&
903  dev->phy.gpcs_address != id) ?
904  dev->mdio_instance : dev,
905  (u8) id, (u8) reg);
906  return res;
907 }
908 
909 static void emac_mdio_write(struct net_device *ndev, int id, int reg, int val)
910 {
911  struct emac_instance *dev = netdev_priv(ndev);
912 
913  __emac_mdio_write((dev->mdio_instance &&
914  dev->phy.gpcs_address != id) ?
915  dev->mdio_instance : dev,
916  (u8) id, (u8) reg, (u16) val);
917 }
918 
919 /* Tx lock BH */
920 static void __emac_set_multicast_list(struct emac_instance *dev)
921 {
922  struct emac_regs __iomem *p = dev->emacp;
923  u32 rmr = emac_iff2rmr(dev->ndev);
924 
925  DBG(dev, "__multicast %08x" NL, rmr);
926 
927  /* I decided to relax register access rules here to avoid
928  * full EMAC reset.
929  *
930  * There is a real problem with EMAC4 core if we use MWSW_001 bit
931  * in MR1 register and do a full EMAC reset.
932  * One TX BD status update is delayed and, after EMAC reset, it
933  * never happens, resulting in TX hung (it'll be recovered by TX
934  * timeout handler eventually, but this is just gross).
935  * So we either have to do full TX reset or try to cheat here :)
936  *
937  * The only required change is to RX mode register, so I *think* all
938  * we need is just to stop RX channel. This seems to work on all
939  * tested SoCs. --ebs
940  *
941  * If we need the full reset, we might just trigger the workqueue
942  * and do it async... a bit nasty but should work --BenH
943  */
944  dev->mcast_pending = 0;
945  emac_rx_disable(dev);
946  if (rmr & EMAC_RMR_MAE)
947  emac_hash_mc(dev);
948  out_be32(&p->rmr, rmr);
949  emac_rx_enable(dev);
950 }
951 
952 /* Tx lock BH */
953 static void emac_set_multicast_list(struct net_device *ndev)
954 {
955  struct emac_instance *dev = netdev_priv(ndev);
956 
957  DBG(dev, "multicast" NL);
958 
959  BUG_ON(!netif_running(dev->ndev));
960 
961  if (dev->no_mcast) {
962  dev->mcast_pending = 1;
963  return;
964  }
965  __emac_set_multicast_list(dev);
966 }
967 
968 static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu)
969 {
970  int rx_sync_size = emac_rx_sync_size(new_mtu);
971  int rx_skb_size = emac_rx_skb_size(new_mtu);
972  int i, ret = 0;
973  int mr1_jumbo_bit_change = 0;
974 
975  mutex_lock(&dev->link_lock);
976  emac_netif_stop(dev);
977  emac_rx_disable(dev);
978  mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);
979
980  if (dev->rx_sg_skb) {
981  ++dev->estats.rx_dropped_resize;
982  dev_kfree_skb(dev->rx_sg_skb);
983  dev->rx_sg_skb = NULL;
984  }
985 
986  /* Make a first pass over RX ring and mark BDs ready, dropping
987  * non-processed packets on the way. We need this as a separate pass
988  * to simplify error recovery in the case of allocation failure later.
989  */
990  for (i = 0; i < NUM_RX_BUFF; ++i) {
991  if (dev->rx_desc[i].ctrl & MAL_RX_CTRL_FIRST)
992  ++dev->estats.rx_dropped_resize;
993 
994  dev->rx_desc[i].data_len = 0;
995  dev->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY |
996  (i == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
997  }
998 
999  /* Reallocate RX ring only if bigger skb buffers are required */
1000  if (rx_skb_size <= dev->rx_skb_size)
1001  goto skip;
1002 
1003  /* Second pass, allocate new skbs */
1004  for (i = 0; i < NUM_RX_BUFF; ++i) {
1005  struct sk_buff *skb = alloc_skb(rx_skb_size, GFP_ATOMIC);
1006  if (!skb) {
1007  ret = -ENOMEM;
1008  goto oom;
1009  }
1010 
1011  BUG_ON(!dev->rx_skb[i]);
1012  dev_kfree_skb(dev->rx_skb[i]);
1013 
1014  skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
1015  dev->rx_desc[i].data_ptr =
1016  dma_map_single(&dev->ofdev->dev, skb->data - 2, rx_sync_size,
1017  DMA_FROM_DEVICE) + 2;
1018  dev->rx_skb[i] = skb;
1019  }
1020  skip:
1021  /* Check if we need to change "Jumbo" bit in MR1 */
1022  if (emac_has_feature(dev, EMAC_APM821XX_REQ_JUMBO_FRAME_SIZE)) {
1023  mr1_jumbo_bit_change = (new_mtu > ETH_DATA_LEN) ||
1024  (dev->ndev->mtu > ETH_DATA_LEN);
1025  } else {
1026  mr1_jumbo_bit_change = (new_mtu > ETH_DATA_LEN) ^
1027  (dev->ndev->mtu > ETH_DATA_LEN);
1028  }
1029 
1030  if (mr1_jumbo_bit_change) {
1031  /* This is to prevent starting RX channel in emac_rx_enable() */
1032  set_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
1033 
1034  dev->ndev->mtu = new_mtu;
1035  emac_full_tx_reset(dev);
1036  }
1037 
1038  mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(new_mtu));
1039  oom:
1040  /* Restart RX */
1041  clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
1042  dev->rx_slot = 0;
1043  mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
1044  emac_rx_enable(dev);
1045  emac_netif_start(dev);
1046  mutex_unlock(&dev->link_lock);
1047 
1048  return ret;
1049 }
1050 
1051 /* Process ctx, rtnl_lock semaphore */
1052 static int emac_change_mtu(struct net_device *ndev, int new_mtu)
1053 {
1054  struct emac_instance *dev = netdev_priv(ndev);
1055  int ret = 0;
1056 
1057  if (new_mtu < EMAC_MIN_MTU || new_mtu > dev->max_mtu)
1058  return -EINVAL;
1059 
1060  DBG(dev, "change_mtu(%d)" NL, new_mtu);
1061 
1062  if (netif_running(ndev)) {
1063  /* Check if we really need to reinitialize RX ring */
1064  if (emac_rx_skb_size(ndev->mtu) != emac_rx_skb_size(new_mtu))
1065  ret = emac_resize_rx_ring(dev, new_mtu);
1066  }
1067 
1068  if (!ret) {
1069  ndev->mtu = new_mtu;
1070  dev->rx_skb_size = emac_rx_skb_size(new_mtu);
1071  dev->rx_sync_size = emac_rx_sync_size(new_mtu);
1072  }
1073 
1074  return ret;
1075 }
1076 
1077 static void emac_clean_tx_ring(struct emac_instance *dev)
1078 {
1079  int i;
1080 
1081  for (i = 0; i < NUM_TX_BUFF; ++i) {
1082  if (dev->tx_skb[i]) {
1083  dev_kfree_skb(dev->tx_skb[i]);
1084  dev->tx_skb[i] = NULL;
1085  if (dev->tx_desc[i].ctrl & MAL_TX_CTRL_READY)
1086  ++dev->estats.tx_dropped;
1087  }
1088  dev->tx_desc[i].ctrl = 0;
1089  dev->tx_desc[i].data_ptr = 0;
1090  }
1091 }
1092 
1093 static void emac_clean_rx_ring(struct emac_instance *dev)
1094 {
1095  int i;
1096 
1097  for (i = 0; i < NUM_RX_BUFF; ++i)
1098  if (dev->rx_skb[i]) {
1099  dev->rx_desc[i].ctrl = 0;
1100  dev_kfree_skb(dev->rx_skb[i]);
1101  dev->rx_skb[i] = NULL;
1102  dev->rx_desc[i].data_ptr = 0;
1103  }
1104 
1105  if (dev->rx_sg_skb) {
1106  dev_kfree_skb(dev->rx_sg_skb);
1107  dev->rx_sg_skb = NULL;
1108  }
1109 }
1110 
1111 static inline int emac_alloc_rx_skb(struct emac_instance *dev, int slot,
1112  gfp_t flags)
1113 {
1114  struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags);
1115  if (unlikely(!skb))
1116  return -ENOMEM;
1117 
1118  dev->rx_skb[slot] = skb;
1119  dev->rx_desc[slot].data_len = 0;
1120 
1121  skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
1122  dev->rx_desc[slot].data_ptr =
1123  dma_map_single(&dev->ofdev->dev, skb->data - 2, dev->rx_sync_size,
1124  DMA_FROM_DEVICE) + 2;
1125  wmb();
1126  dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
1127  (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
1128 
1129  return 0;
1130 }
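/* Editorial note on the "+ 2" / "- 2" pairing above: the skb data pointer
 * is reserved EMAC_RX_SKB_HEADROOM + 2 bytes in, but DMA is mapped from
 * skb->data - 2. The 2-byte slip keeps the mapping aligned while leaving
 * the 14-byte Ethernet header offset by 2, so the IP header that follows
 * lands on a 4-byte boundary (the usual NET_IP_ALIGN trick).
 */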
1131 
1132 static void emac_print_link_status(struct emac_instance *dev)
1133 {
1134  if (netif_carrier_ok(dev->ndev))
1135  printk(KERN_INFO "%s: link is up, %d %s%s\n",
1136  dev->ndev->name, dev->phy.speed,
1137  dev->phy.duplex == DUPLEX_FULL ? "FDX" : "HDX",
1138  dev->phy.pause ? ", pause enabled" :
1139  dev->phy.asym_pause ? ", asymmetric pause enabled" : "");
1140  else
1141  printk(KERN_INFO "%s: link is down\n", dev->ndev->name);
1142 }
1143 
1144 /* Process ctx, rtnl_lock semaphore */
1145 static int emac_open(struct net_device *ndev)
1146 {
1147  struct emac_instance *dev = netdev_priv(ndev);
1148  int err, i;
1149 
1150  DBG(dev, "open" NL);
1151 
1152  /* Setup error IRQ handler */
1153  err = request_irq(dev->emac_irq, emac_irq, 0, "EMAC", dev);
1154  if (err) {
1155  printk(KERN_ERR "%s: failed to request IRQ %d\n",
1156  ndev->name, dev->emac_irq);
1157  return err;
1158  }
1159 
1160  /* Allocate RX ring */
1161  for (i = 0; i < NUM_RX_BUFF; ++i)
1162  if (emac_alloc_rx_skb(dev, i, GFP_KERNEL)) {
1163  printk(KERN_ERR "%s: failed to allocate RX ring\n",
1164  ndev->name);
1165  goto oom;
1166  }
1167 
1168  dev->tx_cnt = dev->tx_slot = dev->ack_slot = dev->rx_slot = 0;
1169  clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
1170  dev->rx_sg_skb = NULL;
1171 
1172  mutex_lock(&dev->link_lock);
1173  dev->opened = 1;
1174 
1175  /* Start PHY polling now.
1176  */
1177  if (dev->phy.address >= 0) {
1178  int link_poll_interval;
1179  if (dev->phy.def->ops->poll_link(&dev->phy)) {
1180  dev->phy.def->ops->read_link(&dev->phy);
1181  emac_rx_clk_default(dev);
1182  netif_carrier_on(dev->ndev);
1183  link_poll_interval = PHY_POLL_LINK_ON;
1184  } else {
1185  emac_rx_clk_tx(dev);
1186  netif_carrier_off(dev->ndev);
1187  link_poll_interval = PHY_POLL_LINK_OFF;
1188  }
1189  dev->link_polling = 1;
1190  wmb();
1191  schedule_delayed_work(&dev->link_work, link_poll_interval);
1192  emac_print_link_status(dev);
1193  } else
1194  netif_carrier_on(dev->ndev);
1195 
1196  /* Required for Pause packet support in EMAC */
1197  dev_mc_add_global(ndev, default_mcast_addr);
1198 
1199  emac_configure(dev);
1200  mal_poll_add(dev->mal, &dev->commac);
1201  mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
1202  mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(ndev->mtu));
1203  mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
1204  emac_tx_enable(dev);
1205  emac_rx_enable(dev);
1206  emac_netif_start(dev);
1207 
1208  mutex_unlock(&dev->link_lock);
1209 
1210  return 0;
1211  oom:
1212  emac_clean_rx_ring(dev);
1213  free_irq(dev->emac_irq, dev);
1214 
1215  return -ENOMEM;
1216 }
1217 
1218 /* BHs disabled */
1219 #if 0
1220 static int emac_link_differs(struct emac_instance *dev)
1221 {
1222  u32 r = in_be32(&dev->emacp->mr1);
1223
1224  int duplex = r & EMAC_MR1_FDE ? DUPLEX_FULL : DUPLEX_HALF;
1225  int speed, pause, asym_pause;
1226 
1227  if (r & EMAC_MR1_MF_1000)
1228  speed = SPEED_1000;
1229  else if (r & EMAC_MR1_MF_100)
1230  speed = SPEED_100;
1231  else
1232  speed = SPEED_10;
1233 
1234  switch (r & (EMAC_MR1_EIFC | EMAC_MR1_APP)) {
1235  case (EMAC_MR1_EIFC | EMAC_MR1_APP):
1236  pause = 1;
1237  asym_pause = 0;
1238  break;
1239  case EMAC_MR1_APP:
1240  pause = 0;
1241  asym_pause = 1;
1242  break;
1243  default:
1244  pause = asym_pause = 0;
1245  }
1246  return speed != dev->phy.speed || duplex != dev->phy.duplex ||
1247  pause != dev->phy.pause || asym_pause != dev->phy.asym_pause;
1248 }
1249 #endif
1250 
1251 static void emac_link_timer(struct work_struct *work)
1252 {
1253  struct emac_instance *dev =
1254  container_of(to_delayed_work(work),
1255  struct emac_instance, link_work);
1256  int link_poll_interval;
1257 
1258  mutex_lock(&dev->link_lock);
1259  DBG2(dev, "link timer" NL);
1260 
1261  if (!dev->opened)
1262  goto bail;
1263 
1264  if (dev->phy.def->ops->poll_link(&dev->phy)) {
1265  if (!netif_carrier_ok(dev->ndev)) {
1266  emac_rx_clk_default(dev);
1267  /* Get new link parameters */
1268  dev->phy.def->ops->read_link(&dev->phy);
1269 
1270  netif_carrier_on(dev->ndev);
1271  emac_netif_stop(dev);
1272  emac_full_tx_reset(dev);
1273  emac_netif_start(dev);
1274  emac_print_link_status(dev);
1275  }
1276  link_poll_interval = PHY_POLL_LINK_ON;
1277  } else {
1278  if (netif_carrier_ok(dev->ndev)) {
1279  emac_rx_clk_tx(dev);
1280  netif_carrier_off(dev->ndev);
1281  netif_tx_disable(dev->ndev);
1282  emac_reinitialize(dev);
1283  emac_print_link_status(dev);
1284  }
1285  link_poll_interval = PHY_POLL_LINK_OFF;
1286  }
1287  schedule_delayed_work(&dev->link_work, link_poll_interval);
1288  bail:
1289  mutex_unlock(&dev->link_lock);
1290 }
1291 
1292 static void emac_force_link_update(struct emac_instance *dev)
1293 {
1294  netif_carrier_off(dev->ndev);
1295  smp_rmb();
1296  if (dev->link_polling) {
1297  cancel_delayed_work_sync(&dev->link_work);
1298  if (dev->link_polling)
1299  schedule_delayed_work(&dev->link_work, PHY_POLL_LINK_OFF);
1300  }
1301 }
1302 
1303 /* Process ctx, rtnl_lock semaphore */
1304 static int emac_close(struct net_device *ndev)
1305 {
1306  struct emac_instance *dev = netdev_priv(ndev);
1307 
1308  DBG(dev, "close" NL);
1309 
1310  if (dev->phy.address >= 0) {
1311  dev->link_polling = 0;
1312  cancel_delayed_work_sync(&dev->link_work);
1313  }
1314  mutex_lock(&dev->link_lock);
1315  emac_netif_stop(dev);
1316  dev->opened = 0;
1317  mutex_unlock(&dev->link_lock);
1318 
1319  emac_rx_disable(dev);
1320  emac_tx_disable(dev);
1321  mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);
1322  mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
1323  mal_poll_del(dev->mal, &dev->commac);
1324 
1325  emac_clean_tx_ring(dev);
1326  emac_clean_rx_ring(dev);
1327 
1328  free_irq(dev->emac_irq, dev);
1329 
1330  netif_carrier_off(ndev);
1331 
1332  return 0;
1333 }
1334 
1335 static inline u16 emac_tx_csum(struct emac_instance *dev,
1336  struct sk_buff *skb)
1337 {
1338  if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
1339  (skb->ip_summed == CHECKSUM_PARTIAL)) {
1340  ++dev->stats.tx_packets_csum;
1341  return EMAC_TX_CTRL_TAH_CSUM;
1342  }
1343  return 0;
1344 }
1345 
1346 static inline int emac_xmit_finish(struct emac_instance *dev, int len)
1347 {
1348  struct emac_regs __iomem *p = dev->emacp;
1349  struct net_device *ndev = dev->ndev;
1350 
1351  /* Send the packet out. If the if makes a significant perf
1352  * difference, then we can store the TMR0 value in "dev"
1353  * instead
1354  */
1355  if (emac_has_feature(dev, EMAC_FTR_EMAC4))
1356  out_be32(&p->tmr0, EMAC4_TMR0_XMIT);
1357  else
1358  out_be32(&p->tmr0, EMAC_TMR0_XMIT);
1359
1360  if (unlikely(++dev->tx_cnt == NUM_TX_BUFF)) {
1361  netif_stop_queue(ndev);
1362  DBG2(dev, "stopped TX queue" NL);
1363  }
1364 
1365  ndev->trans_start = jiffies;
1366  ++dev->stats.tx_packets;
1367  dev->stats.tx_bytes += len;
1368 
1369  return NETDEV_TX_OK;
1370 }
1371 
1372 /* Tx lock BH */
1373 static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1374 {
1375  struct emac_instance *dev = netdev_priv(ndev);
1376  unsigned int len = skb->len;
1377  int slot;
1378
1379  u16 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
1380  MAL_TX_CTRL_LAST | emac_tx_csum(dev, skb);
1381 
1382  slot = dev->tx_slot++;
1383  if (dev->tx_slot == NUM_TX_BUFF) {
1384  dev->tx_slot = 0;
1385  ctrl |= MAL_TX_CTRL_WRAP;
1386  }
1387 
1388  DBG2(dev, "xmit(%u) %d" NL, len, slot);
1389 
1390  dev->tx_skb[slot] = skb;
1391  dev->tx_desc[slot].data_ptr = dma_map_single(&dev->ofdev->dev,
1392  skb->data, len,
1393  DMA_TO_DEVICE);
1394  dev->tx_desc[slot].data_len = (u16) len;
1395  wmb();
1396  dev->tx_desc[slot].ctrl = ctrl;
1397 
1398  return emac_xmit_finish(dev, len);
1399 }
1400 
1401 static inline int emac_xmit_split(struct emac_instance *dev, int slot,
1402  u32 pd, int len, int last, u16 base_ctrl)
1403 {
1404  while (1) {
1405  u16 ctrl = base_ctrl;
1406  int chunk = min(len, MAL_MAX_TX_SIZE);
1407  len -= chunk;
1408 
1409  slot = (slot + 1) % NUM_TX_BUFF;
1410 
1411  if (last && !len)
1412  ctrl |= MAL_TX_CTRL_LAST;
1413  if (slot == NUM_TX_BUFF - 1)
1414  ctrl |= MAL_TX_CTRL_WRAP;
1415 
1416  dev->tx_skb[slot] = NULL;
1417  dev->tx_desc[slot].data_ptr = pd;
1418  dev->tx_desc[slot].data_len = (u16) chunk;
1419  dev->tx_desc[slot].ctrl = ctrl;
1420  ++dev->tx_cnt;
1421 
1422  if (!len)
1423  break;
1424 
1425  pd += chunk;
1426  }
1427  return slot;
1428 }
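/* Worked example (editorial): a single mapped region of length
 * 2 * MAL_MAX_TX_SIZE + 100 passed to emac_xmit_split() is emitted as
 * three descriptors of MAL_MAX_TX_SIZE, MAL_MAX_TX_SIZE and 100 bytes,
 * advancing pd by one chunk each pass; MAL_TX_CTRL_LAST is set only on
 * the final chunk and only when "last" is true.
 */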
1429 
1430 /* Tx lock BH disabled (SG version for TAH equipped EMACs) */
1431 static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
1432 {
1433  struct emac_instance *dev = netdev_priv(ndev);
1434  int nr_frags = skb_shinfo(skb)->nr_frags;
1435  int len = skb->len, chunk;
1436  int slot, i;
1437  u16 ctrl;
1438  u32 pd;
1439 
1440  /* This is common "fast" path */
1441  if (likely(!nr_frags && len <= MAL_MAX_TX_SIZE))
1442  return emac_start_xmit(skb, ndev);
1443 
1444  len -= skb->data_len;
1445 
1446  /* Note, this is only an *estimation*, we can still run out of empty
1447  * slots because of the additional fragmentation into
1448  * MAL_MAX_TX_SIZE-sized chunks
1449  */
1450  if (unlikely(dev->tx_cnt + nr_frags + mal_tx_chunks(len) > NUM_TX_BUFF))
1451  goto stop_queue;
1452
1453  ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
1454  emac_tx_csum(dev, skb);
1455  slot = dev->tx_slot;
1456 
1457  /* skb data */
1458  dev->tx_skb[slot] = NULL;
1459  chunk = min(len, MAL_MAX_TX_SIZE);
1460  dev->tx_desc[slot].data_ptr = pd =
1461  dma_map_single(&dev->ofdev->dev, skb->data, len, DMA_TO_DEVICE);
1462  dev->tx_desc[slot].data_len = (u16) chunk;
1463  len -= chunk;
1464  if (unlikely(len))
1465  slot = emac_xmit_split(dev, slot, pd + chunk, len, !nr_frags,
1466  ctrl);
1467  /* skb fragments */
1468  for (i = 0; i < nr_frags; ++i) {
1469  struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
1470  len = skb_frag_size(frag);
1471 
1472  if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF))
1473  goto undo_frame;
1474 
1475  pd = skb_frag_dma_map(&dev->ofdev->dev, frag, 0, len,
1476  DMA_TO_DEVICE);
1477 
1478  slot = emac_xmit_split(dev, slot, pd, len, i == nr_frags - 1,
1479  ctrl);
1480  }
1481 
1482  DBG2(dev, "xmit_sg(%u) %d - %d" NL, skb->len, dev->tx_slot, slot);
1483 
1484  /* Attach skb to the last slot so we don't release it too early */
1485  dev->tx_skb[slot] = skb;
1486 
1487  /* Send the packet out */
1488  if (dev->tx_slot == NUM_TX_BUFF - 1)
1489  ctrl |= MAL_TX_CTRL_WRAP;
1490  wmb();
1491  dev->tx_desc[dev->tx_slot].ctrl = ctrl;
1492  dev->tx_slot = (slot + 1) % NUM_TX_BUFF;
1493 
1494  return emac_xmit_finish(dev, skb->len);
1495 
1496  undo_frame:
1497  /* Well, too bad. Our previous estimation was overly optimistic.
1498  * Undo everything.
1499  */
1500  while (slot != dev->tx_slot) {
1501  dev->tx_desc[slot].ctrl = 0;
1502  --dev->tx_cnt;
1503  if (--slot < 0)
1504  slot = NUM_TX_BUFF - 1;
1505  }
1506  ++dev->estats.tx_undo;
1507 
1508  stop_queue:
1509  netif_stop_queue(ndev);
1510  DBG2(dev, "stopped TX queue" NL);
1511  return NETDEV_TX_BUSY;
1512 }
1513 
1514 /* Tx lock BHs */
1515 static void emac_parse_tx_error(struct emac_instance *dev, u16 ctrl)
1516 {
1517  struct emac_error_stats *st = &dev->estats;
1518 
1519  DBG(dev, "BD TX error %04x" NL, ctrl);
1520 
1521  ++st->tx_bd_errors;
1522  if (ctrl & EMAC_TX_ST_BFCS)
1523  ++st->tx_bd_bad_fcs;
1524  if (ctrl & EMAC_TX_ST_LCS)
1525  ++st->tx_bd_carrier_loss;
1526  if (ctrl & EMAC_TX_ST_ED)
1527  ++st->tx_bd_excessive_deferral;
1528  if (ctrl & EMAC_TX_ST_EC)
1529  ++st->tx_bd_excessive_collisions;
1530  if (ctrl & EMAC_TX_ST_LC)
1531  ++st->tx_bd_late_collision;
1532  if (ctrl & EMAC_TX_ST_MC)
1533  ++st->tx_bd_multple_collisions;
1534  if (ctrl & EMAC_TX_ST_SC)
1535  ++st->tx_bd_single_collision;
1536  if (ctrl & EMAC_TX_ST_UR)
1537  ++st->tx_bd_underrun;
1538  if (ctrl & EMAC_TX_ST_SQE)
1539  ++st->tx_bd_sqe;
1540 }
1541 
1542 static void emac_poll_tx(void *param)
1543 {
1544  struct emac_instance *dev = param;
1545  u32 bad_mask;
1546 
1547  DBG2(dev, "poll_tx, %d %d" NL, dev->tx_cnt, dev->ack_slot);
1548 
1549  if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
1550  bad_mask = EMAC_IS_BAD_TX_TAH;
1551  else
1552  bad_mask = EMAC_IS_BAD_TX;
1553 
1554  netif_tx_lock_bh(dev->ndev);
1555  if (dev->tx_cnt) {
1556  u16 ctrl;
1557  int slot = dev->ack_slot, n = 0;
1558  again:
1559  ctrl = dev->tx_desc[slot].ctrl;
1560  if (!(ctrl & MAL_TX_CTRL_READY)) {
1561  struct sk_buff *skb = dev->tx_skb[slot];
1562  ++n;
1563 
1564  if (skb) {
1565  dev_kfree_skb(skb);
1566  dev->tx_skb[slot] = NULL;
1567  }
1568  slot = (slot + 1) % NUM_TX_BUFF;
1569 
1570  if (unlikely(ctrl & bad_mask))
1571  emac_parse_tx_error(dev, ctrl);
1572 
1573  if (--dev->tx_cnt)
1574  goto again;
1575  }
1576  if (n) {
1577  dev->ack_slot = slot;
1578  if (netif_queue_stopped(dev->ndev) &&
1579  dev->tx_cnt < EMAC_TX_WAKEUP_THRESH)
1580  netif_wake_queue(dev->ndev);
1581 
1582  DBG2(dev, "tx %d pkts" NL, n);
1583  }
1584  }
1585  netif_tx_unlock_bh(dev->ndev);
1586 }
1587 
1588 static inline void emac_recycle_rx_skb(struct emac_instance *dev, int slot,
1589  int len)
1590 {
1591  struct sk_buff *skb = dev->rx_skb[slot];
1592 
1593  DBG2(dev, "recycle %d %d" NL, slot, len);
1594 
1595  if (len)
1596  dma_map_single(&dev->ofdev->dev, skb->data - 2,
1597  EMAC_DMA_ALIGN(len + 2), DMA_FROM_DEVICE);
1598 
1599  dev->rx_desc[slot].data_len = 0;
1600  wmb();
1601  dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
1602  (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
1603 }
1604 
1605 static void emac_parse_rx_error(struct emac_instance *dev, u16 ctrl)
1606 {
1607  struct emac_error_stats *st = &dev->estats;
1608 
1609  DBG(dev, "BD RX error %04x" NL, ctrl);
1610 
1611  ++st->rx_bd_errors;
1612  if (ctrl & EMAC_RX_ST_OE)
1613  ++st->rx_bd_overrun;
1614  if (ctrl & EMAC_RX_ST_BP)
1615  ++st->rx_bd_bad_packet;
1616  if (ctrl & EMAC_RX_ST_RP)
1617  ++st->rx_bd_runt_packet;
1618  if (ctrl & EMAC_RX_ST_SE)
1619  ++st->rx_bd_short_event;
1620  if (ctrl & EMAC_RX_ST_AE)
1621  ++st->rx_bd_alignment_error;
1622  if (ctrl & EMAC_RX_ST_BFCS)
1623  ++st->rx_bd_bad_fcs;
1624  if (ctrl & EMAC_RX_ST_PTL)
1625  ++st->rx_bd_packet_too_long;
1626  if (ctrl & EMAC_RX_ST_ORE)
1627  ++st->rx_bd_out_of_range;
1628  if (ctrl & EMAC_RX_ST_IRE)
1629  ++st->rx_bd_in_range;
1630 }
1631 
1632 static inline void emac_rx_csum(struct emac_instance *dev,
1633  struct sk_buff *skb, u16 ctrl)
1634 {
1635 #ifdef CONFIG_IBM_EMAC_TAH
1636  if (!ctrl && dev->tah_dev) {
1637  skb->ip_summed = CHECKSUM_UNNECESSARY;
1638  ++dev->stats.rx_packets_csum;
1639  }
1640 #endif
1641 }
1642 
1643 static inline int emac_rx_sg_append(struct emac_instance *dev, int slot)
1644 {
1645  if (likely(dev->rx_sg_skb != NULL)) {
1646  int len = dev->rx_desc[slot].data_len;
1647  int tot_len = dev->rx_sg_skb->len + len;
1648 
1649  if (unlikely(tot_len + 2 > dev->rx_skb_size)) {
1650  ++dev->estats.rx_dropped_mtu;
1651  dev_kfree_skb(dev->rx_sg_skb);
1652  dev->rx_sg_skb = NULL;
1653  } else {
1654  cacheable_memcpy(skb_tail_pointer(dev->rx_sg_skb),
1655  dev->rx_skb[slot]->data, len);
1656  skb_put(dev->rx_sg_skb, len);
1657  emac_recycle_rx_skb(dev, slot, len);
1658  return 0;
1659  }
1660  }
1661  emac_recycle_rx_skb(dev, slot, 0);
1662  return -1;
1663 }
1664 
1665 /* NAPI poll context */
1666 static int emac_poll_rx(void *param, int budget)
1667 {
1668  struct emac_instance *dev = param;
1669  int slot = dev->rx_slot, received = 0;
1670 
1671  DBG2(dev, "poll_rx(%d)" NL, budget);
1672 
1673  again:
1674  while (budget > 0) {
1675  int len;
1676  struct sk_buff *skb;
1677  u16 ctrl = dev->rx_desc[slot].ctrl;
1678 
1679  if (ctrl & MAL_RX_CTRL_EMPTY)
1680  break;
1681 
1682  skb = dev->rx_skb[slot];
1683  mb();
1684  len = dev->rx_desc[slot].data_len;
1685 
1686  if (unlikely(!MAL_IS_SINGLE_RX(ctrl)))
1687  goto sg;
1688 
1689  ctrl &= EMAC_BAD_RX_MASK;
1690  if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
1691  emac_parse_rx_error(dev, ctrl);
1692  ++dev->estats.rx_dropped_error;
1693  emac_recycle_rx_skb(dev, slot, 0);
1694  len = 0;
1695  goto next;
1696  }
1697 
1698  if (len < ETH_HLEN) {
1699  ++dev->estats.rx_dropped_stack;
1700  emac_recycle_rx_skb(dev, slot, len);
1701  goto next;
1702  }
1703 
1704  if (len && len < EMAC_RX_COPY_THRESH) {
1705  struct sk_buff *copy_skb =
1706  alloc_skb(len + EMAC_RX_SKB_HEADROOM + 2, GFP_ATOMIC);
1707  if (unlikely(!copy_skb))
1708  goto oom;
1709 
1710  skb_reserve(copy_skb, EMAC_RX_SKB_HEADROOM + 2);
1711  cacheable_memcpy(copy_skb->data - 2, skb->data - 2,
1712  len + 2);
1713  emac_recycle_rx_skb(dev, slot, len);
1714  skb = copy_skb;
1715  } else if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC)))
1716  goto oom;
1717 
1718  skb_put(skb, len);
1719  push_packet:
1720  skb->protocol = eth_type_trans(skb, dev->ndev);
1721  emac_rx_csum(dev, skb, ctrl);
1722 
1723  if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
1724  ++dev->estats.rx_dropped_stack;
1725  next:
1726  ++dev->stats.rx_packets;
1727  skip:
1728  dev->stats.rx_bytes += len;
1729  slot = (slot + 1) % NUM_RX_BUFF;
1730  --budget;
1731  ++received;
1732  continue;
1733  sg:
1734  if (ctrl & MAL_RX_CTRL_FIRST) {
1735  BUG_ON(dev->rx_sg_skb);
1736  if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC))) {
1737  DBG(dev, "rx OOM %d" NL, slot);
1738  ++dev->estats.rx_dropped_oom;
1739  emac_recycle_rx_skb(dev, slot, 0);
1740  } else {
1741  dev->rx_sg_skb = skb;
1742  skb_put(skb, len);
1743  }
1744  } else if (!emac_rx_sg_append(dev, slot) &&
1745  (ctrl & MAL_RX_CTRL_LAST)) {
1746 
1747  skb = dev->rx_sg_skb;
1748  dev->rx_sg_skb = NULL;
1749 
1750  ctrl &= EMAC_BAD_RX_MASK;
1751  if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
1752  emac_parse_rx_error(dev, ctrl);
1753  ++dev->estats.rx_dropped_error;
1754  dev_kfree_skb(skb);
1755  len = 0;
1756  } else
1757  goto push_packet;
1758  }
1759  goto skip;
1760  oom:
1761  DBG(dev, "rx OOM %d" NL, slot);
1762  /* Drop the packet and recycle skb */
1763  ++dev->estats.rx_dropped_oom;
1764  emac_recycle_rx_skb(dev, slot, 0);
1765  goto next;
1766  }
1767 
1768  if (received) {
1769  DBG2(dev, "rx %d BDs" NL, received);
1770  dev->rx_slot = slot;
1771  }
1772 
1773  if (unlikely(budget && test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags))) {
1774  mb();
1775  if (!(dev->rx_desc[slot].ctrl & MAL_RX_CTRL_EMPTY)) {
1776  DBG2(dev, "rx restart" NL);
1777  received = 0;
1778  goto again;
1779  }
1780 
1781  if (dev->rx_sg_skb) {
1782  DBG2(dev, "dropping partial rx packet" NL);
1783  ++dev->estats.rx_dropped_error;
1784  dev_kfree_skb(dev->rx_sg_skb);
1785  dev->rx_sg_skb = NULL;
1786  }
1787 
1788  clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
1789  mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
1790  emac_rx_enable(dev);
1791  dev->rx_slot = 0;
1792  }
1793  return received;
1794 }
1795 
1796 /* NAPI poll context */
1797 static int emac_peek_rx(void *param)
1798 {
1799  struct emac_instance *dev = param;
1800 
1801  return !(dev->rx_desc[dev->rx_slot].ctrl & MAL_RX_CTRL_EMPTY);
1802 }
1803 
1804 /* NAPI poll context */
1805 static int emac_peek_rx_sg(void *param)
1806 {
1807  struct emac_instance *dev = param;
1808 
1809  int slot = dev->rx_slot;
1810  while (1) {
1811  u16 ctrl = dev->rx_desc[slot].ctrl;
1812  if (ctrl & MAL_RX_CTRL_EMPTY)
1813  return 0;
1814  else if (ctrl & MAL_RX_CTRL_LAST)
1815  return 1;
1816 
1817  slot = (slot + 1) % NUM_RX_BUFF;
1818 
1819  /* I'm just being paranoid here :) */
1820  if (unlikely(slot == dev->rx_slot))
1821  return 0;
1822  }
1823 }
1824 
1825 /* Hard IRQ */
1826 static void emac_rxde(void *param)
1827 {
1828  struct emac_instance *dev = param;
1829 
1830  ++dev->estats.rx_stopped;
1831  emac_rx_disable_async(dev);
1832 }
1833 
1834 /* Hard IRQ */
1835 static irqreturn_t emac_irq(int irq, void *dev_instance)
1836 {
1837  struct emac_instance *dev = dev_instance;
1838  struct emac_regs __iomem *p = dev->emacp;
1839  struct emac_error_stats *st = &dev->estats;
1840  u32 isr;
1841 
1842  spin_lock(&dev->lock);
1843 
1844  isr = in_be32(&p->isr);
1845  out_be32(&p->isr, isr);
1846 
1847  DBG(dev, "isr = %08x" NL, isr);
1848 
1849  if (isr & EMAC4_ISR_TXPE)
1850  ++st->tx_parity;
1851  if (isr & EMAC4_ISR_RXPE)
1852  ++st->rx_parity;
1853  if (isr & EMAC4_ISR_TXUE)
1854  ++st->tx_underrun;
1855  if (isr & EMAC4_ISR_RXOE)
1856  ++st->rx_fifo_overrun;
1857  if (isr & EMAC_ISR_OVR)
1858  ++st->rx_overrun;
1859  if (isr & EMAC_ISR_BP)
1860  ++st->rx_bad_packet;
1861  if (isr & EMAC_ISR_RP)
1862  ++st->rx_runt_packet;
1863  if (isr & EMAC_ISR_SE)
1864  ++st->rx_short_event;
1865  if (isr & EMAC_ISR_ALE)
1866  ++st->rx_alignment_error;
1867  if (isr & EMAC_ISR_BFCS)
1868  ++st->rx_bad_fcs;
1869  if (isr & EMAC_ISR_PTLE)
1870  ++st->rx_packet_too_long;
1871  if (isr & EMAC_ISR_ORE)
1872  ++st->rx_out_of_range;
1873  if (isr & EMAC_ISR_IRE)
1874  ++st->rx_in_range;
1875  if (isr & EMAC_ISR_SQE)
1876  ++st->tx_sqe;
1877  if (isr & EMAC_ISR_TE)
1878  ++st->tx_errors;
1879 
1880  spin_unlock(&dev->lock);
1881 
1882  return IRQ_HANDLED;
1883 }
1884 
1885 static struct net_device_stats *emac_stats(struct net_device *ndev)
1886 {
1887  struct emac_instance *dev = netdev_priv(ndev);
1888  struct emac_stats *st = &dev->stats;
1889  struct emac_error_stats *est = &dev->estats;
1890  struct net_device_stats *nst = &dev->nstats;
1891  unsigned long flags;
1892 
1893  DBG2(dev, "stats" NL);
1894 
1895  /* Compute "legacy" statistics */
1896  spin_lock_irqsave(&dev->lock, flags);
1897  nst->rx_packets = (unsigned long)st->rx_packets;
1898  nst->rx_bytes = (unsigned long)st->rx_bytes;
1899  nst->tx_packets = (unsigned long)st->tx_packets;
1900  nst->tx_bytes = (unsigned long)st->tx_bytes;
1901  nst->rx_dropped = (unsigned long)(est->rx_dropped_oom +
1902  est->rx_dropped_error +
1903  est->rx_dropped_resize +
1904  est->rx_dropped_mtu);
1905  nst->tx_dropped = (unsigned long)est->tx_dropped;
1906 
1907  nst->rx_errors = (unsigned long)est->rx_bd_errors;
1908  nst->rx_fifo_errors = (unsigned long)(est->rx_bd_overrun +
1909  est->rx_fifo_overrun +
1910  est->rx_overrun);
1911  nst->rx_frame_errors = (unsigned long)(est->rx_bd_alignment_error +
1912  est->rx_alignment_error);
1913  nst->rx_crc_errors = (unsigned long)(est->rx_bd_bad_fcs +
1914  est->rx_bad_fcs);
1915  nst->rx_length_errors = (unsigned long)(est->rx_bd_runt_packet +
1916  est->rx_bd_short_event +
1917  est->rx_bd_packet_too_long +
1918  est->rx_bd_out_of_range +
1919  est->rx_bd_in_range +
1920  est->rx_runt_packet +
1921  est->rx_short_event +
1922  est->rx_packet_too_long +
1923  est->rx_out_of_range +
1924  est->rx_in_range);
1925 
1926  nst->tx_errors = (unsigned long)(est->tx_bd_errors + est->tx_errors);
1927  nst->tx_fifo_errors = (unsigned long)(est->tx_bd_underrun +
1928  est->tx_underrun);
1929  nst->tx_carrier_errors = (unsigned long)est->tx_bd_carrier_loss;
1930  nst->collisions = (unsigned long)(est->tx_bd_excessive_deferral +
1931  est->tx_bd_excessive_collisions +
1932  est->tx_bd_late_collision +
1933  est->tx_bd_multple_collisions);
1934  spin_unlock_irqrestore(&dev->lock, flags);
1935  return nst;
1936 }
1937 
1938 static struct mal_commac_ops emac_commac_ops = {
1939  .poll_tx = &emac_poll_tx,
1940  .poll_rx = &emac_poll_rx,
1941  .peek_rx = &emac_peek_rx,
1942  .rxde = &emac_rxde,
1943 };
1944 
1945 static struct mal_commac_ops emac_commac_sg_ops = {
1946  .poll_tx = &emac_poll_tx,
1947  .poll_rx = &emac_poll_rx,
1948  .peek_rx = &emac_peek_rx_sg,
1949  .rxde = &emac_rxde,
1950 };
1951 
1952 /* Ethtool support */
1953 static int emac_ethtool_get_settings(struct net_device *ndev,
1954  struct ethtool_cmd *cmd)
1955 {
1956  struct emac_instance *dev = netdev_priv(ndev);
1957 
1958  cmd->supported = dev->phy.features;
1959  cmd->port = PORT_MII;
1960  cmd->phy_address = dev->phy.address;
1961  cmd->transceiver =
1962  dev->phy.address >= 0 ? XCVR_EXTERNAL : XCVR_INTERNAL;
1963 
1964  mutex_lock(&dev->link_lock);
1965  cmd->advertising = dev->phy.advertising;
1966  cmd->autoneg = dev->phy.autoneg;
1967  cmd->speed = dev->phy.speed;
1968  cmd->duplex = dev->phy.duplex;
1969  mutex_unlock(&dev->link_lock);
1970 
1971  return 0;
1972 }
1973 
1974 static int emac_ethtool_set_settings(struct net_device *ndev,
1975  struct ethtool_cmd *cmd)
1976 {
1977  struct emac_instance *dev = netdev_priv(ndev);
1978  u32 f = dev->phy.features;
1979 
1980  DBG(dev, "set_settings(%d, %d, %d, 0x%08x)" NL,
1981  cmd->autoneg, cmd->speed, cmd->duplex, cmd->advertising);
1982 
1983  /* Basic sanity checks */
1984  if (dev->phy.address < 0)
1985  return -EOPNOTSUPP;
1986  if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
1987  return -EINVAL;
1988  if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
1989  return -EINVAL;
1990  if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL)
1991  return -EINVAL;
1992 
1993  if (cmd->autoneg == AUTONEG_DISABLE) {
1994  switch (cmd->speed) {
1995  case SPEED_10:
1996  if (cmd->duplex == DUPLEX_HALF &&
1997  !(f & SUPPORTED_10baseT_Half))
1998  return -EINVAL;
1999  if (cmd->duplex == DUPLEX_FULL &&
2000  !(f & SUPPORTED_10baseT_Full))
2001  return -EINVAL;
2002  break;
2003  case SPEED_100:
2004  if (cmd->duplex == DUPLEX_HALF &&
2005  !(f & SUPPORTED_100baseT_Half))
2006  return -EINVAL;
2007  if (cmd->duplex == DUPLEX_FULL &&
2008  !(f & SUPPORTED_100baseT_Full))
2009  return -EINVAL;
2010  break;
2011  case SPEED_1000:
2012  if (cmd->duplex == DUPLEX_HALF &&
2013  !(f & SUPPORTED_1000baseT_Half))
2014  return -EINVAL;
2015  if (cmd->duplex == DUPLEX_FULL &&
2016  !(f & SUPPORTED_1000baseT_Full))
2017  return -EINVAL;
2018  break;
2019  default:
2020  return -EINVAL;
2021  }
2022 
2023  mutex_lock(&dev->link_lock);
2024  dev->phy.def->ops->setup_forced(&dev->phy, cmd->speed,
2025  cmd->duplex);
2026  mutex_unlock(&dev->link_lock);
2027 
2028  } else {
2029  if (!(f & SUPPORTED_Autoneg))
2030  return -EINVAL;
2031 
2032  mutex_lock(&dev->link_lock);
2033  dev->phy.def->ops->setup_aneg(&dev->phy,
2034  (cmd->advertising & f) |
2035  (dev->phy.advertising &
2036  (ADVERTISED_Pause |
2037  ADVERTISED_Asym_Pause)));
2038  mutex_unlock(&dev->link_lock);
2039  }
2040  emac_force_link_update(dev);
2041 
2042  return 0;
2043 }
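/*
 * Editor's note: the two handlers above are reached from userspace
 * through the SIOCETHTOOL ioctl. A minimal sketch of a caller that
 * exercises emac_ethtool_get_settings() via the legacy ETHTOOL_GSET
 * command follows; it is illustrative only, not part of the driver,
 * and the interface name "eth0" is an assumption.
 */
#if 0 /* illustrative userspace sketch */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&ecmd;

	if (fd < 0 || ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		return 1;

	/* Fields filled in by the driver's get_settings hook */
	printf("speed=%u duplex=%u autoneg=%u\n",
	       ethtool_cmd_speed(&ecmd), ecmd.duplex, ecmd.autoneg);
	return 0;
}
#endif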
2044 
2045 static void emac_ethtool_get_ringparam(struct net_device *ndev,
2046  struct ethtool_ringparam *rp)
2047 {
2048  rp->rx_max_pending = rp->rx_pending = NUM_RX_BUFF;
2049  rp->tx_max_pending = rp->tx_pending = NUM_TX_BUFF;
2050 }
2051 
2052 static void emac_ethtool_get_pauseparam(struct net_device *ndev,
2053  struct ethtool_pauseparam *pp)
2054 {
2055  struct emac_instance *dev = netdev_priv(ndev);
2056 
2057  mutex_lock(&dev->link_lock);
2058  if ((dev->phy.features & SUPPORTED_Autoneg) &&
2059  (dev->phy.advertising & (ADVERTISED_Pause | ADVERTISED_Asym_Pause)))
2060  pp->autoneg = 1;
2061 
2062  if (dev->phy.duplex == DUPLEX_FULL) {
2063  if (dev->phy.pause)
2064  pp->rx_pause = pp->tx_pause = 1;
2065  else if (dev->phy.asym_pause)
2066  pp->tx_pause = 1;
2067  }
2068  mutex_unlock(&dev->link_lock);
2069 }
2070 
2071 static int emac_get_regs_len(struct emac_instance *dev)
2072 {
2073  if (emac_has_feature(dev, EMAC_FTR_EMAC4))
2074  return sizeof(struct emac_ethtool_regs_subhdr) +
2075  EMAC4_ETHTOOL_REGS_SIZE(dev);
2076  else
2077  return sizeof(struct emac_ethtool_regs_subhdr) +
2078  EMAC_ETHTOOL_REGS_SIZE(dev);
2079 }
2080 
2081 static int emac_ethtool_get_regs_len(struct net_device *ndev)
2082 {
2083  struct emac_instance *dev = netdev_priv(ndev);
2084  int size;
2085 
2086  size = sizeof(struct emac_ethtool_regs_hdr) +
2087  emac_get_regs_len(dev) + mal_get_regs_len(dev->mal);
2088  if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2089  size += zmii_get_regs_len(dev->zmii_dev);
2090  if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2091  size += rgmii_get_regs_len(dev->rgmii_dev);
2092  if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2093  size += tah_get_regs_len(dev->tah_dev);
2094 
2095  return size;
2096 }
2097 
2098 static void *emac_dump_regs(struct emac_instance *dev, void *buf)
2099 {
2100  struct emac_ethtool_regs_subhdr *hdr = buf;
2101 
2102  hdr->index = dev->cell_index;
2103  if (emac_has_feature(dev, EMAC_FTR_EMAC4)) {
2104  hdr->version = EMAC4_ETHTOOL_REGS_VER;
2105  memcpy_fromio(hdr + 1, dev->emacp, EMAC4_ETHTOOL_REGS_SIZE(dev));
2106  return (void *)(hdr + 1) + EMAC4_ETHTOOL_REGS_SIZE(dev);
2107  } else {
2108  hdr->version = EMAC_ETHTOOL_REGS_VER;
2109  memcpy_fromio(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE(dev));
2110  return (void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE(dev);
2111  }
2112 }
2113 
2114 static void emac_ethtool_get_regs(struct net_device *ndev,
2115  struct ethtool_regs *regs, void *buf)
2116 {
2117  struct emac_instance *dev = netdev_priv(ndev);
2118  struct emac_ethtool_regs_hdr *hdr = buf;
2119 
2120  hdr->components = 0;
2121  buf = hdr + 1;
2122 
2123  buf = mal_dump_regs(dev->mal, buf);
2124  buf = emac_dump_regs(dev, buf);
2125  if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) {
2126  hdr->components |= EMAC_ETHTOOL_REGS_ZMII;
2127  buf = zmii_dump_regs(dev->zmii_dev, buf);
2128  }
2129  if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) {
2130  hdr->components |= EMAC_ETHTOOL_REGS_RGMII;
2131  buf = rgmii_dump_regs(dev->rgmii_dev, buf);
2132  }
2133  if (emac_has_feature(dev, EMAC_FTR_HAS_TAH)) {
2134  hdr->components |= EMAC_ETHTOOL_REGS_TAH;
2135  buf = tah_dump_regs(dev->tah_dev, buf);
2136  }
2137 }
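/*
 * Editor's note: the register dump handed back to ETHTOOL_GREGS (e.g.
 * "ethtool -d") is therefore a packed blob: a struct
 * emac_ethtool_regs_hdr whose "components" field flags the optional
 * sections, followed by the MAL registers, then the EMAC registers
 * behind a subheader carrying the layout version and cell index, and
 * finally the ZMII, RGMII and TAH sections when those features exist.
 */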
2138 
2139 static int emac_ethtool_nway_reset(struct net_device *ndev)
2140 {
2141  struct emac_instance *dev = netdev_priv(ndev);
2142  int res = 0;
2143 
2144  DBG(dev, "nway_reset" NL);
2145 
2146  if (dev->phy.address < 0)
2147  return -EOPNOTSUPP;
2148 
2149  mutex_lock(&dev->link_lock);
2150  if (!dev->phy.autoneg) {
2151  res = -EINVAL;
2152  goto out;
2153  }
2154 
2155  dev->phy.def->ops->setup_aneg(&dev->phy, dev->phy.advertising);
2156  out:
2157  mutex_unlock(&dev->link_lock);
2158  emac_force_link_update(dev);
2159  return res;
2160 }
2161 
2162 static int emac_ethtool_get_sset_count(struct net_device *ndev, int stringset)
2163 {
2164  if (stringset == ETH_SS_STATS)
2165  return EMAC_ETHTOOL_STATS_COUNT;
2166  else
2167  return -EINVAL;
2168 }
2169 
2170 static void emac_ethtool_get_strings(struct net_device *ndev, u32 stringset,
2171  u8 * buf)
2172 {
2173  if (stringset == ETH_SS_STATS)
2174  memcpy(buf, &emac_stats_keys, sizeof(emac_stats_keys));
2175 }
2176 
2177 static void emac_ethtool_get_ethtool_stats(struct net_device *ndev,
2178  struct ethtool_stats *estats,
2179  u64 * tmp_stats)
2180 {
2181  struct emac_instance *dev = netdev_priv(ndev);
2182 
2183  memcpy(tmp_stats, &dev->stats, sizeof(dev->stats));
2184  tmp_stats += sizeof(dev->stats) / sizeof(u64);
2185  memcpy(tmp_stats, &dev->estats, sizeof(dev->estats));
2186 }
2187 
2188 static void emac_ethtool_get_drvinfo(struct net_device *ndev,
2189  struct ethtool_drvinfo *info)
2190 {
2191  struct emac_instance *dev = netdev_priv(ndev);
2192 
2193  strcpy(info->driver, "ibm_emac");
2194  strcpy(info->version, DRV_VERSION);
2195  info->fw_version[0] = '\0';
2196  sprintf(info->bus_info, "PPC 4xx EMAC-%d %s",
2197  dev->cell_index, dev->ofdev->dev.of_node->full_name);
2198  info->regdump_len = emac_ethtool_get_regs_len(ndev);
2199 }
2200 
2201 static const struct ethtool_ops emac_ethtool_ops = {
2202  .get_settings = emac_ethtool_get_settings,
2203  .set_settings = emac_ethtool_set_settings,
2204  .get_drvinfo = emac_ethtool_get_drvinfo,
2205 
2206  .get_regs_len = emac_ethtool_get_regs_len,
2207  .get_regs = emac_ethtool_get_regs,
2208 
2209  .nway_reset = emac_ethtool_nway_reset,
2210 
2211  .get_ringparam = emac_ethtool_get_ringparam,
2212  .get_pauseparam = emac_ethtool_get_pauseparam,
2213 
2214  .get_strings = emac_ethtool_get_strings,
2215  .get_sset_count = emac_ethtool_get_sset_count,
2216  .get_ethtool_stats = emac_ethtool_get_ethtool_stats,
2217 
2218  .get_link = ethtool_op_get_link,
2219 };
2220 
2221 static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
2222 {
2223  struct emac_instance *dev = netdev_priv(ndev);
2224  struct mii_ioctl_data *data = if_mii(rq);
2225 
2226  DBG(dev, "ioctl %08x" NL, cmd);
2227 
2228  if (dev->phy.address < 0)
2229  return -EOPNOTSUPP;
2230 
2231  switch (cmd) {
2232  case SIOCGMIIPHY:
2233  data->phy_id = dev->phy.address;
2234  /* Fall through */
2235  case SIOCGMIIREG:
2236  data->val_out = emac_mdio_read(ndev, dev->phy.address,
2237  data->reg_num);
2238  return 0;
2239 
2240  case SIOCSMIIREG:
2241  emac_mdio_write(ndev, dev->phy.address, data->reg_num,
2242  data->val_in);
2243  return 0;
2244  default:
2245  return -EOPNOTSUPP;
2246  }
2247 }
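/*
 * Editor's note: a matching userspace sketch (not part of the driver)
 * that reaches emac_ioctl() via the legacy MII ioctls: SIOCGMIIPHY
 * fetches the PHY address, then SIOCGMIIREG reads a register (MII_BMSR
 * here). The interface name "eth0" is an assumption.
 */
#if 0 /* illustrative userspace sketch */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/mii.h>
#include <linux/sockios.h>

int main(void)
{
	struct ifreq ifr;
	/* The kernel overlays struct mii_ioctl_data on ifr_ifru */
	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_ifru;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);

	if (fd < 0 || ioctl(fd, SIOCGMIIPHY, &ifr) < 0)
		return 1;

	mii->reg_num = MII_BMSR;	/* basic mode status register */
	if (ioctl(fd, SIOCGMIIREG, &ifr) < 0)
		return 1;

	printf("PHY %u BMSR = 0x%04x\n", mii->phy_id, mii->val_out);
	return 0;
}
#endif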
2248 
2249 struct emac_depentry {
2250  u32 phandle;
2251  struct device_node *node;
2252  struct platform_device *ofdev;
2253  void *drvdata;
2254 };
2255 
2256 #define EMAC_DEP_MAL_IDX 0
2257 #define EMAC_DEP_ZMII_IDX 1
2258 #define EMAC_DEP_RGMII_IDX 2
2259 #define EMAC_DEP_TAH_IDX 3
2260 #define EMAC_DEP_MDIO_IDX 4
2261 #define EMAC_DEP_PREV_IDX 5
2262 #define EMAC_DEP_COUNT 6
2263 
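/*
 * Editor's note: an EMAC can probe before the devices it depends on
 * (MAL, ZMII, RGMII, TAH, MDIO, and the previous EMAC in the boot
 * list) have bound to their drivers. Each dependency is tracked by
 * phandle in a struct emac_depentry; emac_wait_deps() registers a
 * platform-bus notifier so that every successful driver bind re-runs
 * emac_check_deps() until all entries resolve or the wait times out.
 */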
2264 static int __devinit emac_check_deps(struct emac_instance *dev,
2265  struct emac_depentry *deps)
2266 {
2267  int i, there = 0;
2268  struct device_node *np;
2269 
2270  for (i = 0; i < EMAC_DEP_COUNT; i++) {
2271  /* no dependency on that item, all right */
2272  if (deps[i].phandle == 0) {
2273  there++;
2274  continue;
2275  }
2276  /* special case for blist as the dependency might go away */
2277  if (i == EMAC_DEP_PREV_IDX) {
2278  np = *(dev->blist - 1);
2279  if (np == NULL) {
2280  deps[i].phandle = 0;
2281  there++;
2282  continue;
2283  }
2284  if (deps[i].node == NULL)
2285  deps[i].node = of_node_get(np);
2286  }
2287  if (deps[i].node == NULL)
2288  deps[i].node = of_find_node_by_phandle(deps[i].phandle);
2289  if (deps[i].node == NULL)
2290  continue;
2291  if (deps[i].ofdev == NULL)
2292  deps[i].ofdev = of_find_device_by_node(deps[i].node);
2293  if (deps[i].ofdev == NULL)
2294  continue;
2295  if (deps[i].drvdata == NULL)
2296  deps[i].drvdata = dev_get_drvdata(&deps[i].ofdev->dev);
2297  if (deps[i].drvdata != NULL)
2298  there++;
2299  }
2300  return there == EMAC_DEP_COUNT;
2301 }
2302 
2303 static void emac_put_deps(struct emac_instance *dev)
2304 {
2305  if (dev->mal_dev)
2306  of_dev_put(dev->mal_dev);
2307  if (dev->zmii_dev)
2308  of_dev_put(dev->zmii_dev);
2309  if (dev->rgmii_dev)
2310  of_dev_put(dev->rgmii_dev);
2311  if (dev->mdio_dev)
2312  of_dev_put(dev->mdio_dev);
2313  if (dev->tah_dev)
2314  of_dev_put(dev->tah_dev);
2315 }
2316 
2317 static int __devinit emac_of_bus_notify(struct notifier_block *nb,
2318  unsigned long action, void *data)
2319 {
2320  /* We are only interested in device addition */
2321  if (action == BUS_NOTIFY_BOUND_DRIVER)
2322  wake_up_all(&emac_probe_wait);
2323  return 0;
2324 }
2325 
2326 static struct notifier_block emac_of_bus_notifier __devinitdata = {
2327  .notifier_call = emac_of_bus_notify
2328 };
2329 
2330 static int __devinit emac_wait_deps(struct emac_instance *dev)
2331 {
2332  struct emac_depentry deps[EMAC_DEP_COUNT];
2333  int i, err;
2334 
2335  memset(&deps, 0, sizeof(deps));
2336 
2337  deps[EMAC_DEP_MAL_IDX].phandle = dev->mal_ph;
2338  deps[EMAC_DEP_ZMII_IDX].phandle = dev->zmii_ph;
2339  deps[EMAC_DEP_RGMII_IDX].phandle = dev->rgmii_ph;
2340  if (dev->tah_ph)
2341  deps[EMAC_DEP_TAH_IDX].phandle = dev->tah_ph;
2342  if (dev->mdio_ph)
2343  deps[EMAC_DEP_MDIO_IDX].phandle = dev->mdio_ph;
2344  if (dev->blist && dev->blist > emac_boot_list)
2345  deps[EMAC_DEP_PREV_IDX].phandle = 0xffffffffu;
2346  bus_register_notifier(&platform_bus_type, &emac_of_bus_notifier);
2347  wait_event_timeout(emac_probe_wait,
2348  emac_check_deps(dev, deps),
2349  EMAC_PROBE_DEP_TIMEOUT);
2350  bus_unregister_notifier(&platform_bus_type, &emac_of_bus_notifier);
2351  err = emac_check_deps(dev, deps) ? 0 : -ENODEV;
2352  for (i = 0; i < EMAC_DEP_COUNT; i++) {
2353  if (deps[i].node)
2354  of_node_put(deps[i].node);
2355  if (err && deps[i].ofdev)
2356  of_dev_put(deps[i].ofdev);
2357  }
2358  if (err == 0) {
2359  dev->mal_dev = deps[EMAC_DEP_MAL_IDX].ofdev;
2360  dev->zmii_dev = deps[EMAC_DEP_ZMII_IDX].ofdev;
2361  dev->rgmii_dev = deps[EMAC_DEP_RGMII_IDX].ofdev;
2362  dev->tah_dev = deps[EMAC_DEP_TAH_IDX].ofdev;
2363  dev->mdio_dev = deps[EMAC_DEP_MDIO_IDX].ofdev;
2364  }
2365  if (deps[EMAC_DEP_PREV_IDX].ofdev)
2366  of_dev_put(deps[EMAC_DEP_PREV_IDX].ofdev);
2367  return err;
2368 }
2369 
2370 static int __devinit emac_read_uint_prop(struct device_node *np, const char *name,
2371  u32 *val, int fatal)
2372 {
2373  int len;
2374  const u32 *prop = of_get_property(np, name, &len);
2375  if (prop == NULL || len < sizeof(u32)) {
2376  if (fatal)
2377  printk(KERN_ERR "%s: missing %s property\n",
2378  np->full_name, name);
2379  return -ENODEV;
2380  }
2381  *val = *prop;
2382  return 0;
2383 }
2384 
2385 static int __devinit emac_init_phy(struct emac_instance *dev)
2386 {
2387  struct device_node *np = dev->ofdev->dev.of_node;
2388  struct net_device *ndev = dev->ndev;
2389  u32 phy_map, adv;
2390  int i;
2391 
2392  dev->phy.dev = ndev;
2393  dev->phy.mode = dev->phy_mode;
2394 
2395  /* PHY-less configuration.
2396  * XXX I probably should move these settings to the dev tree
2397  */
2398  if (dev->phy_address == 0xffffffff && dev->phy_map == 0xffffffff) {
2399  emac_reset(dev);
2400 
2401  /* No PHY to query: derive a fixed feature set from the
2402  * configured PHY mode instead
2403  */
2404  dev->phy.address = -1;
2405  dev->phy.features = SUPPORTED_MII;
2406  if (emac_phy_supports_gige(dev->phy_mode))
2407  dev->phy.features |= SUPPORTED_1000baseT_Full;
2408  else
2409  dev->phy.features |= SUPPORTED_100baseT_Full;
2410  dev->phy.pause = 1;
2411 
2412  return 0;
2413  }
2414 
2415  mutex_lock(&emac_phy_map_lock);
2416  phy_map = dev->phy_map | busy_phy_map;
2417 
2418  DBG(dev, "PHY maps %08x %08x" NL, dev->phy_map, busy_phy_map);
2419 
2420  dev->phy.mdio_read = emac_mdio_read;
2421  dev->phy.mdio_write = emac_mdio_write;
2422 
2423  /* Enable internal clock source */
2424 #ifdef CONFIG_PPC_DCR_NATIVE
2425  if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
2426  dcri_clrset(SDR0, SDR0_MFR, 0, SDR0_MFR_ECS);
2427 #endif
2428  /* PHY clock workaround */
2429  emac_rx_clk_tx(dev);
2430 
2431  /* Enable internal clock source on 440GX */
2432 #ifdef CONFIG_PPC_DCR_NATIVE
2433  if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
2434  dcri_clrset(SDR0, SDR0_MFR, 0, SDR0_MFR_ECS);
2435 #endif
2436  /* Configure EMAC with defaults so we can at least use MDIO
2437  * This is needed mostly for 440GX
2438  */
2439  if (emac_phy_gpcs(dev->phy.mode)) {
2440  /* XXX
2441  * Make GPCS PHY address equal to EMAC index.
2442  * We probably should take into account busy_phy_map
2443  * and/or phy_map here.
2444  *
2445  * Note that the busy_phy_map is currently global
2446  * while it should probably be per-ASIC...
2447  */
2448  dev->phy.gpcs_address = dev->gpcs_address;
2449  if (dev->phy.gpcs_address == 0xffffffff)
2450  dev->phy.address = dev->cell_index;
2451  }
2452 
2453  emac_configure(dev);
2454 
2455  if (dev->phy_address != 0xffffffff)
2456  phy_map = ~(1 << dev->phy_address);
2457 
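 /* Scan all 32 MDIO addresses, consuming phy_map one bit at a time;
  * a set bit means "skip this address". Each address probed is marked
  * in busy_phy_map so other EMACs sharing the MDIO lines won't claim it.
  */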
2458  for (i = 0; i < 0x20; phy_map >>= 1, ++i)
2459  if (!(phy_map & 1)) {
2460  int r;
2461  busy_phy_map |= 1 << i;
2462 
2463  /* Quick check if there is a PHY at the address */
2464  r = emac_mdio_read(dev->ndev, i, MII_BMCR);
2465  if (r == 0xffff || r < 0)
2466  continue;
2467  if (!emac_mii_phy_probe(&dev->phy, i))
2468  break;
2469  }
2470 
2471  /* Enable external clock source */
2472 #ifdef CONFIG_PPC_DCR_NATIVE
2473  if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
2474  dcri_clrset(SDR0, SDR0_MFR, SDR0_MFR_ECS, 0);
2475 #endif
2476  mutex_unlock(&emac_phy_map_lock);
2477  if (i == 0x20) {
2478  printk(KERN_WARNING "%s: can't find PHY!\n", np->full_name);
2479  return -ENXIO;
2480  }
2481 
2482  /* Init PHY */
2483  if (dev->phy.def->ops->init)
2484  dev->phy.def->ops->init(&dev->phy);
2485 
2486  /* Disable any PHY features not supported by the platform */
2487  dev->phy.def->features &= ~dev->phy_feat_exc;
2488  dev->phy.features &= ~dev->phy_feat_exc;
2489 
2490  /* Setup initial link parameters */
2491  if (dev->phy.features & SUPPORTED_Autoneg) {
2492  adv = dev->phy.features;
2493  if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x))
2494  adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
2495  /* Restart autonegotiation */
2496  dev->phy.def->ops->setup_aneg(&dev->phy, adv);
2497  } else {
2498  u32 f = dev->phy.def->features;
2499  int speed = SPEED_10, fd = DUPLEX_HALF;
2500 
2501  /* Select highest supported speed/duplex */
2502  if (f & SUPPORTED_1000baseT_Full) {
2503  speed = SPEED_1000;
2504  fd = DUPLEX_FULL;
2505  } else if (f & SUPPORTED_1000baseT_Half)
2506  speed = SPEED_1000;
2507  else if (f & SUPPORTED_100baseT_Full) {
2508  speed = SPEED_100;
2509  fd = DUPLEX_FULL;
2510  } else if (f & SUPPORTED_100baseT_Half)
2511  speed = SPEED_100;
2512  else if (f & SUPPORTED_10baseT_Full)
2513  fd = DUPLEX_FULL;
2514 
2515  /* Force link parameters */
2516  dev->phy.def->ops->setup_forced(&dev->phy, speed, fd);
2517  }
2518  return 0;
2519 }
2520 
2521 static int __devinit emac_init_config(struct emac_instance *dev)
2522 {
2523  struct device_node *np = dev->ofdev->dev.of_node;
2524  const void *p;
2525 
2526  /* Read config from device-tree */
2527  if (emac_read_uint_prop(np, "mal-device", &dev->mal_ph, 1))
2528  return -ENXIO;
2529  if (emac_read_uint_prop(np, "mal-tx-channel", &dev->mal_tx_chan, 1))
2530  return -ENXIO;
2531  if (emac_read_uint_prop(np, "mal-rx-channel", &dev->mal_rx_chan, 1))
2532  return -ENXIO;
2533  if (emac_read_uint_prop(np, "cell-index", &dev->cell_index, 1))
2534  return -ENXIO;
2535  if (emac_read_uint_prop(np, "max-frame-size", &dev->max_mtu, 0))
2536  dev->max_mtu = 1500;
2537  if (emac_read_uint_prop(np, "rx-fifo-size", &dev->rx_fifo_size, 0))
2538  dev->rx_fifo_size = 2048;
2539  if (emac_read_uint_prop(np, "tx-fifo-size", &dev->tx_fifo_size, 0))
2540  dev->tx_fifo_size = 2048;
2541  if (emac_read_uint_prop(np, "rx-fifo-size-gige", &dev->rx_fifo_size_gige, 0))
2542  dev->rx_fifo_size_gige = dev->rx_fifo_size;
2543  if (emac_read_uint_prop(np, "tx-fifo-size-gige", &dev->tx_fifo_size_gige, 0))
2544  dev->tx_fifo_size_gige = dev->tx_fifo_size;
2545  if (emac_read_uint_prop(np, "phy-address", &dev->phy_address, 0))
2546  dev->phy_address = 0xffffffff;
2547  if (emac_read_uint_prop(np, "phy-map", &dev->phy_map, 0))
2548  dev->phy_map = 0xffffffff;
2549  if (emac_read_uint_prop(np, "gpcs-address", &dev->gpcs_address, 0))
2550  dev->gpcs_address = 0xffffffff;
2551  if (emac_read_uint_prop(np->parent, "clock-frequency", &dev->opb_bus_freq, 1))
2552  return -ENXIO;
2553  if (emac_read_uint_prop(np, "tah-device", &dev->tah_ph, 0))
2554  dev->tah_ph = 0;
2555  if (emac_read_uint_prop(np, "tah-channel", &dev->tah_port, 0))
2556  dev->tah_port = 0;
2557  if (emac_read_uint_prop(np, "mdio-device", &dev->mdio_ph, 0))
2558  dev->mdio_ph = 0;
2559  if (emac_read_uint_prop(np, "zmii-device", &dev->zmii_ph, 0))
2560  dev->zmii_ph = 0;
2561  if (emac_read_uint_prop(np, "zmii-channel", &dev->zmii_port, 0))
2562  dev->zmii_port = 0xffffffff;
2563  if (emac_read_uint_prop(np, "rgmii-device", &dev->rgmii_ph, 0))
2564  dev->rgmii_ph = 0;
2565  if (emac_read_uint_prop(np, "rgmii-channel", &dev->rgmii_port, 0))
2566  dev->rgmii_port = 0xffffffff;
2567  if (emac_read_uint_prop(np, "fifo-entry-size", &dev->fifo_entry_size, 0))
2568  dev->fifo_entry_size = 16;
2569  if (emac_read_uint_prop(np, "mal-burst-size", &dev->mal_burst_size, 0))
2570  dev->mal_burst_size = 256;
2571 
2572  /* PHY mode needs some decoding */
2573  dev->phy_mode = of_get_phy_mode(np);
2574  if (dev->phy_mode < 0)
2575  dev->phy_mode = PHY_MODE_NA;
2576 
2577  /* Check EMAC version */
2578  if (of_device_is_compatible(np, "ibm,emac4sync")) {
2579  dev->features |= (EMAC_FTR_EMAC4 | EMAC_FTR_EMAC4SYNC);
2580  if (of_device_is_compatible(np, "ibm,emac-460ex") ||
2581  of_device_is_compatible(np, "ibm,emac-460gt"))
2582  dev->features |= EMAC_FTR_460EX_PHY_CLK_FIX;
2583  if (of_device_is_compatible(np, "ibm,emac-405ex") ||
2584  of_device_is_compatible(np, "ibm,emac-405exr"))
2585  dev->features |= EMAC_FTR_440EP_PHY_CLK_FIX;
2586  if (of_device_is_compatible(np, "ibm,emac-apm821xx")) {
2587  dev->features |= (EMAC_APM821XX_REQ_JUMBO_FRAME_SIZE |
2588  EMAC_FTR_APM821XX_NO_HALF_DUPLEX |
2589  EMAC_FTR_460EX_PHY_CLK_FIX);
2590  }
2591  } else if (of_device_is_compatible(np, "ibm,emac4")) {
2592  dev->features |= EMAC_FTR_EMAC4;
2593  if (of_device_is_compatible(np, "ibm,emac-440gx"))
2594  dev->features |= EMAC_FTR_440GX_PHY_CLK_FIX;
2595  } else {
2596  if (of_device_is_compatible(np, "ibm,emac-440ep") ||
2597  of_device_is_compatible(np, "ibm,emac-440gr"))
2598  dev->features |= EMAC_FTR_440EP_PHY_CLK_FIX;
2599  if (of_device_is_compatible(np, "ibm,emac-405ez")) {
2600 #ifdef CONFIG_IBM_EMAC_NO_FLOW_CTRL
2601  dev->features |= EMAC_FTR_NO_FLOW_CONTROL_40x;
2602 #else
2603  printk(KERN_ERR "%s: Flow control not disabled!\n",
2604  np->full_name);
2605  return -ENXIO;
2606 #endif
2607  }
2608 
2609  }
2610 
2611  /* Fixup some feature bits based on the device tree */
2612  if (of_get_property(np, "has-inverted-stacr-oc", NULL))
2613  dev->features |= EMAC_FTR_STACR_OC_INVERT;
2614  if (of_get_property(np, "has-new-stacr-staopc", NULL))
2615  dev->features |= EMAC_FTR_HAS_NEW_STACR;
2616 
2617  /* CAB lacks the appropriate properties */
2618  if (of_device_is_compatible(np, "ibm,emac-axon"))
2619  dev->features |= EMAC_FTR_HAS_NEW_STACR |
2620  EMAC_FTR_STACR_OC_INVERT;
2621 
2622  /* Enable TAH/ZMII/RGMII features as found */
2623  if (dev->tah_ph != 0) {
2624 #ifdef CONFIG_IBM_EMAC_TAH
2625  dev->features |= EMAC_FTR_HAS_TAH;
2626 #else
2627  printk(KERN_ERR "%s: TAH support not enabled !\n",
2628  np->full_name);
2629  return -ENXIO;
2630 #endif
2631  }
2632 
2633  if (dev->zmii_ph != 0) {
2634 #ifdef CONFIG_IBM_EMAC_ZMII
2635  dev->features |= EMAC_FTR_HAS_ZMII;
2636 #else
2637  printk(KERN_ERR "%s: ZMII support not enabled !\n",
2638  np->full_name);
2639  return -ENXIO;
2640 #endif
2641  }
2642 
2643  if (dev->rgmii_ph != 0) {
2644 #ifdef CONFIG_IBM_EMAC_RGMII
2645  dev->features |= EMAC_FTR_HAS_RGMII;
2646 #else
2647  printk(KERN_ERR "%s: RGMII support not enabled !\n",
2648  np->full_name);
2649  return -ENXIO;
2650 #endif
2651  }
2652 
2653  /* Read MAC-address */
2654  p = of_get_property(np, "local-mac-address", NULL);
2655  if (p == NULL) {
2656  printk(KERN_ERR "%s: Can't find local-mac-address property\n",
2657  np->full_name);
2658  return -ENXIO;
2659  }
2660  memcpy(dev->ndev->dev_addr, p, 6);
2661 
2662  /* IAHT and GAHT filter parameterization */
2663  if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) {
2664  dev->xaht_slots_shift = EMAC4SYNC_XAHT_SLOTS_SHIFT;
2665  dev->xaht_width_shift = EMAC4SYNC_XAHT_WIDTH_SHIFT;
2666  } else {
2667  dev->xaht_slots_shift = EMAC4_XAHT_SLOTS_SHIFT;
2668  dev->xaht_width_shift = EMAC4_XAHT_WIDTH_SHIFT;
2669  }
2670 
2671  DBG(dev, "features : 0x%08x / 0x%08x\n", dev->features, EMAC_FTRS_POSSIBLE);
2672  DBG(dev, "tx_fifo_size : %d (%d gige)\n", dev->tx_fifo_size, dev->tx_fifo_size_gige);
2673  DBG(dev, "rx_fifo_size : %d (%d gige)\n", dev->rx_fifo_size, dev->rx_fifo_size_gige);
2674  DBG(dev, "max_mtu : %d\n", dev->max_mtu);
2675  DBG(dev, "OPB freq : %d\n", dev->opb_bus_freq);
2676 
2677  return 0;
2678 }
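/*
 * Editor's note: a hypothetical device-tree node matching the
 * properties parsed above; every value is illustrative rather than
 * taken from a real board file, and the parent bus node must also
 * carry the clock-frequency property read via np->parent:
 *
 *	EMAC0: ethernet@ef600c00 {
 *		device_type = "network";
 *		compatible = "ibm,emac4sync";
 *		interrupts = <0x10 0x11>;
 *		reg = <0xef600c00 0xc4>;
 *		local-mac-address = [000000000000];	(filled by firmware)
 *		mal-device = <&MAL0>;
 *		mal-tx-channel = <0>;
 *		mal-rx-channel = <0>;
 *		cell-index = <0>;
 *		max-frame-size = <9000>;
 *		rx-fifo-size = <4096>;
 *		tx-fifo-size = <2048>;
 *		phy-mode = "rgmii";
 *		phy-map = <0x00000000>;
 *		rgmii-device = <&RGMII0>;
 *		rgmii-channel = <0>;
 *	};
 */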
2679 
2680 static const struct net_device_ops emac_netdev_ops = {
2681  .ndo_open = emac_open,
2682  .ndo_stop = emac_close,
2683  .ndo_get_stats = emac_stats,
2684  .ndo_set_rx_mode = emac_set_multicast_list,
2685  .ndo_do_ioctl = emac_ioctl,
2686  .ndo_tx_timeout = emac_tx_timeout,
2687  .ndo_validate_addr = eth_validate_addr,
2688  .ndo_set_mac_address = eth_mac_addr,
2689  .ndo_start_xmit = emac_start_xmit,
2690  .ndo_change_mtu = eth_change_mtu,
2691 };
2692 
2693 static const struct net_device_ops emac_gige_netdev_ops = {
2694  .ndo_open = emac_open,
2695  .ndo_stop = emac_close,
2696  .ndo_get_stats = emac_stats,
2697  .ndo_set_rx_mode = emac_set_multicast_list,
2698  .ndo_do_ioctl = emac_ioctl,
2699  .ndo_tx_timeout = emac_tx_timeout,
2700  .ndo_validate_addr = eth_validate_addr,
2701  .ndo_set_mac_address = eth_mac_addr,
2702  .ndo_start_xmit = emac_start_xmit_sg,
2703  .ndo_change_mtu = emac_change_mtu,
2704 };
2705 
2706 static int __devinit emac_probe(struct platform_device *ofdev)
2707 {
2708  struct net_device *ndev;
2709  struct emac_instance *dev;
2710  struct device_node *np = ofdev->dev.of_node;
2711  struct device_node **blist = NULL;
2712  int err, i;
2713 
2714  /* Skip unused/unwired EMACs. We leave the check for an unused
2715  * property here for now, but new flat device trees should set a
2716  * status property to "disabled" instead.
2717  */
2718  if (of_get_property(np, "unused", NULL) || !of_device_is_available(np))
2719  return -ENODEV;
2720 
2721  /* Find ourselves in the bootlist if we are there */
2722  for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
2723  if (emac_boot_list[i] == np)
2724  blist = &emac_boot_list[i];
2725 
2726  /* Allocate our net_device structure */
2727  err = -ENOMEM;
2728  ndev = alloc_etherdev(sizeof(struct emac_instance));
2729  if (!ndev)
2730  goto err_gone;
2731 
2732  dev = netdev_priv(ndev);
2733  dev->ndev = ndev;
2734  dev->ofdev = ofdev;
2735  dev->blist = blist;
2736  SET_NETDEV_DEV(ndev, &ofdev->dev);
2737 
2738  /* Initialize some embedded data structures */
2739  mutex_init(&dev->mdio_lock);
2740  mutex_init(&dev->link_lock);
2741  spin_lock_init(&dev->lock);
2742  INIT_WORK(&dev->reset_work, emac_reset_work);
2743 
2744  /* Init various config data based on device-tree */
2745  err = emac_init_config(dev);
2746  if (err != 0)
2747  goto err_free;
2748 
2749  /* Get interrupts. EMAC irq is mandatory, WOL irq is optional */
2750  dev->emac_irq = irq_of_parse_and_map(np, 0);
2751  dev->wol_irq = irq_of_parse_and_map(np, 1);
2752  if (dev->emac_irq == NO_IRQ) {
2753  printk(KERN_ERR "%s: Can't map main interrupt\n", np->full_name);
2754  goto err_free;
2755  }
2756  ndev->irq = dev->emac_irq;
2757 
2758  /* Map EMAC regs */
2759  if (of_address_to_resource(np, 0, &dev->rsrc_regs)) {
2760  printk(KERN_ERR "%s: Can't get registers address\n",
2761  np->full_name);
2762  goto err_irq_unmap;
2763  }
2764  // TODO: request_mem_region
2765  dev->emacp = ioremap(dev->rsrc_regs.start,
2766  resource_size(&dev->rsrc_regs));
2767  if (dev->emacp == NULL) {
2768  printk(KERN_ERR "%s: Can't map device registers!\n",
2769  np->full_name);
2770  err = -ENOMEM;
2771  goto err_irq_unmap;
2772  }
2773 
2774  /* Wait for dependent devices */
2775  err = emac_wait_deps(dev);
2776  if (err) {
2777  printk(KERN_ERR
2778  "%s: Timeout waiting for dependent devices\n",
2779  np->full_name);
2780  /* display more info about what's missing? */
2781  goto err_reg_unmap;
2782  }
2783  dev->mal = dev_get_drvdata(&dev->mal_dev->dev);
2784  if (dev->mdio_dev != NULL)
2785  dev->mdio_instance = dev_get_drvdata(&dev->mdio_dev->dev);
2786 
2787  /* Register with MAL */
2788  dev->commac.ops = &emac_commac_ops;
2789  dev->commac.dev = dev;
2790  dev->commac.tx_chan_mask = MAL_CHAN_MASK(dev->mal_tx_chan);
2791  dev->commac.rx_chan_mask = MAL_CHAN_MASK(dev->mal_rx_chan);
2792  err = mal_register_commac(dev->mal, &dev->commac);
2793  if (err) {
2794  printk(KERN_ERR "%s: failed to register with mal %s!\n",
2795  np->full_name, dev->mal_dev->dev.of_node->full_name);
2796  goto err_rel_deps;
2797  }
2798  dev->rx_skb_size = emac_rx_skb_size(ndev->mtu);
2799  dev->rx_sync_size = emac_rx_sync_size(ndev->mtu);
2800 
2801  /* Get pointers to BD rings */
2802  dev->tx_desc =
2803  dev->mal->bd_virt + mal_tx_bd_offset(dev->mal, dev->mal_tx_chan);
2804  dev->rx_desc =
2805  dev->mal->bd_virt + mal_rx_bd_offset(dev->mal, dev->mal_rx_chan);
2806 
2807  DBG(dev, "tx_desc %p" NL, dev->tx_desc);
2808  DBG(dev, "rx_desc %p" NL, dev->rx_desc);
2809 
2810  /* Clean rings */
2811  memset(dev->tx_desc, 0, NUM_TX_BUFF * sizeof(struct mal_descriptor));
2812  memset(dev->rx_desc, 0, NUM_RX_BUFF * sizeof(struct mal_descriptor));
2813  memset(dev->tx_skb, 0, NUM_TX_BUFF * sizeof(struct sk_buff *));
2814  memset(dev->rx_skb, 0, NUM_RX_BUFF * sizeof(struct sk_buff *));
2815 
2816  /* Attach to ZMII, if needed */
2817  if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII) &&
2818  (err = zmii_attach(dev->zmii_dev, dev->zmii_port, &dev->phy_mode)) != 0)
2819  goto err_unreg_commac;
2820 
2821  /* Attach to RGMII, if needed */
2822  if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII) &&
2823  (err = rgmii_attach(dev->rgmii_dev, dev->rgmii_port, dev->phy_mode)) != 0)
2824  goto err_detach_zmii;
2825 
2826  /* Attach to TAH, if needed */
2827  if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
2828  (err = tah_attach(dev->tah_dev, dev->tah_port)) != 0)
2829  goto err_detach_rgmii;
2830 
2831  /* Set some link defaults before we can find out real parameters */
2832  dev->phy.speed = SPEED_100;
2833  dev->phy.duplex = DUPLEX_FULL;
2834  dev->phy.autoneg = AUTONEG_DISABLE;
2835  dev->phy.pause = dev->phy.asym_pause = 0;
2836  dev->stop_timeout = STOP_TIMEOUT_100;
2837  INIT_DELAYED_WORK(&dev->link_work, emac_link_timer);
2838 
2839  /* Some SoCs, like the APM821xx, do not support half-duplex mode. */
2840  if (emac_has_feature(dev, EMAC_FTR_APM821XX_NO_HALF_DUPLEX)) {
2841  dev->phy_feat_exc = (SUPPORTED_1000baseT_Half |
2842  SUPPORTED_100baseT_Half |
2843  SUPPORTED_10baseT_Half);
2844  }
2845 
2846  /* Find PHY if any */
2847  err = emac_init_phy(dev);
2848  if (err != 0)
2849  goto err_detach_tah;
2850 
2851  if (dev->tah_dev) {
2852  ndev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG;
2853  ndev->features |= ndev->hw_features | NETIF_F_RXCSUM;
2854  }
2855  ndev->watchdog_timeo = 5 * HZ;
2856  if (emac_phy_supports_gige(dev->phy_mode)) {
2857  ndev->netdev_ops = &emac_gige_netdev_ops;
2858  dev->commac.ops = &emac_commac_sg_ops;
2859  } else
2860  ndev->netdev_ops = &emac_netdev_ops;
2861  SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops);
2862 
2863  netif_carrier_off(ndev);
2864 
2865  err = register_netdev(ndev);
2866  if (err) {
2867  printk(KERN_ERR "%s: failed to register net device (%d)!\n",
2868  np->full_name, err);
2869  goto err_detach_tah;
2870  }
2871 
2872  /* Set our drvdata last as we don't want them visible until we are
2873  * fully initialized
2874  */
2875  wmb();
2876  dev_set_drvdata(&ofdev->dev, dev);
2877 
2878  /* There's a new kid in town! Let's tell everybody */
2879  wake_up_all(&emac_probe_wait);
2880 
2881 
2882  printk(KERN_INFO "%s: EMAC-%d %s, MAC %pM\n",
2883  ndev->name, dev->cell_index, np->full_name, ndev->dev_addr);
2884 
2885  if (dev->phy_mode == PHY_MODE_SGMII)
2886  printk(KERN_NOTICE "%s: in SGMII mode\n", ndev->name);
2887 
2888  if (dev->phy.address >= 0)
2889  printk("%s: found %s PHY (0x%02x)\n", ndev->name,
2890  dev->phy.def->name, dev->phy.address);
2891 
2892  emac_dbg_register(dev);
2893 
2894  /* Life is good */
2895  return 0;
2896 
2897  /* I have a bad feeling about this ... */
2898 
2899  err_detach_tah:
2900  if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2901  tah_detach(dev->tah_dev, dev->tah_port);
2902  err_detach_rgmii:
2903  if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2904  rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
2905  err_detach_zmii:
2906  if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2907  zmii_detach(dev->zmii_dev, dev->zmii_port);
2908  err_unreg_commac:
2909  mal_unregister_commac(dev->mal, &dev->commac);
2910  err_rel_deps:
2911  emac_put_deps(dev);
2912  err_reg_unmap:
2913  iounmap(dev->emacp);
2914  err_irq_unmap:
2915  if (dev->wol_irq != NO_IRQ)
2916  irq_dispose_mapping(dev->wol_irq);
2917  if (dev->emac_irq != NO_IRQ)
2918  irq_dispose_mapping(dev->emac_irq);
2919  err_free:
2920  free_netdev(ndev);
2921  err_gone:
2922  /* if we were on the bootlist, remove us as we won't show up and
2923  * wake up all waiters to notify them in case they were waiting
2924  * on us
2925  */
2926  if (blist) {
2927  *blist = NULL;
2928  wake_up_all(&emac_probe_wait);
2929  }
2930  return err;
2931 }
2932 
2933 static int __devexit emac_remove(struct platform_device *ofdev)
2934 {
2935  struct emac_instance *dev = dev_get_drvdata(&ofdev->dev);
2936 
2937  DBG(dev, "remove" NL);
2938 
2939  dev_set_drvdata(&ofdev->dev, NULL);
2940 
2941  unregister_netdev(dev->ndev);
2942 
2943  cancel_work_sync(&dev->reset_work);
2944 
2945  if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2946  tah_detach(dev->tah_dev, dev->tah_port);
2947  if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2948  rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
2949  if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2950  zmii_detach(dev->zmii_dev, dev->zmii_port);
2951 
2952  busy_phy_map &= ~(1 << dev->phy.address);
2953  DBG(dev, "busy_phy_map now %#x" NL, busy_phy_map);
2954 
2955  mal_unregister_commac(dev->mal, &dev->commac);
2956  emac_put_deps(dev);
2957 
2958  emac_dbg_unregister(dev);
2959  iounmap(dev->emacp);
2960 
2961  if (dev->wol_irq != NO_IRQ)
2962  irq_dispose_mapping(dev->wol_irq);
2963  if (dev->emac_irq != NO_IRQ)
2964  irq_dispose_mapping(dev->emac_irq);
2965 
2966  free_netdev(dev->ndev);
2967 
2968  return 0;
2969 }
2970 
2971 /* XXX Features in here should be replaced by properties... */
2972 static struct of_device_id emac_match[] =
2973 {
2974  {
2975  .type = "network",
2976  .compatible = "ibm,emac",
2977  },
2978  {
2979  .type = "network",
2980  .compatible = "ibm,emac4",
2981  },
2982  {
2983  .type = "network",
2984  .compatible = "ibm,emac4sync",
2985  },
2986  {},
2987 };
2988 MODULE_DEVICE_TABLE(of, emac_match);
2989 
2990 static struct platform_driver emac_driver = {
2991  .driver = {
2992  .name = "emac",
2993  .owner = THIS_MODULE,
2994  .of_match_table = emac_match,
2995  },
2996  .probe = emac_probe,
2997  .remove = emac_remove,
2998 };
2999 
3000 static void __init emac_make_bootlist(void)
3001 {
3002  struct device_node *np = NULL;
3003  int j, max, i = 0, k;
3004  int cell_indices[EMAC_BOOT_LIST_SIZE];
3005 
3006  /* Collect EMACs */
3007  while((np = of_find_all_nodes(np)) != NULL) {
3008  const u32 *idx;
3009 
3010  if (of_match_node(emac_match, np) == NULL)
3011  continue;
3012  if (of_get_property(np, "unused", NULL))
3013  continue;
3014  idx = of_get_property(np, "cell-index", NULL);
3015  if (idx == NULL)
3016  continue;
3017  cell_indices[i] = *idx;
3018  emac_boot_list[i++] = of_node_get(np);
3019  if (i >= EMAC_BOOT_LIST_SIZE) {
3020  of_node_put(np);
3021  break;
3022  }
3023  }
3024  max = i;
3025 
3026  /* Bubble sort them (doh, what a creative algorithm :-) */
3027  for (i = 0; max > 1 && (i < (max - 1)); i++)
3028  for (j = i; j < max; j++) {
3029  if (cell_indices[i] > cell_indices[j]) {
3030  np = emac_boot_list[i];
3031  emac_boot_list[i] = emac_boot_list[j];
3032  emac_boot_list[j] = np;
3033  k = cell_indices[i];
3034  cell_indices[i] = cell_indices[j];
3035  cell_indices[j] = k;
3036  }
3037  }
3038 }
3039 
3040 static int __init emac_init(void)
3041 {
3042  int rc;
3043 
3044  printk(KERN_INFO DRV_DESC ", version " DRV_VERSION "\n");
3045 
3046  /* Init debug stuff */
3047  emac_init_debug();
3048 
3049  /* Build EMAC boot list */
3050  emac_make_bootlist();
3051 
3052  /* Init submodules */
3053  rc = mal_init();
3054  if (rc)
3055  goto err;
3056  rc = zmii_init();
3057  if (rc)
3058  goto err_mal;
3059  rc = rgmii_init();
3060  if (rc)
3061  goto err_zmii;
3062  rc = tah_init();
3063  if (rc)
3064  goto err_rgmii;
3065  rc = platform_driver_register(&emac_driver);
3066  if (rc)
3067  goto err_tah;
3068 
3069  return 0;
3070 
3071  err_tah:
3072  tah_exit();
3073  err_rgmii:
3074  rgmii_exit();
3075  err_zmii:
3076  zmii_exit();
3077  err_mal:
3078  mal_exit();
3079  err:
3080  return rc;
3081 }
3082 
3083 static void __exit emac_exit(void)
3084 {
3085  int i;
3086 
3087  platform_driver_unregister(&emac_driver);
3088 
3089  tah_exit();
3090  rgmii_exit();
3091  zmii_exit();
3092  mal_exit();
3093  emac_fini_debug();
3094 
3095  /* Destroy EMAC boot list */
3096  for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
3097  if (emac_boot_list[i])
3098  of_node_put(emac_boot_list[i]);
3099 }
3100 
3101 module_init(emac_init);
3102 module_exit(emac_exit);