Linux Kernel  3.7.1
bcm63xx_enet.c
1 /*
2  * Driver for BCM963xx builtin Ethernet mac
3  *
4  * Copyright (C) 2008 Maxime Bizon <[email protected]>
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; either version 2 of the License, or
9  * (at your option) any later version.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14  * GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program; if not, write to the Free Software
18  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19  */
20 #include <linux/init.h>
21 #include <linux/interrupt.h>
22 #include <linux/module.h>
23 #include <linux/clk.h>
24 #include <linux/etherdevice.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/ethtool.h>
28 #include <linux/crc32.h>
29 #include <linux/err.h>
30 #include <linux/dma-mapping.h>
31 #include <linux/platform_device.h>
32 #include <linux/if_vlan.h>
33 
34 #include <bcm63xx_dev_enet.h>
35 #include "bcm63xx_enet.h"
36 
37 static char bcm_enet_driver_name[] = "bcm63xx_enet";
38 static char bcm_enet_driver_version[] = "1.0";
39 
40 static int copybreak __read_mostly = 128;
41 module_param(copybreak, int, 0);
42 MODULE_PARM_DESC(copybreak, "Receive copy threshold");
43 
44 /* io memory shared between all devices */
45 static void __iomem *bcm_enet_shared_base;
46 
47 /*
48  * io helpers to access mac registers
49  */
50 static inline u32 enet_readl(struct bcm_enet_priv *priv, u32 off)
51 {
52  return bcm_readl(priv->base + off);
53 }
54 
55 static inline void enet_writel(struct bcm_enet_priv *priv,
56  u32 val, u32 off)
57 {
58  bcm_writel(val, priv->base + off);
59 }
60 
61 /*
62  * io helpers to access shared registers
63  */
64 static inline u32 enet_dma_readl(struct bcm_enet_priv *priv, u32 off)
65 {
66  return bcm_readl(bcm_enet_shared_base + off);
67 }
68 
69 static inline void enet_dma_writel(struct bcm_enet_priv *priv,
70  u32 val, u32 off)
71 {
72  bcm_writel(val, bcm_enet_shared_base + off);
73 }
74 
75 /*
76  * write given data into mii register and wait for transfer to end
77  * with timeout (average measured transfer time is 25us)
78  */
79 static int do_mdio_op(struct bcm_enet_priv *priv, unsigned int data)
80 {
81  int limit;
82 
83  /* make sure mii interrupt status is cleared */
84  enet_writel(priv, ENET_IR_MII, ENET_IR_REG);
85 
86  enet_writel(priv, data, ENET_MIIDATA_REG);
87  wmb();
88 
89  /* busy wait on mii interrupt bit, with timeout */
90  limit = 1000;
91  do {
92  if (enet_readl(priv, ENET_IR_REG) & ENET_IR_MII)
93  break;
94  udelay(1);
95  } while (limit-- > 0);
96 
97  return (limit < 0) ? 1 : 0;
98 }
99 
100 /*
101  * MII internal read callback
102  */
103 static int bcm_enet_mdio_read(struct bcm_enet_priv *priv, int mii_id,
104  int regnum)
105 {
106  u32 tmp, val;
107 
108  tmp = regnum << ENET_MIIDATA_REG_SHIFT;
109  tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT;
110  tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT;
111  tmp |= ENET_MIIDATA_OP_READ_MASK;
112 
113  if (do_mdio_op(priv, tmp))
114  return -1;
115 
116  val = enet_readl(priv, ENET_MIIDATA_REG);
117  val &= 0xffff;
118  return val;
119 }
120 
121 /*
122  * MII internal write callback
123  */
124 static int bcm_enet_mdio_write(struct bcm_enet_priv *priv, int mii_id,
125  int regnum, u16 value)
126 {
127  u32 tmp;
128 
129  tmp = (value & 0xffff) << ENET_MIIDATA_DATA_SHIFT;
130  tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT;
131  tmp |= regnum << ENET_MIIDATA_REG_SHIFT;
132  tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT;
133  tmp |= ENET_MIIDATA_OP_WRITE_MASK;
134 
135  (void)do_mdio_op(priv, tmp);
136  return 0;
137 }
138 
139 /*
140  * MII read callback from phylib
141  */
142 static int bcm_enet_mdio_read_phylib(struct mii_bus *bus, int mii_id,
143  int regnum)
144 {
145  return bcm_enet_mdio_read(bus->priv, mii_id, regnum);
146 }
147 
148 /*
149  * MII write callback from phylib
150  */
151 static int bcm_enet_mdio_write_phylib(struct mii_bus *bus, int mii_id,
152  int regnum, u16 value)
153 {
154  return bcm_enet_mdio_write(bus->priv, mii_id, regnum, value);
155 }
156 
157 /*
158  * MII read callback from mii core
159  */
160 static int bcm_enet_mdio_read_mii(struct net_device *dev, int mii_id,
161  int regnum)
162 {
163  return bcm_enet_mdio_read(netdev_priv(dev), mii_id, regnum);
164 }
165 
166 /*
167  * MII write callback from mii core
168  */
169 static void bcm_enet_mdio_write_mii(struct net_device *dev, int mii_id,
170  int regnum, int value)
171 {
172  bcm_enet_mdio_write(netdev_priv(dev), mii_id, regnum, value);
173 }
174 
175 /*
176  * refill rx queue
177  */
178 static int bcm_enet_refill_rx(struct net_device *dev)
179 {
180  struct bcm_enet_priv *priv;
181 
182  priv = netdev_priv(dev);
183 
184  while (priv->rx_desc_count < priv->rx_ring_size) {
185  struct bcm_enet_desc *desc;
186  struct sk_buff *skb;
187  dma_addr_t p;
188  int desc_idx;
189  u32 len_stat;
190 
191  desc_idx = priv->rx_dirty_desc;
192  desc = &priv->rx_desc_cpu[desc_idx];
193 
194  if (!priv->rx_skb[desc_idx]) {
195  skb = netdev_alloc_skb(dev, priv->rx_skb_size);
196  if (!skb)
197  break;
198  priv->rx_skb[desc_idx] = skb;
199 
200  p = dma_map_single(&priv->pdev->dev, skb->data,
201  priv->rx_skb_size,
202  DMA_FROM_DEVICE);
203  desc->address = p;
204  }
205 
206  len_stat = priv->rx_skb_size << DMADESC_LENGTH_SHIFT;
207  len_stat |= DMADESC_OWNER_MASK;
208  if (priv->rx_dirty_desc == priv->rx_ring_size - 1) {
209  len_stat |= DMADESC_WRAP_MASK;
210  priv->rx_dirty_desc = 0;
211  } else {
212  priv->rx_dirty_desc++;
213  }
214  wmb();
215  desc->len_stat = len_stat;
216 
217  priv->rx_desc_count++;
218 
219  /* tell dma engine we allocated one buffer */
220  enet_dma_writel(priv, 1, ENETDMA_BUFALLOC_REG(priv->rx_chan));
221  }
222 
223  /* If rx ring is still empty, set a timer to try allocating
224  * again at a later time. */
225  if (priv->rx_desc_count == 0 && netif_running(dev)) {
226  dev_warn(&priv->pdev->dev, "unable to refill rx ring\n");
227  priv->rx_timeout.expires = jiffies + HZ;
228  add_timer(&priv->rx_timeout);
229  }
230 
231  return 0;
232 }
233 
234 /*
235  * timer callback to defer refill rx queue in case we're OOM
236  */
237 static void bcm_enet_refill_rx_timer(unsigned long data)
238 {
239  struct net_device *dev;
240  struct bcm_enet_priv *priv;
241 
242  dev = (struct net_device *)data;
243  priv = netdev_priv(dev);
244 
245  spin_lock(&priv->rx_lock);
246  bcm_enet_refill_rx((struct net_device *)data);
247  spin_unlock(&priv->rx_lock);
248 }
249 
250 /*
251  * extract packet from rx queue
252  */
253 static int bcm_enet_receive_queue(struct net_device *dev, int budget)
254 {
255  struct bcm_enet_priv *priv;
256  struct device *kdev;
257  int processed;
258 
259  priv = netdev_priv(dev);
260  kdev = &priv->pdev->dev;
261  processed = 0;
262 
263  /* don't scan ring further than number of refilled
264  * descriptor */
265  if (budget > priv->rx_desc_count)
266  budget = priv->rx_desc_count;
267 
268  do {
269  struct bcm_enet_desc *desc;
270  struct sk_buff *skb;
271  int desc_idx;
272  u32 len_stat;
273  unsigned int len;
274 
275  desc_idx = priv->rx_curr_desc;
276  desc = &priv->rx_desc_cpu[desc_idx];
277 
278  /* make sure we actually read the descriptor status at
279  * each loop */
280  rmb();
281 
282  len_stat = desc->len_stat;
283 
284  /* break if dma ownership belongs to hw */
285  if (len_stat & DMADESC_OWNER_MASK)
286  break;
287 
288  processed++;
289  priv->rx_curr_desc++;
290  if (priv->rx_curr_desc == priv->rx_ring_size)
291  priv->rx_curr_desc = 0;
292  priv->rx_desc_count--;
293 
294  /* if the packet does not have start of packet _and_
295  * end of packet flag set, then just recycle it */
296  if ((len_stat & DMADESC_ESOP_MASK) != DMADESC_ESOP_MASK) {
297  dev->stats.rx_dropped++;
298  continue;
299  }
300 
301  /* recycle packet if it's marked as bad */
302  if (unlikely(len_stat & DMADESC_ERR_MASK)) {
303  dev->stats.rx_errors++;
304 
305  if (len_stat & DMADESC_OVSIZE_MASK)
306  dev->stats.rx_length_errors++;
307  if (len_stat & DMADESC_CRC_MASK)
308  dev->stats.rx_crc_errors++;
309  if (len_stat & DMADESC_UNDER_MASK)
310  dev->stats.rx_frame_errors++;
311  if (len_stat & DMADESC_OV_MASK)
312  dev->stats.rx_fifo_errors++;
313  continue;
314  }
315 
316  /* valid packet */
317  skb = priv->rx_skb[desc_idx];
318  len = (len_stat & DMADESC_LENGTH_MASK) >> DMADESC_LENGTH_SHIFT;
319  /* don't include FCS */
320  len -= 4;
321 
322  if (len < copybreak) {
323  struct sk_buff *nskb;
324 
325  nskb = netdev_alloc_skb_ip_align(dev, len);
326  if (!nskb) {
327  /* forget packet, just rearm desc */
328  dev->stats.rx_dropped++;
329  continue;
330  }
331 
332  dma_sync_single_for_cpu(kdev, desc->address,
333  len, DMA_FROM_DEVICE);
334  memcpy(nskb->data, skb->data, len);
335  dma_sync_single_for_device(kdev, desc->address,
336  len, DMA_FROM_DEVICE);
337  skb = nskb;
338  } else {
339  dma_unmap_single(&priv->pdev->dev, desc->address,
340  priv->rx_skb_size, DMA_FROM_DEVICE);
341  priv->rx_skb[desc_idx] = NULL;
342  }
343 
344  skb_put(skb, len);
345  skb->protocol = eth_type_trans(skb, dev);
346  dev->stats.rx_packets++;
347  dev->stats.rx_bytes += len;
348  netif_receive_skb(skb);
349 
350  } while (--budget > 0);
351 
352  if (processed || !priv->rx_desc_count) {
353  bcm_enet_refill_rx(dev);
354 
355  /* kick rx dma */
356  enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
357  ENETDMA_CHANCFG_REG(priv->rx_chan));
358  }
359 
360  return processed;
361 }
362 
363 
364 /*
365  * try to or force reclaim of transmitted buffers
366  */
367 static int bcm_enet_tx_reclaim(struct net_device *dev, int force)
368 {
369  struct bcm_enet_priv *priv;
370  int released;
371 
372  priv = netdev_priv(dev);
373  released = 0;
374 
375  while (priv->tx_desc_count < priv->tx_ring_size) {
376  struct bcm_enet_desc *desc;
377  struct sk_buff *skb;
378 
379  /* We run in a bh and fight against start_xmit, which
380  * is called with bh disabled */
381  spin_lock(&priv->tx_lock);
382 
383  desc = &priv->tx_desc_cpu[priv->tx_dirty_desc];
384 
385  if (!force && (desc->len_stat & DMADESC_OWNER_MASK)) {
386  spin_unlock(&priv->tx_lock);
387  break;
388  }
389 
390  /* ensure other fields of the descriptor were not read
391  * before we checked ownership */
392  rmb();
393 
394  skb = priv->tx_skb[priv->tx_dirty_desc];
395  priv->tx_skb[priv->tx_dirty_desc] = NULL;
396  dma_unmap_single(&priv->pdev->dev, desc->address, skb->len,
397  DMA_TO_DEVICE);
398 
399  priv->tx_dirty_desc++;
400  if (priv->tx_dirty_desc == priv->tx_ring_size)
401  priv->tx_dirty_desc = 0;
402  priv->tx_desc_count++;
403 
404  spin_unlock(&priv->tx_lock);
405 
406  if (desc->len_stat & DMADESC_UNDER_MASK)
407  dev->stats.tx_errors++;
408 
409  dev_kfree_skb(skb);
410  released++;
411  }
412 
413  if (netif_queue_stopped(dev) && released)
414  netif_wake_queue(dev);
415 
416  return released;
417 }
418 
419 /*
420  * poll func, called by network core
421  */
422 static int bcm_enet_poll(struct napi_struct *napi, int budget)
423 {
424  struct bcm_enet_priv *priv;
425  struct net_device *dev;
426  int tx_work_done, rx_work_done;
427 
428  priv = container_of(napi, struct bcm_enet_priv, napi);
429  dev = priv->net_dev;
430 
431  /* ack interrupts */
432  enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
433  ENETDMA_IR_REG(priv->rx_chan));
434  enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
435  ENETDMA_IR_REG(priv->tx_chan));
436 
437  /* reclaim sent skb */
438  tx_work_done = bcm_enet_tx_reclaim(dev, 0);
439 
440  spin_lock(&priv->rx_lock);
441  rx_work_done = bcm_enet_receive_queue(dev, budget);
442  spin_unlock(&priv->rx_lock);
443 
444  if (rx_work_done >= budget || tx_work_done > 0) {
445  /* rx/tx queue is not yet empty/clean */
446  return rx_work_done;
447  }
448 
449  /* no more packet in rx/tx queue, remove device from poll
450  * queue */
451  napi_complete(napi);
452 
453  /* restore rx/tx interrupt */
454  enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
455  ENETDMA_IRMASK_REG(priv->rx_chan));
456  enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
457  ENETDMA_IRMASK_REG(priv->tx_chan));
458 
459  return rx_work_done;
460 }
461 
462 /*
463  * mac interrupt handler
464  */
465 static irqreturn_t bcm_enet_isr_mac(int irq, void *dev_id)
466 {
467  struct net_device *dev;
468  struct bcm_enet_priv *priv;
469  u32 stat;
470 
471  dev = dev_id;
472  priv = netdev_priv(dev);
473 
474  stat = enet_readl(priv, ENET_IR_REG);
475  if (!(stat & ENET_IR_MIB))
476  return IRQ_NONE;
477 
478  /* clear & mask interrupt */
479  enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
480  enet_writel(priv, 0, ENET_IRMASK_REG);
481 
482  /* read mib registers in workqueue */
483  schedule_work(&priv->mib_update_task);
484 
485  return IRQ_HANDLED;
486 }
487 
488 /*
489  * rx/tx dma interrupt handler
490  */
491 static irqreturn_t bcm_enet_isr_dma(int irq, void *dev_id)
492 {
493  struct net_device *dev;
494  struct bcm_enet_priv *priv;
495 
496  dev = dev_id;
497  priv = netdev_priv(dev);
498 
499  /* mask rx/tx interrupts */
500  enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan));
501  enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));
502 
503  napi_schedule(&priv->napi);
504 
505  return IRQ_HANDLED;
506 }
507 
508 /*
509  * tx request callback
510  */
511 static int bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
512 {
513  struct bcm_enet_priv *priv;
514  struct bcm_enet_desc *desc;
515  u32 len_stat;
516  int ret;
517 
518  priv = netdev_priv(dev);
519 
520  /* lock against tx reclaim */
521  spin_lock(&priv->tx_lock);
522 
523  /* make sure the tx hw queue is not full, should not happen
524  * since we stop queue before it's the case */
525  if (unlikely(!priv->tx_desc_count)) {
526  netif_stop_queue(dev);
527  dev_err(&priv->pdev->dev, "xmit called with no tx desc "
528  "available?\n");
529  ret = NETDEV_TX_BUSY;
530  goto out_unlock;
531  }
532 
533  /* point to the next available desc */
534  desc = &priv->tx_desc_cpu[priv->tx_curr_desc];
535  priv->tx_skb[priv->tx_curr_desc] = skb;
536 
537  /* fill descriptor */
538  desc->address = dma_map_single(&priv->pdev->dev, skb->data, skb->len,
539  DMA_TO_DEVICE);
540 
541  len_stat = (skb->len << DMADESC_LENGTH_SHIFT) & DMADESC_LENGTH_MASK;
542  len_stat |= DMADESC_ESOP_MASK |
543  DMADESC_APPEND_CRC |
544  DMADESC_OWNER_MASK;
545 
546  priv->tx_curr_desc++;
547  if (priv->tx_curr_desc == priv->tx_ring_size) {
548  priv->tx_curr_desc = 0;
549  len_stat |= DMADESC_WRAP_MASK;
550  }
551  priv->tx_desc_count--;
552 
553  /* dma might be already polling, make sure we update desc
554  * fields in correct order */
555  wmb();
556  desc->len_stat = len_stat;
557  wmb();
558 
559  /* kick tx dma */
560  enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
561  ENETDMA_CHANCFG_REG(priv->tx_chan));
562 
563  /* stop queue if no more desc available */
564  if (!priv->tx_desc_count)
565  netif_stop_queue(dev);
566 
567  dev->stats.tx_bytes += skb->len;
568  dev->stats.tx_packets++;
569  ret = NETDEV_TX_OK;
570 
571 out_unlock:
572  spin_unlock(&priv->tx_lock);
573  return ret;
574 }
575 
576 /*
577  * Change the interface's mac address.
578  */
579 static int bcm_enet_set_mac_address(struct net_device *dev, void *p)
580 {
581  struct bcm_enet_priv *priv;
582  struct sockaddr *addr = p;
583  u32 val;
584 
585  priv = netdev_priv(dev);
586  memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
587 
588  /* use perfect match register 0 to store my mac address */
589  val = (dev->dev_addr[2] << 24) | (dev->dev_addr[3] << 16) |
590  (dev->dev_addr[4] << 8) | dev->dev_addr[5];
591  enet_writel(priv, val, ENET_PML_REG(0));
592 
593  val = (dev->dev_addr[0] << 8 | dev->dev_addr[1]);
594  val |= ENET_PMH_DATAVALID_MASK;
595  enet_writel(priv, val, ENET_PMH_REG(0));
596 
597  return 0;
598 }
599 
600 /*
601  * Change rx mode (promiscuous/allmulti) and update multicast list
602  */
603 static void bcm_enet_set_multicast_list(struct net_device *dev)
604 {
605  struct bcm_enet_priv *priv;
606  struct netdev_hw_addr *ha;
607  u32 val;
608  int i;
609 
610  priv = netdev_priv(dev);
611 
612  val = enet_readl(priv, ENET_RXCFG_REG);
613 
614  if (dev->flags & IFF_PROMISC)
615  val |= ENET_RXCFG_PROMISC_MASK;
616  else
617  val &= ~ENET_RXCFG_PROMISC_MASK;
618 
619  /* only 3 perfect match registers left, first one is used for
620  * own mac address */
621  if ((dev->flags & IFF_ALLMULTI) || netdev_mc_count(dev) > 3)
622  val |= ENET_RXCFG_ALLMCAST_MASK;
623  else
624  val &= ~ENET_RXCFG_ALLMCAST_MASK;
625 
626  /* no need to set perfect match registers if we catch all
627  * multicast */
628  if (val & ENET_RXCFG_ALLMCAST_MASK) {
629  enet_writel(priv, val, ENET_RXCFG_REG);
630  return;
631  }
632 
633  i = 0;
634  netdev_for_each_mc_addr(ha, dev) {
635  u8 *dmi_addr;
636  u32 tmp;
637 
638  if (i == 3)
639  break;
640  /* update perfect match registers */
641  dmi_addr = ha->addr;
642  tmp = (dmi_addr[2] << 24) | (dmi_addr[3] << 16) |
643  (dmi_addr[4] << 8) | dmi_addr[5];
644  enet_writel(priv, tmp, ENET_PML_REG(i + 1));
645 
646  tmp = (dmi_addr[0] << 8 | dmi_addr[1]);
647  tmp |= ENET_PMH_DATAVALID_MASK;
648  enet_writel(priv, tmp, ENET_PMH_REG(i++ + 1));
649  }
650 
651  for (; i < 3; i++) {
652  enet_writel(priv, 0, ENET_PML_REG(i + 1));
653  enet_writel(priv, 0, ENET_PMH_REG(i + 1));
654  }
655 
656  enet_writel(priv, val, ENET_RXCFG_REG);
657 }
658 
659 /*
660  * set mac duplex parameters
661  */
662 static void bcm_enet_set_duplex(struct bcm_enet_priv *priv, int fullduplex)
663 {
664  u32 val;
665 
666  val = enet_readl(priv, ENET_TXCTL_REG);
667  if (fullduplex)
668  val |= ENET_TXCTL_FD_MASK;
669  else
670  val &= ~ENET_TXCTL_FD_MASK;
671  enet_writel(priv, val, ENET_TXCTL_REG);
672 }
673 
674 /*
675  * set mac flow control parameters
676  */
677 static void bcm_enet_set_flow(struct bcm_enet_priv *priv, int rx_en, int tx_en)
678 {
679  u32 val;
680 
681  /* rx flow control (pause frame handling) */
682  val = enet_readl(priv, ENET_RXCFG_REG);
683  if (rx_en)
684  val |= ENET_RXCFG_ENFLOW_MASK;
685  else
686  val &= ~ENET_RXCFG_ENFLOW_MASK;
687  enet_writel(priv, val, ENET_RXCFG_REG);
688 
689  /* tx flow control (pause frame generation) */
690  val = enet_dma_readl(priv, ENETDMA_CFG_REG);
691  if (tx_en)
692  val |= ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan);
693  else
694  val &= ~ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan);
695  enet_dma_writel(priv, val, ENETDMA_CFG_REG);
696 }
697 
698 /*
699  * link changed callback (from phylib)
700  */
701 static void bcm_enet_adjust_phy_link(struct net_device *dev)
702 {
703  struct bcm_enet_priv *priv;
704  struct phy_device *phydev;
705  int status_changed;
706 
707  priv = netdev_priv(dev);
708  phydev = priv->phydev;
709  status_changed = 0;
710 
711  if (priv->old_link != phydev->link) {
712  status_changed = 1;
713  priv->old_link = phydev->link;
714  }
715 
716  /* reflect duplex change in mac configuration */
717  if (phydev->link && phydev->duplex != priv->old_duplex) {
718  bcm_enet_set_duplex(priv,
719  (phydev->duplex == DUPLEX_FULL) ? 1 : 0);
720  status_changed = 1;
721  priv->old_duplex = phydev->duplex;
722  }
723 
724  /* enable flow control if remote advertises it (trust phylib to
725  * check that duplex is full) */
726  if (phydev->link && phydev->pause != priv->old_pause) {
727  int rx_pause_en, tx_pause_en;
728 
729  if (phydev->pause) {
730  /* pause was advertised by lpa and us */
731  rx_pause_en = 1;
732  tx_pause_en = 1;
733  } else if (!priv->pause_auto) {
734  /* pause setting overridden by user */
735  rx_pause_en = priv->pause_rx;
736  tx_pause_en = priv->pause_tx;
737  } else {
738  rx_pause_en = 0;
739  tx_pause_en = 0;
740  }
741 
742  bcm_enet_set_flow(priv, rx_pause_en, tx_pause_en);
743  status_changed = 1;
744  priv->old_pause = phydev->pause;
745  }
746 
747  if (status_changed) {
748  pr_info("%s: link %s", dev->name, phydev->link ?
749  "UP" : "DOWN");
750  if (phydev->link)
751  pr_cont(" - %d/%s - flow control %s", phydev->speed,
752  DUPLEX_FULL == phydev->duplex ? "full" : "half",
753  phydev->pause == 1 ? "rx&tx" : "off");
754 
755  pr_cont("\n");
756  }
757 }
758 
759 /*
760  * link changed callback (if phylib is not used)
761  */
762 static void bcm_enet_adjust_link(struct net_device *dev)
763 {
764  struct bcm_enet_priv *priv;
765 
766  priv = netdev_priv(dev);
767  bcm_enet_set_duplex(priv, priv->force_duplex_full);
768  bcm_enet_set_flow(priv, priv->pause_rx, priv->pause_tx);
769  netif_carrier_on(dev);
770 
771  pr_info("%s: link forced UP - %d/%s - flow control %s/%s\n",
772  dev->name,
773  priv->force_speed_100 ? 100 : 10,
774  priv->force_duplex_full ? "full" : "half",
775  priv->pause_rx ? "rx" : "off",
776  priv->pause_tx ? "tx" : "off");
777 }
778 
779 /*
780  * open callback, allocate dma rings & buffers and start rx operation
781  */
782 static int bcm_enet_open(struct net_device *dev)
783 {
784  struct bcm_enet_priv *priv;
785  struct sockaddr addr;
786  struct device *kdev;
787  struct phy_device *phydev;
788  int i, ret;
789  unsigned int size;
790  char phy_id[MII_BUS_ID_SIZE + 3];
791  void *p;
792  u32 val;
793 
794  priv = netdev_priv(dev);
795  kdev = &priv->pdev->dev;
796 
797  if (priv->has_phy) {
798  /* connect to PHY */
799  snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
800  priv->mii_bus->id, priv->phy_id);
801 
802  phydev = phy_connect(dev, phy_id, bcm_enet_adjust_phy_link, 0,
803  PHY_INTERFACE_MODE_MII);
804 
805  if (IS_ERR(phydev)) {
806  dev_err(kdev, "could not attach to PHY\n");
807  return PTR_ERR(phydev);
808  }
809 
810  /* mask with MAC supported features */
811  phydev->supported &= (SUPPORTED_10baseT_Half |
812  SUPPORTED_10baseT_Full |
813  SUPPORTED_100baseT_Half |
814  SUPPORTED_100baseT_Full |
815  SUPPORTED_Autoneg |
816  SUPPORTED_Pause |
817  SUPPORTED_MII);
818  phydev->advertising = phydev->supported;
819 
820  if (priv->pause_auto && priv->pause_rx && priv->pause_tx)
821  phydev->advertising |= SUPPORTED_Pause;
822  else
823  phydev->advertising &= ~SUPPORTED_Pause;
824 
825  dev_info(kdev, "attached PHY at address %d [%s]\n",
826  phydev->addr, phydev->drv->name);
827 
828  priv->old_link = 0;
829  priv->old_duplex = -1;
830  priv->old_pause = -1;
831  priv->phydev = phydev;
832  }
833 
834  /* mask all interrupts and request them */
835  enet_writel(priv, 0, ENET_IRMASK_REG);
836  enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan));
837  enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));
838 
839  ret = request_irq(dev->irq, bcm_enet_isr_mac, 0, dev->name, dev);
840  if (ret)
841  goto out_phy_disconnect;
842 
843  ret = request_irq(priv->irq_rx, bcm_enet_isr_dma, IRQF_DISABLED,
844  dev->name, dev);
845  if (ret)
846  goto out_freeirq;
847 
848  ret = request_irq(priv->irq_tx, bcm_enet_isr_dma,
849  IRQF_DISABLED, dev->name, dev);
850  if (ret)
851  goto out_freeirq_rx;
852 
853  /* initialize perfect match registers */
854  for (i = 0; i < 4; i++) {
855  enet_writel(priv, 0, ENET_PML_REG(i));
856  enet_writel(priv, 0, ENET_PMH_REG(i));
857  }
858 
859  /* write device mac address */
860  memcpy(addr.sa_data, dev->dev_addr, ETH_ALEN);
861  bcm_enet_set_mac_address(dev, &addr);
862 
863  /* allocate rx dma ring */
864  size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
865  p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
866  if (!p) {
867  dev_err(kdev, "cannot allocate rx ring %u\n", size);
868  ret = -ENOMEM;
869  goto out_freeirq_tx;
870  }
871 
872  memset(p, 0, size);
873  priv->rx_desc_alloc_size = size;
874  priv->rx_desc_cpu = p;
875 
876  /* allocate tx dma ring */
877  size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
878  p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
879  if (!p) {
880  dev_err(kdev, "cannot allocate tx ring\n");
881  ret = -ENOMEM;
882  goto out_free_rx_ring;
883  }
884 
885  memset(p, 0, size);
886  priv->tx_desc_alloc_size = size;
887  priv->tx_desc_cpu = p;
888 
889  priv->tx_skb = kzalloc(sizeof(struct sk_buff *) * priv->tx_ring_size,
890  GFP_KERNEL);
891  if (!priv->tx_skb) {
892  dev_err(kdev, "cannot allocate tx skb queue\n");
893  ret = -ENOMEM;
894  goto out_free_tx_ring;
895  }
896 
897  priv->tx_desc_count = priv->tx_ring_size;
898  priv->tx_dirty_desc = 0;
899  priv->tx_curr_desc = 0;
900  spin_lock_init(&priv->tx_lock);
901 
902  /* init & fill rx ring with skbs */
903  priv->rx_skb = kzalloc(sizeof(struct sk_buff *) * priv->rx_ring_size,
904  GFP_KERNEL);
905  if (!priv->rx_skb) {
906  dev_err(kdev, "cannot allocate rx skb queue\n");
907  ret = -ENOMEM;
908  goto out_free_tx_skb;
909  }
910 
911  priv->rx_desc_count = 0;
912  priv->rx_dirty_desc = 0;
913  priv->rx_curr_desc = 0;
914 
915  /* initialize flow control buffer allocation */
916  enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
917  ENETDMA_BUFALLOC_REG(priv->rx_chan));
918 
919  if (bcm_enet_refill_rx(dev)) {
920  dev_err(kdev, "cannot allocate rx skb queue\n");
921  ret = -ENOMEM;
922  goto out;
923  }
924 
925  /* write rx & tx ring addresses */
926  enet_dma_writel(priv, priv->rx_desc_dma,
927  ENETDMA_RSTART_REG(priv->rx_chan));
928  enet_dma_writel(priv, priv->tx_desc_dma,
929  ENETDMA_RSTART_REG(priv->tx_chan));
930 
931  /* clear remaining state ram for rx & tx channel */
932  enet_dma_writel(priv, 0, ENETDMA_SRAM2_REG(priv->rx_chan));
933  enet_dma_writel(priv, 0, ENETDMA_SRAM2_REG(priv->tx_chan));
934  enet_dma_writel(priv, 0, ENETDMA_SRAM3_REG(priv->rx_chan));
935  enet_dma_writel(priv, 0, ENETDMA_SRAM3_REG(priv->tx_chan));
936  enet_dma_writel(priv, 0, ENETDMA_SRAM4_REG(priv->rx_chan));
937  enet_dma_writel(priv, 0, ENETDMA_SRAM4_REG(priv->tx_chan));
938 
939  /* set max rx/tx length */
940  enet_writel(priv, priv->hw_mtu, ENET_RXMAXLEN_REG);
941  enet_writel(priv, priv->hw_mtu, ENET_TXMAXLEN_REG);
942 
943  /* set dma maximum burst len */
944  enet_dma_writel(priv, BCMENET_DMA_MAXBURST,
945  ENETDMA_MAXBURST_REG(priv->rx_chan));
946  enet_dma_writel(priv, BCMENET_DMA_MAXBURST,
947  ENETDMA_MAXBURST_REG(priv->tx_chan));
948 
949  /* set correct transmit fifo watermark */
950  enet_writel(priv, BCMENET_TX_FIFO_TRESH, ENET_TXWMARK_REG);
951 
952  /* set flow control low/high threshold to 1/3 / 2/3 */
953  val = priv->rx_ring_size / 3;
954  enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan));
955  val = (priv->rx_ring_size * 2) / 3;
956  enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan));
957 
958  /* all set, enable mac and interrupts, start dma engine and
959  * kick rx dma channel */
960  wmb();
961  val = enet_readl(priv, ENET_CTL_REG);
962  val |= ENET_CTL_ENABLE_MASK;
963  enet_writel(priv, val, ENET_CTL_REG);
964  enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
965  enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
966  ENETDMA_CHANCFG_REG(priv->rx_chan));
967 
968  /* watch "mib counters about to overflow" interrupt */
969  enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
970  enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);
971 
972  /* watch "packet transferred" interrupt in rx and tx */
973  enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
974  ENETDMA_IR_REG(priv->rx_chan));
975  enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
976  ENETDMA_IR_REG(priv->tx_chan));
977 
978  /* make sure we enable napi before rx interrupt */
979  napi_enable(&priv->napi);
980 
981  enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
982  ENETDMA_IRMASK_REG(priv->rx_chan));
983  enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
984  ENETDMA_IRMASK_REG(priv->tx_chan));
985 
986  if (priv->has_phy)
987  phy_start(priv->phydev);
988  else
989  bcm_enet_adjust_link(dev);
990 
991  netif_start_queue(dev);
992  return 0;
993 
994 out:
995  for (i = 0; i < priv->rx_ring_size; i++) {
996  struct bcm_enet_desc *desc;
997 
998  if (!priv->rx_skb[i])
999  continue;
1000 
1001  desc = &priv->rx_desc_cpu[i];
1002  dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
1003  DMA_FROM_DEVICE);
1004  kfree_skb(priv->rx_skb[i]);
1005  }
1006  kfree(priv->rx_skb);
1007 
1008 out_free_tx_skb:
1009  kfree(priv->tx_skb);
1010 
1011 out_free_tx_ring:
1012  dma_free_coherent(kdev, priv->tx_desc_alloc_size,
1013  priv->tx_desc_cpu, priv->tx_desc_dma);
1014 
1015 out_free_rx_ring:
1016  dma_free_coherent(kdev, priv->rx_desc_alloc_size,
1017  priv->rx_desc_cpu, priv->rx_desc_dma);
1018 
1019 out_freeirq_tx:
1020  free_irq(priv->irq_tx, dev);
1021 
1022 out_freeirq_rx:
1023  free_irq(priv->irq_rx, dev);
1024 
1025 out_freeirq:
1026  free_irq(dev->irq, dev);
1027 
1028 out_phy_disconnect:
1029  phy_disconnect(priv->phydev);
1030 
1031  return ret;
1032 }
1033 
1034 /*
1035  * disable mac
1036  */
1037 static void bcm_enet_disable_mac(struct bcm_enet_priv *priv)
1038 {
1039  int limit;
1040  u32 val;
1041 
1042  val = enet_readl(priv, ENET_CTL_REG);
1043  val |= ENET_CTL_DISABLE_MASK;
1044  enet_writel(priv, val, ENET_CTL_REG);
1045 
1046  limit = 1000;
1047  do {
1048  u32 val;
1049 
1050  val = enet_readl(priv, ENET_CTL_REG);
1051  if (!(val & ENET_CTL_DISABLE_MASK))
1052  break;
1053  udelay(1);
1054  } while (limit--);
1055 }
1056 
1057 /*
1058  * disable dma in given channel
1059  */
1060 static void bcm_enet_disable_dma(struct bcm_enet_priv *priv, int chan)
1061 {
1062  int limit;
1063 
1064  enet_dma_writel(priv, 0, ENETDMA_CHANCFG_REG(chan));
1065 
1066  limit = 1000;
1067  do {
1068  u32 val;
1069 
1070  val = enet_dma_readl(priv, ENETDMA_CHANCFG_REG(chan));
1071  if (!(val & ENETDMA_CHANCFG_EN_MASK))
1072  break;
1073  udelay(1);
1074  } while (limit--);
1075 }
1076 
1077 /*
1078  * stop callback
1079  */
1080 static int bcm_enet_stop(struct net_device *dev)
1081 {
1082  struct bcm_enet_priv *priv;
1083  struct device *kdev;
1084  int i;
1085 
1086  priv = netdev_priv(dev);
1087  kdev = &priv->pdev->dev;
1088 
1089  netif_stop_queue(dev);
1090  napi_disable(&priv->napi);
1091  if (priv->has_phy)
1092  phy_stop(priv->phydev);
1093  del_timer_sync(&priv->rx_timeout);
1094 
1095  /* mask all interrupts */
1096  enet_writel(priv, 0, ENET_IRMASK_REG);
1097  enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan));
1098  enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));
1099 
1100  /* make sure no mib update is scheduled */
1101  cancel_work_sync(&priv->mib_update_task);
1102 
1103  /* disable dma & mac */
1104  bcm_enet_disable_dma(priv, priv->tx_chan);
1105  bcm_enet_disable_dma(priv, priv->rx_chan);
1106  bcm_enet_disable_mac(priv);
1107 
1108  /* force reclaim of all tx buffers */
1109  bcm_enet_tx_reclaim(dev, 1);
1110 
1111  /* free the rx skb ring */
1112  for (i = 0; i < priv->rx_ring_size; i++) {
1113  struct bcm_enet_desc *desc;
1114 
1115  if (!priv->rx_skb[i])
1116  continue;
1117 
1118  desc = &priv->rx_desc_cpu[i];
1119  dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
1120  DMA_FROM_DEVICE);
1121  kfree_skb(priv->rx_skb[i]);
1122  }
1123 
1124  /* free remaining allocated memory */
1125  kfree(priv->rx_skb);
1126  kfree(priv->tx_skb);
1127  dma_free_coherent(kdev, priv->rx_desc_alloc_size,
1128  priv->rx_desc_cpu, priv->rx_desc_dma);
1129  dma_free_coherent(kdev, priv->tx_desc_alloc_size,
1130  priv->tx_desc_cpu, priv->tx_desc_dma);
1131  free_irq(priv->irq_tx, dev);
1132  free_irq(priv->irq_rx, dev);
1133  free_irq(dev->irq, dev);
1134 
1135  /* release phy */
1136  if (priv->has_phy) {
1137  phy_disconnect(priv->phydev);
1138  priv->phydev = NULL;
1139  }
1140 
1141  return 0;
1142 }
1143 
1144 /*
1145  * ethtool callbacks
1146  */
1147 struct bcm_enet_stats {
1148  char stat_string[ETH_GSTRING_LEN];
1149  int sizeof_stat;
1150  int stat_offset;
1151  int mib_reg;
1152 };
1153 
1154 #define GEN_STAT(m) sizeof(((struct bcm_enet_priv *)0)->m), \
1155  offsetof(struct bcm_enet_priv, m)
1156 #define DEV_STAT(m) sizeof(((struct net_device_stats *)0)->m), \
1157  offsetof(struct net_device_stats, m)
1158 
1159 static const struct bcm_enet_stats bcm_enet_gstrings_stats[] = {
1160  { "rx_packets", DEV_STAT(rx_packets), -1 },
1161  { "tx_packets", DEV_STAT(tx_packets), -1 },
1162  { "rx_bytes", DEV_STAT(rx_bytes), -1 },
1163  { "tx_bytes", DEV_STAT(tx_bytes), -1 },
1164  { "rx_errors", DEV_STAT(rx_errors), -1 },
1165  { "tx_errors", DEV_STAT(tx_errors), -1 },
1166  { "rx_dropped", DEV_STAT(rx_dropped), -1 },
1167  { "tx_dropped", DEV_STAT(tx_dropped), -1 },
1168 
1169  { "rx_good_octets", GEN_STAT(mib.rx_gd_octets), ETH_MIB_RX_GD_OCTETS},
1170  { "rx_good_pkts", GEN_STAT(mib.rx_gd_pkts), ETH_MIB_RX_GD_PKTS },
1171  { "rx_broadcast", GEN_STAT(mib.rx_brdcast), ETH_MIB_RX_BRDCAST },
1172  { "rx_multicast", GEN_STAT(mib.rx_mult), ETH_MIB_RX_MULT },
1173  { "rx_64_octets", GEN_STAT(mib.rx_64), ETH_MIB_RX_64 },
1174  { "rx_65_127_oct", GEN_STAT(mib.rx_65_127), ETH_MIB_RX_65_127 },
1175  { "rx_128_255_oct", GEN_STAT(mib.rx_128_255), ETH_MIB_RX_128_255 },
1176  { "rx_256_511_oct", GEN_STAT(mib.rx_256_511), ETH_MIB_RX_256_511 },
1177  { "rx_512_1023_oct", GEN_STAT(mib.rx_512_1023), ETH_MIB_RX_512_1023 },
1178  { "rx_1024_max_oct", GEN_STAT(mib.rx_1024_max), ETH_MIB_RX_1024_MAX },
1179  { "rx_jabber", GEN_STAT(mib.rx_jab), ETH_MIB_RX_JAB },
1180  { "rx_oversize", GEN_STAT(mib.rx_ovr), ETH_MIB_RX_OVR },
1181  { "rx_fragment", GEN_STAT(mib.rx_frag), ETH_MIB_RX_FRAG },
1182  { "rx_dropped", GEN_STAT(mib.rx_drop), ETH_MIB_RX_DROP },
1183  { "rx_crc_align", GEN_STAT(mib.rx_crc_align), ETH_MIB_RX_CRC_ALIGN },
1184  { "rx_undersize", GEN_STAT(mib.rx_und), ETH_MIB_RX_UND },
1185  { "rx_crc", GEN_STAT(mib.rx_crc), ETH_MIB_RX_CRC },
1186  { "rx_align", GEN_STAT(mib.rx_align), ETH_MIB_RX_ALIGN },
1187  { "rx_symbol_error", GEN_STAT(mib.rx_sym), ETH_MIB_RX_SYM },
1188  { "rx_pause", GEN_STAT(mib.rx_pause), ETH_MIB_RX_PAUSE },
1189  { "rx_control", GEN_STAT(mib.rx_cntrl), ETH_MIB_RX_CNTRL },
1190 
1191  { "tx_good_octets", GEN_STAT(mib.tx_gd_octets), ETH_MIB_TX_GD_OCTETS },
1192  { "tx_good_pkts", GEN_STAT(mib.tx_gd_pkts), ETH_MIB_TX_GD_PKTS },
1193  { "tx_broadcast", GEN_STAT(mib.tx_brdcast), ETH_MIB_TX_BRDCAST },
1194  { "tx_multicast", GEN_STAT(mib.tx_mult), ETH_MIB_TX_MULT },
1195  { "tx_64_oct", GEN_STAT(mib.tx_64), ETH_MIB_TX_64 },
1196  { "tx_65_127_oct", GEN_STAT(mib.tx_65_127), ETH_MIB_TX_65_127 },
1197  { "tx_128_255_oct", GEN_STAT(mib.tx_128_255), ETH_MIB_TX_128_255 },
1198  { "tx_256_511_oct", GEN_STAT(mib.tx_256_511), ETH_MIB_TX_256_511 },
1199  { "tx_512_1023_oct", GEN_STAT(mib.tx_512_1023), ETH_MIB_TX_512_1023},
1200  { "tx_1024_max_oct", GEN_STAT(mib.tx_1024_max), ETH_MIB_TX_1024_MAX },
1201  { "tx_jabber", GEN_STAT(mib.tx_jab), ETH_MIB_TX_JAB },
1202  { "tx_oversize", GEN_STAT(mib.tx_ovr), ETH_MIB_TX_OVR },
1203  { "tx_fragment", GEN_STAT(mib.tx_frag), ETH_MIB_TX_FRAG },
1204  { "tx_underrun", GEN_STAT(mib.tx_underrun), ETH_MIB_TX_UNDERRUN },
1205  { "tx_collisions", GEN_STAT(mib.tx_col), ETH_MIB_TX_COL },
1206  { "tx_single_collision", GEN_STAT(mib.tx_1_col), ETH_MIB_TX_1_COL },
1207  { "tx_multiple_collision", GEN_STAT(mib.tx_m_col), ETH_MIB_TX_M_COL },
1208  { "tx_excess_collision", GEN_STAT(mib.tx_ex_col), ETH_MIB_TX_EX_COL },
1209  { "tx_late_collision", GEN_STAT(mib.tx_late), ETH_MIB_TX_LATE },
1210  { "tx_deferred", GEN_STAT(mib.tx_def), ETH_MIB_TX_DEF },
1211  { "tx_carrier_sense", GEN_STAT(mib.tx_crs), ETH_MIB_TX_CRS },
1212  { "tx_pause", GEN_STAT(mib.tx_pause), ETH_MIB_TX_PAUSE },
1213 
1214 };
1215 
1216 #define BCM_ENET_STATS_LEN \
1217  (sizeof(bcm_enet_gstrings_stats) / sizeof(struct bcm_enet_stats))
1218 
1219 static const u32 unused_mib_regs[] = {
1220  ETH_MIB_TX_ALL_OCTETS,
1221  ETH_MIB_TX_ALL_PKTS,
1222  ETH_MIB_RX_ALL_OCTETS,
1223  ETH_MIB_RX_ALL_PKTS,
1224 };
1225 
1226 
1227 static void bcm_enet_get_drvinfo(struct net_device *netdev,
1228  struct ethtool_drvinfo *drvinfo)
1229 {
1230  strncpy(drvinfo->driver, bcm_enet_driver_name, 32);
1231  strncpy(drvinfo->version, bcm_enet_driver_version, 32);
1232  strncpy(drvinfo->fw_version, "N/A", 32);
1233  strncpy(drvinfo->bus_info, "bcm63xx", 32);
1234  drvinfo->n_stats = BCM_ENET_STATS_LEN;
1235 }
1236 
1237 static int bcm_enet_get_sset_count(struct net_device *netdev,
1238  int string_set)
1239 {
1240  switch (string_set) {
1241  case ETH_SS_STATS:
1242  return BCM_ENET_STATS_LEN;
1243  default:
1244  return -EINVAL;
1245  }
1246 }
1247 
1248 static void bcm_enet_get_strings(struct net_device *netdev,
1249  u32 stringset, u8 *data)
1250 {
1251  int i;
1252 
1253  switch (stringset) {
1254  case ETH_SS_STATS:
1255  for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
1256  memcpy(data + i * ETH_GSTRING_LEN,
1257  bcm_enet_gstrings_stats[i].stat_string,
1258  ETH_GSTRING_LEN);
1259  }
1260  break;
1261  }
1262 }
1263 
1264 static void update_mib_counters(struct bcm_enet_priv *priv)
1265 {
1266  int i;
1267 
1268  for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
1269  const struct bcm_enet_stats *s;
1270  u32 val;
1271  char *p;
1272 
1273  s = &bcm_enet_gstrings_stats[i];
1274  if (s->mib_reg == -1)
1275  continue;
1276 
1277  val = enet_readl(priv, ENET_MIB_REG(s->mib_reg));
1278  p = (char *)priv + s->stat_offset;
1279 
1280  if (s->sizeof_stat == sizeof(u64))
1281  *(u64 *)p += val;
1282  else
1283  *(u32 *)p += val;
1284  }
1285 
1286  /* also empty unused mib counters to make sure mib counter
1287  * overflow interrupt is cleared */
1288  for (i = 0; i < ARRAY_SIZE(unused_mib_regs); i++)
1289  (void)enet_readl(priv, ENET_MIB_REG(unused_mib_regs[i]));
1290 }
1291 
1292 static void bcm_enet_update_mib_counters_defer(struct work_struct *t)
1293 {
1294  struct bcm_enet_priv *priv;
1295 
1296  priv = container_of(t, struct bcm_enet_priv, mib_update_task);
1297  mutex_lock(&priv->mib_update_lock);
1298  update_mib_counters(priv);
1299  mutex_unlock(&priv->mib_update_lock);
1300 
1301  /* reenable mib interrupt */
1302  if (netif_running(priv->net_dev))
1303  enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);
1304 }
1305 
1306 static void bcm_enet_get_ethtool_stats(struct net_device *netdev,
1307  struct ethtool_stats *stats,
1308  u64 *data)
1309 {
1310  struct bcm_enet_priv *priv;
1311  int i;
1312 
1313  priv = netdev_priv(netdev);
1314 
1315  mutex_lock(&priv->mib_update_lock);
1316  update_mib_counters(priv);
1317 
1318  for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
1319  const struct bcm_enet_stats *s;
1320  char *p;
1321 
1322  s = &bcm_enet_gstrings_stats[i];
1323  if (s->mib_reg == -1)
1324  p = (char *)&netdev->stats;
1325  else
1326  p = (char *)priv;
1327  p += s->stat_offset;
1328  data[i] = (s->sizeof_stat == sizeof(u64)) ?
1329  *(u64 *)p : *(u32 *)p;
1330  }
1331  mutex_unlock(&priv->mib_update_lock);
1332 }
1333 
1334 static int bcm_enet_get_settings(struct net_device *dev,
1335  struct ethtool_cmd *cmd)
1336 {
1337  struct bcm_enet_priv *priv;
1338 
1339  priv = netdev_priv(dev);
1340 
1341  cmd->maxrxpkt = 0;
1342  cmd->maxtxpkt = 0;
1343 
1344  if (priv->has_phy) {
1345  if (!priv->phydev)
1346  return -ENODEV;
1347  return phy_ethtool_gset(priv->phydev, cmd);
1348  } else {
1349  cmd->autoneg = 0;
1350  ethtool_cmd_speed_set(cmd, ((priv->force_speed_100)
1351  ? SPEED_100 : SPEED_10));
1352  cmd->duplex = (priv->force_duplex_full) ?
1353  DUPLEX_FULL : DUPLEX_HALF;
1354  cmd->supported = ADVERTISED_10baseT_Half |
1355  ADVERTISED_10baseT_Full |
1356  ADVERTISED_100baseT_Half |
1357  ADVERTISED_100baseT_Full;
1358  cmd->advertising = 0;
1359  cmd->port = PORT_MII;
1360  cmd->transceiver = XCVR_EXTERNAL;
1361  }
1362  return 0;
1363 }
1364 
1365 static int bcm_enet_set_settings(struct net_device *dev,
1366  struct ethtool_cmd *cmd)
1367 {
1368  struct bcm_enet_priv *priv;
1369 
1370  priv = netdev_priv(dev);
1371  if (priv->has_phy) {
1372  if (!priv->phydev)
1373  return -ENODEV;
1374  return phy_ethtool_sset(priv->phydev, cmd);
1375  } else {
1376 
1377  if (cmd->autoneg ||
1378  (cmd->speed != SPEED_100 && cmd->speed != SPEED_10) ||
1379  cmd->port != PORT_MII)
1380  return -EINVAL;
1381 
1382  priv->force_speed_100 = (cmd->speed == SPEED_100) ? 1 : 0;
1383  priv->force_duplex_full = (cmd->duplex == DUPLEX_FULL) ? 1 : 0;
1384 
1385  if (netif_running(dev))
1386  bcm_enet_adjust_link(dev);
1387  return 0;
1388  }
1389 }
1390 
1391 static void bcm_enet_get_ringparam(struct net_device *dev,
1392  struct ethtool_ringparam *ering)
1393 {
1394  struct bcm_enet_priv *priv;
1395 
1396  priv = netdev_priv(dev);
1397 
1398  /* rx/tx ring is actually only limited by memory */
1399  ering->rx_max_pending = 8192;
1400  ering->tx_max_pending = 8192;
1401  ering->rx_pending = priv->rx_ring_size;
1402  ering->tx_pending = priv->tx_ring_size;
1403 }
1404 
1405 static int bcm_enet_set_ringparam(struct net_device *dev,
1406  struct ethtool_ringparam *ering)
1407 {
1408  struct bcm_enet_priv *priv;
1409  int was_running;
1410 
1411  priv = netdev_priv(dev);
1412 
1413  was_running = 0;
1414  if (netif_running(dev)) {
1415  bcm_enet_stop(dev);
1416  was_running = 1;
1417  }
1418 
1419  priv->rx_ring_size = ering->rx_pending;
1420  priv->tx_ring_size = ering->tx_pending;
1421 
1422  if (was_running) {
1423  int err;
1424 
1425  err = bcm_enet_open(dev);
1426  if (err)
1427  dev_close(dev);
1428  else
1429  bcm_enet_set_multicast_list(dev);
1430  }
1431  return 0;
1432 }
1433 
1434 static void bcm_enet_get_pauseparam(struct net_device *dev,
1435  struct ethtool_pauseparam *ecmd)
1436 {
1437  struct bcm_enet_priv *priv;
1438 
1439  priv = netdev_priv(dev);
1440  ecmd->autoneg = priv->pause_auto;
1441  ecmd->rx_pause = priv->pause_rx;
1442  ecmd->tx_pause = priv->pause_tx;
1443 }
1444 
1445 static int bcm_enet_set_pauseparam(struct net_device *dev,
1446  struct ethtool_pauseparam *ecmd)
1447 {
1448  struct bcm_enet_priv *priv;
1449 
1450  priv = netdev_priv(dev);
1451 
1452  if (priv->has_phy) {
1453  if (ecmd->autoneg && (ecmd->rx_pause != ecmd->tx_pause)) {
1454  /* asymmetric pause mode not supported,
1455  * actually possible but integrated PHY has RO
1456  * asym_pause bit */
1457  return -EINVAL;
1458  }
1459  } else {
1460  /* no pause autoneg on direct mii connection */
1461  if (ecmd->autoneg)
1462  return -EINVAL;
1463  }
1464 
1465  priv->pause_auto = ecmd->autoneg;
1466  priv->pause_rx = ecmd->rx_pause;
1467  priv->pause_tx = ecmd->tx_pause;
1468 
1469  return 0;
1470 }
1471 
1472 static const struct ethtool_ops bcm_enet_ethtool_ops = {
1473  .get_strings = bcm_enet_get_strings,
1474  .get_sset_count = bcm_enet_get_sset_count,
1475  .get_ethtool_stats = bcm_enet_get_ethtool_stats,
1476  .get_settings = bcm_enet_get_settings,
1477  .set_settings = bcm_enet_set_settings,
1478  .get_drvinfo = bcm_enet_get_drvinfo,
1479  .get_link = ethtool_op_get_link,
1480  .get_ringparam = bcm_enet_get_ringparam,
1481  .set_ringparam = bcm_enet_set_ringparam,
1482  .get_pauseparam = bcm_enet_get_pauseparam,
1483  .set_pauseparam = bcm_enet_set_pauseparam,
1484 };
1485 
1486 static int bcm_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1487 {
1488  struct bcm_enet_priv *priv;
1489 
1490  priv = netdev_priv(dev);
1491  if (priv->has_phy) {
1492  if (!priv->phydev)
1493  return -ENODEV;
1494  return phy_mii_ioctl(priv->phydev, rq, cmd);
1495  } else {
1496  struct mii_if_info mii;
1497 
1498  mii.dev = dev;
1499  mii.mdio_read = bcm_enet_mdio_read_mii;
1500  mii.mdio_write = bcm_enet_mdio_write_mii;
1501  mii.phy_id = 0;
1502  mii.phy_id_mask = 0x3f;
1503  mii.reg_num_mask = 0x1f;
1504  return generic_mii_ioctl(&mii, if_mii(rq), cmd, NULL);
1505  }
1506 }
1507 
1508 /*
1509  * calculate actual hardware mtu
1510  */
1511 static int compute_hw_mtu(struct bcm_enet_priv *priv, int mtu)
1512 {
1513  int actual_mtu;
1514 
1515  actual_mtu = mtu;
1516 
1517  /* add ethernet header + vlan tag size */
1518  actual_mtu += VLAN_ETH_HLEN;
1519 
1520  if (actual_mtu < 64 || actual_mtu > BCMENET_MAX_MTU)
1521  return -EINVAL;
1522 
1523  /*
1524  * setup maximum size before we get overflow mark in
1525  * descriptor, note that this will not prevent reception of
1526  * big frames, they will be split into multiple buffers
1527  * anyway
1528  */
1529  priv->hw_mtu = actual_mtu;
1530 
1531  /*
1532  * align rx buffer size to dma burst len, account FCS since
1533  * it's appended
1534  */
1535  priv->rx_skb_size = ALIGN(actual_mtu + ETH_FCS_LEN,
1536  BCMENET_DMA_MAXBURST * 4);
1537  return 0;
1538 }
1539 
1540 /*
1541  * adjust mtu, can't be called while device is running
1542  */
1543 static int bcm_enet_change_mtu(struct net_device *dev, int new_mtu)
1544 {
1545  int ret;
1546 
1547  if (netif_running(dev))
1548  return -EBUSY;
1549 
1550  ret = compute_hw_mtu(netdev_priv(dev), new_mtu);
1551  if (ret)
1552  return ret;
1553  dev->mtu = new_mtu;
1554  return 0;
1555 }
1556 
1557 /*
1558  * preinit hardware to allow mii operation while device is down
1559  */
1560 static void bcm_enet_hw_preinit(struct bcm_enet_priv *priv)
1561 {
1562  u32 val;
1563  int limit;
1564 
1565  /* make sure mac is disabled */
1566  bcm_enet_disable_mac(priv);
1567 
1568  /* soft reset mac */
1569  val = ENET_CTL_SRESET_MASK;
1570  enet_writel(priv, val, ENET_CTL_REG);
1571  wmb();
1572 
1573  limit = 1000;
1574  do {
1575  val = enet_readl(priv, ENET_CTL_REG);
1576  if (!(val & ENET_CTL_SRESET_MASK))
1577  break;
1578  udelay(1);
1579  } while (limit--);
1580 
1581  /* select correct mii interface */
1582  val = enet_readl(priv, ENET_CTL_REG);
1583  if (priv->use_external_mii)
1584  val |= ENET_CTL_EPHYSEL_MASK;
1585  else
1586  val &= ~ENET_CTL_EPHYSEL_MASK;
1587  enet_writel(priv, val, ENET_CTL_REG);
1588 
1589  /* turn on mdc clock */
1590  enet_writel(priv, (0x1f << ENET_MIISC_MDCFREQDIV_SHIFT) |
1591  ENET_MIISC_PREAMBLEEN_MASK, ENET_MIISC_REG);
1592 
1593  /* set mib counters to self-clear when read */
1594  val = enet_readl(priv, ENET_MIBCTL_REG);
1595  val |= ENET_MIBCTL_RDCLEAR_MASK;
1596  enet_writel(priv, val, ENET_MIBCTL_REG);
1597 }
1598 
1599 static const struct net_device_ops bcm_enet_ops = {
1600  .ndo_open = bcm_enet_open,
1601  .ndo_stop = bcm_enet_stop,
1602  .ndo_start_xmit = bcm_enet_start_xmit,
1603  .ndo_set_mac_address = bcm_enet_set_mac_address,
1604  .ndo_set_rx_mode = bcm_enet_set_multicast_list,
1605  .ndo_do_ioctl = bcm_enet_ioctl,
1606  .ndo_change_mtu = bcm_enet_change_mtu,
1607 #ifdef CONFIG_NET_POLL_CONTROLLER
1608  .ndo_poll_controller = bcm_enet_netpoll,
1609 #endif
1610 };
1611 
1612 /*
1613  * allocate netdevice, request register memory and register device.
1614  */
1615 static int __devinit bcm_enet_probe(struct platform_device *pdev)
1616 {
1617  struct bcm_enet_priv *priv;
1618  struct net_device *dev;
1619  struct bcm63xx_enet_platform_data *pd;
1620  struct resource *res_mem, *res_irq, *res_irq_rx, *res_irq_tx;
1621  struct mii_bus *bus;
1622  const char *clk_name;
1623  unsigned int iomem_size;
1624  int i, ret;
1625 
1626  /* stop if shared driver failed, assume driver->probe will be
1627  * called in the same order we register devices (correct ?) */
1628  if (!bcm_enet_shared_base)
1629  return -ENODEV;
1630 
1631  res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1632  res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1633  res_irq_rx = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
1634  res_irq_tx = platform_get_resource(pdev, IORESOURCE_IRQ, 2);
1635  if (!res_mem || !res_irq || !res_irq_rx || !res_irq_tx)
1636  return -ENODEV;
1637 
1638  ret = 0;
1639  dev = alloc_etherdev(sizeof(*priv));
1640  if (!dev)
1641  return -ENOMEM;
1642  priv = netdev_priv(dev);
1643 
1644  ret = compute_hw_mtu(priv, dev->mtu);
1645  if (ret)
1646  goto out;
1647 
1648  iomem_size = resource_size(res_mem);
1649  if (!request_mem_region(res_mem->start, iomem_size, "bcm63xx_enet")) {
1650  ret = -EBUSY;
1651  goto out;
1652  }
1653 
1654  priv->base = ioremap(res_mem->start, iomem_size);
1655  if (priv->base == NULL) {
1656  ret = -ENOMEM;
1657  goto out_release_mem;
1658  }
1659  dev->irq = priv->irq = res_irq->start;
1660  priv->irq_rx = res_irq_rx->start;
1661  priv->irq_tx = res_irq_tx->start;
1662  priv->mac_id = pdev->id;
1663 
1664  /* get rx & tx dma channel id for this mac */
1665  if (priv->mac_id == 0) {
1666  priv->rx_chan = 0;
1667  priv->tx_chan = 1;
1668  clk_name = "enet0";
1669  } else {
1670  priv->rx_chan = 2;
1671  priv->tx_chan = 3;
1672  clk_name = "enet1";
1673  }
1674 
1675  priv->mac_clk = clk_get(&pdev->dev, clk_name);
1676  if (IS_ERR(priv->mac_clk)) {
1677  ret = PTR_ERR(priv->mac_clk);
1678  goto out_unmap;
1679  }
1680  clk_enable(priv->mac_clk);
1681 
1682  /* initialize default and fetch platform data */
1683  priv->rx_ring_size = BCMENET_DEF_RX_DESC;
1684  priv->tx_ring_size = BCMENET_DEF_TX_DESC;
1685 
1686  pd = pdev->dev.platform_data;
1687  if (pd) {
1688  memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
1689  priv->has_phy = pd->has_phy;
1690  priv->phy_id = pd->phy_id;
1691  priv->has_phy_interrupt = pd->has_phy_interrupt;
1692  priv->phy_interrupt = pd->phy_interrupt;
1693  priv->use_external_mii = !pd->use_internal_phy;
1694  priv->pause_auto = pd->pause_auto;
1695  priv->pause_rx = pd->pause_rx;
1696  priv->pause_tx = pd->pause_tx;
1697  priv->force_duplex_full = pd->force_duplex_full;
1698  priv->force_speed_100 = pd->force_speed_100;
1699  }
1700 
1701  if (priv->mac_id == 0 && priv->has_phy && !priv->use_external_mii) {
1702  /* using internal PHY, enable clock */
1703  priv->phy_clk = clk_get(&pdev->dev, "ephy");
1704  if (IS_ERR(priv->phy_clk)) {
1705  ret = PTR_ERR(priv->phy_clk);
1706  priv->phy_clk = NULL;
1707  goto out_put_clk_mac;
1708  }
1709  clk_enable(priv->phy_clk);
1710  }
1711 
1712  /* do minimal hardware init to be able to probe mii bus */
1713  bcm_enet_hw_preinit(priv);
1714 
1715  /* MII bus registration */
1716  if (priv->has_phy) {
1717 
1718  priv->mii_bus = mdiobus_alloc();
1719  if (!priv->mii_bus) {
1720  ret = -ENOMEM;
1721  goto out_uninit_hw;
1722  }
1723 
1724  bus = priv->mii_bus;
1725  bus->name = "bcm63xx_enet MII bus";
1726  bus->parent = &pdev->dev;
1727  bus->priv = priv;
1728  bus->read = bcm_enet_mdio_read_phylib;
1729  bus->write = bcm_enet_mdio_write_phylib;
1730  sprintf(bus->id, "%s-%d", pdev->name, priv->mac_id);
1731 
1732  /* only probe bus where we think the PHY is, because
1733  * the mdio read operation return 0 instead of 0xffff
1734  * if a slave is not present on hw */
1735  bus->phy_mask = ~(1 << priv->phy_id);
1736 
1737  bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
1738  if (!bus->irq) {
1739  ret = -ENOMEM;
1740  goto out_free_mdio;
1741  }
1742 
1743  if (priv->has_phy_interrupt)
1744  bus->irq[priv->phy_id] = priv->phy_interrupt;
1745  else
1746  bus->irq[priv->phy_id] = PHY_POLL;
1747 
1748  ret = mdiobus_register(bus);
1749  if (ret) {
1750  dev_err(&pdev->dev, "unable to register mdio bus\n");
1751  goto out_free_mdio;
1752  }
1753  } else {
1754 
1755  /* run platform code to initialize PHY device */
1756  if (pd->mii_config &&
1757  pd->mii_config(dev, 1, bcm_enet_mdio_read_mii,
1758  bcm_enet_mdio_write_mii)) {
1759  dev_err(&pdev->dev, "unable to configure mdio bus\n");
1760  goto out_uninit_hw;
1761  }
1762  }
1763 
1764  spin_lock_init(&priv->rx_lock);
1765 
1766  /* init rx timeout (used for oom) */
1767  init_timer(&priv->rx_timeout);
1768  priv->rx_timeout.function = bcm_enet_refill_rx_timer;
1769  priv->rx_timeout.data = (unsigned long)dev;
1770 
1771  /* init the mib update lock&work */
1772  mutex_init(&priv->mib_update_lock);
1773  INIT_WORK(&priv->mib_update_task, bcm_enet_update_mib_counters_defer);
1774 
1775  /* zero mib counters */
1776  for (i = 0; i < ENET_MIB_REG_COUNT; i++)
1777  enet_writel(priv, 0, ENET_MIB_REG(i));
1778 
1779  /* register netdevice */
1780  dev->netdev_ops = &bcm_enet_ops;
1781  netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16);
1782 
1783  SET_ETHTOOL_OPS(dev, &bcm_enet_ethtool_ops);
1784  SET_NETDEV_DEV(dev, &pdev->dev);
1785 
1786  ret = register_netdev(dev);
1787  if (ret)
1788  goto out_unregister_mdio;
1789 
1790  netif_carrier_off(dev);
1791  platform_set_drvdata(pdev, dev);
1792  priv->pdev = pdev;
1793  priv->net_dev = dev;
1794 
1795  return 0;
1796 
1797 out_unregister_mdio:
1798  if (priv->mii_bus) {
1799  mdiobus_unregister(priv->mii_bus);
1800  kfree(priv->mii_bus->irq);
1801  }
1802 
1803 out_free_mdio:
1804  if (priv->mii_bus)
1805  mdiobus_free(priv->mii_bus);
1806 
1807 out_uninit_hw:
1808  /* turn off mdc clock */
1809  enet_writel(priv, 0, ENET_MIISC_REG);
1810  if (priv->phy_clk) {
1811  clk_disable(priv->phy_clk);
1812  clk_put(priv->phy_clk);
1813  }
1814 
1815 out_put_clk_mac:
1816  clk_disable(priv->mac_clk);
1817  clk_put(priv->mac_clk);
1818 
1819 out_unmap:
1820  iounmap(priv->base);
1821 
1822 out_release_mem:
1823  release_mem_region(res_mem->start, iomem_size);
1824 out:
1825  free_netdev(dev);
1826  return ret;
1827 }
1828 
1829 
1830 /*
1831  * exit func, stops hardware and unregisters netdevice
1832  */
1833 static int __devexit bcm_enet_remove(struct platform_device *pdev)
1834 {
1835  struct bcm_enet_priv *priv;
1836  struct net_device *dev;
1837  struct resource *res;
1838 
1839  /* stop netdevice */
1840  dev = platform_get_drvdata(pdev);
1841  priv = netdev_priv(dev);
1842  unregister_netdev(dev);
1843 
1844  /* turn off mdc clock */
1845  enet_writel(priv, 0, ENET_MIISC_REG);
1846 
1847  if (priv->has_phy) {
1848  mdiobus_unregister(priv->mii_bus);
1849  kfree(priv->mii_bus->irq);
1850  mdiobus_free(priv->mii_bus);
1851  } else {
1852  struct bcm63xx_enet_platform_data *pd;
1853 
1854  pd = pdev->dev.platform_data;
1855  if (pd && pd->mii_config)
1856  pd->mii_config(dev, 0, bcm_enet_mdio_read_mii,
1857  bcm_enet_mdio_write_mii);
1858  }
1859 
1860  /* release device resources */
1861  iounmap(priv->base);
1862  res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1863  release_mem_region(res->start, resource_size(res));
1864 
1865  /* disable hw block clocks */
1866  if (priv->phy_clk) {
1867  clk_disable(priv->phy_clk);
1868  clk_put(priv->phy_clk);
1869  }
1870  clk_disable(priv->mac_clk);
1871  clk_put(priv->mac_clk);
1872 
1873  platform_set_drvdata(pdev, NULL);
1874  free_netdev(dev);
1875  return 0;
1876 }
1877 
1878 static struct platform_driver bcm63xx_enet_driver = {
1879  .probe = bcm_enet_probe,
1880  .remove = __devexit_p(bcm_enet_remove),
1881  .driver = {
1882  .name = "bcm63xx_enet",
1883  .owner = THIS_MODULE,
1884  },
1885 };
1886 
1887 /*
1888  * reserve & remap memory space shared between all macs
1889  */
1890 static int __devinit bcm_enet_shared_probe(struct platform_device *pdev)
1891 {
1892  struct resource *res;
1893  unsigned int iomem_size;
1894 
1895  res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1896  if (!res)
1897  return -ENODEV;
1898 
1899  iomem_size = resource_size(res);
1900  if (!request_mem_region(res->start, iomem_size, "bcm63xx_enet_dma"))
1901  return -EBUSY;
1902 
1903  bcm_enet_shared_base = ioremap(res->start, iomem_size);
1904  if (!bcm_enet_shared_base) {
1905  release_mem_region(res->start, iomem_size);
1906  return -ENOMEM;
1907  }
1908  return 0;
1909 }
1910 
1911 static int __devexit bcm_enet_shared_remove(struct platform_device *pdev)
1912 {
1913  struct resource *res;
1914 
1915  iounmap(bcm_enet_shared_base);
1916  res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1917  release_mem_region(res->start, resource_size(res));
1918  return 0;
1919 }
1920 
1921 /*
1922  * this "shared" driver is needed because both macs share a single
1923  * address space
1924  */
1925 struct platform_driver bcm63xx_enet_shared_driver = {
1926  .probe = bcm_enet_shared_probe,
1927  .remove = __devexit_p(bcm_enet_shared_remove),
1928  .driver = {
1929  .name = "bcm63xx_enet_shared",
1930  .owner = THIS_MODULE,
1931  },
1932 };
1933 
1934 /*
1935  * entry point
1936  */
1937 static int __init bcm_enet_init(void)
1938 {
1939  int ret;
1940 
1941  ret = platform_driver_register(&bcm63xx_enet_shared_driver);
1942  if (ret)
1943  return ret;
1944 
1945  ret = platform_driver_register(&bcm63xx_enet_driver);
1946  if (ret)
1947  platform_driver_unregister(&bcm63xx_enet_shared_driver);
1948 
1949  return ret;
1950 }
1951 
1952 static void __exit bcm_enet_exit(void)
1953 {
1954  platform_driver_unregister(&bcm63xx_enet_driver);
1955  platform_driver_unregister(&bcm63xx_enet_shared_driver);
1956 }
1957 
1958 
1959 module_init(bcm_enet_init);
1960 module_exit(bcm_enet_exit);
1961 
1962 MODULE_DESCRIPTION("BCM63xx internal ethernet mac driver");
1963 MODULE_AUTHOR("Maxime Bizon <[email protected]>");
1964 MODULE_LICENSE("GPL");