b44.c (Linux kernel 3.7.1)
1 /* b44.c: Broadcom 44xx/47xx Fast Ethernet device driver.
2  *
3  * Copyright (C) 2002 David S. Miller ([email protected])
4  * Copyright (C) 2004 Pekka Pietikainen ([email protected])
5  * Copyright (C) 2004 Florian Schirmer ([email protected])
6  * Copyright (C) 2006 Felix Fietkau ([email protected])
7  * Copyright (C) 2006 Broadcom Corporation.
8  * Copyright (C) 2007 Michael Buesch <[email protected]>
9  *
10  * Distribute under GPL.
11  */
12 
13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14 
15 #include <linux/kernel.h>
16 #include <linux/module.h>
17 #include <linux/moduleparam.h>
18 #include <linux/types.h>
19 #include <linux/netdevice.h>
20 #include <linux/ethtool.h>
21 #include <linux/mii.h>
22 #include <linux/if_ether.h>
23 #include <linux/if_vlan.h>
24 #include <linux/etherdevice.h>
25 #include <linux/pci.h>
26 #include <linux/delay.h>
27 #include <linux/init.h>
28 #include <linux/interrupt.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/ssb/ssb.h>
31 #include <linux/slab.h>
32 
33 #include <asm/uaccess.h>
34 #include <asm/io.h>
35 #include <asm/irq.h>
36 
37 
38 #include "b44.h"
39 
40 #define DRV_MODULE_NAME "b44"
41 #define DRV_MODULE_VERSION "2.0"
42 #define DRV_DESCRIPTION "Broadcom 44xx/47xx 10/100 PCI ethernet driver"
43 
44 #define B44_DEF_MSG_ENABLE \
45  (NETIF_MSG_DRV | \
46  NETIF_MSG_PROBE | \
47  NETIF_MSG_LINK | \
48  NETIF_MSG_TIMER | \
49  NETIF_MSG_IFDOWN | \
50  NETIF_MSG_IFUP | \
51  NETIF_MSG_RX_ERR | \
52  NETIF_MSG_TX_ERR)
53 
54 /* length of time before we decide the hardware is borked,
55  * and dev->tx_timeout() should be called to fix the problem
56  */
57 #define B44_TX_TIMEOUT (5 * HZ)
58 
59 /* hardware minimum and maximum for a single frame's data payload */
60 #define B44_MIN_MTU 60
61 #define B44_MAX_MTU 1500
62 
63 #define B44_RX_RING_SIZE 512
64 #define B44_DEF_RX_RING_PENDING 200
65 #define B44_RX_RING_BYTES (sizeof(struct dma_desc) * \
66  B44_RX_RING_SIZE)
67 #define B44_TX_RING_SIZE 512
68 #define B44_DEF_TX_RING_PENDING (B44_TX_RING_SIZE - 1)
69 #define B44_TX_RING_BYTES (sizeof(struct dma_desc) * \
70  B44_TX_RING_SIZE)
71 
72 #define TX_RING_GAP(BP) \
73  (B44_TX_RING_SIZE - (BP)->tx_pending)
74 #define TX_BUFFS_AVAIL(BP) \
75  (((BP)->tx_cons <= (BP)->tx_prod) ? \
76  (BP)->tx_cons + (BP)->tx_pending - (BP)->tx_prod : \
77  (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
78 #define NEXT_TX(N) (((N) + 1) & (B44_TX_RING_SIZE - 1))
79 
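/* Worked example of the ring accounting above: with
 * B44_TX_RING_SIZE = 512 and the default tx_pending of 511,
 * TX_RING_GAP is 1, i.e. one descriptor is always kept free so that
 * tx_prod can never catch up to tx_cons from behind. With
 * tx_cons = 10 and tx_prod = 500, TX_BUFFS_AVAIL is
 * 10 + 511 - 500 = 21; after tx_prod wraps (tx_cons = 10,
 * tx_prod = 5) the second branch gives 10 - 5 - 1 = 4. NEXT_TX
 * relies on the ring size being a power of two, reducing
 * "(N + 1) mod 512" to a cheap AND with the mask.
 */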
80 #define RX_PKT_OFFSET (RX_HEADER_LEN + 2)
81 #define RX_PKT_BUF_SZ (1536 + RX_PKT_OFFSET)
82 
83 /* minimum number of free TX descriptors required to wake up TX process */
84 #define B44_TX_WAKEUP_THRESH (B44_TX_RING_SIZE / 4)
85 
86 /* b44 internal pattern match filter info */
87 #define B44_PATTERN_BASE 0x400
88 #define B44_PATTERN_SIZE 0x80
89 #define B44_PMASK_BASE 0x600
90 #define B44_PMASK_SIZE 0x10
91 #define B44_MAX_PATTERNS 16
92 #define B44_ETHIPV6UDP_HLEN 62
93 #define B44_ETHIPV4UDP_HLEN 42
94 
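/* The two header lengths above are plain arithmetic over the
 * encapsulation: Ethernet + IPv4 + UDP = 14 + 20 + 8 = 42 bytes
 * (B44_ETHIPV4UDP_HLEN) and Ethernet + IPv6 + UDP = 14 + 40 + 8 = 62
 * bytes (B44_ETHIPV6UDP_HLEN). They are the offsets at which a
 * wake-on-LAN magic payload starts for each encapsulation; see
 * b44_setup_pseudo_magicp() below.
 */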
95 MODULE_AUTHOR("Felix Fietkau, Florian Schirmer, Pekka Pietikainen, David S. Miller");
96 MODULE_DESCRIPTION(DRV_DESCRIPTION);
97 MODULE_LICENSE("GPL");
98 MODULE_VERSION(DRV_MODULE_VERSION);
99 
100 static int b44_debug = -1; /* -1 == use B44_DEF_MSG_ENABLE as value */
101 module_param(b44_debug, int, 0);
102 MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");
103 
104 
105 #ifdef CONFIG_B44_PCI
106 static DEFINE_PCI_DEVICE_TABLE(b44_pci_tbl) = {
107  { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401) },
108  { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0) },
109  { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1) },
110  { 0 } /* terminate list with empty entry */
111 };
112 MODULE_DEVICE_TABLE(pci, b44_pci_tbl);
113 
114 static struct pci_driver b44_pci_driver = {
115  .name = DRV_MODULE_NAME,
116  .id_table = b44_pci_tbl,
117 };
118 #endif /* CONFIG_B44_PCI */
119 
120 static const struct ssb_device_id b44_ssb_tbl[] = {
121  SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_ETHERNET, SSB_ANY_REV),
122  SSB_DEVTABLE_END
123 };
124 MODULE_DEVICE_TABLE(ssb, b44_ssb_tbl);
125 
126 static void b44_halt(struct b44 *);
127 static void b44_init_rings(struct b44 *);
128 
129 #define B44_FULL_RESET 1
130 #define B44_FULL_RESET_SKIP_PHY 2
131 #define B44_PARTIAL_RESET 3
132 #define B44_CHIP_RESET_FULL 4
133 #define B44_CHIP_RESET_PARTIAL 5
134 
135 static void b44_init_hw(struct b44 *, int);
136 
137 static int dma_desc_sync_size;
138 static int instance;
139 
140 static const char b44_gstrings[][ETH_GSTRING_LEN] = {
141 #define _B44(x...) # x,
142 B44_STAT_REG_DECLARE
143 #undef _B44
144 };
145 
146 static inline void b44_sync_dma_desc_for_device(struct ssb_device *sdev,
147  dma_addr_t dma_base,
148  unsigned long offset,
149  enum dma_data_direction dir)
150 {
151  dma_sync_single_for_device(sdev->dma_dev, dma_base + offset,
152  dma_desc_sync_size, dir);
153 }
154 
155 static inline void b44_sync_dma_desc_for_cpu(struct ssb_device *sdev,
156  dma_addr_t dma_base,
157  unsigned long offset,
158  enum dma_data_direction dir)
159 {
160  dma_sync_single_for_cpu(sdev->dma_dev, dma_base + offset,
161  dma_desc_sync_size, dir);
162 }
163 
164 static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
165 {
166  return ssb_read32(bp->sdev, reg);
167 }
168 
169 static inline void bw32(const struct b44 *bp,
170  unsigned long reg, unsigned long val)
171 {
172  ssb_write32(bp->sdev, reg, val);
173 }
174 
175 static int b44_wait_bit(struct b44 *bp, unsigned long reg,
176  u32 bit, unsigned long timeout, const int clear)
177 {
178  unsigned long i;
179 
180  for (i = 0; i < timeout; i++) {
181  u32 val = br32(bp, reg);
182 
183  if (clear && !(val & bit))
184  break;
185  if (!clear && (val & bit))
186  break;
187  udelay(10);
188  }
189  if (i == timeout) {
190  if (net_ratelimit())
191  netdev_err(bp->dev, "BUG! Timeout waiting for bit %08x of register %lx to %s\n",
192  bit, reg, clear ? "clear" : "set");
193 
194  return -ENODEV;
195  }
196  return 0;
197 }
198 
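/* Note on b44_wait_bit(): the timeout argument is an iteration
 * count, not a time. Each iteration polls the register once and
 * busy-waits udelay(10), so the "timeout = 100" used by the CAM and
 * MDIO helpers below bounds the wait at roughly 100 * 10us = 1ms.
 */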
199 static inline void __b44_cam_read(struct b44 *bp, unsigned char *data, int index)
200 {
201  u32 val;
202 
203  bw32(bp, B44_CAM_CTRL, (CAM_CTRL_READ |
204  (index << CAM_CTRL_INDEX_SHIFT)));
205 
206  b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
207 
208  val = br32(bp, B44_CAM_DATA_LO);
209 
210  data[2] = (val >> 24) & 0xFF;
211  data[3] = (val >> 16) & 0xFF;
212  data[4] = (val >> 8) & 0xFF;
213  data[5] = (val >> 0) & 0xFF;
214 
215  val = br32(bp, B44_CAM_DATA_HI);
216 
217  data[0] = (val >> 8) & 0xFF;
218  data[1] = (val >> 0) & 0xFF;
219 }
220 
221 static inline void __b44_cam_write(struct b44 *bp, unsigned char *data, int index)
222 {
223  u32 val;
224 
225  val = ((u32) data[2]) << 24;
226  val |= ((u32) data[3]) << 16;
227  val |= ((u32) data[4]) << 8;
228  val |= ((u32) data[5]) << 0;
229  bw32(bp, B44_CAM_DATA_LO, val);
230  val = (CAM_DATA_HI_VALID |
231  (((u32) data[0]) << 8) |
232  (((u32) data[1]) << 0));
233  bw32(bp, B44_CAM_DATA_HI, val);
234  bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE |
235  (index << CAM_CTRL_INDEX_SHIFT)));
236  b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
237 }
238 
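/* CAM register layout used by the two helpers above: B44_CAM_DATA_HI
 * holds a valid bit plus the two most significant bytes of the
 * station address (data[0], data[1]); B44_CAM_DATA_LO holds the
 * remaining four bytes, most significant first. For the MAC
 * 00:11:22:33:44:55, __b44_cam_write() therefore programs
 * DATA_LO = 0x22334455 and DATA_HI = CAM_DATA_HI_VALID | 0x0011.
 */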
239 static inline void __b44_disable_ints(struct b44 *bp)
240 {
241  bw32(bp, B44_IMASK, 0);
242 }
243 
244 static void b44_disable_ints(struct b44 *bp)
245 {
246  __b44_disable_ints(bp);
247 
248  /* Flush posted writes. */
249  br32(bp, B44_IMASK);
250 }
251 
252 static void b44_enable_ints(struct b44 *bp)
253 {
254  bw32(bp, B44_IMASK, bp->imask);
255 }
256 
257 static int __b44_readphy(struct b44 *bp, int phy_addr, int reg, u32 *val)
258 {
259  int err;
260 
261  bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
262  bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
263  (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
264  (phy_addr << MDIO_DATA_PMD_SHIFT) |
265  (reg << MDIO_DATA_RA_SHIFT) |
266  (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
267  err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
268  *val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;
269 
270  return err;
271 }
272 
273 static int __b44_writephy(struct b44 *bp, int phy_addr, int reg, u32 val)
274 {
275  bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
276  bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
277  (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) |
278  (phy_addr << MDIO_DATA_PMD_SHIFT) |
279  (reg << MDIO_DATA_RA_SHIFT) |
280  (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) |
281  (val & MDIO_DATA_DATA)));
282  return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
283 }
284 
285 static inline int b44_readphy(struct b44 *bp, int reg, u32 *val)
286 {
287  if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
288  return 0;
289 
290  return __b44_readphy(bp, bp->phy_addr, reg, val);
291 }
292 
293 static inline int b44_writephy(struct b44 *bp, int reg, u32 val)
294 {
295  if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
296  return 0;
297 
298  return __b44_writephy(bp, bp->phy_addr, reg, val);
299 }
300 
301 /* miilib interface */
302 static int b44_mii_read(struct net_device *dev, int phy_id, int location)
303 {
304  u32 val;
305  struct b44 *bp = netdev_priv(dev);
306  int rc = __b44_readphy(bp, phy_id, location, &val);
307  if (rc)
308  return 0xffffffff;
309  return val;
310 }
311 
312 static void b44_mii_write(struct net_device *dev, int phy_id, int location,
313  int val)
314 {
315  struct b44 *bp = netdev_priv(dev);
316  __b44_writephy(bp, phy_id, location, val);
317 }
318 
319 static int b44_phy_reset(struct b44 *bp)
320 {
321  u32 val;
322  int err;
323 
324  if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
325  return 0;
326  err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
327  if (err)
328  return err;
329  udelay(100);
330  err = b44_readphy(bp, MII_BMCR, &val);
331  if (!err) {
332  if (val & BMCR_RESET) {
333  netdev_err(bp->dev, "PHY Reset would not complete\n");
334  err = -ENODEV;
335  }
336  }
337 
338  return err;
339 }
340 
341 static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags)
342 {
343  u32 val;
344 
345  bp->flags &= ~(B44_FLAG_RX_PAUSE | B44_FLAG_TX_PAUSE);
346  bp->flags |= pause_flags;
347 
348  val = br32(bp, B44_RXCONFIG);
349  if (pause_flags & B44_FLAG_RX_PAUSE)
350  val |= RXCONFIG_FLOW;
351  else
352  val &= ~RXCONFIG_FLOW;
353  bw32(bp, B44_RXCONFIG, val);
354 
355  val = br32(bp, B44_MAC_FLOW);
356  if (pause_flags & B44_FLAG_TX_PAUSE)
357  val |= (MAC_FLOW_PAUSE_ENAB |
358  (0xc0 & MAC_FLOW_RX_HI_WATER));
359  else
360  val &= ~MAC_FLOW_PAUSE_ENAB;
361  bw32(bp, B44_MAC_FLOW, val);
362 }
363 
364 static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
365 {
366  u32 pause_enab = 0;
367 
368  /* The driver supports only rx pause by default because
369  the b44 mac tx pause mechanism generates excessive
370  pause frames.
371  Use ethtool to turn on b44 tx pause if necessary.
372  */
373  if ((local & ADVERTISE_PAUSE_CAP) &&
374  (local & ADVERTISE_PAUSE_ASYM)){
375  if ((remote & LPA_PAUSE_ASYM) &&
376  !(remote & LPA_PAUSE_CAP))
377  pause_enab |= B44_FLAG_RX_PAUSE;
378  }
379 
380  __b44_set_flow_ctrl(bp, pause_enab);
381 }
382 
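/* The condition in b44_set_flow_ctrl() implements a single row of
 * the 802.3x pause resolution table: rx pause is enabled only when
 * we advertise both PAUSE_CAP and PAUSE_ASYM while the partner
 * advertises ASYM without CAP, i.e. the partner sends pause frames
 * but will not honour ours. The symmetric case (both sides
 * PAUSE_CAP) is deliberately not resolved here because of the
 * broken b44 tx pause described in the comment above.
 */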
383 #ifdef CONFIG_BCM47XX
384 #include <asm/mach-bcm47xx/nvram.h>
385 static void b44_wap54g10_workaround(struct b44 *bp)
386 {
387  char buf[20];
388  u32 val;
389  int err;
390 
391  /*
392  * workaround for bad hardware design in Linksys WAP54G v1.0
393  * see https://dev.openwrt.org/ticket/146
394  * check and reset bit "isolate"
395  */
396  if (nvram_getenv("boardnum", buf, sizeof(buf)) < 0)
397  return;
398  if (simple_strtoul(buf, NULL, 0) == 2) {
399  err = __b44_readphy(bp, 0, MII_BMCR, &val);
400  if (err)
401  goto error;
402  if (!(val & BMCR_ISOLATE))
403  return;
404  val &= ~BMCR_ISOLATE;
405  err = __b44_writephy(bp, 0, MII_BMCR, val);
406  if (err)
407  goto error;
408  }
409  return;
410 error:
411  pr_warning("PHY: cannot reset MII transceiver isolate bit\n");
412 }
413 #else
414 static inline void b44_wap54g10_workaround(struct b44 *bp)
415 {
416 }
417 #endif
418 
419 static int b44_setup_phy(struct b44 *bp)
420 {
421  u32 val;
422  int err;
423 
424  b44_wap54g10_workaround(bp);
425 
426  if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
427  return 0;
428  if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
429  goto out;
430  if ((err = b44_writephy(bp, B44_MII_ALEDCTRL,
431  val & MII_ALEDCTRL_ALLMSK)) != 0)
432  goto out;
433  if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0)
434  goto out;
435  if ((err = b44_writephy(bp, B44_MII_TLEDCTRL,
436  val | MII_TLEDCTRL_ENABLE)) != 0)
437  goto out;
438 
439  if (!(bp->flags & B44_FLAG_FORCE_LINK)) {
440  u32 adv = ADVERTISE_CSMA;
441 
442  if (bp->flags & B44_FLAG_ADV_10HALF)
443  adv |= ADVERTISE_10HALF;
444  if (bp->flags & B44_FLAG_ADV_10FULL)
445  adv |= ADVERTISE_10FULL;
446  if (bp->flags & B44_FLAG_ADV_100HALF)
447  adv |= ADVERTISE_100HALF;
448  if (bp->flags & B44_FLAG_ADV_100FULL)
449  adv |= ADVERTISE_100FULL;
450 
451  if (bp->flags & B44_FLAG_PAUSE_AUTO)
452  adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
453 
454  if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0)
455  goto out;
456  if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE |
457  BMCR_ANRESTART))) != 0)
458  goto out;
459  } else {
460  u32 bmcr;
461 
462  if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0)
463  goto out;
464  bmcr &= ~(BMCR_SPEED100 | BMCR_FULLDPLX);
465  if (bp->flags & B44_FLAG_100_BASE_T)
466  bmcr |= BMCR_SPEED100;
467  if (bp->flags & B44_FLAG_FULL_DUPLEX)
468  bmcr |= BMCR_FULLDPLX;
469  if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0)
470  goto out;
471 
472  /* Since we will not be negotiating there is no safe way
473  * to determine if the link partner supports flow control
474  * or not. So just disable it completely in this case.
475  */
476  b44_set_flow_ctrl(bp, 0, 0);
477  }
478 
479 out:
480  return err;
481 }
482 
483 static void b44_stats_update(struct b44 *bp)
484 {
485  unsigned long reg;
486  u64 *val;
487 
488  val = &bp->hw_stats.tx_good_octets;
489  u64_stats_update_begin(&bp->hw_stats.syncp);
490 
491  for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) {
492  *val++ += br32(bp, reg);
493  }
494 
495  /* Pad */
496  reg += 8*4UL;
497 
498  for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
499  *val++ += br32(bp, reg);
500  }
501 
502  u64_stats_update_end(&bp->hw_stats.syncp);
503 }
504 
505 static void b44_link_report(struct b44 *bp)
506 {
507  if (!netif_carrier_ok(bp->dev)) {
508  netdev_info(bp->dev, "Link is down\n");
509  } else {
510  netdev_info(bp->dev, "Link is up at %d Mbps, %s duplex\n",
511  (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10,
512  (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half");
513 
514  netdev_info(bp->dev, "Flow control is %s for TX and %s for RX\n",
515  (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off",
516  (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off");
517  }
518 }
519 
520 static void b44_check_phy(struct b44 *bp)
521 {
522  u32 bmsr, aux;
523 
524  if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) {
525  bp->flags |= B44_FLAG_100_BASE_T;
526  bp->flags |= B44_FLAG_FULL_DUPLEX;
527  if (!netif_carrier_ok(bp->dev)) {
528  u32 val = br32(bp, B44_TX_CTRL);
529  val |= TX_CTRL_DUPLEX;
530  bw32(bp, B44_TX_CTRL, val);
531  netif_carrier_on(bp->dev);
532  b44_link_report(bp);
533  }
534  return;
535  }
536 
537  if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
538  !b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
539  (bmsr != 0xffff)) {
540  if (aux & MII_AUXCTRL_SPEED)
541  bp->flags |= B44_FLAG_100_BASE_T;
542  else
543  bp->flags &= ~B44_FLAG_100_BASE_T;
544  if (aux & MII_AUXCTRL_DUPLEX)
545  bp->flags |= B44_FLAG_FULL_DUPLEX;
546  else
547  bp->flags &= ~B44_FLAG_FULL_DUPLEX;
548 
549  if (!netif_carrier_ok(bp->dev) &&
550  (bmsr & BMSR_LSTATUS)) {
551  u32 val = br32(bp, B44_TX_CTRL);
552  u32 local_adv, remote_adv;
553 
554  if (bp->flags & B44_FLAG_FULL_DUPLEX)
555  val |= TX_CTRL_DUPLEX;
556  else
557  val &= ~TX_CTRL_DUPLEX;
558  bw32(bp, B44_TX_CTRL, val);
559 
560  if (!(bp->flags & B44_FLAG_FORCE_LINK) &&
561  !b44_readphy(bp, MII_ADVERTISE, &local_adv) &&
562  !b44_readphy(bp, MII_LPA, &remote_adv))
563  b44_set_flow_ctrl(bp, local_adv, remote_adv);
564 
565  /* Link now up */
566  netif_carrier_on(bp->dev);
567  b44_link_report(bp);
568  } else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) {
569  /* Link now down */
570  netif_carrier_off(bp->dev);
571  b44_link_report(bp);
572  }
573 
574  if (bmsr & BMSR_RFAULT)
575  netdev_warn(bp->dev, "Remote fault detected in PHY\n");
576  if (bmsr & BMSR_JCD)
577  netdev_warn(bp->dev, "Jabber detected in PHY\n");
578  }
579 }
580 
581 static void b44_timer(unsigned long __opaque)
582 {
583  struct b44 *bp = (struct b44 *) __opaque;
584 
585  spin_lock_irq(&bp->lock);
586 
587  b44_check_phy(bp);
588 
589  b44_stats_update(bp);
590 
591  spin_unlock_irq(&bp->lock);
592 
593  mod_timer(&bp->timer, round_jiffies(jiffies + HZ));
594 }
595 
596 static void b44_tx(struct b44 *bp)
597 {
598  u32 cur, cons;
599 
600  cur = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
601  cur /= sizeof(struct dma_desc);
602 
603  /* XXX needs updating when NETIF_F_SG is supported */
604  for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
605  struct ring_info *rp = &bp->tx_buffers[cons];
606  struct sk_buff *skb = rp->skb;
607 
608  BUG_ON(skb == NULL);
609 
610  dma_unmap_single(bp->sdev->dma_dev,
611  rp->mapping,
612  skb->len,
613  DMA_TO_DEVICE);
614  rp->skb = NULL;
615  dev_kfree_skb_irq(skb);
616  }
617 
618  bp->tx_cons = cons;
619  if (netif_queue_stopped(bp->dev) &&
620  TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
621  netif_wake_queue(bp->dev);
622 
623  bw32(bp, B44_GPTIMER, 0);
624 }
625 
626 /* Works like this. This chip writes a 'struct rx_header' 30 bytes
627  * before the DMA address you give it. So we allocate 30 more bytes
628  * for the RX buffer, DMA map all of it, skb_reserve the 30 bytes, then
629  * point the chip at 30 bytes past where the rx_header will go.
630  */
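/* Sketch of the resulting buffer layout, assuming RX_HEADER_LEN is
 * 28 as defined in b44.h (so RX_PKT_OFFSET = 28 + 2 = 30):
 *
 *   skb->data: [ rx_header + 2 byte pad (30 bytes) | frame data ... ]
 *                ^ chip writes status header here    ^ chip DMAs the
 *                                                      frame here
 *
 * The extra 2 bytes of pad also leave the IP header that follows the
 * 14-byte Ethernet header on a 4-byte boundary.
 */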
631 static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
632 {
633  struct dma_desc *dp;
634  struct ring_info *src_map, *map;
635  struct rx_header *rh;
636  struct sk_buff *skb;
637  dma_addr_t mapping;
638  int dest_idx;
639  u32 ctrl;
640 
641  src_map = NULL;
642  if (src_idx >= 0)
643  src_map = &bp->rx_buffers[src_idx];
644  dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
645  map = &bp->rx_buffers[dest_idx];
646  skb = netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ);
647  if (skb == NULL)
648  return -ENOMEM;
649 
650  mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
651  RX_PKT_BUF_SZ,
652  DMA_FROM_DEVICE);
653 
654  /* Hardware bug work-around, the chip is unable to do PCI DMA
655  to/from anything above 1GB :-( */
656  if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
657  mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
658  /* Sigh... */
659  if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
660  dma_unmap_single(bp->sdev->dma_dev, mapping,
661  RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
662  dev_kfree_skb_any(skb);
663  skb = alloc_skb(RX_PKT_BUF_SZ, GFP_ATOMIC | GFP_DMA);
664  if (skb == NULL)
665  return -ENOMEM;
666  mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
667  RX_PKT_BUF_SZ,
668  DMA_FROM_DEVICE);
669  if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
670  mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
671  if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
672  dma_unmap_single(bp->sdev->dma_dev, mapping, RX_PKT_BUF_SZ,DMA_FROM_DEVICE);
673  dev_kfree_skb_any(skb);
674  return -ENOMEM;
675  }
676  bp->force_copybreak = 1;
677  }
678 
679  rh = (struct rx_header *) skb->data;
680 
681  rh->len = 0;
682  rh->flags = 0;
683 
684  map->skb = skb;
685  map->mapping = mapping;
686 
687  if (src_map != NULL)
688  src_map->skb = NULL;
689 
690  ctrl = (DESC_CTRL_LEN & RX_PKT_BUF_SZ);
691  if (dest_idx == (B44_RX_RING_SIZE - 1))
692  ctrl |= DESC_CTRL_EOT;
693 
694  dp = &bp->rx_ring[dest_idx];
695  dp->ctrl = cpu_to_le32(ctrl);
696  dp->addr = cpu_to_le32((u32) mapping + bp->dma_offset);
697 
698  if (bp->flags & B44_FLAG_RX_RING_HACK)
699  b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
700  dest_idx * sizeof(*dp),
701  DMA_BIDIRECTIONAL);
702 
703  return RX_PKT_BUF_SZ;
704 }
705 
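/* The DMA_BIT_MASK(30) checks above (and the equivalent ones in
 * b44_start_xmit()) encode the hardware limit mentioned in the
 * comment: the core can only address the first 2^30 bytes (1GB) of
 * bus space. A mapping is rejected not only when dma_map_single()
 * itself fails, but also when the returned bus address plus the
 * buffer length crosses the 1GB boundary; the fallback allocates
 * from GFP_DMA so the buffer is guaranteed to sit in low memory.
 */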
706 static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
707 {
708  struct dma_desc *src_desc, *dest_desc;
709  struct ring_info *src_map, *dest_map;
710  struct rx_header *rh;
711  int dest_idx;
712  __le32 ctrl;
713 
714  dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
715  dest_desc = &bp->rx_ring[dest_idx];
716  dest_map = &bp->rx_buffers[dest_idx];
717  src_desc = &bp->rx_ring[src_idx];
718  src_map = &bp->rx_buffers[src_idx];
719 
720  dest_map->skb = src_map->skb;
721  rh = (struct rx_header *) src_map->skb->data;
722  rh->len = 0;
723  rh->flags = 0;
724  dest_map->mapping = src_map->mapping;
725 
726  if (bp->flags & B44_FLAG_RX_RING_HACK)
727  b44_sync_dma_desc_for_cpu(bp->sdev, bp->rx_ring_dma,
728  src_idx * sizeof(*src_desc),
729  DMA_BIDIRECTIONAL);
730 
731  ctrl = src_desc->ctrl;
732  if (dest_idx == (B44_RX_RING_SIZE - 1))
733  ctrl |= cpu_to_le32(DESC_CTRL_EOT);
734  else
735  ctrl &= cpu_to_le32(~DESC_CTRL_EOT);
736 
737  dest_desc->ctrl = ctrl;
738  dest_desc->addr = src_desc->addr;
739 
740  src_map->skb = NULL;
741 
742  if (bp->flags & B44_FLAG_RX_RING_HACK)
743  b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
744  dest_idx * sizeof(*dest_desc),
745  DMA_BIDIRECTIONAL);
746 
747  dma_sync_single_for_device(bp->sdev->dma_dev, dest_map->mapping,
748  RX_PKT_BUF_SZ,
749  DMA_FROM_DEVICE);
750 }
751 
752 static int b44_rx(struct b44 *bp, int budget)
753 {
754  int received;
755  u32 cons, prod;
756 
757  received = 0;
758  prod = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK;
759  prod /= sizeof(struct dma_desc);
760  cons = bp->rx_cons;
761 
762  while (cons != prod && budget > 0) {
763  struct ring_info *rp = &bp->rx_buffers[cons];
764  struct sk_buff *skb = rp->skb;
765  dma_addr_t map = rp->mapping;
766  struct rx_header *rh;
767  u16 len;
768 
769  dma_sync_single_for_cpu(bp->sdev->dma_dev, map,
770  RX_PKT_BUF_SZ,
771  DMA_FROM_DEVICE);
772  rh = (struct rx_header *) skb->data;
773  len = le16_to_cpu(rh->len);
774  if ((len > (RX_PKT_BUF_SZ - RX_PKT_OFFSET)) ||
775  (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
776  drop_it:
777  b44_recycle_rx(bp, cons, bp->rx_prod);
778  drop_it_no_recycle:
779  bp->dev->stats.rx_dropped++;
780  goto next_pkt;
781  }
782 
783  if (len == 0) {
784  int i = 0;
785 
786  do {
787  udelay(2);
788  barrier();
789  len = le16_to_cpu(rh->len);
790  } while (len == 0 && i++ < 5);
791  if (len == 0)
792  goto drop_it;
793  }
794 
795  /* Omit CRC. */
796  len -= 4;
797 
798  if (!bp->force_copybreak && len > RX_COPY_THRESHOLD) {
799  int skb_size;
800  skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
801  if (skb_size < 0)
802  goto drop_it;
803  dma_unmap_single(bp->sdev->dma_dev, map,
804  skb_size, DMA_FROM_DEVICE);
805  /* Leave out rx_header */
806  skb_put(skb, len + RX_PKT_OFFSET);
807  skb_pull(skb, RX_PKT_OFFSET);
808  } else {
809  struct sk_buff *copy_skb;
810 
811  b44_recycle_rx(bp, cons, bp->rx_prod);
812  copy_skb = netdev_alloc_skb(bp->dev, len + 2);
813  if (copy_skb == NULL)
814  goto drop_it_no_recycle;
815 
816  skb_reserve(copy_skb, 2);
817  skb_put(copy_skb, len);
818  /* DMA sync done above, copy just the actual packet */
819  skb_copy_from_linear_data_offset(skb, RX_PKT_OFFSET,
820  copy_skb->data, len);
821  skb = copy_skb;
822  }
823  skb_checksum_none_assert(skb);
824  skb->protocol = eth_type_trans(skb, bp->dev);
825  netif_receive_skb(skb);
826  received++;
827  budget--;
828  next_pkt:
829  bp->rx_prod = (bp->rx_prod + 1) &
830  (B44_RX_RING_SIZE - 1);
831  cons = (cons + 1) & (B44_RX_RING_SIZE - 1);
832  }
833 
834  bp->rx_cons = cons;
835  bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc));
836 
837  return received;
838 }
839 
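/* b44_rx() uses the usual copybreak trade-off: frames longer than
 * RX_COPY_THRESHOLD keep their original buffer (a fresh skb is
 * allocated for the ring slot), while short frames are copied into a
 * right-sized skb and the original buffer is recycled in place,
 * which is cheaper than a fresh DMA mapping. force_copybreak, set in
 * b44_alloc_rx_skb() once an allocation has landed outside the 1GB
 * DMA window, disables the zero-copy path entirely.
 */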
840 static int b44_poll(struct napi_struct *napi, int budget)
841 {
842  struct b44 *bp = container_of(napi, struct b44, napi);
843  int work_done;
844  unsigned long flags;
845 
846  spin_lock_irqsave(&bp->lock, flags);
847 
848  if (bp->istat & (ISTAT_TX | ISTAT_TO)) {
849  /* spin_lock(&bp->tx_lock); */
850  b44_tx(bp);
851  /* spin_unlock(&bp->tx_lock); */
852  }
853  if (bp->istat & ISTAT_RFO) { /* fast recovery, in ~20msec */
854  bp->istat &= ~ISTAT_RFO;
855  b44_disable_ints(bp);
856  ssb_device_enable(bp->sdev, 0); /* resets ISTAT_RFO */
857  b44_init_rings(bp);
858  b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
859  netif_wake_queue(bp->dev);
860  }
861 
862  spin_unlock_irqrestore(&bp->lock, flags);
863 
864  work_done = 0;
865  if (bp->istat & ISTAT_RX)
866  work_done += b44_rx(bp, budget);
867 
868  if (bp->istat & ISTAT_ERRORS) {
869  spin_lock_irqsave(&bp->lock, flags);
870  b44_halt(bp);
871  b44_init_rings(bp);
872  b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
873  netif_wake_queue(bp->dev);
874  spin_unlock_irqrestore(&bp->lock, flags);
875  work_done = 0;
876  }
877 
878  if (work_done < budget) {
879  napi_complete(napi);
880  b44_enable_ints(bp);
881  }
882 
883  return work_done;
884 }
885 
886 static irqreturn_t b44_interrupt(int irq, void *dev_id)
887 {
888  struct net_device *dev = dev_id;
889  struct b44 *bp = netdev_priv(dev);
890  u32 istat, imask;
891  int handled = 0;
892 
893  spin_lock(&bp->lock);
894 
895  istat = br32(bp, B44_ISTAT);
896  imask = br32(bp, B44_IMASK);
897 
898  /* The interrupt mask register controls which interrupt bits
899  * will actually raise an interrupt to the CPU when set by hw/firmware,
900  * but doesn't mask off the bits.
901  */
902  istat &= imask;
903  if (istat) {
904  handled = 1;
905 
906  if (unlikely(!netif_running(dev))) {
907  netdev_info(dev, "late interrupt\n");
908  goto irq_ack;
909  }
910 
911  if (napi_schedule_prep(&bp->napi)) {
912  /* NOTE: These writes are posted by the readback of
913  * the ISTAT register below.
914  */
915  bp->istat = istat;
916  __b44_disable_ints(bp);
917  __napi_schedule(&bp->napi);
918  }
919 
920 irq_ack:
921  bw32(bp, B44_ISTAT, istat);
922  br32(bp, B44_ISTAT);
923  }
924  spin_unlock(&bp->lock);
925  return IRQ_RETVAL(handled);
926 }
927 
928 static void b44_tx_timeout(struct net_device *dev)
929 {
930  struct b44 *bp = netdev_priv(dev);
931 
932  netdev_err(dev, "transmit timed out, resetting\n");
933 
934  spin_lock_irq(&bp->lock);
935 
936  b44_halt(bp);
937  b44_init_rings(bp);
938  b44_init_hw(bp, B44_FULL_RESET);
939 
940  spin_unlock_irq(&bp->lock);
941 
942  b44_enable_ints(bp);
943 
944  netif_wake_queue(dev);
945 }
946 
947 static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
948 {
949  struct b44 *bp = netdev_priv(dev);
950  int rc = NETDEV_TX_OK;
951  dma_addr_t mapping;
952  u32 len, entry, ctrl;
953  unsigned long flags;
954 
955  len = skb->len;
956  spin_lock_irqsave(&bp->lock, flags);
957 
958  /* This is a hard error, log it. */
959  if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
960  netif_stop_queue(dev);
961  netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
962  goto err_out;
963  }
964 
965  mapping = dma_map_single(bp->sdev->dma_dev, skb->data, len, DMA_TO_DEVICE);
966  if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
967  struct sk_buff *bounce_skb;
968 
969  /* Chip can't handle DMA to/from >1GB, use bounce buffer */
970  if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
971  dma_unmap_single(bp->sdev->dma_dev, mapping, len,
972  DMA_TO_DEVICE);
973 
974  bounce_skb = alloc_skb(len, GFP_ATOMIC | GFP_DMA);
975  if (!bounce_skb)
976  goto err_out;
977 
978  mapping = dma_map_single(bp->sdev->dma_dev, bounce_skb->data,
979  len, DMA_TO_DEVICE);
980  if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
981  if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
982  dma_unmap_single(bp->sdev->dma_dev, mapping,
983  len, DMA_TO_DEVICE);
984  dev_kfree_skb_any(bounce_skb);
985  goto err_out;
986  }
987 
988  skb_copy_from_linear_data(skb, skb_put(bounce_skb, len), len);
989  dev_kfree_skb_any(skb);
990  skb = bounce_skb;
991  }
992 
993  entry = bp->tx_prod;
994  bp->tx_buffers[entry].skb = skb;
995  bp->tx_buffers[entry].mapping = mapping;
996 
997  ctrl = (len & DESC_CTRL_LEN);
998  ctrl |= DESC_CTRL_IOC;
999  if (entry == (B44_TX_RING_SIZE - 1))
1000  ctrl |= DESC_CTRL_EOT;
1001 
1002  bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
1003  bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping+bp->dma_offset);
1004 
1005  if (bp->flags & B44_FLAG_TX_RING_HACK)
1006  b44_sync_dma_desc_for_device(bp->sdev, bp->tx_ring_dma,
1007  entry * sizeof(bp->tx_ring[0]),
1008  DMA_TO_DEVICE);
1009 
1010  entry = NEXT_TX(entry);
1011 
1012  bp->tx_prod = entry;
1013 
1014  wmb();
1015 
1016  bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
1017  if (bp->flags & B44_FLAG_BUGGY_TXPTR)
1018  bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
1019  if (bp->flags & B44_FLAG_REORDER_BUG)
1020  br32(bp, B44_DMATX_PTR);
1021 
1022  if (TX_BUFFS_AVAIL(bp) < 1)
1023  netif_stop_queue(dev);
1024 
1025 out_unlock:
1026  spin_unlock_irqrestore(&bp->lock, flags);
1027 
1028  return rc;
1029 
1030 err_out:
1031  rc = NETDEV_TX_BUSY;
1032  goto out_unlock;
1033 }
1034 
1035 static int b44_change_mtu(struct net_device *dev, int new_mtu)
1036 {
1037  struct b44 *bp = netdev_priv(dev);
1038 
1039  if (new_mtu < B44_MIN_MTU || new_mtu > B44_MAX_MTU)
1040  return -EINVAL;
1041 
1042  if (!netif_running(dev)) {
1043  /* We'll just catch it later when the
1044  * device is up'd.
1045  */
1046  dev->mtu = new_mtu;
1047  return 0;
1048  }
1049 
1050  spin_lock_irq(&bp->lock);
1051  b44_halt(bp);
1052  dev->mtu = new_mtu;
1053  b44_init_rings(bp);
1054  b44_init_hw(bp, B44_FULL_RESET);
1055  spin_unlock_irq(&bp->lock);
1056 
1057  b44_enable_ints(bp);
1058 
1059  return 0;
1060 }
1061 
1062 /* Free up pending packets in all rx/tx rings.
1063  *
1064  * The chip has been shut down and the driver detached from
1065  * the networking, so no interrupts or new tx packets will
1066  * end up in the driver. bp->lock is not held and we are not
1067  * in an interrupt context and thus may sleep.
1068  */
1069 static void b44_free_rings(struct b44 *bp)
1070 {
1071  struct ring_info *rp;
1072  int i;
1073 
1074  for (i = 0; i < B44_RX_RING_SIZE; i++) {
1075  rp = &bp->rx_buffers[i];
1076 
1077  if (rp->skb == NULL)
1078  continue;
1079  dma_unmap_single(bp->sdev->dma_dev, rp->mapping, RX_PKT_BUF_SZ,
1080  DMA_FROM_DEVICE);
1081  dev_kfree_skb_any(rp->skb);
1082  rp->skb = NULL;
1083  }
1084 
1085  /* XXX needs changes once NETIF_F_SG is set... */
1086  for (i = 0; i < B44_TX_RING_SIZE; i++) {
1087  rp = &bp->tx_buffers[i];
1088 
1089  if (rp->skb == NULL)
1090  continue;
1091  dma_unmap_single(bp->sdev->dma_dev, rp->mapping, rp->skb->len,
1092  DMA_TO_DEVICE);
1093  dev_kfree_skb_any(rp->skb);
1094  rp->skb = NULL;
1095  }
1096 }
1097 
1098 /* Initialize tx/rx rings for packet processing.
1099  *
1100  * The chip has been shut down and the driver detached from
1101  * the networking, so no interrupts or new tx packets will
1102  * end up in the driver.
1103  */
1104 static void b44_init_rings(struct b44 *bp)
1105 {
1106  int i;
1107 
1108  b44_free_rings(bp);
1109 
1110  memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
1111  memset(bp->tx_ring, 0, B44_TX_RING_BYTES);
1112 
1113  if (bp->flags & B44_FLAG_RX_RING_HACK)
1114  dma_sync_single_for_device(bp->sdev->dma_dev, bp->rx_ring_dma,
1115  DMA_TABLE_BYTES, DMA_BIDIRECTIONAL);
1116 
1117  if (bp->flags & B44_FLAG_TX_RING_HACK)
1118  dma_sync_single_for_device(bp->sdev->dma_dev, bp->tx_ring_dma,
1119  DMA_TABLE_BYTES, DMA_TO_DEVICE);
1120 
1121  for (i = 0; i < bp->rx_pending; i++) {
1122  if (b44_alloc_rx_skb(bp, -1, i) < 0)
1123  break;
1124  }
1125 }
1126 
1127 /*
1128  * Must not be invoked with interrupt sources disabled and
1129  * the hardware shut down.
1130  */
1131 static void b44_free_consistent(struct b44 *bp)
1132 {
1133  kfree(bp->rx_buffers);
1134  bp->rx_buffers = NULL;
1135  kfree(bp->tx_buffers);
1136  bp->tx_buffers = NULL;
1137  if (bp->rx_ring) {
1138  if (bp->flags & B44_FLAG_RX_RING_HACK) {
1139  dma_unmap_single(bp->sdev->dma_dev, bp->rx_ring_dma,
1140  DMA_TABLE_BYTES, DMA_BIDIRECTIONAL);
1141  kfree(bp->rx_ring);
1142  } else
1143  dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
1144  bp->rx_ring, bp->rx_ring_dma);
1145  bp->rx_ring = NULL;
1146  bp->flags &= ~B44_FLAG_RX_RING_HACK;
1147  }
1148  if (bp->tx_ring) {
1149  if (bp->flags & B44_FLAG_TX_RING_HACK) {
1150  dma_unmap_single(bp->sdev->dma_dev, bp->tx_ring_dma,
1151  DMA_TABLE_BYTES, DMA_TO_DEVICE);
1152  kfree(bp->tx_ring);
1153  } else
1154  dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
1155  bp->tx_ring, bp->tx_ring_dma);
1156  bp->tx_ring = NULL;
1157  bp->flags &= ~B44_FLAG_TX_RING_HACK;
1158  }
1159 }
1160 
1161 /*
1162  * Must not be invoked with interrupt sources disabled and
1163  * the hardware shut down. Can sleep.
1164  */
1165 static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
1166 {
1167  int size;
1168 
1169  size = B44_RX_RING_SIZE * sizeof(struct ring_info);
1170  bp->rx_buffers = kzalloc(size, gfp);
1171  if (!bp->rx_buffers)
1172  goto out_err;
1173 
1174  size = B44_TX_RING_SIZE * sizeof(struct ring_info);
1175  bp->tx_buffers = kzalloc(size, gfp);
1176  if (!bp->tx_buffers)
1177  goto out_err;
1178 
1179  size = DMA_TABLE_BYTES;
1180  bp->rx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
1181  &bp->rx_ring_dma, gfp);
1182  if (!bp->rx_ring) {
1183  /* Allocation may have failed due to pci_alloc_consistent
1184  insisting on use of GFP_DMA, which is more restrictive
1185  than necessary... */
1186  struct dma_desc *rx_ring;
1187  dma_addr_t rx_ring_dma;
1188 
1189  rx_ring = kzalloc(size, gfp);
1190  if (!rx_ring)
1191  goto out_err;
1192 
1193  rx_ring_dma = dma_map_single(bp->sdev->dma_dev, rx_ring,
1194  DMA_TABLE_BYTES,
1195  DMA_BIDIRECTIONAL);
1196 
1197  if (dma_mapping_error(bp->sdev->dma_dev, rx_ring_dma) ||
1198  rx_ring_dma + size > DMA_BIT_MASK(30)) {
1199  kfree(rx_ring);
1200  goto out_err;
1201  }
1202 
1203  bp->rx_ring = rx_ring;
1204  bp->rx_ring_dma = rx_ring_dma;
1205  bp->flags |= B44_FLAG_RX_RING_HACK;
1206  }
1207 
1208  bp->tx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
1209  &bp->tx_ring_dma, gfp);
1210  if (!bp->tx_ring) {
1211  /* Allocation may have failed due to ssb_dma_alloc_consistent
1212  insisting on use of GFP_DMA, which is more restrictive
1213  than necessary... */
1214  struct dma_desc *tx_ring;
1215  dma_addr_t tx_ring_dma;
1216 
1217  tx_ring = kzalloc(size, gfp);
1218  if (!tx_ring)
1219  goto out_err;
1220 
1221  tx_ring_dma = dma_map_single(bp->sdev->dma_dev, tx_ring,
1222  DMA_TABLE_BYTES,
1223  DMA_TO_DEVICE);
1224 
1225  if (dma_mapping_error(bp->sdev->dma_dev, tx_ring_dma) ||
1226  tx_ring_dma + size > DMA_BIT_MASK(30)) {
1227  kfree(tx_ring);
1228  goto out_err;
1229  }
1230 
1231  bp->tx_ring = tx_ring;
1232  bp->tx_ring_dma = tx_ring_dma;
1233  bp->flags |= B44_FLAG_TX_RING_HACK;
1234  }
1235 
1236  return 0;
1237 
1238 out_err:
1239  b44_free_consistent(bp);
1240  return -ENOMEM;
1241 }
1242 
1243 /* bp->lock is held. */
1244 static void b44_clear_stats(struct b44 *bp)
1245 {
1246  unsigned long reg;
1247 
1248  bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
1249  for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL)
1250  br32(bp, reg);
1251  for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL)
1252  br32(bp, reg);
1253 }
1254 
1255 /* bp->lock is held. */
1256 static void b44_chip_reset(struct b44 *bp, int reset_kind)
1257 {
1258  struct ssb_device *sdev = bp->sdev;
1259  bool was_enabled;
1260 
1261  was_enabled = ssb_device_is_enabled(bp->sdev);
1262 
1263  ssb_device_enable(bp->sdev, 0);
1264  ssb_pcicore_dev_irqvecs_enable(&sdev->bus->pcicore, sdev);
1265 
1266  if (was_enabled) {
1267  bw32(bp, B44_RCV_LAZY, 0);
1268  bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
1269  b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 200, 1);
1270  bw32(bp, B44_DMATX_CTRL, 0);
1271  bp->tx_prod = bp->tx_cons = 0;
1272  if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) {
1273  b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
1274  100, 0);
1275  }
1276  bw32(bp, B44_DMARX_CTRL, 0);
1277  bp->rx_prod = bp->rx_cons = 0;
1278  }
1279 
1280  b44_clear_stats(bp);
1281 
1282  /*
1283  * Don't enable PHY if we are doing a partial reset
1284  * we are probably going to power down
1285  */
1286  if (reset_kind == B44_CHIP_RESET_PARTIAL)
1287  return;
1288 
1289  switch (sdev->bus->bustype) {
1290  case SSB_BUSTYPE_SSB:
1291  bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
1292  (DIV_ROUND_CLOSEST(ssb_clockspeed(sdev->bus),
1293  B44_MDC_RATIO)
1294  & MDIO_CTRL_MAXF_MASK)));
1295  break;
1296  case SSB_BUSTYPE_PCI:
1297  bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
1298  (0x0d & MDIO_CTRL_MAXF_MASK)));
1299  break;
1300  case SSB_BUSTYPE_PCMCIA:
1301  case SSB_BUSTYPE_SDIO:
1302  WARN_ON(1); /* A device with this bus does not exist. */
1303  break;
1304  }
1305 
1306  br32(bp, B44_MDIO_CTRL);
1307 
1308  if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
1309  bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
1310  br32(bp, B44_ENET_CTRL);
1311  bp->flags &= ~B44_FLAG_INTERNAL_PHY;
1312  } else {
1313  u32 val = br32(bp, B44_DEVCTRL);
1314 
1315  if (val & DEVCTRL_EPR) {
1316  bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
1317  br32(bp, B44_DEVCTRL);
1318  udelay(100);
1319  }
1320  bp->flags |= B44_FLAG_INTERNAL_PHY;
1321  }
1322 }
1323 
1324 /* bp->lock is held. */
1325 static void b44_halt(struct b44 *bp)
1326 {
1327  b44_disable_ints(bp);
1328  /* reset PHY */
1329  b44_phy_reset(bp);
1330  /* power down PHY */
1331  netdev_info(bp->dev, "powering down PHY\n");
1332  bw32(bp, B44_MAC_CTRL, MAC_CTRL_PHY_PDOWN);
1333  /* now reset the chip, but without enabling the MAC&PHY
1334  * part of it. This has to be done _after_ we shut down the PHY */
1335  b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
1336 }
1337 
1338 /* bp->lock is held. */
1339 static void __b44_set_mac_addr(struct b44 *bp)
1340 {
1341  bw32(bp, B44_CAM_CTRL, 0);
1342  if (!(bp->dev->flags & IFF_PROMISC)) {
1343  u32 val;
1344 
1345  __b44_cam_write(bp, bp->dev->dev_addr, 0);
1346  val = br32(bp, B44_CAM_CTRL);
1347  bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
1348  }
1349 }
1350 
1351 static int b44_set_mac_addr(struct net_device *dev, void *p)
1352 {
1353  struct b44 *bp = netdev_priv(dev);
1354  struct sockaddr *addr = p;
1355  u32 val;
1356 
1357  if (netif_running(dev))
1358  return -EBUSY;
1359 
1360  if (!is_valid_ether_addr(addr->sa_data))
1361  return -EINVAL;
1362 
1363  memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1364 
1365  spin_lock_irq(&bp->lock);
1366 
1367  val = br32(bp, B44_RXCONFIG);
1368  if (!(val & RXCONFIG_CAM_ABSENT))
1369  __b44_set_mac_addr(bp);
1370 
1371  spin_unlock_irq(&bp->lock);
1372 
1373  return 0;
1374 }
1375 
1376 /* Called at device open time to get the chip ready for
1377  * packet processing. Invoked with bp->lock held.
1378  */
1379 static void __b44_set_rx_mode(struct net_device *);
1380 static void b44_init_hw(struct b44 *bp, int reset_kind)
1381 {
1382  u32 val;
1383 
1384  b44_chip_reset(bp, B44_CHIP_RESET_FULL);
1385  if (reset_kind == B44_FULL_RESET) {
1386  b44_phy_reset(bp);
1387  b44_setup_phy(bp);
1388  }
1389 
1390  /* Enable CRC32, set proper LED modes and power on PHY */
1391  bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
1392  bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));
1393 
1394  /* This sets the MAC address too. */
1395  __b44_set_rx_mode(bp->dev);
1396 
1397  /* MTU + eth header + possible VLAN tag + struct rx_header */
1398  bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
1399  bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
1400 
1401  bw32(bp, B44_TX_WMARK, 56); /* XXX magic */
1402  if (reset_kind == B44_PARTIAL_RESET) {
1403  bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
1404  (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
1405  } else {
1406  bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
1407  bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
1408  bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
1409  (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
1410  bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);
1411 
1412  bw32(bp, B44_DMARX_PTR, bp->rx_pending);
1413  bp->rx_prod = bp->rx_pending;
1414 
1415  bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
1416  }
1417 
1418  val = br32(bp, B44_ENET_CTRL);
1419  bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
1420 }
1421 
1422 static int b44_open(struct net_device *dev)
1423 {
1424  struct b44 *bp = netdev_priv(dev);
1425  int err;
1426 
1427  err = b44_alloc_consistent(bp, GFP_KERNEL);
1428  if (err)
1429  goto out;
1430 
1431  napi_enable(&bp->napi);
1432 
1433  b44_init_rings(bp);
1434  b44_init_hw(bp, B44_FULL_RESET);
1435 
1436  b44_check_phy(bp);
1437 
1438  err = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
1439  if (unlikely(err < 0)) {
1440  napi_disable(&bp->napi);
1441  b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
1442  b44_free_rings(bp);
1443  b44_free_consistent(bp);
1444  goto out;
1445  }
1446 
1447  init_timer(&bp->timer);
1448  bp->timer.expires = jiffies + HZ;
1449  bp->timer.data = (unsigned long) bp;
1450  bp->timer.function = b44_timer;
1451  add_timer(&bp->timer);
1452 
1453  b44_enable_ints(bp);
1454  netif_start_queue(dev);
1455 out:
1456  return err;
1457 }
1458 
1459 #ifdef CONFIG_NET_POLL_CONTROLLER
1460 /*
1461  * Polling receive - used by netconsole and other diagnostic tools
1462  * to allow network i/o with interrupts disabled.
1463  */
1464 static void b44_poll_controller(struct net_device *dev)
1465 {
1466  disable_irq(dev->irq);
1467  b44_interrupt(dev->irq, dev);
1468  enable_irq(dev->irq);
1469 }
1470 #endif
1471 
1472 static void bwfilter_table(struct b44 *bp, u8 *pp, u32 bytes, u32 table_offset)
1473 {
1474  u32 i;
1475  u32 *pattern = (u32 *) pp;
1476 
1477  for (i = 0; i < bytes; i += sizeof(u32)) {
1478  bw32(bp, B44_FILT_ADDR, table_offset + i);
1479  bw32(bp, B44_FILT_DATA, pattern[i / sizeof(u32)]);
1480  }
1481 }
1482 
1483 static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset)
1484 {
1485  int magicsync = 6;
1486  int k, j, len = offset;
1487  int ethaddr_bytes = ETH_ALEN;
1488 
1489  memset(ppattern + offset, 0xff, magicsync);
1490  for (j = 0; j < magicsync; j++)
1491  set_bit(len++, (unsigned long *) pmask);
1492 
1493  for (j = 0; j < B44_MAX_PATTERNS; j++) {
1494  if ((B44_PATTERN_SIZE - len) >= ETH_ALEN)
1495  ethaddr_bytes = ETH_ALEN;
1496  else
1497  ethaddr_bytes = B44_PATTERN_SIZE - len;
1498  if (ethaddr_bytes <= 0)
1499  break;
1500  for (k = 0; k < ethaddr_bytes; k++) {
1501  ppattern[offset + magicsync +
1502  (j * ETH_ALEN) + k] = macaddr[k];
1503  set_bit(len++, (unsigned long *) pmask);
1504  }
1505  }
1506  return len - 1;
1507 }
1508 
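/* b44_magic_pattern() lays out the standard wake-on-LAN magic
 * payload at the given offset: a 6-byte synchronization stream of
 * 0xff followed by the station MAC repeated up to 16 times
 * (B44_MAX_PATTERNS), setting a mask bit for every byte the filter
 * must match. With ETH_ALEN = 6 that is 6 + 16 * 6 = 102 bytes when
 * it fits; the loop truncates the tail once B44_PATTERN_SIZE (128
 * bytes) is reached, which is why the final length is returned.
 */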
1509 /* Setup magic packet patterns in the b44 WOL
1510  * pattern matching filter.
1511  */
1512 static void b44_setup_pseudo_magicp(struct b44 *bp)
1513 {
1514 
1515  u32 val;
1516  int plen0, plen1, plen2;
1517  u8 *pwol_pattern;
1518  u8 pwol_mask[B44_PMASK_SIZE];
1519 
1520  pwol_pattern = kzalloc(B44_PATTERN_SIZE, GFP_KERNEL);
1521  if (!pwol_pattern) {
1522  pr_err("Memory not available for WOL\n");
1523  return;
1524  }
1525 
1526  /* Ipv4 magic packet pattern - pattern 0.*/
1527  memset(pwol_mask, 0, B44_PMASK_SIZE);
1528  plen0 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1529  B44_ETHIPV4UDP_HLEN);
1530 
1531  bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, B44_PATTERN_BASE);
1532  bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, B44_PMASK_BASE);
1533 
1534  /* Raw ethernet II magic packet pattern - pattern 1 */
1535  memset(pwol_pattern, 0, B44_PATTERN_SIZE);
1536  memset(pwol_mask, 0, B44_PMASK_SIZE);
1537  plen1 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1538  ETH_HLEN);
1539 
1540  bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
1541  B44_PATTERN_BASE + B44_PATTERN_SIZE);
1542  bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
1543  B44_PMASK_BASE + B44_PMASK_SIZE);
1544 
1545  /* Ipv6 magic packet pattern - pattern 2 */
1546  memset(pwol_pattern, 0, B44_PATTERN_SIZE);
1547  memset(pwol_mask, 0, B44_PMASK_SIZE);
1548  plen2 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1549  B44_ETHIPV6UDP_HLEN);
1550 
1551  bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
1552  B44_PATTERN_BASE + B44_PATTERN_SIZE + B44_PATTERN_SIZE);
1553  bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
1554  B44_PMASK_BASE + B44_PMASK_SIZE + B44_PMASK_SIZE);
1555 
1556  kfree(pwol_pattern);
1557 
1558  /* set these pattern's lengths: one less than each real length */
1559  val = plen0 | (plen1 << 8) | (plen2 << 16) | WKUP_LEN_ENABLE_THREE;
1560  bw32(bp, B44_WKUP_LEN, val);
1561 
1562  /* enable wakeup pattern matching */
1563  val = br32(bp, B44_DEVCTRL);
1564  bw32(bp, B44_DEVCTRL, val | DEVCTRL_PFE);
1565 
1566 }
1567 
1568 #ifdef CONFIG_B44_PCI
1569 static void b44_setup_wol_pci(struct b44 *bp)
1570 {
1571  u16 val;
1572 
1573  if (bp->sdev->bus->bustype != SSB_BUSTYPE_SSB) {
1574  bw32(bp, SSB_TMSLOW, br32(bp, SSB_TMSLOW) | SSB_TMSLOW_PE);
1575  pci_read_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, &val);
1576  pci_write_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, val | SSB_PE);
1577  }
1578 }
1579 #else
1580 static inline void b44_setup_wol_pci(struct b44 *bp) { }
1581 #endif /* CONFIG_B44_PCI */
1582 
1583 static void b44_setup_wol(struct b44 *bp)
1584 {
1585  u32 val;
1586 
1587  bw32(bp, B44_RXCONFIG, RXCONFIG_ALLMULTI);
1588 
1589  if (bp->flags & B44_FLAG_B0_ANDLATER) {
1590 
1591  bw32(bp, B44_WKUP_LEN, WKUP_LEN_DISABLE);
1592 
1593  val = bp->dev->dev_addr[2] << 24 |
1594  bp->dev->dev_addr[3] << 16 |
1595  bp->dev->dev_addr[4] << 8 |
1596  bp->dev->dev_addr[5];
1597  bw32(bp, B44_ADDR_LO, val);
1598 
1599  val = bp->dev->dev_addr[0] << 8 |
1600  bp->dev->dev_addr[1];
1601  bw32(bp, B44_ADDR_HI, val);
1602 
1603  val = br32(bp, B44_DEVCTRL);
1604  bw32(bp, B44_DEVCTRL, val | DEVCTRL_MPM | DEVCTRL_PFE);
1605 
1606  } else {
1607  b44_setup_pseudo_magicp(bp);
1608  }
1609  b44_setup_wol_pci(bp);
1610 }
1611 
1612 static int b44_close(struct net_device *dev)
1613 {
1614  struct b44 *bp = netdev_priv(dev);
1615 
1616  netif_stop_queue(dev);
1617 
1618  napi_disable(&bp->napi);
1619 
1620  del_timer_sync(&bp->timer);
1621 
1622  spin_lock_irq(&bp->lock);
1623 
1624  b44_halt(bp);
1625  b44_free_rings(bp);
1626  netif_carrier_off(dev);
1627 
1628  spin_unlock_irq(&bp->lock);
1629 
1630  free_irq(dev->irq, dev);
1631 
1632  if (bp->flags & B44_FLAG_WOL_ENABLE) {
1633  b44_init_hw(bp, B44_PARTIAL_RESET);
1634  b44_setup_wol(bp);
1635  }
1636 
1637  b44_free_consistent(bp);
1638 
1639  return 0;
1640 }
1641 
1642 static struct rtnl_link_stats64 *b44_get_stats64(struct net_device *dev,
1643  struct rtnl_link_stats64 *nstat)
1644 {
1645  struct b44 *bp = netdev_priv(dev);
1646  struct b44_hw_stats *hwstat = &bp->hw_stats;
1647  unsigned int start;
1648 
1649  do {
1650  start = u64_stats_fetch_begin_bh(&hwstat->syncp);
1651 
1652  /* Convert HW stats into rtnl_link_stats64 stats. */
1653  nstat->rx_packets = hwstat->rx_pkts;
1654  nstat->tx_packets = hwstat->tx_pkts;
1655  nstat->rx_bytes = hwstat->rx_octets;
1656  nstat->tx_bytes = hwstat->tx_octets;
1657  nstat->tx_errors = (hwstat->tx_jabber_pkts +
1658  hwstat->tx_oversize_pkts +
1659  hwstat->tx_underruns +
1660  hwstat->tx_excessive_cols +
1661  hwstat->tx_late_cols);
1662  nstat->multicast = hwstat->tx_multicast_pkts;
1663  nstat->collisions = hwstat->tx_total_cols;
1664 
1665  nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
1666  hwstat->rx_undersize);
1667  nstat->rx_over_errors = hwstat->rx_missed_pkts;
1668  nstat->rx_frame_errors = hwstat->rx_align_errs;
1669  nstat->rx_crc_errors = hwstat->rx_crc_errs;
1670  nstat->rx_errors = (hwstat->rx_jabber_pkts +
1671  hwstat->rx_oversize_pkts +
1672  hwstat->rx_missed_pkts +
1673  hwstat->rx_crc_align_errs +
1674  hwstat->rx_undersize +
1675  hwstat->rx_crc_errs +
1676  hwstat->rx_align_errs +
1677  hwstat->rx_symbol_errs);
1678 
1679  nstat->tx_aborted_errors = hwstat->tx_underruns;
1680 #if 0
1681  /* Carrier lost counter seems to be broken for some devices */
1682  nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
1683 #endif
1684  } while (u64_stats_fetch_retry_bh(&hwstat->syncp, start));
1685 
1686  return nstat;
1687 }
1688 
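/* The fetch/retry loop above is the reader side of the u64_stats
 * seqcount scheme: b44_stats_update() brackets its counter updates
 * with u64_stats_update_begin()/u64_stats_update_end(), and a reader
 * that observes a sequence change between fetch_begin and
 * fetch_retry simply re-copies the counters. This yields tear-free
 * 64-bit statistics on 32-bit machines without a lock in the reader
 * path.
 */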
1689 static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
1690 {
1691  struct netdev_hw_addr *ha;
1692  int i, num_ents;
1693 
1694  num_ents = min_t(int, netdev_mc_count(dev), B44_MCAST_TABLE_SIZE);
1695  i = 0;
1696  netdev_for_each_mc_addr(ha, dev) {
1697  if (i == num_ents)
1698  break;
1699  __b44_cam_write(bp, ha->addr, i++ + 1);
1700  }
1701  return i+1;
1702 }
1703 
1704 static void __b44_set_rx_mode(struct net_device *dev)
1705 {
1706  struct b44 *bp = netdev_priv(dev);
1707  u32 val;
1708 
1709  val = br32(bp, B44_RXCONFIG);
1710  val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
1711  if ((dev->flags & IFF_PROMISC) || (val & RXCONFIG_CAM_ABSENT)) {
1712  val |= RXCONFIG_PROMISC;
1713  bw32(bp, B44_RXCONFIG, val);
1714  } else {
1715  unsigned char zero[6] = {0, 0, 0, 0, 0, 0};
1716  int i = 1;
1717 
1718  __b44_set_mac_addr(bp);
1719 
1720  if ((dev->flags & IFF_ALLMULTI) ||
1721  (netdev_mc_count(dev) > B44_MCAST_TABLE_SIZE))
1722  val |= RXCONFIG_ALLMULTI;
1723  else
1724  i = __b44_load_mcast(bp, dev);
1725 
1726  for (; i < 64; i++)
1727  __b44_cam_write(bp, zero, i);
1728 
1729  bw32(bp, B44_RXCONFIG, val);
1730  val = br32(bp, B44_CAM_CTRL);
1731  bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
1732  }
1733 }
1734 
1735 static void b44_set_rx_mode(struct net_device *dev)
1736 {
1737  struct b44 *bp = netdev_priv(dev);
1738 
1739  spin_lock_irq(&bp->lock);
1740  __b44_set_rx_mode(dev);
1741  spin_unlock_irq(&bp->lock);
1742 }
1743 
1744 static u32 b44_get_msglevel(struct net_device *dev)
1745 {
1746  struct b44 *bp = netdev_priv(dev);
1747  return bp->msg_enable;
1748 }
1749 
1750 static void b44_set_msglevel(struct net_device *dev, u32 value)
1751 {
1752  struct b44 *bp = netdev_priv(dev);
1753  bp->msg_enable = value;
1754 }
1755 
1756 static void b44_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1757 {
1758  struct b44 *bp = netdev_priv(dev);
1759  struct ssb_bus *bus = bp->sdev->bus;
1760 
1761  strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
1762  strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
1763  switch (bus->bustype) {
1764  case SSB_BUSTYPE_PCI:
1765  strlcpy(info->bus_info, pci_name(bus->host_pci), sizeof(info->bus_info));
1766  break;
1767  case SSB_BUSTYPE_SSB:
1768  strlcpy(info->bus_info, "SSB", sizeof(info->bus_info));
1769  break;
1770  case SSB_BUSTYPE_PCMCIA:
1771  case SSB_BUSTYPE_SDIO:
1772  WARN_ON(1); /* A device with this bus does not exist. */
1773  break;
1774  }
1775 }
1776 
1777 static int b44_nway_reset(struct net_device *dev)
1778 {
1779  struct b44 *bp = netdev_priv(dev);
1780  u32 bmcr;
1781  int r;
1782 
1783  spin_lock_irq(&bp->lock);
1784  b44_readphy(bp, MII_BMCR, &bmcr);
1785  b44_readphy(bp, MII_BMCR, &bmcr);
1786  r = -EINVAL;
1787  if (bmcr & BMCR_ANENABLE) {
1788  b44_writephy(bp, MII_BMCR,
1789  bmcr | BMCR_ANRESTART);
1790  r = 0;
1791  }
1792  spin_unlock_irq(&bp->lock);
1793 
1794  return r;
1795 }
1796 
1797 static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1798 {
1799  struct b44 *bp = netdev_priv(dev);
1800 
1801  cmd->supported = (SUPPORTED_Autoneg);
1802  cmd->supported |= (SUPPORTED_100baseT_Half |
1803  SUPPORTED_100baseT_Full |
1804  SUPPORTED_10baseT_Half |
1805  SUPPORTED_10baseT_Full |
1806  SUPPORTED_MII);
1807 
1808  cmd->advertising = 0;
1809  if (bp->flags & B44_FLAG_ADV_10HALF)
1810  cmd->advertising |= ADVERTISED_10baseT_Half;
1811  if (bp->flags & B44_FLAG_ADV_10FULL)
1812  cmd->advertising |= ADVERTISED_10baseT_Full;
1813  if (bp->flags & B44_FLAG_ADV_100HALF)
1814  cmd->advertising |= ADVERTISED_100baseT_Half;
1815  if (bp->flags & B44_FLAG_ADV_100FULL)
1816  cmd->advertising |= ADVERTISED_100baseT_Full;
1817  cmd->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
1818  ethtool_cmd_speed_set(cmd, ((bp->flags & B44_FLAG_100_BASE_T) ?
1819  SPEED_100 : SPEED_10));
1820  cmd->duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
1821  DUPLEX_FULL : DUPLEX_HALF;
1822  cmd->port = 0;
1823  cmd->phy_address = bp->phy_addr;
1824  cmd->transceiver = (bp->flags & B44_FLAG_INTERNAL_PHY) ?
1825  XCVR_INTERNAL : XCVR_EXTERNAL;
1826  cmd->autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
1827  AUTONEG_DISABLE : AUTONEG_ENABLE;
1828  if (cmd->autoneg == AUTONEG_ENABLE)
1829  cmd->advertising |= ADVERTISED_Autoneg;
1830  if (!netif_running(dev)){
1831  ethtool_cmd_speed_set(cmd, 0);
1832  cmd->duplex = 0xff;
1833  }
1834  cmd->maxtxpkt = 0;
1835  cmd->maxrxpkt = 0;
1836  return 0;
1837 }
1838 
1839 static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1840 {
1841  struct b44 *bp = netdev_priv(dev);
1842  u32 speed = ethtool_cmd_speed(cmd);
1843 
1844  /* We do not support gigabit. */
1845  if (cmd->autoneg == AUTONEG_ENABLE) {
1846  if (cmd->advertising &
1847  (ADVERTISED_1000baseT_Half |
1848  ADVERTISED_1000baseT_Full))
1849  return -EINVAL;
1850  } else if ((speed != SPEED_100 &&
1851  speed != SPEED_10) ||
1852  (cmd->duplex != DUPLEX_HALF &&
1853  cmd->duplex != DUPLEX_FULL)) {
1854  return -EINVAL;
1855  }
1856 
1857  spin_lock_irq(&bp->lock);
1858 
1859  if (cmd->autoneg == AUTONEG_ENABLE) {
1860  bp->flags &= ~(B44_FLAG_FORCE_LINK |
1861  B44_FLAG_100_BASE_T |
1862  B44_FLAG_FULL_DUPLEX |
1863  B44_FLAG_ADV_10HALF |
1864  B44_FLAG_ADV_10FULL |
1865  B44_FLAG_ADV_100HALF |
1866  B44_FLAG_ADV_100FULL);
1867  if (cmd->advertising == 0) {
1868  bp->flags |= (B44_FLAG_ADV_10HALF |
1869  B44_FLAG_ADV_10FULL |
1870  B44_FLAG_ADV_100HALF |
1871  B44_FLAG_ADV_100FULL);
1872  } else {
1873  if (cmd->advertising & ADVERTISED_10baseT_Half)
1874  bp->flags |= B44_FLAG_ADV_10HALF;
1875  if (cmd->advertising & ADVERTISED_10baseT_Full)
1876  bp->flags |= B44_FLAG_ADV_10FULL;
1877  if (cmd->advertising & ADVERTISED_100baseT_Half)
1878  bp->flags |= B44_FLAG_ADV_100HALF;
1879  if (cmd->advertising & ADVERTISED_100baseT_Full)
1880  bp->flags |= B44_FLAG_ADV_100FULL;
1881  }
1882  } else {
1883  bp->flags |= B44_FLAG_FORCE_LINK;
1884  bp->flags &= ~(B44_FLAG_100_BASE_T | B44_FLAG_FULL_DUPLEX);
1885  if (speed == SPEED_100)
1886  bp->flags |= B44_FLAG_100_BASE_T;
1887  if (cmd->duplex == DUPLEX_FULL)
1888  bp->flags |= B44_FLAG_FULL_DUPLEX;
1889  }
1890 
1891  if (netif_running(dev))
1892  b44_setup_phy(bp);
1893 
1894  spin_unlock_irq(&bp->lock);
1895 
1896  return 0;
1897 }
1898 
1899 static void b44_get_ringparam(struct net_device *dev,
1900  struct ethtool_ringparam *ering)
1901 {
1902  struct b44 *bp = netdev_priv(dev);
1903 
1904  ering->rx_max_pending = B44_RX_RING_SIZE - 1;
1905  ering->rx_pending = bp->rx_pending;
1906 
1907  /* XXX ethtool lacks a tx_max_pending, oops... */
1908 }
1909 
1910 static int b44_set_ringparam(struct net_device *dev,
1911  struct ethtool_ringparam *ering)
1912 {
1913  struct b44 *bp = netdev_priv(dev);
1914 
1915  if ((ering->rx_pending > B44_RX_RING_SIZE - 1) ||
1916  (ering->rx_mini_pending != 0) ||
1917  (ering->rx_jumbo_pending != 0) ||
1918  (ering->tx_pending > B44_TX_RING_SIZE - 1))
1919  return -EINVAL;
1920 
1921  spin_lock_irq(&bp->lock);
1922 
1923  bp->rx_pending = ering->rx_pending;
1924  bp->tx_pending = ering->tx_pending;
1925 
1926  b44_halt(bp);
1927  b44_init_rings(bp);
1928  b44_init_hw(bp, B44_FULL_RESET);
1929  netif_wake_queue(bp->dev);
1930  spin_unlock_irq(&bp->lock);
1931 
1932  b44_enable_ints(bp);
1933 
1934  return 0;
1935 }
1936 
1937 static void b44_get_pauseparam(struct net_device *dev,
1938  struct ethtool_pauseparam *epause)
1939 {
1940  struct b44 *bp = netdev_priv(dev);
1941 
1942  epause->autoneg =
1943  (bp->flags & B44_FLAG_PAUSE_AUTO) != 0;
1944  epause->rx_pause =
1945  (bp->flags & B44_FLAG_RX_PAUSE) != 0;
1946  epause->tx_pause =
1947  (bp->flags & B44_FLAG_TX_PAUSE) != 0;
1948 }
1949 
1950 static int b44_set_pauseparam(struct net_device *dev,
1951  struct ethtool_pauseparam *epause)
1952 {
1953  struct b44 *bp = netdev_priv(dev);
1954 
1955  spin_lock_irq(&bp->lock);
1956  if (epause->autoneg)
1957  bp->flags |= B44_FLAG_PAUSE_AUTO;
1958  else
1959  bp->flags &= ~B44_FLAG_PAUSE_AUTO;
1960  if (epause->rx_pause)
1961  bp->flags |= B44_FLAG_RX_PAUSE;
1962  else
1963  bp->flags &= ~B44_FLAG_RX_PAUSE;
1964  if (epause->tx_pause)
1965  bp->flags |= B44_FLAG_TX_PAUSE;
1966  else
1967  bp->flags &= ~B44_FLAG_TX_PAUSE;
1968  if (bp->flags & B44_FLAG_PAUSE_AUTO) {
1969  b44_halt(bp);
1970  b44_init_rings(bp);
1971  b44_init_hw(bp, B44_FULL_RESET);
1972  } else {
1973  __b44_set_flow_ctrl(bp, bp->flags);
1974  }
1975  spin_unlock_irq(&bp->lock);
1976 
1977  b44_enable_ints(bp);
1978 
1979  return 0;
1980 }
1981 
1982 static void b44_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1983 {
1984  switch(stringset) {
1985  case ETH_SS_STATS:
1986  memcpy(data, *b44_gstrings, sizeof(b44_gstrings));
1987  break;
1988  }
1989 }
1990 
1991 static int b44_get_sset_count(struct net_device *dev, int sset)
1992 {
1993  switch (sset) {
1994  case ETH_SS_STATS:
1995  return ARRAY_SIZE(b44_gstrings);
1996  default:
1997  return -EOPNOTSUPP;
1998  }
1999 }
2000 
2001 static void b44_get_ethtool_stats(struct net_device *dev,
2002  struct ethtool_stats *stats, u64 *data)
2003 {
2004  struct b44 *bp = netdev_priv(dev);
2005  struct b44_hw_stats *hwstat = &bp->hw_stats;
2006  u64 *data_src, *data_dst;
2007  unsigned int start;
2008  u32 i;
2009 
2010  spin_lock_irq(&bp->lock);
2011  b44_stats_update(bp);
2012  spin_unlock_irq(&bp->lock);
2013 
2014  do {
2015  data_src = &hwstat->tx_good_octets;
2016  data_dst = data;
2017  start = u64_stats_fetch_begin_bh(&hwstat->syncp);
2018 
2019  for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
2020  *data_dst++ = *data_src++;
2021 
2022  } while (u64_stats_fetch_retry_bh(&hwstat->syncp, start));
2023 }
2024 
2025 static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2026 {
2027  struct b44 *bp = netdev_priv(dev);
2028 
2029  wol->supported = WAKE_MAGIC;
2030  if (bp->flags & B44_FLAG_WOL_ENABLE)
2031  wol->wolopts = WAKE_MAGIC;
2032  else
2033  wol->wolopts = 0;
2034  memset(&wol->sopass, 0, sizeof(wol->sopass));
2035 }
2036 
2037 static int b44_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2038 {
2039  struct b44 *bp = netdev_priv(dev);
2040 
2041  spin_lock_irq(&bp->lock);
2042  if (wol->wolopts & WAKE_MAGIC)
2043  bp->flags |= B44_FLAG_WOL_ENABLE;
2044  else
2045  bp->flags &= ~B44_FLAG_WOL_ENABLE;
2046  spin_unlock_irq(&bp->lock);
2047 
2048  return 0;
2049 }
2050 
2051 static const struct ethtool_ops b44_ethtool_ops = {
2052  .get_drvinfo = b44_get_drvinfo,
2053  .get_settings = b44_get_settings,
2054  .set_settings = b44_set_settings,
2055  .nway_reset = b44_nway_reset,
2056  .get_link = ethtool_op_get_link,
2057  .get_wol = b44_get_wol,
2058  .set_wol = b44_set_wol,
2059  .get_ringparam = b44_get_ringparam,
2060  .set_ringparam = b44_set_ringparam,
2061  .get_pauseparam = b44_get_pauseparam,
2062  .set_pauseparam = b44_set_pauseparam,
2063  .get_msglevel = b44_get_msglevel,
2064  .set_msglevel = b44_set_msglevel,
2065  .get_strings = b44_get_strings,
2066  .get_sset_count = b44_get_sset_count,
2067  .get_ethtool_stats = b44_get_ethtool_stats,
2068 };
2069 
2070 static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2071 {
2072  struct mii_ioctl_data *data = if_mii(ifr);
2073  struct b44 *bp = netdev_priv(dev);
2074  int err = -EINVAL;
2075 
2076  if (!netif_running(dev))
2077  goto out;
2078 
2079  spin_lock_irq(&bp->lock);
2080  err = generic_mii_ioctl(&bp->mii_if, data, cmd, NULL);
2081  spin_unlock_irq(&bp->lock);
2082 out:
2083  return err;
2084 }
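/* generic_mii_ioctl() services the standard SIOCGMIIPHY/SIOCGMIIREG/
 * SIOCSMIIREG requests via the mdio_read/mdio_write hooks installed in
 * b44_init_one(). A sketch of reading the PHY status register from
 * userspace (interface name and error handling are illustrative only):
 *
 *	struct ifreq ifr = { 0 };
 *	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ioctl(fd, SIOCGMIIPHY, &ifr);	// fills mii->phy_id
 *	mii->reg_num = MII_BMSR;	// basic mode status register
 *	ioctl(fd, SIOCGMIIREG, &ifr);	// PHY status lands in mii->val_out
 */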
2085 
2086 static int __devinit b44_get_invariants(struct b44 *bp)
2087 {
2088  struct ssb_device *sdev = bp->sdev;
2089  int err = 0;
2090  u8 *addr;
2091 
2092  bp->dma_offset = ssb_dma_translation(sdev);
2093 
2094  if (sdev->bus->bustype == SSB_BUSTYPE_SSB &&
2095  instance > 1) {
2096  addr = sdev->bus->sprom.et1mac;
2097  bp->phy_addr = sdev->bus->sprom.et1phyaddr;
2098  } else {
2099  addr = sdev->bus->sprom.et0mac;
2100  bp->phy_addr = sdev->bus->sprom.et0phyaddr;
2101  }
2102  /* Some ROMs have buggy PHY addresses with the high
2103  * bits set (sign extension?). Truncate them to a
2104  * valid PHY address. */
2105  bp->phy_addr &= 0x1F;
2106 
2107  memcpy(bp->dev->dev_addr, addr, 6);
2108 
2109  if (!is_valid_ether_addr(&bp->dev->dev_addr[0])) {
2110  pr_err("Invalid MAC address found in EEPROM\n");
2111  return -EINVAL;
2112  }
2113 
2114  memcpy(bp->dev->perm_addr, bp->dev->dev_addr, bp->dev->addr_len);
2115 
2116  bp->imask = IMASK_DEF;
2117 
2118  /* XXX - really required?
2119  bp->flags |= B44_FLAG_BUGGY_TXPTR;
2120  */
2121 
2122  if (bp->sdev->id.revision >= 7)
2123  bp->flags |= B44_FLAG_B0_ANDLATER;
2124 
2125  return err;
2126 }
2127 
2128 static const struct net_device_ops b44_netdev_ops = {
2129  .ndo_open = b44_open,
2130  .ndo_stop = b44_close,
2131  .ndo_start_xmit = b44_start_xmit,
2132  .ndo_get_stats64 = b44_get_stats64,
2133  .ndo_set_rx_mode = b44_set_rx_mode,
2134  .ndo_set_mac_address = b44_set_mac_addr,
2135  .ndo_validate_addr = eth_validate_addr,
2136  .ndo_do_ioctl = b44_ioctl,
2137  .ndo_tx_timeout = b44_tx_timeout,
2138  .ndo_change_mtu = b44_change_mtu,
2139 #ifdef CONFIG_NET_POLL_CONTROLLER
2140  .ndo_poll_controller = b44_poll_controller,
2141 #endif
2142 };
2143 
2144 static int __devinit b44_init_one(struct ssb_device *sdev,
2145  const struct ssb_device_id *ent)
2146 {
2147  struct net_device *dev;
2148  struct b44 *bp;
2149  int err;
2150 
2151  instance++;
2152 
2153  pr_info_once("%s version %s\n", DRV_DESCRIPTION, DRV_MODULE_VERSION);
2154 
2155  dev = alloc_etherdev(sizeof(*bp));
2156  if (!dev) {
2157  err = -ENOMEM;
2158  goto out;
2159  }
2160 
2161  SET_NETDEV_DEV(dev, sdev->dev);
2162 
2163  /* No interesting netdevice features in this card... */
2164  dev->features |= 0;
2165 
2166  bp = netdev_priv(dev);
2167  bp->sdev = sdev;
2168  bp->dev = dev;
2169  bp->force_copybreak = 0;
2170 
2171  bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);
2172 
2173  spin_lock_init(&bp->lock);
2174 
2175  bp->rx_pending = B44_DEF_RX_RING_PENDING;
2176  bp->tx_pending = B44_DEF_TX_RING_PENDING;
2177 
2178  dev->netdev_ops = &b44_netdev_ops;
2179  netif_napi_add(dev, &bp->napi, b44_poll, 64);
2180  dev->watchdog_timeo = B44_TX_TIMEOUT;
2181  dev->irq = sdev->irq;
2182  SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);
2183 
2184  err = ssb_bus_powerup(sdev->bus, 0);
2185  if (err) {
2186  dev_err(sdev->dev,
2187  "Failed to powerup the bus\n");
2188  goto err_out_free_dev;
2189  }
2190 
2191  if (dma_set_mask(sdev->dma_dev, DMA_BIT_MASK(30)) ||
2192      dma_set_coherent_mask(sdev->dma_dev, DMA_BIT_MASK(30))) {
2193  dev_err(sdev->dev,
2194  "Required 30BIT DMA mask unsupported by the system\n");
2195  goto err_out_powerdown;
2196  }
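/* DMA_BIT_MASK(30) reflects the 44xx DMA engine's 1 GB (2^30 byte)
 * addressing limit. Both calls are needed: dma_set_mask() bounds streaming
 * mappings (RX/TX buffers), while dma_set_coherent_mask() bounds the
 * coherent allocations backing the descriptor rings.
 */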
2197 
2198  err = b44_get_invariants(bp);
2199  if (err) {
2200  dev_err(sdev->dev,
2201  "Problem fetching invariants of chip, aborting\n");
2202  goto err_out_powerdown;
2203  }
2204 
2205  bp->mii_if.dev = dev;
2206  bp->mii_if.mdio_read = b44_mii_read;
2207  bp->mii_if.mdio_write = b44_mii_write;
2208  bp->mii_if.phy_id = bp->phy_addr;
2209  bp->mii_if.phy_id_mask = 0x1f;
2210  bp->mii_if.reg_num_mask = 0x1f;
2211 
2212  /* By default, advertise all speed/duplex settings. */
2213  bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL |
2214       B44_FLAG_ADV_100HALF | B44_FLAG_ADV_100FULL);
2215 
2216  /* By default, auto-negotiate PAUSE. */
2217  bp->flags |= B44_FLAG_PAUSE_AUTO;
2218 
2219  err = register_netdev(dev);
2220  if (err) {
2221  dev_err(sdev->dev, "Cannot register net device, aborting\n");
2222  goto err_out_powerdown;
2223  }
2224 
2225  netif_carrier_off(dev);
2226 
2227  ssb_set_drvdata(sdev, dev);
2228 
2229  /* Chip reset provides power to the b44 MAC & PCI cores, which
2230  * is necessary for MAC register access.
2231  */
2232  b44_chip_reset(bp, B44_CHIP_RESET_FULL);
2233 
2234  /* do a phy reset to test if there is an active phy */
2235  if (b44_phy_reset(bp) < 0)
2236  bp->phy_addr = B44_PHY_ADDR_NO_PHY;
2237 
2238  netdev_info(dev, "%s %pM\n", DRV_DESCRIPTION, dev->dev_addr);
2239 
2240  return 0;
2241 
2242 err_out_powerdown:
2243  ssb_bus_may_powerdown(sdev->bus);
2244 
2245 err_out_free_dev:
2246  free_netdev(dev);
2247 
2248 out:
2249  return err;
2250 }
2251 
2252 static void __devexit b44_remove_one(struct ssb_device *sdev)
2253 {
2254  struct net_device *dev = ssb_get_drvdata(sdev);
2255 
2256  unregister_netdev(dev);
2257  ssb_device_disable(sdev, 0);
2258  ssb_bus_may_powerdown(sdev->bus);
2259  free_netdev(dev);
2260  ssb_pcihost_set_power_state(sdev, PCI_D3hot);
2261  ssb_set_drvdata(sdev, NULL);
2262 }
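/* Removal tears down in reverse of probe: unregister_netdev() first, so no
 * net-stack entry points can run while the core is disabled and the bus is
 * allowed to power down, and the device only then drops to PCI_D3hot.
 */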
2263 
2264 static int b44_suspend(struct ssb_device *sdev, pm_message_t state)
2265 {
2266  struct net_device *dev = ssb_get_drvdata(sdev);
2267  struct b44 *bp = netdev_priv(dev);
2268 
2269  if (!netif_running(dev))
2270  return 0;
2271 
2272  del_timer_sync(&bp->timer);
2273 
2274  spin_lock_irq(&bp->lock);
2275 
2276  b44_halt(bp);
2277  netif_carrier_off(bp->dev);
2278  netif_device_detach(bp->dev);
2279  b44_free_rings(bp);
2280 
2281  spin_unlock_irq(&bp->lock);
2282 
2283  free_irq(dev->irq, dev);
2284  if (bp->flags & B44_FLAG_WOL_ENABLE) {
2285  b44_init_hw(bp, B44_PARTIAL_RESET);
2286  b44_setup_wol(bp);
2287  }
2288 
2289  ssb_pcihost_set_power_state(sdev, PCI_D3hot);
2290  return 0;
2291 }
2292 
2293 static int b44_resume(struct ssb_device *sdev)
2294 {
2295  struct net_device *dev = ssb_get_drvdata(sdev);
2296  struct b44 *bp = netdev_priv(dev);
2297  int rc = 0;
2298 
2299  rc = ssb_bus_powerup(sdev->bus, 0);
2300  if (rc) {
2301  dev_err(sdev->dev,
2302  "Failed to powerup the bus\n");
2303  return rc;
2304  }
2305 
2306  if (!netif_running(dev))
2307  return 0;
2308 
2309  spin_lock_irq(&bp->lock);
2310  b44_init_rings(bp);
2311  b44_init_hw(bp, B44_FULL_RESET);
2312  spin_unlock_irq(&bp->lock);
2313 
2314  /*
2315  * As a shared interrupt, the handler can be called immediately. To be
2316  * able to check the interrupt status the hardware must already be
2317  * powered back on (b44_init_hw).
2318  */
2319  rc = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
2320  if (rc) {
2321  netdev_err(dev, "request_irq failed\n");
2322  spin_lock_irq(&bp->lock);
2323  b44_halt(bp);
2324  b44_free_rings(bp);
2325  spin_unlock_irq(&bp->lock);
2326  return rc;
2327  }
2328 
2329  netif_device_attach(bp->dev);
2330 
2331  b44_enable_ints(bp);
2332  netif_wake_queue(dev);
2333 
2334  mod_timer(&bp->timer, jiffies + 1);
2335 
2336  return 0;
2337 }
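/* The mod_timer() call above re-arms bp->timer, which b44_suspend() stopped
 * with del_timer_sync(); jiffies + 1 schedules the periodic PHY/link check
 * for the very next tick so carrier state is refreshed promptly after
 * wakeup.
 */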
2338 
2339 static struct ssb_driver b44_ssb_driver = {
2340  .name = DRV_MODULE_NAME,
2341  .id_table = b44_ssb_tbl,
2342  .probe = b44_init_one,
2343  .remove = __devexit_p(b44_remove_one),
2344  .suspend = b44_suspend,
2345  .resume = b44_resume,
2346 };
2347 
2348 static inline int __init b44_pci_init(void)
2349 {
2350  int err = 0;
2351 #ifdef CONFIG_B44_PCI
2352  err = ssb_pcihost_register(&b44_pci_driver);
2353 #endif
2354  return err;
2355 }
2356 
2357 static inline void b44_pci_exit(void)
2358 {
2359 #ifdef CONFIG_B44_PCI
2360  ssb_pcihost_unregister(&b44_pci_driver);
2361 #endif
2362 }
2363 
2364 static int __init b44_init(void)
2365 {
2366  unsigned int dma_desc_align_size = dma_get_cache_alignment();
2367  int err;
2368 
2369  /* Setup parameters for syncing RX/TX DMA descriptors */
2370  dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));
2371 
2372  err = b44_pci_init();
2373  if (err)
2374  return err;
2375  err = ssb_driver_register(&b44_ssb_driver);
2376  if (err)
2377  b44_pci_exit();
2378  return err;
2379 }
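/* Registration order matters: the PCI host glue goes first so that SSB can
 * claim PCI-attached 44xx cores before the SSB driver itself registers; if
 * the second step fails, b44_pci_exit() unwinds the first. b44_cleanup()
 * below tears both down in reverse order.
 */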
2380 
2381 static void __exit b44_cleanup(void)
2382 {
2383  ssb_driver_unregister(&b44_ssb_driver);
2384  b44_pci_exit();
2385 }
2386 
2387 module_init(b44_init);
2388 module_exit(b44_cleanup);
2389