Linux Kernel 3.7.1 - bnx2.c
1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004-2011 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan ([email protected])
10  */
11 
12 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13 
14 #include <linux/module.h>
15 #include <linux/moduleparam.h>
16 
17 #include <linux/stringify.h>
18 #include <linux/kernel.h>
19 #include <linux/timer.h>
20 #include <linux/errno.h>
21 #include <linux/ioport.h>
22 #include <linux/slab.h>
23 #include <linux/vmalloc.h>
24 #include <linux/interrupt.h>
25 #include <linux/pci.h>
26 #include <linux/init.h>
27 #include <linux/netdevice.h>
28 #include <linux/etherdevice.h>
29 #include <linux/skbuff.h>
30 #include <linux/dma-mapping.h>
31 #include <linux/bitops.h>
32 #include <asm/io.h>
33 #include <asm/irq.h>
34 #include <linux/delay.h>
35 #include <asm/byteorder.h>
36 #include <asm/page.h>
37 #include <linux/time.h>
38 #include <linux/ethtool.h>
39 #include <linux/mii.h>
40 #include <linux/if.h>
41 #include <linux/if_vlan.h>
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/firmware.h>
50 #include <linux/log2.h>
51 #include <linux/aer.h>
52 
53 #if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
54 #define BCM_CNIC 1
55 #include "cnic_if.h"
56 #endif
57 #include "bnx2.h"
58 #include "bnx2_fw.h"
59 
60 #define DRV_MODULE_NAME "bnx2"
61 #define DRV_MODULE_VERSION "2.2.3"
62 #define DRV_MODULE_RELDATE "June 27, 2012"
63 #define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-6.2.3.fw"
64 #define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-6.0.15.fw"
65 #define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-6.2.1b.fw"
66 #define FW_RV2P_FILE_09_Ax "bnx2/bnx2-rv2p-09ax-6.0.17.fw"
67 #define FW_RV2P_FILE_09 "bnx2/bnx2-rv2p-09-6.0.17.fw"
68 
69 #define RUN_AT(x) (jiffies + (x))
70 
71 /* Time in jiffies before concluding the transmitter is hung. */
72 #define TX_TIMEOUT (5*HZ)
73 
74 static char version[] __devinitdata =
75  "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
76 
77 MODULE_AUTHOR("Michael Chan <[email protected]>");
78 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
79 MODULE_LICENSE("GPL");
80 MODULE_VERSION(DRV_MODULE_VERSION);
81 MODULE_FIRMWARE(FW_MIPS_FILE_06);
82 MODULE_FIRMWARE(FW_RV2P_FILE_06);
83 MODULE_FIRMWARE(FW_MIPS_FILE_09);
84 MODULE_FIRMWARE(FW_RV2P_FILE_09);
85 MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);
86 
87 static int disable_msi = 0;
88 
89 module_param(disable_msi, int, 0);
90 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
91 
92 typedef enum {
93  BCM5706 = 0,
94  NC370T,
95  NC370I,
96  BCM5706S,
97  NC370F,
98  BCM5708,
99  BCM5708S,
100  BCM5709,
101  BCM5709S,
102  BCM5716,
103  BCM5716S,
104 } board_t;
105 
106 /* indexed by board_t, above */
107 static struct {
108  char *name;
109 } board_info[] __devinitdata = {
110  { "Broadcom NetXtreme II BCM5706 1000Base-T" },
111  { "HP NC370T Multifunction Gigabit Server Adapter" },
112  { "HP NC370i Multifunction Gigabit Server Adapter" },
113  { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
114  { "HP NC370F Multifunction Gigabit Server Adapter" },
115  { "Broadcom NetXtreme II BCM5708 1000Base-T" },
116  { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
117  { "Broadcom NetXtreme II BCM5709 1000Base-T" },
118  { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
119  { "Broadcom NetXtreme II BCM5716 1000Base-T" },
120  { "Broadcom NetXtreme II BCM5716 1000Base-SX" },
121  };
122 
123 static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
124  { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
125  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
126  { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
127  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
128  { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
129  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
130  { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
131  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
132  { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
133  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
134  { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
135  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
136  { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
137  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
138  { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
139  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
140  { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
141  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
142  { PCI_VENDOR_ID_BROADCOM, 0x163b,
143  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
144  { PCI_VENDOR_ID_BROADCOM, 0x163c,
145  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
146  { 0, }
147 };
148 
149 static const struct flash_spec flash_table[] =
150 {
151 #define BUFFERED_FLAGS (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
152 #define NONBUFFERED_FLAGS (BNX2_NV_WREN)
153  /* Slow EEPROM */
154  {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
157  "EEPROM - slow"},
158  /* Expansion entry 0001 */
159  {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
162  "Entry 0001"},
163  /* Saifun SA25F010 (non-buffered flash) */
164  /* strap, cfg1, & write1 need updates */
165  {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
168  "Non-buffered flash (128kB)"},
169  /* Saifun SA25F020 (non-buffered flash) */
170  /* strap, cfg1, & write1 need updates */
171  {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
174  "Non-buffered flash (256kB)"},
175  /* Expansion entry 0100 */
176  {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
179  "Entry 0100"},
180  /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
181  {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
184  "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
185  /* Entry 0110: ST M45PE20 (non-buffered flash)*/
186  {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
189  "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
190  /* Saifun SA25F005 (non-buffered flash) */
191  /* strap, cfg1, & write1 need updates */
192  {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
195  "Non-buffered flash (64kB)"},
196  /* Fast EEPROM */
197  {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
200  "EEPROM - fast"},
201  /* Expansion entry 1001 */
202  {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
205  "Entry 1001"},
206  /* Expansion entry 1010 */
207  {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
210  "Entry 1010"},
211  /* ATMEL AT45DB011B (buffered flash) */
212  {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
215  "Buffered flash (128kB)"},
216  /* Expansion entry 1100 */
217  {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
220  "Entry 1100"},
221  /* Expansion entry 1101 */
222  {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
225  "Entry 1101"},
226  /* Atmel Expansion entry 1110 */
227  {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
230  "Entry 1110 (Atmel)"},
231  /* ATMEL AT45DB021B (buffered flash) */
232  {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
235  "Buffered flash (256kB)"},
236 };
237 
238 static const struct flash_spec flash_5709 = {
239  .flags = BNX2_NV_BUFFERED,
240  .page_bits = BCM5709_FLASH_PAGE_BITS,
241  .page_size = BCM5709_FLASH_PAGE_SIZE,
242  .addr_mask = BCM5709_FLASH_BYTE_ADDR_MASK,
243  .total_size = BUFFERED_FLASH_TOTAL_SIZE*2,
244  .name = "5709 Buffered flash (256kB)",
245 };
246 
247 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
248 
249 static void bnx2_init_napi(struct bnx2 *bp);
250 static void bnx2_del_napi(struct bnx2 *bp);
251 
252 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
253 {
254  u32 diff;
255 
256  /* Tell compiler to fetch tx_prod and tx_cons from memory. */
257  barrier();
258 
259  /* The ring uses 256 indices for 255 entries; one of them
260  * needs to be skipped.
261  */
262  diff = txr->tx_prod - txr->tx_cons;
263  if (unlikely(diff >= TX_DESC_CNT)) {
264  diff &= 0xffff;
265  if (diff == TX_DESC_CNT)
266  diff = MAX_TX_DESC_CNT;
267  }
268  return bp->tx_ring_size - diff;
269 }
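
A worked example of the wrap-around arithmetic above (illustrative values; per bnx2.h, TX_DESC_CNT is 256 and MAX_TX_DESC_CNT is 255):

/* Suppose the 16-bit producer index has wrapped: tx_prod == 0x0003 and
 * tx_cons == 0xfffe.  The u32 subtraction gives 0xffff0005; masking with
 * 0xffff recovers 5 descriptors in flight, so the function returns
 * bp->tx_ring_size - 5.  If producer and consumer are exactly one full
 * ring apart, the masked diff equals TX_DESC_CNT (256) and is clamped to
 * MAX_TX_DESC_CNT (255) -- the one skipped index mentioned above.
 */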
270 
271 static u32
272 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
273 {
274  u32 val;
275 
276  spin_lock_bh(&bp->indirect_lock);
277  REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
278  val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
279  spin_unlock_bh(&bp->indirect_lock);
280  return val;
281 }
282 
283 static void
284 bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
285 {
286  spin_lock_bh(&bp->indirect_lock);
287  REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
288  REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
289  spin_unlock_bh(&bp->indirect_lock);
290 }
291 
292 static void
293 bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
294 {
295  bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
296 }
297 
298 static u32
299 bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
300 {
301  return bnx2_reg_rd_ind(bp, bp->shmem_base + offset);
302 }
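
These two shmem helpers compose the indirect-window accessors above into shared-memory reads and writes; the firmware mailbox handshake later in this file is built entirely on them, e.g. in bnx2_fw_sync() near the end of the listing:

/* bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);   post a driver command
 * val = bnx2_shmem_rd(bp, BNX2_FW_MB);        poll for the firmware ack
 */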
303 
304 static void
305 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
306 {
307  offset += cid_addr;
308  spin_lock_bh(&bp->indirect_lock);
309  if (CHIP_NUM(bp) == CHIP_NUM_5709) {
310  int i;
311 
312  REG_WR(bp, BNX2_CTX_CTX_DATA, val);
313  REG_WR(bp, BNX2_CTX_CTX_CTRL,
314  offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
315  for (i = 0; i < 5; i++) {
316  val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
317  if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
318  break;
319  udelay(5);
320  }
321  } else {
322  REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
323  REG_WR(bp, BNX2_CTX_DATA, val);
324  }
325  spin_unlock_bh(&bp->indirect_lock);
326 }
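
On the 5709, context memory is written through the CTX_CTX_DATA/CTX_CTX_CTRL register pair, and the WRITE_REQ bit is polled (at most 5 iterations of 5 us) until the hardware latches the word; older chips take the direct address/data path. Callers pass a context id translated by GET_CID_ADDR(), as in bnx2_init_rx_context() further down:

/* bnx2_ctx_wr(bp, GET_CID_ADDR(cid), BNX2_L2CTX_CTX_TYPE, val); */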
327 
328 #ifdef BCM_CNIC
329 static int
330 bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
331 {
332  struct bnx2 *bp = netdev_priv(dev);
333  struct drv_ctl_io *io = &info->data.io;
334 
335  switch (info->cmd) {
336  case DRV_CTL_IO_WR_CMD:
337  bnx2_reg_wr_ind(bp, io->offset, io->data);
338  break;
339  case DRV_CTL_IO_RD_CMD:
340  io->data = bnx2_reg_rd_ind(bp, io->offset);
341  break;
342  case DRV_CTL_CTX_WR_CMD:
343  bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
344  break;
345  default:
346  return -EINVAL;
347  }
348  return 0;
349 }
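
A sketch of the calling side (hypothetical code; the real caller lives in the separate cnic module): the offload driver reaches this device's register and context space only through the drv_ctl entry point that bnx2_cnic_probe() hands out below.

/* struct drv_ctl_info info = { .cmd = DRV_CTL_IO_RD_CMD };
 *
 * info.data.io.offset = BNX2_FW_MAX_ISCSI_CONN;
 * if (!cp->drv_ctl(dev, &info))
 *         max_iscsi_conn = info.data.io.data;   (illustrative only)
 */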
350 
351 static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
352 {
353  struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
354  struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
355  int sb_id;
356 
357  if (bp->flags & BNX2_FLAG_USING_MSIX) {
358  cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
359  bnapi->cnic_present = 0;
360  sb_id = bp->irq_nvecs;
361  cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
362  } else {
363  cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
364  bnapi->cnic_tag = bnapi->last_status_idx;
365  bnapi->cnic_present = 1;
366  sb_id = 0;
367  cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
368  }
369 
370  cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
371  cp->irq_arr[0].status_blk = (void *)
372  ((unsigned long) bnapi->status_blk.msi +
373  (BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
374  cp->irq_arr[0].status_blk_num = sb_id;
375  cp->num_irq = 1;
376 }
377 
378 static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
379  void *data)
380 {
381  struct bnx2 *bp = netdev_priv(dev);
382  struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
383 
384  if (ops == NULL)
385  return -EINVAL;
386 
387  if (cp->drv_state & CNIC_DRV_STATE_REGD)
388  return -EBUSY;
389 
390  if (!bnx2_reg_rd_ind(bp, BNX2_FW_MAX_ISCSI_CONN))
391  return -ENODEV;
392 
393  bp->cnic_data = data;
394  rcu_assign_pointer(bp->cnic_ops, ops);
395 
396  cp->num_irq = 0;
397  cp->drv_state = CNIC_DRV_STATE_REGD;
398 
399  bnx2_setup_cnic_irq_info(bp);
400 
401  return 0;
402 }
403 
404 static int bnx2_unregister_cnic(struct net_device *dev)
405 {
406  struct bnx2 *bp = netdev_priv(dev);
407  struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
408  struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
409 
410  mutex_lock(&bp->cnic_lock);
411  cp->drv_state = 0;
412  bnapi->cnic_present = 0;
413  RCU_INIT_POINTER(bp->cnic_ops, NULL);
414  mutex_unlock(&bp->cnic_lock);
415  synchronize_rcu();
416  return 0;
417 }
418 
419 struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
420 {
421  struct bnx2 *bp = netdev_priv(dev);
422  struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
423 
424  if (!cp->max_iscsi_conn)
425  return NULL;
426 
427  cp->drv_owner = THIS_MODULE;
428  cp->chip_id = bp->chip_id;
429  cp->pdev = bp->pdev;
430  cp->io_base = bp->regview;
431  cp->drv_ctl = bnx2_drv_ctl;
432  cp->drv_register_cnic = bnx2_register_cnic;
433  cp->drv_unregister_cnic = bnx2_unregister_cnic;
434 
435  return cp;
436 }
437 EXPORT_SYMBOL(bnx2_cnic_probe);
438 
439 static void
440 bnx2_cnic_stop(struct bnx2 *bp)
441 {
442  struct cnic_ops *c_ops;
443  struct cnic_ctl_info info;
444 
445  mutex_lock(&bp->cnic_lock);
446  c_ops = rcu_dereference_protected(bp->cnic_ops,
447  lockdep_is_held(&bp->cnic_lock));
448  if (c_ops) {
449  info.cmd = CNIC_CTL_STOP_CMD;
450  c_ops->cnic_ctl(bp->cnic_data, &info);
451  }
452  mutex_unlock(&bp->cnic_lock);
453 }
454 
455 static void
456 bnx2_cnic_start(struct bnx2 *bp)
457 {
458  struct cnic_ops *c_ops;
459  struct cnic_ctl_info info;
460 
461  mutex_lock(&bp->cnic_lock);
462  c_ops = rcu_dereference_protected(bp->cnic_ops,
463  lockdep_is_held(&bp->cnic_lock));
464  if (c_ops) {
465  if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
466  struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
467 
468  bnapi->cnic_tag = bnapi->last_status_idx;
469  }
470  info.cmd = CNIC_CTL_START_CMD;
471  c_ops->cnic_ctl(bp->cnic_data, &info);
472  }
473  mutex_unlock(&bp->cnic_lock);
474 }
475 
476 #else
477 
478 static void
479 bnx2_cnic_stop(struct bnx2 *bp)
480 {
481 }
482 
483 static void
484 bnx2_cnic_start(struct bnx2 *bp)
485 {
486 }
487 
488 #endif
489 
490 static int
491 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
492 {
493  u32 val1;
494  int i, ret;
495 
496  if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
497  val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
498  val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
499 
500  REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
501  REG_RD(bp, BNX2_EMAC_MDIO_MODE);
502 
503  udelay(40);
504  }
505 
506  val1 = (bp->phy_addr << 21) | (reg << 16) |
507  BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
508  BNX2_EMAC_MDIO_COMM_START_BUSY;
509  REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
510 
511  for (i = 0; i < 50; i++) {
512  udelay(10);
513 
514  val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
515  if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
516  udelay(5);
517 
518  val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
519  val1 &= BNX2_EMAC_MDIO_COMM_DATA;
520 
521  break;
522  }
523  }
524 
525  if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
526  *val = 0x0;
527  ret = -EBUSY;
528  }
529  else {
530  *val = val1;
531  ret = 0;
532  }
533 
534  if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
535  val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
536  val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
537 
538  REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
539  REG_RD(bp, BNX2_EMAC_MDIO_MODE);
540 
541  udelay(40);
542  }
543 
544  return ret;
545 }
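
The MDIO_COMM word assembled in this routine mirrors the IEEE clause-22 management frame directly; a summary of the packing used above:

/* bits 25:21  PHY address       (bp->phy_addr << 21)
 * bits 20:16  register number   (reg << 16)
 * bits 15:0   data              (the read result, or the value written)
 *
 * START_BUSY kicks off the transaction and is polled until the MAC
 * clears it; if it is still set after 50 polls of 10 us the access
 * has failed and the routine returns -EBUSY with *val zeroed.
 */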
546 
547 static int
548 bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
549 {
550  u32 val1;
551  int i, ret;
552 
553  if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
554  val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
555  val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
556 
557  REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
558  REG_RD(bp, BNX2_EMAC_MDIO_MODE);
559 
560  udelay(40);
561  }
562 
563  val1 = (bp->phy_addr << 21) | (reg << 16) | val |
564  BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
565  BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
566  REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
567 
568  for (i = 0; i < 50; i++) {
569  udelay(10);
570 
571  val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
572  if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
573  udelay(5);
574  break;
575  }
576  }
577 
578  if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
579  ret = -EBUSY;
580  else
581  ret = 0;
582 
583  if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
584  val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
585  val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
586 
587  REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
588  REG_RD(bp, BNX2_EMAC_MDIO_MODE);
589 
590  udelay(40);
591  }
592 
593  return ret;
594 }
595 
596 static void
597 bnx2_disable_int(struct bnx2 *bp)
598 {
599  int i;
600  struct bnx2_napi *bnapi;
601 
602  for (i = 0; i < bp->irq_nvecs; i++) {
603  bnapi = &bp->bnx2_napi[i];
604  REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
605  BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
606  }
607  REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
608 }
609 
610 static void
611 bnx2_enable_int(struct bnx2 *bp)
612 {
613  int i;
614  struct bnx2_napi *bnapi;
615 
616  for (i = 0; i < bp->irq_nvecs; i++) {
617  bnapi = &bp->bnx2_napi[i];
618 
619  REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
620  BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
621  BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
622  bnapi->last_status_idx);
623 
624  REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
625  BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
626  bnapi->last_status_idx);
627  }
628  REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
629 }
630 
631 static void
632 bnx2_disable_int_sync(struct bnx2 *bp)
633 {
634  int i;
635 
636  atomic_inc(&bp->intr_sem);
637  if (!netif_running(bp->dev))
638  return;
639 
640  bnx2_disable_int(bp);
641  for (i = 0; i < bp->irq_nvecs; i++)
642  synchronize_irq(bp->irq_tbl[i].vector);
643 }
644 
645 static void
646 bnx2_napi_disable(struct bnx2 *bp)
647 {
648  int i;
649 
650  for (i = 0; i < bp->irq_nvecs; i++)
651  napi_disable(&bp->bnx2_napi[i].napi);
652 }
653 
654 static void
655 bnx2_napi_enable(struct bnx2 *bp)
656 {
657  int i;
658 
659  for (i = 0; i < bp->irq_nvecs; i++)
660  napi_enable(&bp->bnx2_napi[i].napi);
661 }
662 
663 static void
664 bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
665 {
666  if (stop_cnic)
667  bnx2_cnic_stop(bp);
668  if (netif_running(bp->dev)) {
669  bnx2_napi_disable(bp);
670  netif_tx_disable(bp->dev);
671  }
672  bnx2_disable_int_sync(bp);
673  netif_carrier_off(bp->dev); /* prevent tx timeout */
674 }
675 
676 static void
677 bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
678 {
679  if (atomic_dec_and_test(&bp->intr_sem)) {
680  if (netif_running(bp->dev)) {
681  netif_tx_wake_all_queues(bp->dev);
682  spin_lock_bh(&bp->phy_lock);
683  if (bp->link_up)
684  netif_carrier_on(bp->dev);
685  spin_unlock_bh(&bp->phy_lock);
686  bnx2_napi_enable(bp);
687  bnx2_enable_int(bp);
688  if (start_cnic)
689  bnx2_cnic_start(bp);
690  }
691  }
692 }
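
bp->intr_sem lets stop/start pairs nest; a sketch of the semantics (not a sequence taken from this driver): every bnx2_netif_stop() increments the semaphore via bnx2_disable_int_sync(), and interrupts only return when the matching bnx2_netif_start() brings it back to zero through atomic_dec_and_test().

/* bnx2_netif_stop(bp, true);    intr_sem 0 -> 1, NAPI and IRQs off
 * bnx2_netif_stop(bp, false);   intr_sem 1 -> 2
 * bnx2_netif_start(bp, false);  intr_sem 2 -> 1, still quiesced
 * bnx2_netif_start(bp, true);   intr_sem 1 -> 0, NAPI and IRQs back on
 */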
693 
694 static void
695 bnx2_free_tx_mem(struct bnx2 *bp)
696 {
697  int i;
698 
699  for (i = 0; i < bp->num_tx_rings; i++) {
700  struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
701  struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
702 
703  if (txr->tx_desc_ring) {
704  dma_free_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
705  txr->tx_desc_ring,
706  txr->tx_desc_mapping);
707  txr->tx_desc_ring = NULL;
708  }
709  kfree(txr->tx_buf_ring);
710  txr->tx_buf_ring = NULL;
711  }
712 }
713 
714 static void
715 bnx2_free_rx_mem(struct bnx2 *bp)
716 {
717  int i;
718 
719  for (i = 0; i < bp->num_rx_rings; i++) {
720  struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
721  struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
722  int j;
723 
724  for (j = 0; j < bp->rx_max_ring; j++) {
725  if (rxr->rx_desc_ring[j])
726  dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
727  rxr->rx_desc_ring[j],
728  rxr->rx_desc_mapping[j]);
729  rxr->rx_desc_ring[j] = NULL;
730  }
731  vfree(rxr->rx_buf_ring);
732  rxr->rx_buf_ring = NULL;
733 
734  for (j = 0; j < bp->rx_max_pg_ring; j++) {
735  if (rxr->rx_pg_desc_ring[j])
736  dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
737  rxr->rx_pg_desc_ring[j],
738  rxr->rx_pg_desc_mapping[j]);
739  rxr->rx_pg_desc_ring[j] = NULL;
740  }
741  vfree(rxr->rx_pg_ring);
742  rxr->rx_pg_ring = NULL;
743  }
744 }
745 
746 static int
747 bnx2_alloc_tx_mem(struct bnx2 *bp)
748 {
749  int i;
750 
751  for (i = 0; i < bp->num_tx_rings; i++) {
752  struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
753  struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
754 
755  txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
756  if (txr->tx_buf_ring == NULL)
757  return -ENOMEM;
758 
759  txr->tx_desc_ring =
760  dma_alloc_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
761  &txr->tx_desc_mapping, GFP_KERNEL);
762  if (txr->tx_desc_ring == NULL)
763  return -ENOMEM;
764  }
765  return 0;
766 }
767 
768 static int
769 bnx2_alloc_rx_mem(struct bnx2 *bp)
770 {
771  int i;
772 
773  for (i = 0; i < bp->num_rx_rings; i++) {
774  struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
775  struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
776  int j;
777 
778  rxr->rx_buf_ring =
779  vzalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
780  if (rxr->rx_buf_ring == NULL)
781  return -ENOMEM;
782 
783  for (j = 0; j < bp->rx_max_ring; j++) {
784  rxr->rx_desc_ring[j] =
785  dma_alloc_coherent(&bp->pdev->dev,
786  RXBD_RING_SIZE,
787  &rxr->rx_desc_mapping[j],
788  GFP_KERNEL);
789  if (rxr->rx_desc_ring[j] == NULL)
790  return -ENOMEM;
791 
792  }
793 
794  if (bp->rx_pg_ring_size) {
795  rxr->rx_pg_ring = vzalloc(SW_RXPG_RING_SIZE *
796  bp->rx_max_pg_ring);
797  if (rxr->rx_pg_ring == NULL)
798  return -ENOMEM;
799 
800  }
801 
802  for (j = 0; j < bp->rx_max_pg_ring; j++) {
803  rxr->rx_pg_desc_ring[j] =
804  dma_alloc_coherent(&bp->pdev->dev,
805  RXBD_RING_SIZE,
806  &rxr->rx_pg_desc_mapping[j],
807  GFP_KERNEL);
808  if (rxr->rx_pg_desc_ring[j] == NULL)
809  return -ENOMEM;
810 
811  }
812  }
813  return 0;
814 }
815 
816 static void
817 bnx2_free_mem(struct bnx2 *bp)
818 {
819  int i;
820  struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
821 
822  bnx2_free_tx_mem(bp);
823  bnx2_free_rx_mem(bp);
824 
825  for (i = 0; i < bp->ctx_pages; i++) {
826  if (bp->ctx_blk[i]) {
827  dma_free_coherent(&bp->pdev->dev, BCM_PAGE_SIZE,
828  bp->ctx_blk[i],
829  bp->ctx_blk_mapping[i]);
830  bp->ctx_blk[i] = NULL;
831  }
832  }
833  if (bnapi->status_blk.msi) {
834  dma_free_coherent(&bp->pdev->dev, bp->status_stats_size,
835  bnapi->status_blk.msi,
836  bp->status_blk_mapping);
837  bnapi->status_blk.msi = NULL;
838  bp->stats_blk = NULL;
839  }
840 }
841 
842 static int
843 bnx2_alloc_mem(struct bnx2 *bp)
844 {
845  int i, status_blk_size, err;
846  struct bnx2_napi *bnapi;
847  void *status_blk;
848 
849  /* Combine status and statistics blocks into one allocation. */
850  status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
851  if (bp->flags & BNX2_FLAG_MSIX_CAP)
852  status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
853  BNX2_SBLK_MSIX_ALIGN_SIZE);
854  bp->status_stats_size = status_blk_size +
855  sizeof(struct statistics_block);
856 
857  status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size,
858  &bp->status_blk_mapping, GFP_KERNEL);
859  if (status_blk == NULL)
860  goto alloc_mem_err;
861 
862  memset(status_blk, 0, bp->status_stats_size);
863 
864  bnapi = &bp->bnx2_napi[0];
865  bnapi->status_blk.msi = status_blk;
866  bnapi->hw_tx_cons_ptr =
867  &bnapi->status_blk.msi->status_tx_quick_consumer_index0;
868  bnapi->hw_rx_cons_ptr =
869  &bnapi->status_blk.msi->status_rx_quick_consumer_index0;
870  if (bp->flags & BNX2_FLAG_MSIX_CAP) {
871  for (i = 1; i < bp->irq_nvecs; i++) {
872  struct status_block_msix *sblk;
873 
874  bnapi = &bp->bnx2_napi[i];
875 
876  sblk = (status_blk + BNX2_SBLK_MSIX_ALIGN_SIZE * i);
877  bnapi->status_blk.msix = sblk;
878  bnapi->hw_tx_cons_ptr =
879  &sblk->status_tx_quick_consumer_index;
880  bnapi->hw_rx_cons_ptr =
881  &sblk->status_rx_quick_consumer_index;
882  bnapi->int_num = i << 24;
883  }
884  }
885 
886  bp->stats_blk = status_blk + status_blk_size;
887 
888  bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
889 
890  if (CHIP_NUM(bp) == CHIP_NUM_5709) {
891  bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
892  if (bp->ctx_pages == 0)
893  bp->ctx_pages = 1;
894  for (i = 0; i < bp->ctx_pages; i++) {
895  bp->ctx_blk[i] = dma_alloc_coherent(&bp->pdev->dev,
896  BCM_PAGE_SIZE,
897  &bp->ctx_blk_mapping[i],
898  GFP_KERNEL);
899  if (bp->ctx_blk[i] == NULL)
900  goto alloc_mem_err;
901  }
902  }
903 
904  err = bnx2_alloc_rx_mem(bp);
905  if (err)
906  goto alloc_mem_err;
907 
908  err = bnx2_alloc_tx_mem(bp);
909  if (err)
910  goto alloc_mem_err;
911 
912  return 0;
913 
914 alloc_mem_err:
915  bnx2_free_mem(bp);
916  return -ENOMEM;
917 }
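
The combined block allocated above has this layout (illustrative diagram for the MSI-X case):

/* status_blk_mapping --> +------------------------------------------+
 *                        | status block, vector 0                   |
 *                        | status blocks, vectors 1..n (MSI-X),     |
 *                        | one BNX2_SBLK_MSIX_ALIGN_SIZE slot each  |
 * stats_blk_mapping ---> +------------------------------------------+
 *                        | struct statistics_block                  |
 *                        +------------------------------------------+
 */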
918 
919 static void
920 bnx2_report_fw_link(struct bnx2 *bp)
921 {
922  u32 fw_link_status = 0;
923 
924  if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
925  return;
926 
927  if (bp->link_up) {
928  u32 bmsr;
929 
930  switch (bp->line_speed) {
931  case SPEED_10:
932  if (bp->duplex == DUPLEX_HALF)
933  fw_link_status = BNX2_LINK_STATUS_10HALF;
934  else
935  fw_link_status = BNX2_LINK_STATUS_10FULL;
936  break;
937  case SPEED_100:
938  if (bp->duplex == DUPLEX_HALF)
939  fw_link_status = BNX2_LINK_STATUS_100HALF;
940  else
941  fw_link_status = BNX2_LINK_STATUS_100FULL;
942  break;
943  case SPEED_1000:
944  if (bp->duplex == DUPLEX_HALF)
945  fw_link_status = BNX2_LINK_STATUS_1000HALF;
946  else
947  fw_link_status = BNX2_LINK_STATUS_1000FULL;
948  break;
949  case SPEED_2500:
950  if (bp->duplex == DUPLEX_HALF)
951  fw_link_status = BNX2_LINK_STATUS_2500HALF;
952  else
953  fw_link_status = BNX2_LINK_STATUS_2500FULL;
954  break;
955  }
956 
957  fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
958 
959  if (bp->autoneg) {
960  fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
961 
962  bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
963  bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
964 
965  if (!(bmsr & BMSR_ANEGCOMPLETE) ||
966  (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT))
967  fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
968  else
969  fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
970  }
971  }
972  else
973  fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
974 
975  bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
976 }
977 
978 static char *
979 bnx2_xceiver_str(struct bnx2 *bp)
980 {
981  return (bp->phy_port == PORT_FIBRE) ? "SerDes" :
982  ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
983  "Copper");
984 }
985 
986 static void
987 bnx2_report_link(struct bnx2 *bp)
988 {
989  if (bp->link_up) {
990  netif_carrier_on(bp->dev);
991  netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
992  bnx2_xceiver_str(bp),
993  bp->line_speed,
994  bp->duplex == DUPLEX_FULL ? "full" : "half");
995 
996  if (bp->flow_ctrl) {
997  if (bp->flow_ctrl & FLOW_CTRL_RX) {
998  pr_cont(", receive ");
999  if (bp->flow_ctrl & FLOW_CTRL_TX)
1000  pr_cont("& transmit ");
1001  }
1002  else {
1003  pr_cont(", transmit ");
1004  }
1005  pr_cont("flow control ON");
1006  }
1007  pr_cont("\n");
1008  } else {
1009  netif_carrier_off(bp->dev);
1010  netdev_err(bp->dev, "NIC %s Link is Down\n",
1011  bnx2_xceiver_str(bp));
1012  }
1013 
1014  bnx2_report_fw_link(bp);
1015 }
1016 
1017 static void
1018 bnx2_resolve_flow_ctrl(struct bnx2 *bp)
1019 {
1020  u32 local_adv, remote_adv;
1021 
1022  bp->flow_ctrl = 0;
1023  if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
1024  (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
1025 
1026  if (bp->duplex == DUPLEX_FULL) {
1027  bp->flow_ctrl = bp->req_flow_ctrl;
1028  }
1029  return;
1030  }
1031 
1032  if (bp->duplex != DUPLEX_FULL) {
1033  return;
1034  }
1035 
1036  if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1037  (CHIP_NUM(bp) == CHIP_NUM_5708)) {
1038  u32 val;
1039 
1040  bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1041  if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
1042  bp->flow_ctrl |= FLOW_CTRL_TX;
1043  if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
1044  bp->flow_ctrl |= FLOW_CTRL_RX;
1045  return;
1046  }
1047 
1048  bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1049  bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1050 
1051  if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1052  u32 new_local_adv = 0;
1053  u32 new_remote_adv = 0;
1054 
1055  if (local_adv & ADVERTISE_1000XPAUSE)
1056  new_local_adv |= ADVERTISE_PAUSE_CAP;
1057  if (local_adv & ADVERTISE_1000XPSE_ASYM)
1058  new_local_adv |= ADVERTISE_PAUSE_ASYM;
1059  if (remote_adv & ADVERTISE_1000XPAUSE)
1060  new_remote_adv |= ADVERTISE_PAUSE_CAP;
1061  if (remote_adv & ADVERTISE_1000XPSE_ASYM)
1062  new_remote_adv |= ADVERTISE_PAUSE_ASYM;
1063 
1064  local_adv = new_local_adv;
1065  remote_adv = new_remote_adv;
1066  }
1067 
1068  /* See Table 28B-3 of 802.3ab-1999 spec. */
1069  if (local_adv & ADVERTISE_PAUSE_CAP) {
1070  if(local_adv & ADVERTISE_PAUSE_ASYM) {
1071  if (remote_adv & ADVERTISE_PAUSE_CAP) {
1072  bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
1073  }
1074  else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
1075  bp->flow_ctrl = FLOW_CTRL_RX;
1076  }
1077  }
1078  else {
1079  if (remote_adv & ADVERTISE_PAUSE_CAP) {
1080  bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
1081  }
1082  }
1083  }
1084  else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1085  if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
1086  (remote_adv & ADVERTISE_PAUSE_ASYM)) {
1087 
1088  bp->flow_ctrl = FLOW_CTRL_TX;
1089  }
1090  }
1091 }
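
The cascade of tests above implements the pause resolution of IEEE 802.3 Annex 28B (Table 28B-3) once the SerDes pause bits have been remapped onto the copper encoding; summarized as a truth table:

/* local PAUSE  local ASYM   remote PAUSE  remote ASYM   resolved flow_ctrl
 *      1           x             1             x        FLOW_CTRL_TX | RX
 *      1           1             0             1        FLOW_CTRL_RX
 *      0           1             1             1        FLOW_CTRL_TX
 *  any other combination                                none
 */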
1092 
1093 static int
1094 bnx2_5709s_linkup(struct bnx2 *bp)
1095 {
1096  u32 val, speed;
1097 
1098  bp->link_up = 1;
1099 
1100  bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
1101  bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
1102  bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1103 
1104  if ((bp->autoneg & AUTONEG_SPEED) == 0) {
1105  bp->line_speed = bp->req_line_speed;
1106  bp->duplex = bp->req_duplex;
1107  return 0;
1108  }
1109  speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
1110  switch (speed) {
1111  case MII_BNX2_GP_TOP_AN_SPEED_10:
1112  bp->line_speed = SPEED_10;
1113  break;
1114  case MII_BNX2_GP_TOP_AN_SPEED_100:
1115  bp->line_speed = SPEED_100;
1116  break;
1117  case MII_BNX2_GP_TOP_AN_SPEED_1G:
1118  case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
1119  bp->line_speed = SPEED_1000;
1120  break;
1121  case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
1122  bp->line_speed = SPEED_2500;
1123  break;
1124  }
1125  if (val & MII_BNX2_GP_TOP_AN_FD)
1126  bp->duplex = DUPLEX_FULL;
1127  else
1128  bp->duplex = DUPLEX_HALF;
1129  return 0;
1130 }
1131 
1132 static int
1133 bnx2_5708s_linkup(struct bnx2 *bp)
1134 {
1135  u32 val;
1136 
1137  bp->link_up = 1;
1138  bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1139  switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
1140  case BCM5708S_1000X_STAT1_SPEED_10:
1141  bp->line_speed = SPEED_10;
1142  break;
1143  case BCM5708S_1000X_STAT1_SPEED_100:
1144  bp->line_speed = SPEED_100;
1145  break;
1146  case BCM5708S_1000X_STAT1_SPEED_1G:
1147  bp->line_speed = SPEED_1000;
1148  break;
1149  case BCM5708S_1000X_STAT1_SPEED_2G5:
1150  bp->line_speed = SPEED_2500;
1151  break;
1152  }
1153  if (val & BCM5708S_1000X_STAT1_FD)
1154  bp->duplex = DUPLEX_FULL;
1155  else
1156  bp->duplex = DUPLEX_HALF;
1157 
1158  return 0;
1159 }
1160 
1161 static int
1162 bnx2_5706s_linkup(struct bnx2 *bp)
1163 {
1164  u32 bmcr, local_adv, remote_adv, common;
1165 
1166  bp->link_up = 1;
1167  bp->line_speed = SPEED_1000;
1168 
1169  bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1170  if (bmcr & BMCR_FULLDPLX) {
1171  bp->duplex = DUPLEX_FULL;
1172  }
1173  else {
1174  bp->duplex = DUPLEX_HALF;
1175  }
1176 
1177  if (!(bmcr & BMCR_ANENABLE)) {
1178  return 0;
1179  }
1180 
1181  bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1182  bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1183 
1184  common = local_adv & remote_adv;
1185  if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
1186 
1187  if (common & ADVERTISE_1000XFULL) {
1188  bp->duplex = DUPLEX_FULL;
1189  }
1190  else {
1191  bp->duplex = DUPLEX_HALF;
1192  }
1193  }
1194 
1195  return 0;
1196 }
1197 
1198 static int
1199 bnx2_copper_linkup(struct bnx2 *bp)
1200 {
1201  u32 bmcr;
1202 
1203  bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1204  if (bmcr & BMCR_ANENABLE) {
1205  u32 local_adv, remote_adv, common;
1206 
1207  bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
1208  bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
1209 
1210  common = local_adv & (remote_adv >> 2);
1211  if (common & ADVERTISE_1000FULL) {
1212  bp->line_speed = SPEED_1000;
1213  bp->duplex = DUPLEX_FULL;
1214  }
1215  else if (common & ADVERTISE_1000HALF) {
1216  bp->line_speed = SPEED_1000;
1217  bp->duplex = DUPLEX_HALF;
1218  }
1219  else {
1220  bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1221  bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1222 
1223  common = local_adv & remote_adv;
1224  if (common & ADVERTISE_100FULL) {
1225  bp->line_speed = SPEED_100;
1226  bp->duplex = DUPLEX_FULL;
1227  }
1228  else if (common & ADVERTISE_100HALF) {
1229  bp->line_speed = SPEED_100;
1230  bp->duplex = DUPLEX_HALF;
1231  }
1232  else if (common & ADVERTISE_10FULL) {
1233  bp->line_speed = SPEED_10;
1234  bp->duplex = DUPLEX_FULL;
1235  }
1236  else if (common & ADVERTISE_10HALF) {
1237  bp->line_speed = SPEED_10;
1238  bp->duplex = DUPLEX_HALF;
1239  }
1240  else {
1241  bp->line_speed = 0;
1242  bp->link_up = 0;
1243  }
1244  }
1245  }
1246  else {
1247  if (bmcr & BMCR_SPEED100) {
1248  bp->line_speed = SPEED_100;
1249  }
1250  else {
1251  bp->line_speed = SPEED_10;
1252  }
1253  if (bmcr & BMCR_FULLDPLX) {
1254  bp->duplex = DUPLEX_FULL;
1255  }
1256  else {
1257  bp->duplex = DUPLEX_HALF;
1258  }
1259  }
1260 
1261  return 0;
1262 }
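
A note on the `local_adv & (remote_adv >> 2)` expression above: the link partner's 1000BASE-T ability bits in MII_STAT1000 sit exactly two positions left of the matching advertisement bits in MII_CTRL1000, so shifting the status word right by two aligns the masks for the AND (values from linux/mii.h):

/* ADVERTISE_1000FULL = 0x0200, ADVERTISE_1000HALF = 0x0100  (CTRL1000)
 * LPA_1000FULL       = 0x0800, LPA_1000HALF       = 0x0400  (STAT1000)
 * (remote_adv >> 2) maps 0x0800 -> 0x0200 and 0x0400 -> 0x0100.
 */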
1263 
1264 static void
1265 bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
1266 {
1267  u32 val, rx_cid_addr = GET_CID_ADDR(cid);
1268 
1269  val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
1270  BNX2_L2CTX_CTX_TYPE_SIZE_L2;
1271  val |= 0x02 << 8;
1272 
1273  if (bp->flow_ctrl & FLOW_CTRL_TX)
1274  val |= BNX2_L2CTX_FLOW_CTRL_ENABLE;
1275 
1276  bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
1277 }
1278 
1279 static void
1280 bnx2_init_all_rx_contexts(struct bnx2 *bp)
1281 {
1282  int i;
1283  u32 cid;
1284 
1285  for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1286  if (i == 1)
1287  cid = RX_RSS_CID;
1288  bnx2_init_rx_context(bp, cid);
1289  }
1290 }
1291 
1292 static void
1293 bnx2_set_mac_link(struct bnx2 *bp)
1294 {
1295  u32 val;
1296 
1297  REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
1298  if (bp->link_up && (bp->line_speed == SPEED_1000) &&
1299  (bp->duplex == DUPLEX_HALF)) {
1300  REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
1301  }
1302 
1303  /* Configure the EMAC mode register. */
1304  val = REG_RD(bp, BNX2_EMAC_MODE);
1305 
1306  val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1307  BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
1308  BNX2_EMAC_MODE_25G_MODE);
1309 
1310  if (bp->link_up) {
1311  switch (bp->line_speed) {
1312  case SPEED_10:
1313  if (CHIP_NUM(bp) != CHIP_NUM_5706) {
1314  val |= BNX2_EMAC_MODE_PORT_MII_10M;
1315  break;
1316  }
1317  /* fall through */
1318  case SPEED_100:
1319  val |= BNX2_EMAC_MODE_PORT_MII;
1320  break;
1321  case SPEED_2500:
1322  val |= BNX2_EMAC_MODE_25G_MODE;
1323  /* fall through */
1324  case SPEED_1000:
1325  val |= BNX2_EMAC_MODE_PORT_GMII;
1326  break;
1327  }
1328  }
1329  else {
1330  val |= BNX2_EMAC_MODE_PORT_GMII;
1331  }
1332 
1333  /* Set the MAC to operate in the appropriate duplex mode. */
1334  if (bp->duplex == DUPLEX_HALF)
1335  val |= BNX2_EMAC_MODE_HALF_DUPLEX;
1336  REG_WR(bp, BNX2_EMAC_MODE, val);
1337 
1338  /* Enable/disable rx PAUSE. */
1339  bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
1340 
1341  if (bp->flow_ctrl & FLOW_CTRL_RX)
1342  bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
1343  REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
1344 
1345  /* Enable/disable tx PAUSE. */
1346  val = REG_RD(bp, BNX2_EMAC_TX_MODE);
1347  val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
1348 
1349  if (bp->flow_ctrl & FLOW_CTRL_TX)
1350  val |= BNX2_EMAC_TX_MODE_FLOW_EN;
1351  REG_WR(bp, BNX2_EMAC_TX_MODE, val);
1352 
1353  /* Acknowledge the interrupt. */
1354  REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
1355 
1356  bnx2_init_all_rx_contexts(bp);
1357 }
1358 
1359 static void
1360 bnx2_enable_bmsr1(struct bnx2 *bp)
1361 {
1362  if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1363  (CHIP_NUM(bp) == CHIP_NUM_5709))
1364  bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1365  MII_BNX2_BLK_ADDR_GP_STATUS);
1366 }
1367 
1368 static void
1369 bnx2_disable_bmsr1(struct bnx2 *bp)
1370 {
1371  if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1372  (CHIP_NUM(bp) == CHIP_NUM_5709))
1373  bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1374  MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1375 }
1376 
1377 static int
1378 bnx2_test_and_enable_2g5(struct bnx2 *bp)
1379 {
1380  u32 up1;
1381  int ret = 1;
1382 
1384  return 0;
1385 
1386  if (bp->autoneg & AUTONEG_SPEED)
1388 
1389  if (CHIP_NUM(bp) == CHIP_NUM_5709)
1390  bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1391 
1392  bnx2_read_phy(bp, bp->mii_up1, &up1);
1393  if (!(up1 & BCM5708S_UP1_2G5)) {
1394  up1 |= BCM5708S_UP1_2G5;
1395  bnx2_write_phy(bp, bp->mii_up1, up1);
1396  ret = 0;
1397  }
1398 
1399  if (CHIP_NUM(bp) == CHIP_NUM_5709)
1400  bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1401  MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1402 
1403  return ret;
1404 }
1405 
1406 static int
1407 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1408 {
1409  u32 up1;
1410  int ret = 0;
1411 
1412  if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1413  return 0;
1414 
1415  if (CHIP_NUM(bp) == CHIP_NUM_5709)
1416  bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1417 
1418  bnx2_read_phy(bp, bp->mii_up1, &up1);
1419  if (up1 & BCM5708S_UP1_2G5) {
1420  up1 &= ~BCM5708S_UP1_2G5;
1421  bnx2_write_phy(bp, bp->mii_up1, up1);
1422  ret = 1;
1423  }
1424 
1425  if (CHIP_NUM(bp) == CHIP_NUM_5709)
1426  bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1427  MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1428 
1429  return ret;
1430 }
1431 
1432 static void
1433 bnx2_enable_forced_2g5(struct bnx2 *bp)
1434 {
1435  u32 uninitialized_var(bmcr);
1436  int err;
1437 
1438  if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1439  return;
1440 
1441  if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1442  u32 val;
1443 
1444  bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1445  MII_BNX2_BLK_ADDR_SERDES_DIG);
1446  if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
1447  val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1448  val |= MII_BNX2_SD_MISC1_FORCE |
1449  MII_BNX2_SD_MISC1_FORCE_SPEED_2_5G;
1450  bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1451  }
1452 
1453  bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1454  MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1455  err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1456 
1457  } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1458  err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1459  if (!err)
1460  bmcr |= BCM5708S_BMCR_FORCE_2500;
1461  } else {
1462  return;
1463  }
1464 
1465  if (err)
1466  return;
1467 
1468  if (bp->autoneg & AUTONEG_SPEED) {
1469  bmcr &= ~BMCR_ANENABLE;
1470  if (bp->req_duplex == DUPLEX_FULL)
1471  bmcr |= BMCR_FULLDPLX;
1472  }
1473  bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1474 }
1475 
1476 static void
1477 bnx2_disable_forced_2g5(struct bnx2 *bp)
1478 {
1479  u32 uninitialized_var(bmcr);
1480  int err;
1481 
1482  if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1483  return;
1484 
1485  if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1486  u32 val;
1487 
1488  bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1489  MII_BNX2_BLK_ADDR_SERDES_DIG);
1490  if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
1491  val &= ~MII_BNX2_SD_MISC1_FORCE;
1492  bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1493  }
1494 
1495  bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1496  MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1497  err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1498 
1499  } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1500  err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1501  if (!err)
1502  bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1503  } else {
1504  return;
1505  }
1506 
1507  if (err)
1508  return;
1509 
1510  if (bp->autoneg & AUTONEG_SPEED)
1511  bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1512  bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1513 }
1514 
1515 static void
1516 bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
1517 {
1518  u32 val;
1519 
1520  bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
1521  bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1522  if (start)
1523  bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
1524  else
1525  bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
1526 }
1527 
1528 static int
1529 bnx2_set_link(struct bnx2 *bp)
1530 {
1531  u32 bmsr;
1532  u8 link_up;
1533 
1534  if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
1535  bp->link_up = 1;
1536  return 0;
1537  }
1538 
1539  if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
1540  return 0;
1541 
1542  link_up = bp->link_up;
1543 
1544  bnx2_enable_bmsr1(bp);
1545  bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1546  bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1547  bnx2_disable_bmsr1(bp);
1548 
1549  if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1550  (CHIP_NUM(bp) == CHIP_NUM_5706)) {
1551  u32 val, an_dbg;
1552 
1553  if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
1554  bnx2_5706s_force_link_dn(bp, 0);
1555  bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
1556  }
1557  val = REG_RD(bp, BNX2_EMAC_STATUS);
1558 
1559  bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
1560  bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
1561  bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
1562 
1563  if ((val & BNX2_EMAC_STATUS_LINK) &&
1564  !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
1565  bmsr |= BMSR_LSTATUS;
1566  else
1567  bmsr &= ~BMSR_LSTATUS;
1568  }
1569 
1570  if (bmsr & BMSR_LSTATUS) {
1571  bp->link_up = 1;
1572 
1573  if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1574  if (CHIP_NUM(bp) == CHIP_NUM_5706)
1575  bnx2_5706s_linkup(bp);
1576  else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1577  bnx2_5708s_linkup(bp);
1578  else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1579  bnx2_5709s_linkup(bp);
1580  }
1581  else {
1582  bnx2_copper_linkup(bp);
1583  }
1584  bnx2_resolve_flow_ctrl(bp);
1585  }
1586  else {
1587  if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1588  (bp->autoneg & AUTONEG_SPEED))
1589  bnx2_disable_forced_2g5(bp);
1590 
1591  if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
1592  u32 bmcr;
1593 
1594  bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1595  bmcr |= BMCR_ANENABLE;
1596  bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1597 
1598  bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
1599  }
1600  bp->link_up = 0;
1601  }
1602 
1603  if (bp->link_up != link_up) {
1604  bnx2_report_link(bp);
1605  }
1606 
1607  bnx2_set_mac_link(bp);
1608 
1609  return 0;
1610 }
1611 
1612 static int
1613 bnx2_reset_phy(struct bnx2 *bp)
1614 {
1615  int i;
1616  u32 reg;
1617 
1618  bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1619 
1620 #define PHY_RESET_MAX_WAIT 100
1621  for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1622  udelay(10);
1623 
1624  bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1625  if (!(reg & BMCR_RESET)) {
1626  udelay(20);
1627  break;
1628  }
1629  }
1630  if (i == PHY_RESET_MAX_WAIT) {
1631  return -EBUSY;
1632  }
1633  return 0;
1634 }
1635 
1636 static u32
1637 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1638 {
1639  u32 adv = 0;
1640 
1641  if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1642  (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1643 
1644  if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1645  adv = ADVERTISE_1000XPAUSE;
1646  }
1647  else {
1648  adv = ADVERTISE_PAUSE_CAP;
1649  }
1650  }
1651  else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1652  if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1653  adv = ADVERTISE_1000XPSE_ASYM;
1654  }
1655  else {
1656  adv = ADVERTISE_PAUSE_ASYM;
1657  }
1658  }
1659  else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1660  if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1661  adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1662  }
1663  else {
1664  adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1665  }
1666  }
1667  return adv;
1668 }
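
The advertisement encoding chosen above, side by side (1000BASE-X code words for SerDes ports versus the copper MII bits):

/* requested flow ctrl   SerDes advertisement          copper advertisement
 * RX | TX               1000XPAUSE                    PAUSE_CAP
 * TX only               1000XPSE_ASYM                 PAUSE_ASYM
 * RX only               1000XPAUSE | 1000XPSE_ASYM    PAUSE_CAP | PAUSE_ASYM
 */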
1669 
1670 static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
1671 
1672 static int
1673 bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1674 __releases(&bp->phy_lock)
1675 __acquires(&bp->phy_lock)
1676 {
1677  u32 speed_arg = 0, pause_adv;
1678 
1679  pause_adv = bnx2_phy_get_pause_adv(bp);
1680 
1681  if (bp->autoneg & AUTONEG_SPEED) {
1682  speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1683  if (bp->advertising & ADVERTISED_10baseT_Half)
1684  speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1685  if (bp->advertising & ADVERTISED_10baseT_Full)
1686  speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1687  if (bp->advertising & ADVERTISED_100baseT_Half)
1688  speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1689  if (bp->advertising & ADVERTISED_100baseT_Full)
1690  speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1691  if (bp->advertising & ADVERTISED_1000baseT_Full)
1692  speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1693  if (bp->advertising & ADVERTISED_2500baseX_Full)
1694  speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1695  } else {
1696  if (bp->req_line_speed == SPEED_2500)
1697  speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1698  else if (bp->req_line_speed == SPEED_1000)
1699  speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1700  else if (bp->req_line_speed == SPEED_100) {
1701  if (bp->req_duplex == DUPLEX_FULL)
1702  speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1703  else
1704  speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1705  } else if (bp->req_line_speed == SPEED_10) {
1706  if (bp->req_duplex == DUPLEX_FULL)
1707  speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1708  else
1709  speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1710  }
1711  }
1712 
1713  if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1714  speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1715  if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
1716  speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1717 
1718  if (port == PORT_TP)
1719  speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1720  BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1721 
1722  bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);
1723 
1724  spin_unlock_bh(&bp->phy_lock);
1725  bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
1726  spin_lock_bh(&bp->phy_lock);
1727 
1728  return 0;
1729 }
1730 
1731 static int
1732 bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
1733 __releases(&bp->phy_lock)
1734 __acquires(&bp->phy_lock)
1735 {
1736  u32 adv, bmcr;
1737  u32 new_adv = 0;
1738 
1739  if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
1740  return bnx2_setup_remote_phy(bp, port);
1741 
1742  if (!(bp->autoneg & AUTONEG_SPEED)) {
1743  u32 new_bmcr;
1744  int force_link_down = 0;
1745 
1746  if (bp->req_line_speed == SPEED_2500) {
1747  if (!bnx2_test_and_enable_2g5(bp))
1748  force_link_down = 1;
1749  } else if (bp->req_line_speed == SPEED_1000) {
1750  if (bnx2_test_and_disable_2g5(bp))
1751  force_link_down = 1;
1752  }
1753  bnx2_read_phy(bp, bp->mii_adv, &adv);
1754  adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
1755 
1756  bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1757  new_bmcr = bmcr & ~BMCR_ANENABLE;
1758  new_bmcr |= BMCR_SPEED1000;
1759 
1760  if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1761  if (bp->req_line_speed == SPEED_2500)
1762  bnx2_enable_forced_2g5(bp);
1763  else if (bp->req_line_speed == SPEED_1000) {
1764  bnx2_disable_forced_2g5(bp);
1765  new_bmcr &= ~0x2000;
1766  }
1767 
1768  } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1769  if (bp->req_line_speed == SPEED_2500)
1770  new_bmcr |= BCM5708S_BMCR_FORCE_2500;
1771  else
1772  new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
1773  }
1774 
1775  if (bp->req_duplex == DUPLEX_FULL) {
1776  adv |= ADVERTISE_1000XFULL;
1777  new_bmcr |= BMCR_FULLDPLX;
1778  }
1779  else {
1780  adv |= ADVERTISE_1000XHALF;
1781  new_bmcr &= ~BMCR_FULLDPLX;
1782  }
1783  if ((new_bmcr != bmcr) || (force_link_down)) {
1784  /* Force a link down visible on the other side */
1785  if (bp->link_up) {
1786  bnx2_write_phy(bp, bp->mii_adv, adv &
1787  ~(ADVERTISE_1000XFULL |
1788  ADVERTISE_1000XHALF));
1789  bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
1790  BMCR_ANRESTART | BMCR_ANENABLE);
1791 
1792  bp->link_up = 0;
1793  netif_carrier_off(bp->dev);
1794  bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1795  bnx2_report_link(bp);
1796  }
1797  bnx2_write_phy(bp, bp->mii_adv, adv);
1798  bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1799  } else {
1800  bnx2_resolve_flow_ctrl(bp);
1801  bnx2_set_mac_link(bp);
1802  }
1803  return 0;
1804  }
1805 
1806  bnx2_test_and_enable_2g5(bp);
1807 
1808  if (bp->advertising & ADVERTISED_1000baseT_Full)
1809  new_adv |= ADVERTISE_1000XFULL;
1810 
1811  new_adv |= bnx2_phy_get_pause_adv(bp);
1812 
1813  bnx2_read_phy(bp, bp->mii_adv, &adv);
1814  bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1815 
1816  bp->serdes_an_pending = 0;
1817  if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1818  /* Force a link down visible on the other side */
1819  if (bp->link_up) {
1820  bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
1821  spin_unlock_bh(&bp->phy_lock);
1822  msleep(20);
1823  spin_lock_bh(&bp->phy_lock);
1824  }
1825 
1826  bnx2_write_phy(bp, bp->mii_adv, new_adv);
1827  bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
1828  BMCR_ANENABLE);
1829  /* Speed up link-up time when the link partner
1830  * does not autonegotiate which is very common
1831  * in blade servers. Some blade servers use
1832  * IPMI for keyboard input, and it's important
1833  * to minimize link disruptions. Autoneg. involves
1834  * exchanging base pages plus 3 next pages and
1835  * normally completes in about 120 msec.
1836  */
1837  bp->current_interval = SERDES_AN_TIMEOUT;
1838  bp->serdes_an_pending = 1;
1839  mod_timer(&bp->timer, jiffies + bp->current_interval);
1840  } else {
1841  bnx2_resolve_flow_ctrl(bp);
1842  bnx2_set_mac_link(bp);
1843  }
1844 
1845  return 0;
1846 }
1847 
1848 #define ETHTOOL_ALL_FIBRE_SPEED \
1849  (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ? \
1850  (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
1851  (ADVERTISED_1000baseT_Full)
1852 
1853 #define ETHTOOL_ALL_COPPER_SPEED \
1854  (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1855  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1856  ADVERTISED_1000baseT_Full)
1857 
1858 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1859  ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1860 
1861 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1862 
1863 static void
1864 bnx2_set_default_remote_link(struct bnx2 *bp)
1865 {
1866  u32 link;
1867 
1868  if (bp->phy_port == PORT_TP)
1869  link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
1870  else
1871  link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
1872 
1873  if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1874  bp->req_line_speed = 0;
1875  bp->autoneg |= AUTONEG_SPEED;
1876  bp->advertising = ADVERTISED_Autoneg;
1877  if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1878  bp->advertising |= ADVERTISED_10baseT_Half;
1879  if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1880  bp->advertising |= ADVERTISED_10baseT_Full;
1881  if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1882  bp->advertising |= ADVERTISED_100baseT_Half;
1883  if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1884  bp->advertising |= ADVERTISED_100baseT_Full;
1885  if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1886  bp->advertising |= ADVERTISED_1000baseT_Full;
1887  if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1888  bp->advertising |= ADVERTISED_2500baseX_Full;
1889  } else {
1890  bp->autoneg = 0;
1891  bp->advertising = 0;
1892  bp->req_duplex = DUPLEX_FULL;
1893  if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1894  bp->req_line_speed = SPEED_10;
1895  if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1896  bp->req_duplex = DUPLEX_HALF;
1897  }
1898  if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1899  bp->req_line_speed = SPEED_100;
1900  if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1901  bp->req_duplex = DUPLEX_HALF;
1902  }
1903  if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1904  bp->req_line_speed = SPEED_1000;
1905  if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1906  bp->req_line_speed = SPEED_2500;
1907  }
1908 }
1909 
1910 static void
1911 bnx2_set_default_link(struct bnx2 *bp)
1912 {
1913  if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1914  bnx2_set_default_remote_link(bp);
1915  return;
1916  }
1917 
1918  bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1919  bp->req_line_speed = 0;
1920  if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1921  u32 reg;
1922 
1923  bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1924 
1925  reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1926  reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1927  if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1928  bp->autoneg = 0;
1929  bp->req_line_speed = bp->line_speed = SPEED_1000;
1930  bp->req_duplex = DUPLEX_FULL;
1931  }
1932  } else
1933  bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1934 }
1935 
1936 static void
1937 bnx2_send_heart_beat(struct bnx2 *bp)
1938 {
1939  u32 msg;
1940  u32 addr;
1941 
1942  spin_lock(&bp->indirect_lock);
1943  msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1944  addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1945  REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1946  REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1947  spin_unlock(&bp->indirect_lock);
1948 }
1949 
1950 static void
1951 bnx2_remote_phy_event(struct bnx2 *bp)
1952 {
1953  u32 msg;
1954  u8 link_up = bp->link_up;
1955  u8 old_port;
1956 
1957  msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
1958 
1960  bnx2_send_heart_beat(bp);
1961 
1962  msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;
1963 
1964  if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
1965  bp->link_up = 0;
1966  else {
1967  u32 speed;
1968 
1969  bp->link_up = 1;
1970  speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
1971  bp->duplex = DUPLEX_FULL;
1972  switch (speed) {
1973  case BNX2_LINK_STATUS_10HALF:
1974  bp->duplex = DUPLEX_HALF;
1975  /* fall through */
1976  case BNX2_LINK_STATUS_10FULL:
1977  bp->line_speed = SPEED_10;
1978  break;
1979  case BNX2_LINK_STATUS_100HALF:
1980  bp->duplex = DUPLEX_HALF;
1981  /* fall through */
1982  case BNX2_LINK_STATUS_100BASE_T4:
1983  case BNX2_LINK_STATUS_100FULL:
1984  bp->line_speed = SPEED_100;
1985  break;
1986  case BNX2_LINK_STATUS_1000HALF:
1987  bp->duplex = DUPLEX_HALF;
1988  /* fall through */
1989  case BNX2_LINK_STATUS_1000FULL:
1990  bp->line_speed = SPEED_1000;
1991  break;
1992  case BNX2_LINK_STATUS_2500HALF:
1993  bp->duplex = DUPLEX_HALF;
1994  /* fall through */
1995  case BNX2_LINK_STATUS_2500FULL:
1996  bp->line_speed = SPEED_2500;
1997  break;
1998  default:
1999  bp->line_speed = 0;
2000  break;
2001  }
2002 
2003  bp->flow_ctrl = 0;
2004  if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
2005  (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
2006  if (bp->duplex == DUPLEX_FULL)
2007  bp->flow_ctrl = bp->req_flow_ctrl;
2008  } else {
2009  if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
2010  bp->flow_ctrl |= FLOW_CTRL_TX;
2011  if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
2012  bp->flow_ctrl |= FLOW_CTRL_RX;
2013  }
2014 
2015  old_port = bp->phy_port;
2016  if (msg & BNX2_LINK_STATUS_SERDES_LINK)
2017  bp->phy_port = PORT_FIBRE;
2018  else
2019  bp->phy_port = PORT_TP;
2020 
2021  if (old_port != bp->phy_port)
2022  bnx2_set_default_link(bp);
2023 
2024  }
2025  if (bp->link_up != link_up)
2026  bnx2_report_link(bp);
2027 
2028  bnx2_set_mac_link(bp);
2029 }
2030 
2031 static int
2032 bnx2_set_remote_link(struct bnx2 *bp)
2033 {
2034  u32 evt_code;
2035 
2036  evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
2037  switch (evt_code) {
2038  case BNX2_FW_EVT_CODE_LINK_EVENT:
2039  bnx2_remote_phy_event(bp);
2040  break;
2041  case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
2042  default:
2043  bnx2_send_heart_beat(bp);
2044  break;
2045  }
2046  return 0;
2047 }
2048 
2049 static int
2050 bnx2_setup_copper_phy(struct bnx2 *bp)
2051 __releases(&bp->phy_lock)
2052 __acquires(&bp->phy_lock)
2053 {
2054  u32 bmcr;
2055  u32 new_bmcr;
2056 
2057  bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
2058 
2059  if (bp->autoneg & AUTONEG_SPEED) {
2060  u32 adv_reg, adv1000_reg;
2061  u32 new_adv = 0;
2062  u32 new_adv1000 = 0;
2063 
2064  bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
2065  adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
2066  ADVERTISE_PAUSE_ASYM);
2067 
2068  bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
2069  adv1000_reg &= PHY_ALL_1000_SPEED;
2070 
2071  new_adv = ethtool_adv_to_mii_adv_t(bp->advertising);
2072  new_adv |= ADVERTISE_CSMA;
2073  new_adv |= bnx2_phy_get_pause_adv(bp);
2074 
2075  new_adv1000 |= ethtool_adv_to_mii_ctrl1000_t(bp->advertising);
2076 
2077  if ((adv1000_reg != new_adv1000) ||
2078  (adv_reg != new_adv) ||
2079  ((bmcr & BMCR_ANENABLE) == 0)) {
2080 
2081  bnx2_write_phy(bp, bp->mii_adv, new_adv);
2082  bnx2_write_phy(bp, MII_CTRL1000, new_adv1000);
2083  bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
2084  BMCR_ANENABLE);
2085  }
2086  else if (bp->link_up) {
2087  /* Flow ctrl may have changed from auto to forced */
2088  /* or vice-versa. */
2089 
2090  bnx2_resolve_flow_ctrl(bp);
2091  bnx2_set_mac_link(bp);
2092  }
2093  return 0;
2094  }
2095 
2096  new_bmcr = 0;
2097  if (bp->req_line_speed == SPEED_100) {
2098  new_bmcr |= BMCR_SPEED100;
2099  }
2100  if (bp->req_duplex == DUPLEX_FULL) {
2101  new_bmcr |= BMCR_FULLDPLX;
2102  }
2103  if (new_bmcr != bmcr) {
2104  u32 bmsr;
2105 
2106  bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2107  bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2108 
2109  if (bmsr & BMSR_LSTATUS) {
2110  /* Force link down */
2111  bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
2112  spin_unlock_bh(&bp->phy_lock);
2113  msleep(50);
2114  spin_lock_bh(&bp->phy_lock);
2115 
2116  bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2117  bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2118  }
2119 
2120  bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
2121 
2122  /* Normally, the new speed is setup after the link has
2123  * gone down and up again. In some cases, link will not go
2124  * down so we need to set up the new speed here.
2125  */
2126  if (bmsr & BMSR_LSTATUS) {
2127  bp->line_speed = bp->req_line_speed;
2128  bp->duplex = bp->req_duplex;
2129  bnx2_resolve_flow_ctrl(bp);
2130  bnx2_set_mac_link(bp);
2131  }
2132  } else {
2133  bnx2_resolve_flow_ctrl(bp);
2134  bnx2_set_mac_link(bp);
2135  }
2136  return 0;
2137 }
2138 
2139 static int
2140 bnx2_setup_phy(struct bnx2 *bp, u8 port)
2141 __releases(&bp->phy_lock)
2142 __acquires(&bp->phy_lock)
2143 {
2144  if (bp->loopback == MAC_LOOPBACK)
2145  return 0;
2146 
2147  if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2148  return bnx2_setup_serdes_phy(bp, port);
2149  }
2150  else {
2151  return bnx2_setup_copper_phy(bp);
2152  }
2153 }
2154 
2155 static int
2156 bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
2157 {
2158  u32 val;
2159 
2160  bp->mii_bmcr = MII_BMCR + 0x10;
2161  bp->mii_bmsr = MII_BMSR + 0x10;
2162  bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
2163  bp->mii_adv = MII_ADVERTISE + 0x10;
2164  bp->mii_lpa = MII_LPA + 0x10;
2165  bp->mii_up1 = MII_BNX2_OVER1G_UP1;
2166 
2167  bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
2168  bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
2169 
2170  bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2171  if (reset_phy)
2172  bnx2_reset_phy(bp);
2173 
2174  bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
2175 
2176  bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
2177  val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
2178  val |= MII_BNX2_SD_1000XCTL1_FIBER;
2179  bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
2180 
2181  bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
2182  bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
2183  if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
2184  val |= BCM5708S_UP1_2G5;
2185  else
2186  val &= ~BCM5708S_UP1_2G5;
2187  bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
2188 
2189  bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
2190  bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
2191  val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
2192  bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
2193 
2194  bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
2195 
2196  bnx2_read_phy(bp, MII_BNX2_CL73_BAM_CTL1, &val);
2197  val |= MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN | MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
2198  bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
2199 
2200  bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2201 
2202  return 0;
2203 }
2204 
2205 static int
2206 bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
2207 {
2208  u32 val;
2209 
2210  if (reset_phy)
2211  bnx2_reset_phy(bp);
2212 
2213  bp->mii_up1 = BCM5708S_UP1;
2214 
2215  bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
2216  bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
2217  bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2218 
2219  bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
2220  val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
2221  bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
2222 
2223  bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
2224  val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
2225  bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
2226 
2227  if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
2228  bnx2_read_phy(bp, BCM5708S_UP1, &val);
2229  val |= BCM5708S_UP1_2G5;
2230  bnx2_write_phy(bp, BCM5708S_UP1, val);
2231  }
2232 
2233  if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
2234  (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
2235  (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
2236  /* increase tx signal amplitude */
2237  bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2238  BCM5708S_BLK_ADDR_TX_MISC);
2239  bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
2240  val |= BCM5708S_TX_ACTL1_DRIVER_VCM;
2241  bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
2242  bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2243  }
2244 
2245  val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
2246  BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
2247 
2248  if (val) {
2249  u32 is_backplane;
2250 
2251  is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
2252  if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
2253  bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2254  BCM5708S_BLK_ADDR_TX_MISC);
2255  bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
2256  bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2257  BCM5708S_BLK_ADDR_DIG);
2258  }
2259  }
2260  return 0;
2261 }
2262 
2263 static int
2264 bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
2265 {
2266  if (reset_phy)
2267  bnx2_reset_phy(bp);
2268 
2269  bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
2270 
2271  if (CHIP_NUM(bp) == CHIP_NUM_5706)
2272  REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
2273 
2274  if (bp->dev->mtu > 1500) {
2275  u32 val;
2276 
2277  /* Set extended packet length bit */
2278  bnx2_write_phy(bp, 0x18, 0x7);
2279  bnx2_read_phy(bp, 0x18, &val);
2280  bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
2281 
2282  bnx2_write_phy(bp, 0x1c, 0x6c00);
2283  bnx2_read_phy(bp, 0x1c, &val);
2284  bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
2285  }
2286  else {
2287  u32 val;
2288 
2289  bnx2_write_phy(bp, 0x18, 0x7);
2290  bnx2_read_phy(bp, 0x18, &val);
2291  bnx2_write_phy(bp, 0x18, val & ~0x4007);
2292 
2293  bnx2_write_phy(bp, 0x1c, 0x6c00);
2294  bnx2_read_phy(bp, 0x1c, &val);
2295  bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
2296  }
2297 
2298  return 0;
2299 }
2300 
2301 static int
2302 bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
2303 {
2304  u32 val;
2305 
2306  if (reset_phy)
2307  bnx2_reset_phy(bp);
2308 
2309  if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
2310  bnx2_write_phy(bp, 0x18, 0x0c00);
2311  bnx2_write_phy(bp, 0x17, 0x000a);
2312  bnx2_write_phy(bp, 0x15, 0x310b);
2313  bnx2_write_phy(bp, 0x17, 0x201f);
2314  bnx2_write_phy(bp, 0x15, 0x9506);
2315  bnx2_write_phy(bp, 0x17, 0x401f);
2316  bnx2_write_phy(bp, 0x15, 0x14e2);
2317  bnx2_write_phy(bp, 0x18, 0x0400);
2318  }
2319 
2320  if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
2321  bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
2322  MII_BNX2_DSP_EXPAND_REG | 0x8);
2323  bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
2324  val &= ~(1 << 8);
2325  bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
2326  }
2327 
2328  if (bp->dev->mtu > 1500) {
2329  /* Set extended packet length bit */
2330  bnx2_write_phy(bp, 0x18, 0x7);
2331  bnx2_read_phy(bp, 0x18, &val);
2332  bnx2_write_phy(bp, 0x18, val | 0x4000);
2333 
2334  bnx2_read_phy(bp, 0x10, &val);
2335  bnx2_write_phy(bp, 0x10, val | 0x1);
2336  }
2337  else {
2338  bnx2_write_phy(bp, 0x18, 0x7);
2339  bnx2_read_phy(bp, 0x18, &val);
2340  bnx2_write_phy(bp, 0x18, val & ~0x4007);
2341 
2342  bnx2_read_phy(bp, 0x10, &val);
2343  bnx2_write_phy(bp, 0x10, val & ~0x1);
2344  }
2345 
2346  /* ethernet@wirespeed */
2347  bnx2_write_phy(bp, 0x18, 0x7007);
2348  bnx2_read_phy(bp, 0x18, &val);
2349  bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
2350  return 0;
2351 }
2352 
2353 
2354 static int
2355 bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2356 __releases(&bp->phy_lock)
2357 __acquires(&bp->phy_lock)
2358 {
2359  u32 val;
2360  int rc = 0;
2361 
2362  bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2363  bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2364 
2365  bp->mii_bmcr = MII_BMCR;
2366  bp->mii_bmsr = MII_BMSR;
2367  bp->mii_bmsr1 = MII_BMSR;
2368  bp->mii_adv = MII_ADVERTISE;
2369  bp->mii_lpa = MII_LPA;
2370 
2371  REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2372 
2373  if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2374  goto setup_phy;
2375 
2376  bnx2_read_phy(bp, MII_PHYSID1, &val);
2377  bp->phy_id = val << 16;
2378  bnx2_read_phy(bp, MII_PHYSID2, &val);
2379  bp->phy_id |= val & 0xffff;
2380 
2381  if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2382  if (CHIP_NUM(bp) == CHIP_NUM_5706)
2383  rc = bnx2_init_5706s_phy(bp, reset_phy);
2384  else if (CHIP_NUM(bp) == CHIP_NUM_5708)
2385  rc = bnx2_init_5708s_phy(bp, reset_phy);
2386  else if (CHIP_NUM(bp) == CHIP_NUM_5709)
2387  rc = bnx2_init_5709s_phy(bp, reset_phy);
2388  }
2389  else {
2390  rc = bnx2_init_copper_phy(bp, reset_phy);
2391  }
2392 
2393 setup_phy:
2394  if (!rc)
2395  rc = bnx2_setup_phy(bp, bp->phy_port);
2396 
2397  return rc;
2398 }
2399 
2400 static int
2401 bnx2_set_mac_loopback(struct bnx2 *bp)
2402 {
2403  u32 mac_mode;
2404 
2405  mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2406  mac_mode &= ~BNX2_EMAC_MODE_PORT;
2407  mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2408  REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2409  bp->link_up = 1;
2410  return 0;
2411 }
2412 
2413 static int bnx2_test_link(struct bnx2 *);
2414 
2415 static int
2416 bnx2_set_phy_loopback(struct bnx2 *bp)
2417 {
2418  u32 mac_mode;
2419  int rc, i;
2420 
2421  spin_lock_bh(&bp->phy_lock);
2422  rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2423  BMCR_SPEED1000);
2424  spin_unlock_bh(&bp->phy_lock);
2425  if (rc)
2426  return rc;
2427 
2428  for (i = 0; i < 10; i++) {
2429  if (bnx2_test_link(bp) == 0)
2430  break;
2431  msleep(100);
2432  }
2433 
2434  mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2435  mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2436  BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2437  BNX2_EMAC_MODE_25G_MODE);
2438 
2439  mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2440  REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2441  bp->link_up = 1;
2442  return 0;
2443 }
2444 
2445 static void
2446 bnx2_dump_mcp_state(struct bnx2 *bp)
2447 {
2448  struct net_device *dev = bp->dev;
2449  u32 mcp_p0, mcp_p1;
2450 
2451  netdev_err(dev, "<--- start MCP states dump --->\n");
2452  if (CHIP_NUM(bp) == CHIP_NUM_5709) {
2453  mcp_p0 = BNX2_MCP_STATE_P0;
2454  mcp_p1 = BNX2_MCP_STATE_P1;
2455  } else {
2456  mcp_p0 = BNX2_MCP_STATE_P0_5708;
2457  mcp_p1 = BNX2_MCP_STATE_P1_5708;
2458  }
2459  netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
2460  bnx2_reg_rd_ind(bp, mcp_p0), bnx2_reg_rd_ind(bp, mcp_p1));
2461  netdev_err(dev, "DEBUG: MCP mode[%08x] state[%08x] evt_mask[%08x]\n",
2462  bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_MODE),
2463  bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_STATE),
2464  bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_EVENT_MASK));
2465  netdev_err(dev, "DEBUG: pc[%08x] pc[%08x] instr[%08x]\n",
2466  bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
2467  bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
2468  bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_INSTRUCTION));
2469  netdev_err(dev, "DEBUG: shmem states:\n");
2470  netdev_err(dev, "DEBUG: drv_mb[%08x] fw_mb[%08x] link_status[%08x]",
2471  bnx2_shmem_rd(bp, BNX2_DRV_MB),
2472  bnx2_shmem_rd(bp, BNX2_FW_MB),
2473  bnx2_shmem_rd(bp, BNX2_LINK_STATUS));
2474  pr_cont(" drv_pulse_mb[%08x]\n", bnx2_shmem_rd(bp, BNX2_DRV_PULSE_MB));
2475  netdev_err(dev, "DEBUG: dev_info_signature[%08x] reset_type[%08x]",
2476  bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE),
2477  bnx2_shmem_rd(bp, BNX2_BC_STATE_RESET_TYPE));
2478  pr_cont(" condition[%08x]\n",
2479  bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION));
2481  DP_SHMEM_LINE(bp, 0x3cc);
2482  DP_SHMEM_LINE(bp, 0x3dc);
2483  DP_SHMEM_LINE(bp, 0x3ec);
2484  netdev_err(dev, "DEBUG: 0x3fc[%08x]\n", bnx2_shmem_rd(bp, 0x3fc));
2485  netdev_err(dev, "<--- end MCP states dump --->\n");
2486 }
2487 
2488 static int
2489 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
2490 {
2491  int i;
2492  u32 val;
2493 
2494  bp->fw_wr_seq++;
2495  msg_data |= bp->fw_wr_seq;
2496 
2497  bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2498 
2499  if (!ack)
2500  return 0;
2501 
2502  /* wait for an acknowledgement. */
2503  for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
2504  msleep(10);
2505 
2506  val = bnx2_shmem_rd(bp, BNX2_FW_MB);
2507 
2508  if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
2509  break;
2510  }
2511  if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
2512  return 0;
2513 
2514  /* If we timed out, inform the firmware that this is the case. */
2515  if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2516  msg_data &= ~BNX2_DRV_MSG_CODE;
2517  msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2518 
2519  bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2520  if (!silent) {
2521  pr_err("fw sync timeout, reset code = %x\n", msg_data);
2522  bnx2_dump_mcp_state(bp);
2523  }
2524 
2525  return -EBUSY;
2526  }
2527 
2528  if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
2529  return -EIO;
2530 
2531  return 0;
2532 }
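/* bnx2_fw_sync() above is the driver <-> bootcode handshake: the driver
 * tags each request with an incrementing sequence number, posts it to the
 * BNX2_DRV_MB shared-memory mailbox, and polls BNX2_FW_MB until the
 * firmware echoes the sequence back. A minimal sketch of the ack test,
 * using only names from this file:
 *
 *	bp->fw_wr_seq++;
 *	msg_data |= bp->fw_wr_seq;			// tag the request
 *	bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);	// post it
 *	val = bnx2_shmem_rd(bp, BNX2_FW_MB);		// poll for the echo
 *	acked = (val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ);
 *
 * On timeout the driver posts BNX2_DRV_MSG_CODE_FW_TIMEOUT so the firmware
 * knows the request was abandoned, and dumps the MCP state for debugging.
 */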
2533 
2534 static int
2535 bnx2_init_5709_context(struct bnx2 *bp)
2536 {
2537  int i, ret = 0;
2538  u32 val;
2539 
2540  val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2541  val |= (BCM_PAGE_BITS - 8) << 16;
2542  REG_WR(bp, BNX2_CTX_COMMAND, val);
2543  for (i = 0; i < 10; i++) {
2544  val = REG_RD(bp, BNX2_CTX_COMMAND);
2545  if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2546  break;
2547  udelay(2);
2548  }
2549  if (val & BNX2_CTX_COMMAND_MEM_INIT)
2550  return -EBUSY;
2551 
2552  for (i = 0; i < bp->ctx_pages; i++) {
2553  int j;
2554 
2555  if (bp->ctx_blk[i])
2556  memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
2557  else
2558  return -ENOMEM;
2559 
2560  REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2561  (bp->ctx_blk_mapping[i] & 0xffffffff) |
2562  BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2563  REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2564  (u64) bp->ctx_blk_mapping[i] >> 32);
2565  REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2566  BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
2567  for (j = 0; j < 10; j++) {
2568 
2569  val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2570  if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2571  break;
2572  udelay(5);
2573  }
2574  if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2575  ret = -EBUSY;
2576  break;
2577  }
2578  }
2579  return ret;
2580 }
2581 
2582 static void
2583 bnx2_init_context(struct bnx2 *bp)
2584 {
2585  u32 vcid;
2586 
2587  vcid = 96;
2588  while (vcid) {
2589  u32 vcid_addr, pcid_addr, offset;
2590  int i;
2591 
2592  vcid--;
2593 
2594  if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
2595  u32 new_vcid;
2596 
2597  vcid_addr = GET_PCID_ADDR(vcid);
2598  if (vcid & 0x8) {
2599  new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2600  }
2601  else {
2602  new_vcid = vcid;
2603  }
2604  pcid_addr = GET_PCID_ADDR(new_vcid);
2605  }
2606  else {
2607  vcid_addr = GET_CID_ADDR(vcid);
2608  pcid_addr = vcid_addr;
2609  }
2610 
2611  for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2612  vcid_addr += (i << PHY_CTX_SHIFT);
2613  pcid_addr += (i << PHY_CTX_SHIFT);
2614 
2615  REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2616  REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2617 
2618  /* Zero out the context. */
2619  for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2620  bnx2_ctx_wr(bp, vcid_addr, offset, 0);
2621  }
2622  }
2623 }
2624 
2625 static int
2626 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2627 {
2628  u16 *good_mbuf;
2629  u32 good_mbuf_cnt;
2630  u32 val;
2631 
2632  good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2633  if (good_mbuf == NULL)
2634  return -ENOMEM;
2635 
2636  bnx2_reg_wr_ind(bp, BNX2_MISC_ENABLE_SET_BITS,
2637  BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2638 
2639  good_mbuf_cnt = 0;
2640 
2641  /* Allocate a bunch of mbufs and save the good ones in an array. */
2642  val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2643  while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2644  bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2645  BNX2_RBUF_COMMAND_ALLOC_REQ);
2646 
2647  val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2648 
2649  val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2650 
2651  /* The addresses with Bit 9 set are bad memory blocks. */
2652  if (!(val & (1 << 9))) {
2653  good_mbuf[good_mbuf_cnt] = (u16) val;
2654  good_mbuf_cnt++;
2655  }
2656 
2657  val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2658  }
2659 
2660  /* Free the good ones back to the mbuf pool thus discarding
2661  * all the bad ones. */
2662  while (good_mbuf_cnt) {
2663  good_mbuf_cnt--;
2664 
2665  val = good_mbuf[good_mbuf_cnt];
2666  val = (val << 9) | val | 1;
2667 
2668  bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2669  }
2670  kfree(good_mbuf);
2671  return 0;
2672 }
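/* The RX processor hands out internal mbuf clusters one at a time through
 * BNX2_RBUF_FW_BUF_ALLOC; an index that comes back with bit 9 set lives in
 * bad on-chip memory, so the loop above simply keeps it. Only good indices
 * are pushed back through BNX2_RBUF_FW_BUF_FREE, whose write-back encoding
 * (as used here) repeats the index in both bit fields plus a command bit:
 *
 *	val = good_mbuf[good_mbuf_cnt];
 *	val = (val << 9) | val | 1;	// index twice, plus the free bit
 *	bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
 *
 * The bad clusters are never freed, so the hardware can never hand them to
 * the RX path again -- they are leaked on purpose.
 */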
2673 
2674 static void
2675 bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2676 {
2677  u32 val;
2678 
2679  val = (mac_addr[0] << 8) | mac_addr[1];
2680 
2681  REG_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2682 
2683  val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2684  (mac_addr[4] << 8) | mac_addr[5];
2685 
2686  REG_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2687 }
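/* Each MAC-match slot is a register pair: MATCH0 carries the two
 * high-order address bytes, MATCH1 the remaining four. For a hypothetical
 * address 00:10:18:ab:cd:ef written to pos 0, the two writes above become:
 *
 *	REG_WR(bp, BNX2_EMAC_MAC_MATCH0, 0x00000010);	// bytes 00:10
 *	REG_WR(bp, BNX2_EMAC_MAC_MATCH1, 0x18abcdef);	// bytes 18:ab:cd:ef
 *
 * pos 0 holds the primary station address; higher slots are filled by
 * bnx2_set_rx_mode() below for perfect-match unicast filtering.
 */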
2688 
2689 static inline int
2690 bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2691 {
2692  dma_addr_t mapping;
2693  struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2694  struct rx_bd *rxbd =
2695  &rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
2696  struct page *page = alloc_page(gfp);
2697 
2698  if (!page)
2699  return -ENOMEM;
2700  mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE,
2701  PCI_DMA_FROMDEVICE);
2702  if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2703  __free_page(page);
2704  return -EIO;
2705  }
2706 
2707  rx_pg->page = page;
2708  dma_unmap_addr_set(rx_pg, mapping, mapping);
2709  rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2710  rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2711  return 0;
2712 }
2713 
2714 static void
2715 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2716 {
2717  struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2718  struct page *page = rx_pg->page;
2719 
2720  if (!page)
2721  return;
2722 
2723  dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping),
2724  PAGE_SIZE, PCI_DMA_FROMDEVICE);
2725 
2726  __free_page(page);
2727  rx_pg->page = NULL;
2728 }
2729 
2730 static inline int
2731 bnx2_alloc_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2732 {
2733  u8 *data;
2734  struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
2735  dma_addr_t mapping;
2736  struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
2737 
2738  data = kmalloc(bp->rx_buf_size, gfp);
2739  if (!data)
2740  return -ENOMEM;
2741 
2742  mapping = dma_map_single(&bp->pdev->dev,
2743  get_l2_fhdr(data),
2744  bp->rx_buf_use_size,
2745  PCI_DMA_FROMDEVICE);
2746  if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2747  kfree(data);
2748  return -EIO;
2749  }
2750 
2751  rx_buf->data = data;
2752  dma_unmap_addr_set(rx_buf, mapping, mapping);
2753 
2754  rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2755  rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2756 
2757  rxr->rx_prod_bseq += bp->rx_buf_use_size;
2758 
2759  return 0;
2760 }
2761 
2762 static int
2763 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2764 {
2765  struct status_block *sblk = bnapi->status_blk.msi;
2766  u32 new_link_state, old_link_state;
2767  int is_set = 1;
2768 
2769  new_link_state = sblk->status_attn_bits & event;
2770  old_link_state = sblk->status_attn_bits_ack & event;
2771  if (new_link_state != old_link_state) {
2772  if (new_link_state)
2773  REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2774  else
2775  REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2776  } else
2777  is_set = 0;
2778 
2779  return is_set;
2780 }
2781 
2782 static void
2783 bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2784 {
2785  spin_lock(&bp->phy_lock);
2786 
2787  if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
2788  bnx2_set_link(bp);
2789  if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2790  bnx2_set_remote_link(bp);
2791 
2792  spin_unlock(&bp->phy_lock);
2793 
2794 }
2795 
2796 static inline u16
2797 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2798 {
2799  u16 cons;
2800 
2801  /* Tell compiler that status block fields can change. */
2802  barrier();
2803  cons = *bnapi->hw_tx_cons_ptr;
2804  barrier();
2805  if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
2806  cons++;
2807  return cons;
2808 }
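/* The last descriptor of every ring page is reserved as a pointer to the
 * next page, so valid indices skip it. Assuming the usual 4K page with 256
 * descriptors (MAX_TX_DESC_CNT == 255), a raw consumer value of 255 really
 * means "first descriptor of the next page":
 *
 *	if ((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT)
 *		cons++;		// hop over the chain descriptor
 *
 * bnx2_get_hw_rx_cons() below applies the identical correction to the RX
 * ring index.
 */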
2809 
2810 static int
2811 bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2812 {
2813  struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
2814  u16 hw_cons, sw_cons, sw_ring_cons;
2815  int tx_pkt = 0, index;
2816  unsigned int tx_bytes = 0;
2817  struct netdev_queue *txq;
2818 
2819  index = (bnapi - bp->bnx2_napi);
2820  txq = netdev_get_tx_queue(bp->dev, index);
2821 
2822  hw_cons = bnx2_get_hw_tx_cons(bnapi);
2823  sw_cons = txr->tx_cons;
2824 
2825  while (sw_cons != hw_cons) {
2826  struct sw_tx_bd *tx_buf;
2827  struct sk_buff *skb;
2828  int i, last;
2829 
2830  sw_ring_cons = TX_RING_IDX(sw_cons);
2831 
2832  tx_buf = &txr->tx_buf_ring[sw_ring_cons];
2833  skb = tx_buf->skb;
2834 
2835  /* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
2836  prefetch(&skb->end);
2837 
2838  /* partial BD completions possible with TSO packets */
2839  if (tx_buf->is_gso) {
2840  u16 last_idx, last_ring_idx;
2841 
2842  last_idx = sw_cons + tx_buf->nr_frags + 1;
2843  last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
2844  if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
2845  last_idx++;
2846  }
2847  if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2848  break;
2849  }
2850  }
2851 
2852  dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
2853  skb_headlen(skb), PCI_DMA_TODEVICE);
2854 
2855  tx_buf->skb = NULL;
2856  last = tx_buf->nr_frags;
2857 
2858  for (i = 0; i < last; i++) {
2859  sw_cons = NEXT_TX_BD(sw_cons);
2860 
2861  dma_unmap_page(&bp->pdev->dev,
2862  dma_unmap_addr(
2863  &txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
2864  mapping),
2865  skb_frag_size(&skb_shinfo(skb)->frags[i]),
2866  PCI_DMA_TODEVICE);
2867  }
2868 
2869  sw_cons = NEXT_TX_BD(sw_cons);
2870 
2871  tx_bytes += skb->len;
2872  dev_kfree_skb(skb);
2873  tx_pkt++;
2874  if (tx_pkt == budget)
2875  break;
2876 
2877  if (hw_cons == sw_cons)
2878  hw_cons = bnx2_get_hw_tx_cons(bnapi);
2879  }
2880 
2881  netdev_tx_completed_queue(txq, tx_pkt, tx_bytes);
2882  txr->hw_tx_cons = hw_cons;
2883  txr->tx_cons = sw_cons;
2884 
2885  /* Need to make the tx_cons update visible to bnx2_start_xmit()
2886  * before checking for netif_tx_queue_stopped(). Without the
2887  * memory barrier, there is a small possibility that bnx2_start_xmit()
2888  * will miss it and cause the queue to be stopped forever.
2889  */
2890  smp_mb();
2891 
2892  if (unlikely(netif_tx_queue_stopped(txq)) &&
2893  (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
2894  __netif_tx_lock(txq, smp_processor_id());
2895  if ((netif_tx_queue_stopped(txq)) &&
2896  (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
2897  netif_tx_wake_queue(txq);
2898  __netif_tx_unlock(txq);
2899  }
2900 
2901  return tx_pkt;
2902 }
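/* The smp_mb() above pairs with a mirror-image barrier in the transmit
 * path (bnx2_start_xmit() is outside this excerpt); that side does
 * roughly:
 *
 *	if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
 *		netif_tx_stop_queue(txq);
 *		smp_mb();	// stop must be visible before the re-check
 *		if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
 *			netif_tx_wake_queue(txq);	// completion raced us
 *	}
 *
 * Without both barriers, a completion could publish the new tx_cons
 * between the producer's availability check and its stop, leaving the
 * queue stopped forever.
 */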
2903 
2904 static void
2905 bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2906  struct sk_buff *skb, int count)
2907 {
2908  struct sw_pg *cons_rx_pg, *prod_rx_pg;
2909  struct rx_bd *cons_bd, *prod_bd;
2910  int i;
2911  u16 hw_prod, prod;
2912  u16 cons = rxr->rx_pg_cons;
2913 
2914  cons_rx_pg = &rxr->rx_pg_ring[cons];
2915 
2916  /* The caller was unable to allocate a new page to replace the
2917  * last one in the frags array, so we need to recycle that page
2918  * and then free the skb.
2919  */
2920  if (skb) {
2921  struct page *page;
2922  struct skb_shared_info *shinfo;
2923 
2924  shinfo = skb_shinfo(skb);
2925  shinfo->nr_frags--;
2926  page = skb_frag_page(&shinfo->frags[shinfo->nr_frags]);
2927  __skb_frag_set_page(&shinfo->frags[shinfo->nr_frags], NULL);
2928 
2929  cons_rx_pg->page = page;
2930  dev_kfree_skb(skb);
2931  }
2932 
2933  hw_prod = rxr->rx_pg_prod;
2934 
2935  for (i = 0; i < count; i++) {
2936  prod = RX_PG_RING_IDX(hw_prod);
2937 
2938  prod_rx_pg = &rxr->rx_pg_ring[prod];
2939  cons_rx_pg = &rxr->rx_pg_ring[cons];
2940  cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2941  prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2942 
2943  if (prod != cons) {
2944  prod_rx_pg->page = cons_rx_pg->page;
2945  cons_rx_pg->page = NULL;
2946  dma_unmap_addr_set(prod_rx_pg, mapping,
2947  dma_unmap_addr(cons_rx_pg, mapping));
2948 
2949  prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2950  prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2951 
2952  }
2953  cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
2954  hw_prod = NEXT_RX_BD(hw_prod);
2955  }
2956  rxr->rx_pg_prod = hw_prod;
2957  rxr->rx_pg_cons = cons;
2958 }
2959 
2960 static inline void
2961 bnx2_reuse_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2962  u8 *data, u16 cons, u16 prod)
2963 {
2964  struct sw_bd *cons_rx_buf, *prod_rx_buf;
2965  struct rx_bd *cons_bd, *prod_bd;
2966 
2967  cons_rx_buf = &rxr->rx_buf_ring[cons];
2968  prod_rx_buf = &rxr->rx_buf_ring[prod];
2969 
2970  dma_sync_single_for_device(&bp->pdev->dev,
2971  dma_unmap_addr(cons_rx_buf, mapping),
2972  BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2973 
2974  rxr->rx_prod_bseq += bp->rx_buf_use_size;
2975 
2976  prod_rx_buf->data = data;
2977 
2978  if (cons == prod)
2979  return;
2980 
2981  dma_unmap_addr_set(prod_rx_buf, mapping,
2982  dma_unmap_addr(cons_rx_buf, mapping));
2983 
2984  cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2985  prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2986  prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2987  prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2988 }
2989 
2990 static struct sk_buff *
2991 bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u8 *data,
2992  unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
2993  u32 ring_idx)
2994 {
2995  int err;
2996  u16 prod = ring_idx & 0xffff;
2997  struct sk_buff *skb;
2998 
2999  err = bnx2_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
3000  if (unlikely(err)) {
3001  bnx2_reuse_rx_data(bp, rxr, data, (u16) (ring_idx >> 16), prod);
3002 error:
3003  if (hdr_len) {
3004  unsigned int raw_len = len + 4;
3005  int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
3006 
3007  bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
3008  }
3009  return NULL;
3010  }
3011 
3012  dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
3013  PCI_DMA_FROMDEVICE);
3014  skb = build_skb(data, 0);
3015  if (!skb) {
3016  kfree(data);
3017  goto error;
3018  }
3019  skb_reserve(skb, ((u8 *)get_l2_fhdr(data) - data) + BNX2_RX_OFFSET);
3020  if (hdr_len == 0) {
3021  skb_put(skb, len);
3022  return skb;
3023  } else {
3024  unsigned int i, frag_len, frag_size, pages;
3025  struct sw_pg *rx_pg;
3026  u16 pg_cons = rxr->rx_pg_cons;
3027  u16 pg_prod = rxr->rx_pg_prod;
3028 
3029  frag_size = len + 4 - hdr_len;
3030  pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
3031  skb_put(skb, hdr_len);
3032 
3033  for (i = 0; i < pages; i++) {
3034  dma_addr_t mapping_old;
3035 
3036  frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
3037  if (unlikely(frag_len <= 4)) {
3038  unsigned int tail = 4 - frag_len;
3039 
3040  rxr->rx_pg_cons = pg_cons;
3041  rxr->rx_pg_prod = pg_prod;
3042  bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
3043  pages - i);
3044  skb->len -= tail;
3045  if (i == 0) {
3046  skb->tail -= tail;
3047  } else {
3048  skb_frag_t *frag =
3049  &skb_shinfo(skb)->frags[i - 1];
3050  skb_frag_size_sub(frag, tail);
3051  skb->data_len -= tail;
3052  }
3053  return skb;
3054  }
3055  rx_pg = &rxr->rx_pg_ring[pg_cons];
3056 
3057  /* Don't unmap yet. If we're unable to allocate a new
3058  * page, we need to recycle the page and the DMA addr.
3059  */
3060  mapping_old = dma_unmap_addr(rx_pg, mapping);
3061  if (i == pages - 1)
3062  frag_len -= 4;
3063 
3064  skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
3065  rx_pg->page = NULL;
3066 
3067  err = bnx2_alloc_rx_page(bp, rxr,
3068  RX_PG_RING_IDX(pg_prod),
3069  GFP_ATOMIC);
3070  if (unlikely(err)) {
3071  rxr->rx_pg_cons = pg_cons;
3072  rxr->rx_pg_prod = pg_prod;
3073  bnx2_reuse_rx_skb_pages(bp, rxr, skb,
3074  pages - i);
3075  return NULL;
3076  }
3077 
3078  dma_unmap_page(&bp->pdev->dev, mapping_old,
3079  PAGE_SIZE, PCI_DMA_FROMDEVICE);
3080 
3081  frag_size -= frag_len;
3082  skb->data_len += frag_len;
3083  skb->truesize += PAGE_SIZE;
3084  skb->len += frag_len;
3085 
3086  pg_prod = NEXT_RX_BD(pg_prod);
3087  pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
3088  }
3089  rxr->rx_pg_prod = pg_prod;
3090  rxr->rx_pg_cons = pg_cons;
3091  }
3092  return skb;
3093 }
3094 
3095 static inline u16
3096 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
3097 {
3098  u16 cons;
3099 
3100  /* Tell compiler that status block fields can change. */
3101  barrier();
3102  cons = *bnapi->hw_rx_cons_ptr;
3103  barrier();
3104  if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
3105  cons++;
3106  return cons;
3107 }
3108 
3109 static int
3110 bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
3111 {
3112  struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3113  u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
3114  struct l2_fhdr *rx_hdr;
3115  int rx_pkt = 0, pg_ring_used = 0;
3116 
3117  hw_cons = bnx2_get_hw_rx_cons(bnapi);
3118  sw_cons = rxr->rx_cons;
3119  sw_prod = rxr->rx_prod;
3120 
3121  /* Memory barrier necessary as speculative reads of the rx
3122  * buffer can be ahead of the index in the status block
3123  */
3124  rmb();
3125  while (sw_cons != hw_cons) {
3126  unsigned int len, hdr_len;
3127  u32 status;
3128  struct sw_bd *rx_buf, *next_rx_buf;
3129  struct sk_buff *skb;
3130  dma_addr_t dma_addr;
3131  u8 *data;
3132 
3133  sw_ring_cons = RX_RING_IDX(sw_cons);
3134  sw_ring_prod = RX_RING_IDX(sw_prod);
3135 
3136  rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
3137  data = rx_buf->data;
3138  rx_buf->data = NULL;
3139 
3140  rx_hdr = get_l2_fhdr(data);
3141  prefetch(rx_hdr);
3142 
3143  dma_addr = dma_unmap_addr(rx_buf, mapping);
3144 
3145  dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr,
3146  BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
3147  PCI_DMA_FROMDEVICE);
3148 
3149  next_rx_buf =
3150  &rxr->rx_buf_ring[RX_RING_IDX(NEXT_RX_BD(sw_cons))];
3151  prefetch(get_l2_fhdr(next_rx_buf->data));
3152 
3153  len = rx_hdr->l2_fhdr_pkt_len;
3154  status = rx_hdr->l2_fhdr_status;
3155 
3156  hdr_len = 0;
3157  if (status & L2_FHDR_STATUS_SPLIT) {
3158  hdr_len = rx_hdr->l2_fhdr_ip_xsum;
3159  pg_ring_used = 1;
3160  } else if (len > bp->rx_jumbo_thresh) {
3161  hdr_len = bp->rx_jumbo_thresh;
3162  pg_ring_used = 1;
3163  }
3164 
3165  if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
3166  L2_FHDR_ERRORS_PHY_DECODE |
3167  L2_FHDR_ERRORS_ALIGNMENT |
3168  L2_FHDR_ERRORS_TOO_SHORT |
3169  L2_FHDR_ERRORS_GIANT_FRAME))) {
3170 
3171  bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
3172  sw_ring_prod);
3173  if (pg_ring_used) {
3174  int pages;
3175 
3176  pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;
3177 
3178  bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
3179  }
3180  goto next_rx;
3181  }
3182 
3183  len -= 4;
3184 
3185  if (len <= bp->rx_copy_thresh) {
3186  skb = netdev_alloc_skb(bp->dev, len + 6);
3187  if (skb == NULL) {
3188  bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
3189  sw_ring_prod);
3190  goto next_rx;
3191  }
3192 
3193  /* aligned copy */
3194  memcpy(skb->data,
3195  (u8 *)rx_hdr + BNX2_RX_OFFSET - 6,
3196  len + 6);
3197  skb_reserve(skb, 6);
3198  skb_put(skb, len);
3199 
3200  bnx2_reuse_rx_data(bp, rxr, data,
3201  sw_ring_cons, sw_ring_prod);
3202 
3203  } else {
3204  skb = bnx2_rx_skb(bp, rxr, data, len, hdr_len, dma_addr,
3205  (sw_ring_cons << 16) | sw_ring_prod);
3206  if (!skb)
3207  goto next_rx;
3208  }
3209  if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
3210  !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG))
3211  __vlan_hwaccel_put_tag(skb, rx_hdr->l2_fhdr_vlan_tag);
3212 
3213  skb->protocol = eth_type_trans(skb, bp->dev);
3214 
3215  if ((len > (bp->dev->mtu + ETH_HLEN)) &&
3216  (ntohs(skb->protocol) != 0x8100)) {
3217 
3218  dev_kfree_skb(skb);
3219  goto next_rx;
3220 
3221  }
3222 
3223  skb_checksum_none_assert(skb);
3224  if ((bp->dev->features & NETIF_F_RXCSUM) &&
3225  (status & (L2_FHDR_STATUS_TCP_SEGMENT |
3226  L2_FHDR_STATUS_UDP_DATAGRAM))) {
3227 
3228  if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
3229  L2_FHDR_ERRORS_UDP_XSUM)) == 0))
3230  skb->ip_summed = CHECKSUM_UNNECESSARY;
3231  }
3232  if ((bp->dev->features & NETIF_F_RXHASH) &&
3233  ((status & L2_FHDR_STATUS_USE_RXHASH) ==
3234  L2_FHDR_STATUS_USE_RXHASH))
3235  skb->rxhash = rx_hdr->l2_fhdr_hash;
3236 
3237  skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
3238  napi_gro_receive(&bnapi->napi, skb);
3239  rx_pkt++;
3240 
3241 next_rx:
3242  sw_cons = NEXT_RX_BD(sw_cons);
3243  sw_prod = NEXT_RX_BD(sw_prod);
3244 
3245  if ((rx_pkt == budget))
3246  break;
3247 
3248  /* Refresh hw_cons to see if there is new work */
3249  if (sw_cons == hw_cons) {
3250  hw_cons = bnx2_get_hw_rx_cons(bnapi);
3251  rmb();
3252  }
3253  }
3254  rxr->rx_cons = sw_cons;
3255  rxr->rx_prod = sw_prod;
3256 
3257  if (pg_ring_used)
3258  REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
3259 
3260  REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);
3261 
3262  REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
3263 
3264  mmiowb();
3265 
3266  return rx_pkt;
3267 
3268 }
3269 
3270 /* MSI ISR - The only difference between this and the INTx ISR
3271  * is that the MSI interrupt is always serviced.
3272  */
3273 static irqreturn_t
3274 bnx2_msi(int irq, void *dev_instance)
3275 {
3276  struct bnx2_napi *bnapi = dev_instance;
3277  struct bnx2 *bp = bnapi->bp;
3278 
3279  prefetch(bnapi->status_blk.msi);
3280  REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3281  BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3282  BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3283 
3284  /* Return here if interrupt is disabled. */
3285  if (unlikely(atomic_read(&bp->intr_sem) != 0))
3286  return IRQ_HANDLED;
3287 
3288  napi_schedule(&bnapi->napi);
3289 
3290  return IRQ_HANDLED;
3291 }
3292 
3293 static irqreturn_t
3294 bnx2_msi_1shot(int irq, void *dev_instance)
3295 {
3296  struct bnx2_napi *bnapi = dev_instance;
3297  struct bnx2 *bp = bnapi->bp;
3298 
3299  prefetch(bnapi->status_blk.msi);
3300 
3301  /* Return here if interrupt is disabled. */
3302  if (unlikely(atomic_read(&bp->intr_sem) != 0))
3303  return IRQ_HANDLED;
3304 
3305  napi_schedule(&bnapi->napi);
3306 
3307  return IRQ_HANDLED;
3308 }
3309 
3310 static irqreturn_t
3311 bnx2_interrupt(int irq, void *dev_instance)
3312 {
3313  struct bnx2_napi *bnapi = dev_instance;
3314  struct bnx2 *bp = bnapi->bp;
3315  struct status_block *sblk = bnapi->status_blk.msi;
3316 
3317  /* When using INTx, it is possible for the interrupt to arrive
3318  * at the CPU before the status block posted prior to the
3319  * interrupt. Reading a register will flush the status block.
3320  * When using MSI, the MSI message will always complete after
3321  * the status block write.
3322  */
3323  if ((sblk->status_idx == bnapi->last_status_idx) &&
3324  (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
3325  BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
3326  return IRQ_NONE;
3327 
3328  REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3329  BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3330  BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3331 
3332  /* Read back to deassert IRQ immediately to avoid too many
3333  * spurious interrupts.
3334  */
3335  REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
3336 
3337  /* Return here if interrupt is shared and is disabled. */
3338  if (unlikely(atomic_read(&bp->intr_sem) != 0))
3339  return IRQ_HANDLED;
3340 
3341  if (napi_schedule_prep(&bnapi->napi)) {
3342  bnapi->last_status_idx = sblk->status_idx;
3343  __napi_schedule(&bnapi->napi);
3344  }
3345 
3346  return IRQ_HANDLED;
3347 }
3348 
3349 static inline int
3350 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3351 {
3352  struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3353  struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3354 
3355  if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3356  (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3357  return 1;
3358  return 0;
3359 }
3360 
3361 #define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
3362  STATUS_ATTN_BITS_TIMER_ABORT)
3363 
3364 static inline int
3365 bnx2_has_work(struct bnx2_napi *bnapi)
3366 {
3367  struct status_block *sblk = bnapi->status_blk.msi;
3368 
3369  if (bnx2_has_fast_work(bnapi))
3370  return 1;
3371 
3372 #ifdef BCM_CNIC
3373  if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
3374  return 1;
3375 #endif
3376 
3377  if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3378  (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3379  return 1;
3380 
3381  return 0;
3382 }
3383 
3384 static void
3385 bnx2_chk_missed_msi(struct bnx2 *bp)
3386 {
3387  struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
3388  u32 msi_ctrl;
3389 
3390  if (bnx2_has_work(bnapi)) {
3391  msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL);
3392  if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
3393  return;
3394 
3395  if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
3396  REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
3397  ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
3398  REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
3399  bnx2_msi(bp->irq_tbl[0].vector, bnapi);
3400  }
3401  }
3402 
3403  bp->idle_chk_status_idx = bnapi->last_status_idx;
3404 }
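/* bnx2_chk_missed_msi() works around chips that can occasionally drop an
 * MSI. It is presumably invoked from the periodic timer when MSI is in
 * use (bnx2_timer() is outside this excerpt), along the lines of:
 *
 *	if (bp->flags & BNX2_FLAG_USING_MSI)
 *		bnx2_chk_missed_msi(bp);
 *
 * If work is pending but the status index has not moved since the previous
 * tick, the MSI was probably lost; toggling BNX2_PCICFG_MSI_CONTROL_ENABLE
 * off and on and calling bnx2_msi() by hand kicks NAPI back into motion.
 */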
3405 
3406 #ifdef BCM_CNIC
3407 static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
3408 {
3409  struct cnic_ops *c_ops;
3410 
3411  if (!bnapi->cnic_present)
3412  return;
3413 
3414  rcu_read_lock();
3415  c_ops = rcu_dereference(bp->cnic_ops);
3416  if (c_ops)
3417  bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
3418  bnapi->status_blk.msi);
3419  rcu_read_unlock();
3420 }
3421 #endif
3422 
3423 static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
3424 {
3425  struct status_block *sblk = bnapi->status_blk.msi;
3426  u32 status_attn_bits = sblk->status_attn_bits;
3427  u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
3428 
3429  if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
3430  (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
3431 
3432  bnx2_phy_int(bp, bnapi);
3433 
3434  /* This is needed to take care of transient status
3435  * during link changes.
3436  */
3437  REG_WR(bp, BNX2_HC_COMMAND,
3438  bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3439  REG_RD(bp, BNX2_HC_COMMAND);
3440  }
3441 }
3442 
3443 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3444  int work_done, int budget)
3445 {
3446  struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3447  struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3448 
3449  if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3450  bnx2_tx_int(bp, bnapi, 0);
3451 
3452  if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3453  work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3454 
3455  return work_done;
3456 }
3457 
3458 static int bnx2_poll_msix(struct napi_struct *napi, int budget)
3459 {
3460  struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3461  struct bnx2 *bp = bnapi->bp;
3462  int work_done = 0;
3463  struct status_block_msix *sblk = bnapi->status_blk.msix;
3464 
3465  while (1) {
3466  work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3467  if (unlikely(work_done >= budget))
3468  break;
3469 
3470  bnapi->last_status_idx = sblk->status_idx;
3471  /* status idx must be read before checking for more work. */
3472  rmb();
3473  if (likely(!bnx2_has_fast_work(bnapi))) {
3474 
3475  napi_complete(napi);
3476  REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
3477  BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3478  bnapi->last_status_idx);
3479  break;
3480  }
3481  }
3482  return work_done;
3483 }
3484 
3485 static int bnx2_poll(struct napi_struct *napi, int budget)
3486 {
3487  struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3488  struct bnx2 *bp = bnapi->bp;
3489  int work_done = 0;
3490  struct status_block *sblk = bnapi->status_blk.msi;
3491 
3492  while (1) {
3493  bnx2_poll_link(bp, bnapi);
3494 
3495  work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3496 
3497 #ifdef BCM_CNIC
3498  bnx2_poll_cnic(bp, bnapi);
3499 #endif
3500 
3501  /* bnapi->last_status_idx is used below to tell the hw how
3502  * much work has been processed, so we must read it before
3503  * checking for more work.
3504  */
3505  bnapi->last_status_idx = sblk->status_idx;
3506 
3507  if (unlikely(work_done >= budget))
3508  break;
3509 
3510  rmb();
3511  if (likely(!bnx2_has_work(bnapi))) {
3512  napi_complete(napi);
3513  if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
3514  REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3515  BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3516  bnapi->last_status_idx);
3517  break;
3518  }
3519  REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3520  BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3521  BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
3522  bnapi->last_status_idx);
3523 
3524  REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3525  BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3526  bnapi->last_status_idx);
3527  break;
3528  }
3529  }
3530 
3531  return work_done;
3532 }
3533 
3534 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
3535  * from set_multicast.
3536  */
3537 static void
3538 bnx2_set_rx_mode(struct net_device *dev)
3539 {
3540  struct bnx2 *bp = netdev_priv(dev);
3541  u32 rx_mode, sort_mode;
3542  struct netdev_hw_addr *ha;
3543  int i;
3544 
3545  if (!netif_running(dev))
3546  return;
3547 
3548  spin_lock_bh(&bp->phy_lock);
3549 
3550  rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
3551  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
3552  sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
3553  if (!(dev->features & NETIF_F_HW_VLAN_RX) &&
3554  (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
3555  rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3556  if (dev->flags & IFF_PROMISC) {
3557  /* Promiscuous mode. */
3558  rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3559  sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3560  BNX2_RPM_SORT_USER0_PROM_VLAN;
3561  }
3562  else if (dev->flags & IFF_ALLMULTI) {
3563  for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3564  REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3565  0xffffffff);
3566  }
3567  sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
3568  }
3569  else {
3570  /* Accept one or more multicast(s). */
3571  u32 mc_filter[NUM_MC_HASH_REGISTERS];
3572  u32 regidx;
3573  u32 bit;
3574  u32 crc;
3575 
3576  memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
3577 
3578  netdev_for_each_mc_addr(ha, dev) {
3579  crc = ether_crc_le(ETH_ALEN, ha->addr);
3580  bit = crc & 0xff;
3581  regidx = (bit & 0xe0) >> 5;
3582  bit &= 0x1f;
3583  mc_filter[regidx] |= (1 << bit);
3584  }
3585 
3586  for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3587  REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3588  mc_filter[i]);
3589  }
3590 
3591  sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
3592  }
3593 
3594  if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
3595  rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3596  sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3597  BNX2_RPM_SORT_USER0_PROM_VLAN;
3598  } else if (!(dev->flags & IFF_PROMISC)) {
3599  /* Add all entries into to the match filter list */
3600  i = 0;
3601  netdev_for_each_uc_addr(ha, dev) {
3602  bnx2_set_mac_addr(bp, ha->addr,
3603  i + BNX2_START_UNICAST_ADDRESS_INDEX);
3604  sort_mode |= (1 <<
3605  (i + BNX2_START_UNICAST_ADDRESS_INDEX));
3606  i++;
3607  }
3608 
3609  }
3610 
3611  if (rx_mode != bp->rx_mode) {
3612  bp->rx_mode = rx_mode;
3613  REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
3614  }
3615 
3616  REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3617  REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
3618  REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
3619 
3620  spin_unlock_bh(&bp->phy_lock);
3621 }
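/* The multicast filter above is a 256-bit hash table spread across
 * NUM_MC_HASH_REGISTERS 32-bit registers. The low byte of the
 * little-endian CRC32 of the address selects the bit: bits 7:5 pick the
 * register, bits 4:0 the position. A worked example for a hypothetical
 * group address whose CRC low byte is 0x47:
 *
 *	crc = ether_crc_le(ETH_ALEN, ha->addr);
 *	bit = crc & 0xff;		// say 0x47
 *	regidx = (bit & 0xe0) >> 5;	// -> register 2
 *	bit &= 0x1f;			// -> bit 7
 *	mc_filter[regidx] |= 1 << bit;	// mc_filter[2] |= 0x80
 *
 * Hash collisions mean the MAC may still accept unwanted groups; exact
 * multicast filtering is left to the stack.
 */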
3622 
3623 static int
3624 check_fw_section(const struct firmware *fw,
3625  const struct bnx2_fw_file_section *section,
3626  u32 alignment, bool non_empty)
3627 {
3628  u32 offset = be32_to_cpu(section->offset);
3629  u32 len = be32_to_cpu(section->len);
3630 
3631  if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3632  return -EINVAL;
3633  if ((non_empty && len == 0) || len > fw->size - offset ||
3634  len & (alignment - 1))
3635  return -EINVAL;
3636  return 0;
3637 }
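/* A bnx2 firmware blob is a table of big-endian {addr, len, offset}
 * section headers followed by payload. Restated, check_fw_section()
 * accepts a header only if:
 *
 *	offset != 0 whenever len != 0;
 *	offset < fw->size and offset is 4-byte aligned;
 *	len <= fw->size - offset and len is a multiple of 'alignment';
 *	len != 0 for mandatory (non_empty) sections.
 *
 * This keeps a truncated or corrupted file in /lib/firmware from driving
 * out-of-bounds reads in load_rv2p_fw()/load_cpu_fw() below.
 */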
3638 
3639 static int
3640 check_mips_fw_entry(const struct firmware *fw,
3641  const struct bnx2_mips_fw_file_entry *entry)
3642 {
3643  if (check_fw_section(fw, &entry->text, 4, true) ||
3644  check_fw_section(fw, &entry->data, 4, false) ||
3645  check_fw_section(fw, &entry->rodata, 4, false))
3646  return -EINVAL;
3647  return 0;
3648 }
3649 
3650 static void bnx2_release_firmware(struct bnx2 *bp)
3651 {
3652  if (bp->rv2p_firmware) {
3653  release_firmware(bp->mips_firmware);
3654  release_firmware(bp->rv2p_firmware);
3655  bp->rv2p_firmware = NULL;
3656  }
3657 }
3658 
3659 static int bnx2_request_uncached_firmware(struct bnx2 *bp)
3660 {
3661  const char *mips_fw_file, *rv2p_fw_file;
3662  const struct bnx2_mips_fw_file *mips_fw;
3663  const struct bnx2_rv2p_fw_file *rv2p_fw;
3664  int rc;
3665 
3666  if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3667  mips_fw_file = FW_MIPS_FILE_09;
3668  if ((CHIP_ID(bp) == CHIP_ID_5709_A0) ||
3669  (CHIP_ID(bp) == CHIP_ID_5709_A1))
3670  rv2p_fw_file = FW_RV2P_FILE_09_Ax;
3671  else
3672  rv2p_fw_file = FW_RV2P_FILE_09;
3673  } else {
3674  mips_fw_file = FW_MIPS_FILE_06;
3675  rv2p_fw_file = FW_RV2P_FILE_06;
3676  }
3677 
3678  rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
3679  if (rc) {
3680  pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
3681  goto out;
3682  }
3683 
3684  rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
3685  if (rc) {
3686  pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
3687  goto err_release_mips_firmware;
3688  }
3689  mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3690  rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3691  if (bp->mips_firmware->size < sizeof(*mips_fw) ||
3692  check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
3693  check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
3694  check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
3695  check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
3696  check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
3697  pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
3698  rc = -EINVAL;
3699  goto err_release_firmware;
3700  }
3701  if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
3702  check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
3703  check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
3704  pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
3705  rc = -EINVAL;
3706  goto err_release_firmware;
3707  }
3708 out:
3709  return rc;
3710 
3711 err_release_firmware:
3712  release_firmware(bp->rv2p_firmware);
3713  bp->rv2p_firmware = NULL;
3714 err_release_mips_firmware:
3715  release_firmware(bp->mips_firmware);
3716  goto out;
3717 }
3718 
3719 static int bnx2_request_firmware(struct bnx2 *bp)
3720 {
3721  return bp->rv2p_firmware ? 0 : bnx2_request_uncached_firmware(bp);
3722 }
3723 
3724 static u32
3725 rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3726 {
3727  switch (idx) {
3728  case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3729  rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3730  rv2p_code |= RV2P_BD_PAGE_SIZE;
3731  break;
3732  }
3733  return rv2p_code;
3734 }
3735 
3736 static int
3737 load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
3738  const struct bnx2_rv2p_fw_file_entry *fw_entry)
3739 {
3740  u32 rv2p_code_len, file_offset;
3741  __be32 *rv2p_code;
3742  int i;
3743  u32 val, cmd, addr;
3744 
3745  rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
3746  file_offset = be32_to_cpu(fw_entry->rv2p.offset);
3747 
3748  rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3749 
3750  if (rv2p_proc == RV2P_PROC1) {
3751  cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
3752  addr = BNX2_RV2P_PROC1_ADDR_CMD;
3753  } else {
3754  cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
3755  addr = BNX2_RV2P_PROC2_ADDR_CMD;
3756  }
3757 
3758  for (i = 0; i < rv2p_code_len; i += 8) {
3759  REG_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
3760  rv2p_code++;
3761  REG_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
3762  rv2p_code++;
3763 
3764  val = (i / 8) | cmd;
3765  REG_WR(bp, addr, val);
3766  }
3767 
3768  rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3769  for (i = 0; i < 8; i++) {
3770  u32 loc, code;
3771 
3772  loc = be32_to_cpu(fw_entry->fixup[i]);
3773  if (loc && ((loc * 4) < rv2p_code_len)) {
3774  code = be32_to_cpu(*(rv2p_code + loc - 1));
3775  REG_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
3776  code = be32_to_cpu(*(rv2p_code + loc));
3777  code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
3778  REG_WR(bp, BNX2_RV2P_INSTR_LOW, code);
3779 
3780  val = (loc / 2) | cmd;
3781  REG_WR(bp, addr, val);
3782  }
3783  }
3784 
3785  /* Reset the processor, un-stall is done later. */
3786  if (rv2p_proc == RV2P_PROC1) {
3787  REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3788  }
3789  else {
3790  REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3791  }
3792 
3793  return 0;
3794 }
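/* RV2P instructions are 64 bits wide and are loaded as HIGH/LOW 32-bit
 * word pairs, which is why the copy loop advances by 8 bytes and writes
 * the instruction address as i / 8. The fixup table then patches up to
 * eight instruction slots in place; the only fixup applied rewrites the
 * BD page size field so one firmware image serves any kernel PAGE_SIZE:
 *
 *	rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;	// strip built-in page size
 *	rv2p_code |= RV2P_BD_PAGE_SIZE;		// substitute this kernel's
 *
 * Fixup locations count 32-bit words (high word at loc - 1, low word at
 * loc), so loc / 2 recovers the 64-bit instruction address written back.
 */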
3795 
3796 static int
3797 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3798  const struct bnx2_mips_fw_file_entry *fw_entry)
3799 {
3800  u32 addr, len, file_offset;
3801  __be32 *data;
3802  u32 offset;
3803  u32 val;
3804 
3805  /* Halt the CPU. */
3806  val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3807  val |= cpu_reg->mode_value_halt;
3808  bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3809  bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3810 
3811  /* Load the Text area. */
3812  addr = be32_to_cpu(fw_entry->text.addr);
3813  len = be32_to_cpu(fw_entry->text.len);
3814  file_offset = be32_to_cpu(fw_entry->text.offset);
3815  data = (__be32 *)(bp->mips_firmware->data + file_offset);
3816 
3817  offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3818  if (len) {
3819  int j;
3820 
3821  for (j = 0; j < (len / 4); j++, offset += 4)
3822  bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3823  }
3824 
3825  /* Load the Data area. */
3826  addr = be32_to_cpu(fw_entry->data.addr);
3827  len = be32_to_cpu(fw_entry->data.len);
3828  file_offset = be32_to_cpu(fw_entry->data.offset);
3829  data = (__be32 *)(bp->mips_firmware->data + file_offset);
3830 
3831  offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3832  if (len) {
3833  int j;
3834 
3835  for (j = 0; j < (len / 4); j++, offset += 4)
3836  bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3837  }
3838 
3839  /* Load the Read-Only area. */
3840  addr = be32_to_cpu(fw_entry->rodata.addr);
3841  len = be32_to_cpu(fw_entry->rodata.len);
3842  file_offset = be32_to_cpu(fw_entry->rodata.offset);
3843  data = (__be32 *)(bp->mips_firmware->data + file_offset);
3844 
3845  offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3846  if (len) {
3847  int j;
3848 
3849  for (j = 0; j < (len / 4); j++, offset += 4)
3850  bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3851  }
3852 
3853  /* Clear the pre-fetch instruction. */
3854  bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3855 
3856  val = be32_to_cpu(fw_entry->start_addr);
3857  bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3858 
3859  /* Start the CPU. */
3860  val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3861  val &= ~cpu_reg->mode_value_halt;
3862  bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3863  bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3864 
3865  return 0;
3866 }
3867 
3868 static int
3869 bnx2_init_cpus(struct bnx2 *bp)
3870 {
3871  const struct bnx2_mips_fw_file *mips_fw =
3872  (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3873  const struct bnx2_rv2p_fw_file *rv2p_fw =
3874  (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3875  int rc;
3876 
3877  /* Initialize the RV2P processor. */
3878  load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
3879  load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
3880 
3881  /* Initialize the RX Processor. */
3882  rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
3883  if (rc)
3884  goto init_cpu_err;
3885 
3886  /* Initialize the TX Processor. */
3887  rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
3888  if (rc)
3889  goto init_cpu_err;
3890 
3891  /* Initialize the TX Patch-up Processor. */
3892  rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
3893  if (rc)
3894  goto init_cpu_err;
3895 
3896  /* Initialize the Completion Processor. */
3897  rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
3898  if (rc)
3899  goto init_cpu_err;
3900 
3901  /* Initialize the Command Processor. */
3902  rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
3903 
3904 init_cpu_err:
3905  return rc;
3906 }
3907 
3908 static int
3909 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
3910 {
3911  u16 pmcsr;
3912 
3913  pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
3914 
3915  switch (state) {
3916  case PCI_D0: {
3917  u32 val;
3918 
3919  pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3920  (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3921  PCI_PM_CTRL_PME_STATUS);
3922 
3923  if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3924  /* delay required during transition out of D3hot */
3925  msleep(20);
3926 
3927  val = REG_RD(bp, BNX2_EMAC_MODE);
3928  val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
3929  val &= ~BNX2_EMAC_MODE_MPKT;
3930  REG_WR(bp, BNX2_EMAC_MODE, val);
3931 
3932  val = REG_RD(bp, BNX2_RPM_CONFIG);
3933  val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3934  REG_WR(bp, BNX2_RPM_CONFIG, val);
3935  break;
3936  }
3937  case PCI_D3hot: {
3938  int i;
3939  u32 val, wol_msg;
3940 
3941  if (bp->wol) {
3942  u32 advertising;
3943  u8 autoneg;
3944 
3945  autoneg = bp->autoneg;
3946  advertising = bp->advertising;
3947 
3948  if (bp->phy_port == PORT_TP) {
3949  bp->autoneg = AUTONEG_SPEED;
3950  bp->advertising = ADVERTISED_10baseT_Half |
3951  ADVERTISED_10baseT_Full |
3952  ADVERTISED_100baseT_Half |
3953  ADVERTISED_100baseT_Full |
3954  ADVERTISED_Autoneg;
3955  }
3956 
3957  spin_lock_bh(&bp->phy_lock);
3958  bnx2_setup_phy(bp, bp->phy_port);
3959  spin_unlock_bh(&bp->phy_lock);
3960 
3961  bp->autoneg = autoneg;
3962  bp->advertising = advertising;
3963 
3964  bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
3965 
3966  val = REG_RD(bp, BNX2_EMAC_MODE);
3967 
3968  /* Enable port mode. */
3969  val &= ~BNX2_EMAC_MODE_PORT;
3970  val |= BNX2_EMAC_MODE_MPKT_RCVD |
3971  BNX2_EMAC_MODE_ACPI_RCVD |
3972  BNX2_EMAC_MODE_MPKT;
3973  if (bp->phy_port == PORT_TP)
3974  val |= BNX2_EMAC_MODE_PORT_MII;
3975  else {
3976  val |= BNX2_EMAC_MODE_PORT_GMII;
3977  if (bp->line_speed == SPEED_2500)
3978  val |= BNX2_EMAC_MODE_25G_MODE;
3979  }
3980 
3981  REG_WR(bp, BNX2_EMAC_MODE, val);
3982 
3983  /* receive all multicast */
3984  for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3985  REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3986  0xffffffff);
3987  }
3988  REG_WR(bp, BNX2_EMAC_RX_MODE,
3989  BNX2_EMAC_RX_MODE_SORT_MODE);
3990 
3991  val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
3992  BNX2_RPM_SORT_USER0_MC_EN;
3993  REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3994  REG_WR(bp, BNX2_RPM_SORT_USER0, val);
3995  REG_WR(bp, BNX2_RPM_SORT_USER0, val |
3996  BNX2_RPM_SORT_USER0_ENA);
3997 
3998  /* Need to enable EMAC and RPM for WOL. */
3999  REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4000  BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
4001  BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
4002  BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
4003 
4004  val = REG_RD(bp, BNX2_RPM_CONFIG);
4005  val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
4006  REG_WR(bp, BNX2_RPM_CONFIG, val);
4007 
4008  wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
4009  }
4010  else {
4011  wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
4012  }
4013 
4014  if (!(bp->flags & BNX2_FLAG_NO_WOL))
4015  bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
4016  1, 0);
4017 
4018  pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
4019  if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
4020  (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
4021 
4022  if (bp->wol)
4023  pmcsr |= 3;
4024  }
4025  else {
4026  pmcsr |= 3;
4027  }
4028  if (bp->wol) {
4029  pmcsr |= PCI_PM_CTRL_PME_ENABLE;
4030  }
4031  pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
4032  pmcsr);
4033 
4034  /* No more memory access after this point until
4035  * device is brought back to D0.
4036  */
4037  udelay(50);
4038  break;
4039  }
4040  default:
4041  return -EINVAL;
4042  }
4043  return 0;
4044 }
4045 
4046 static int
4047 bnx2_acquire_nvram_lock(struct bnx2 *bp)
4048 {
4049  u32 val;
4050  int j;
4051 
4052  /* Request access to the flash interface. */
4053  REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
4054  for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4055  val = REG_RD(bp, BNX2_NVM_SW_ARB);
4056  if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
4057  break;
4058 
4059  udelay(5);
4060  }
4061 
4062  if (j >= NVRAM_TIMEOUT_COUNT)
4063  return -EBUSY;
4064 
4065  return 0;
4066 }
4067 
4068 static int
4069 bnx2_release_nvram_lock(struct bnx2 *bp)
4070 {
4071  int j;
4072  u32 val;
4073 
4074  /* Relinquish nvram interface. */
4075  REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4076 
4077  for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4078  val = REG_RD(bp, BNX2_NVM_SW_ARB);
4079  if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
4080  break;
4081 
4082  udelay(5);
4083  }
4084 
4085  if (j >= NVRAM_TIMEOUT_COUNT)
4086  return -EBUSY;
4087 
4088  return 0;
4089 }
4090 
4091 
4092 static int
4093 bnx2_enable_nvram_write(struct bnx2 *bp)
4094 {
4095  u32 val;
4096 
4097  val = REG_RD(bp, BNX2_MISC_CFG);
4098  REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
4099 
4100  if (bp->flash_info->flags & BNX2_NV_WREN) {
4101  int j;
4102 
4103  REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4104  REG_WR(bp, BNX2_NVM_COMMAND,
4105  BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
4106 
4107  for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4108  udelay(5);
4109 
4110  val = REG_RD(bp, BNX2_NVM_COMMAND);
4111  if (val & BNX2_NVM_COMMAND_DONE)
4112  break;
4113  }
4114 
4115  if (j >= NVRAM_TIMEOUT_COUNT)
4116  return -EBUSY;
4117  }
4118  return 0;
4119 }
4120 
4121 static void
4122 bnx2_disable_nvram_write(struct bnx2 *bp)
4123 {
4124  u32 val;
4125 
4126  val = REG_RD(bp, BNX2_MISC_CFG);
4127  REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4128 }
4129 
4130 
4131 static void
4132 bnx2_enable_nvram_access(struct bnx2 *bp)
4133 {
4134  u32 val;
4135 
4136  val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4137  /* Enable both bits, even on read. */
4138  REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4139  val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4140 }
4141 
4142 static void
4143 bnx2_disable_nvram_access(struct bnx2 *bp)
4144 {
4145  u32 val;
4146 
4147  val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4148  /* Disable both bits, even after read. */
4149  REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4150  val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4151  BNX2_NVM_ACCESS_ENABLE_WR_EN));
4152 }
4153 
4154 static int
4155 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
4156 {
4157  u32 cmd;
4158  int j;
4159 
4160  if (bp->flash_info->flags & BNX2_NV_BUFFERED)
4161  /* Buffered flash, no erase needed */
4162  return 0;
4163 
4164  /* Build an erase command */
4165  cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
4166  BNX2_NVM_COMMAND_DOIT;
4167 
4168  /* Need to clear DONE bit separately. */
4169  REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4170 
4171  /* Address of the NVRAM to erase. */
4172  REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4173 
4174  /* Issue an erase command. */
4175  REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4176 
4177  /* Wait for completion. */
4178  for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4179  u32 val;
4180 
4181  udelay(5);
4182 
4183  val = REG_RD(bp, BNX2_NVM_COMMAND);
4184  if (val & BNX2_NVM_COMMAND_DONE)
4185  break;
4186  }
4187 
4188  if (j >= NVRAM_TIMEOUT_COUNT)
4189  return -EBUSY;
4190 
4191  return 0;
4192 }
4193 
4194 static int
4195 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
4196 {
4197  u32 cmd;
4198  int j;
4199 
4200  /* Build the command word. */
4201  cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
4202 
4203  /* Calculate an offset of a buffered flash, not needed for 5709. */
4204  if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4205  offset = ((offset / bp->flash_info->page_size) <<
4206  bp->flash_info->page_bits) +
4207  (offset % bp->flash_info->page_size);
4208  }
4209 
4210  /* Need to clear DONE bit separately. */
4211  REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4212 
4213  /* Address of the NVRAM to read from. */
4214  REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4215 
4216  /* Issue a read command. */
4217  REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4218 
4219  /* Wait for completion. */
4220  for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4221  u32 val;
4222 
4223  udelay(5);
4224 
4225  val = REG_RD(bp, BNX2_NVM_COMMAND);
4226  if (val & BNX2_NVM_COMMAND_DONE) {
4227  __be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
4228  memcpy(ret_val, &v, 4);
4229  break;
4230  }
4231  }
4232  if (j >= NVRAM_TIMEOUT_COUNT)
4233  return -EBUSY;
4234 
4235  return 0;
4236 }
4237 
4238 
4239 static int
4240 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
4241 {
4242  u32 cmd;
4243  __be32 val32;
4244  int j;
4245 
4246  /* Build the command word. */
4247  cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
4248 
4249  /* Calculate an offset of a buffered flash, not needed for 5709. */
4250  if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4251  offset = ((offset / bp->flash_info->page_size) <<
4252  bp->flash_info->page_bits) +
4253  (offset % bp->flash_info->page_size);
4254  }
4255 
4256  /* Need to clear DONE bit separately. */
4257  REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4258 
4259  memcpy(&val32, val, 4);
4260 
4261  /* Write the data. */
4262  REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
4263 
4264  /* Address of the NVRAM to write to. */
4265  REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4266 
4267  /* Issue the write command. */
4268  REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4269 
4270  /* Wait for completion. */
4271  for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4272  udelay(5);
4273 
4274  if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
4275  break;
4276  }
4277  if (j >= NVRAM_TIMEOUT_COUNT)
4278  return -EBUSY;
4279 
4280  return 0;
4281 }
4282 
4283 static int
4284 bnx2_init_nvram(struct bnx2 *bp)
4285 {
4286  u32 val;
4287  int j, entry_count, rc = 0;
4288  const struct flash_spec *flash;
4289 
4290  if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4291  bp->flash_info = &flash_5709;
4292  goto get_flash_size;
4293  }
4294 
4295  /* Determine the selected interface. */
4296  val = REG_RD(bp, BNX2_NVM_CFG1);
4297 
4298  entry_count = ARRAY_SIZE(flash_table);
4299 
4300  if (val & 0x40000000) {
4301 
4302  /* Flash interface has been reconfigured */
4303  for (j = 0, flash = &flash_table[0]; j < entry_count;
4304  j++, flash++) {
4305  if ((val & FLASH_BACKUP_STRAP_MASK) ==
4306  (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
4307  bp->flash_info = flash;
4308  break;
4309  }
4310  }
4311  }
4312  else {
4313  u32 mask;
4314  /* Not yet been reconfigured */
4315 
4316  if (val & (1 << 23))
4317  mask = FLASH_BACKUP_STRAP_MASK;
4318  else
4319  mask = FLASH_STRAP_MASK;
4320 
4321  for (j = 0, flash = &flash_table[0]; j < entry_count;
4322  j++, flash++) {
4323 
4324  if ((val & mask) == (flash->strapping & mask)) {
4325  bp->flash_info = flash;
4326 
4327  /* Request access to the flash interface. */
4328  if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4329  return rc;
4330 
4331  /* Enable access to flash interface */
4332  bnx2_enable_nvram_access(bp);
4333 
4334  /* Reconfigure the flash interface */
4335  REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
4336  REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
4337  REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
4338  REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
4339 
4340  /* Disable access to flash interface */
4341  bnx2_disable_nvram_access(bp);
4342  bnx2_release_nvram_lock(bp);
4343 
4344  break;
4345  }
4346  }
4347  } /* if (val & 0x40000000) */
4348 
4349  if (j == entry_count) {
4350  bp->flash_info = NULL;
4351  pr_alert("Unknown flash/EEPROM type\n");
4352  return -ENODEV;
4353  }
4354 
4355 get_flash_size:
4356  val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
4357  val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
4358  if (val)
4359  bp->flash_size = val;
4360  else
4361  bp->flash_size = bp->flash_info->total_size;
4362 
4363  return rc;
4364 }
4365 
4366 static int
4367 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4368  int buf_size)
4369 {
4370  int rc = 0;
4371  u32 cmd_flags, offset32, len32, extra;
4372 
4373  if (buf_size == 0)
4374  return 0;
4375 
4376  /* Request access to the flash interface. */
4377  if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4378  return rc;
4379 
4380  /* Enable access to flash interface */
4381  bnx2_enable_nvram_access(bp);
4382 
4383  len32 = buf_size;
4384  offset32 = offset;
4385  extra = 0;
4386 
4387  cmd_flags = 0;
4388 
4389  if (offset32 & 3) {
4390  u8 buf[4];
4391  u32 pre_len;
4392 
4393  offset32 &= ~3;
4394  pre_len = 4 - (offset & 3);
4395 
4396  if (pre_len >= len32) {
4397  pre_len = len32;
4398  cmd_flags = BNX2_NVM_COMMAND_FIRST |
4399  BNX2_NVM_COMMAND_LAST;
4400  }
4401  else {
4402  cmd_flags = BNX2_NVM_COMMAND_FIRST;
4403  }
4404 
4405  rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4406 
4407  if (rc)
4408  return rc;
4409 
4410  memcpy(ret_buf, buf + (offset & 3), pre_len);
4411 
4412  offset32 += 4;
4413  ret_buf += pre_len;
4414  len32 -= pre_len;
4415  }
4416  if (len32 & 3) {
4417  extra = 4 - (len32 & 3);
4418  len32 = (len32 + 4) & ~3;
4419  }
4420 
4421  if (len32 == 4) {
4422  u8 buf[4];
4423 
4424  if (cmd_flags)
4425  cmd_flags = BNX2_NVM_COMMAND_LAST;
4426  else
4427  cmd_flags = BNX2_NVM_COMMAND_FIRST |
4428  BNX2_NVM_COMMAND_LAST;
4429 
4430  rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4431 
4432  memcpy(ret_buf, buf, 4 - extra);
4433  }
4434  else if (len32 > 0) {
4435  u8 buf[4];
4436 
4437  /* Read the first word. */
4438  if (cmd_flags)
4439  cmd_flags = 0;
4440  else
4441  cmd_flags = BNX2_NVM_COMMAND_FIRST;
4442 
4443  rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4444 
4445  /* Advance to the next dword. */
4446  offset32 += 4;
4447  ret_buf += 4;
4448  len32 -= 4;
4449 
4450  while (len32 > 4 && rc == 0) {
4451  rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4452 
4453  /* Advance to the next dword. */
4454  offset32 += 4;
4455  ret_buf += 4;
4456  len32 -= 4;
4457  }
4458 
4459  if (rc)
4460  return rc;
4461 
4462  cmd_flags = BNX2_NVM_COMMAND_LAST;
4463  rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4464 
4465  memcpy(ret_buf, buf, 4 - extra);
4466  }
4467 
4468  /* Disable access to flash interface */
4469  bnx2_disable_nvram_access(bp);
4470 
4471  bnx2_release_nvram_lock(bp);
4472 
4473  return rc;
4474 }
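/* The NVRAM interface moves only aligned 32-bit words, and the FIRST/LAST
 * command flags must bracket one burst, so the routine above splits a
 * request into an unaligned head, whole dwords, and a padded tail. A
 * hypothetical 5-byte read at offset 6 becomes two dword reads:
 *
 *	dword @ 4 (FIRST) -> keep bytes 2..3	(pre_len = 2)
 *	dword @ 8 (LAST)  -> keep bytes 0..2	(extra = 1 byte discarded)
 *
 * bnx2_nvram_write() below plays the same game in reverse, read-merging
 * the partial head and tail dwords before programming the flash.
 */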
4475 
4476 static int
4477 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
4478  int buf_size)
4479 {
4480  u32 written, offset32, len32;
4481  u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
4482  int rc = 0;
4483  int align_start, align_end;
4484 
4485  buf = data_buf;
4486  offset32 = offset;
4487  len32 = buf_size;
4488  align_start = align_end = 0;
4489 
4490  if ((align_start = (offset32 & 3))) {
4491  offset32 &= ~3;
4492  len32 += align_start;
4493  if (len32 < 4)
4494  len32 = 4;
4495  if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
4496  return rc;
4497  }
4498 
4499  if (len32 & 3) {
4500  align_end = 4 - (len32 & 3);
4501  len32 += align_end;
4502  if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
4503  return rc;
4504  }
4505 
4506  if (align_start || align_end) {
4507  align_buf = kmalloc(len32, GFP_KERNEL);
4508  if (align_buf == NULL)
4509  return -ENOMEM;
4510  if (align_start) {
4511  memcpy(align_buf, start, 4);
4512  }
4513  if (align_end) {
4514  memcpy(align_buf + len32 - 4, end, 4);
4515  }
4516  memcpy(align_buf + align_start, data_buf, buf_size);
4517  buf = align_buf;
4518  }
4519 
4520  if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4521  flash_buffer = kmalloc(264, GFP_KERNEL);
4522  if (flash_buffer == NULL) {
4523  rc = -ENOMEM;
4524  goto nvram_write_end;
4525  }
4526  }
4527 
4528  written = 0;
4529  while ((written < len32) && (rc == 0)) {
4530  u32 page_start, page_end, data_start, data_end;
4531  u32 addr, cmd_flags;
4532  int i;
4533 
4534  /* Find the page_start addr */
4535  page_start = offset32 + written;
4536  page_start -= (page_start % bp->flash_info->page_size);
4537  /* Find the page_end addr */
4538  page_end = page_start + bp->flash_info->page_size;
4539  /* Find the data_start addr */
4540  data_start = (written == 0) ? offset32 : page_start;
4541  /* Find the data_end addr */
4542  data_end = (page_end > offset32 + len32) ?
4543  (offset32 + len32) : page_end;
4544 
4545  /* Request access to the flash interface. */
4546  if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4547  goto nvram_write_end;
4548 
4549  /* Enable access to flash interface */
4550  bnx2_enable_nvram_access(bp);
4551 
4552  cmd_flags = BNX2_NVM_COMMAND_FIRST;
4553  if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4554  int j;
4555 
4556  /* Read the whole page into the buffer
4557  * (non-buffer flash only) */
4558  for (j = 0; j < bp->flash_info->page_size; j += 4) {
4559  if (j == (bp->flash_info->page_size - 4)) {
4560  cmd_flags |= BNX2_NVM_COMMAND_LAST;
4561  }
4562  rc = bnx2_nvram_read_dword(bp,
4563  page_start + j,
4564  &flash_buffer[j],
4565  cmd_flags);
4566 
4567  if (rc)
4568  goto nvram_write_end;
4569 
4570  cmd_flags = 0;
4571  }
4572  }
4573 
4574  /* Enable writes to flash interface (unlock write-protect) */
4575  if ((rc = bnx2_enable_nvram_write(bp)) != 0)
4576  goto nvram_write_end;
4577 
4578  /* Loop to write back the buffer data from page_start to
4579  * data_start */
4580  i = 0;
4581  if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4582  /* Erase the page */
4583  if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
4584  goto nvram_write_end;
4585 
4586  /* Re-enable the write again for the actual write */
4587  bnx2_enable_nvram_write(bp);
4588 
4589  for (addr = page_start; addr < data_start;
4590  addr += 4, i += 4) {
4591 
4592  rc = bnx2_nvram_write_dword(bp, addr,
4593  &flash_buffer[i], cmd_flags);
4594 
4595  if (rc != 0)
4596  goto nvram_write_end;
4597 
4598  cmd_flags = 0;
4599  }
4600  }
4601 
4602  /* Loop to write the new data from data_start to data_end */
4603  for (addr = data_start; addr < data_end; addr += 4, i += 4) {
4604  if ((addr == page_end - 4) ||
4605  ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
4606  (addr == data_end - 4))) {
4607 
4608  cmd_flags |= BNX2_NVM_COMMAND_LAST;
4609  }
4610  rc = bnx2_nvram_write_dword(bp, addr, buf,
4611  cmd_flags);
4612 
4613  if (rc != 0)
4614  goto nvram_write_end;
4615 
4616  cmd_flags = 0;
4617  buf += 4;
4618  }
4619 
4620  /* Loop to write back the buffer data from data_end
4621  * to page_end */
4622  if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4623  for (addr = data_end; addr < page_end;
4624  addr += 4, i += 4) {
4625 
4626  if (addr == page_end-4) {
4627  cmd_flags = BNX2_NVM_COMMAND_LAST;
4628  }
4629  rc = bnx2_nvram_write_dword(bp, addr,
4630  &flash_buffer[i], cmd_flags);
4631 
4632  if (rc != 0)
4633  goto nvram_write_end;
4634 
4635  cmd_flags = 0;
4636  }
4637  }
4638 
4639  /* Disable writes to flash interface (lock write-protect) */
4640  bnx2_disable_nvram_write(bp);
4641 
4642  /* Disable access to flash interface */
4643  bnx2_disable_nvram_access(bp);
4644  bnx2_release_nvram_lock(bp);
4645 
4646  /* Increment written */
4647  written += data_end - data_start;
4648  }
4649 
4650 nvram_write_end:
4651  kfree(flash_buffer);
4652  kfree(align_buf);
4653  return rc;
4654 }
4655 
4656 static void
4657 bnx2_init_fw_cap(struct bnx2 *bp)
4658 {
4659  u32 val, sig = 0;
4660 
4663 
4664  if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4666 
4667  val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4669  return;
4670 
4671  if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4674  }
4675 
4676  if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4678  u32 link;
4679 
4681 
4682  link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4683  if (link & BNX2_LINK_STATUS_SERDES_LINK)
4684  bp->phy_port = PORT_FIBRE;
4685  else
4686  bp->phy_port = PORT_TP;
4687 
4690  }
4691 
4692  if (netif_running(bp->dev) && sig)
4693  bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4694 }
4695 
4696 static void
4697 bnx2_setup_msix_tbl(struct bnx2 *bp)
4698 {
4700 
4703 }
4704 
4705 static int
4706 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4707 {
4708  u32 val;
4709  int i, rc = 0;
4710  u8 old_port;
4711 
4712  /* Wait for the current PCI transaction to complete before
4713  * issuing a reset. */
4714  if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
4715  (CHIP_NUM(bp) == CHIP_NUM_5708)) {
4721  val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4722  udelay(5);
4723  } else { /* 5709 */
4724  val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4726  REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4727  val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4728 
4729  for (i = 0; i < 100; i++) {
4730  msleep(1);
4732  if (!(val & BNX2_PCICFG_DEVICE_STATUS_NO_PEND))
4733  break;
4734  }
4735  }
4736 
4737  /* Wait for the firmware to tell us it is ok to issue a reset. */
4738  bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);
4739 
4740  /* Deposit a driver reset signature so the firmware knows that
4741  * this is a soft reset. */
4742  bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
4744 
4745  /* Do a dummy read to force the chip to complete all current transaction
4746  * before we issue a reset. */
4747  val = REG_RD(bp, BNX2_MISC_ID);
4748 
4749  if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4752  udelay(5);
4753 
4756 
4757  REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4758 
4759  } else {
4763 
4764  /* Chip reset. */
4765  REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4766 
4767  /* Reading back any register after chip reset will hang the
4768  * bus on 5706 A0 and A1. The msleep below provides plenty
4769  * of margin for write posting.
4770  */
4771  if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
4772  (CHIP_ID(bp) == CHIP_ID_5706_A1))
4773  msleep(20);
4774 
4775  /* Reset takes approximate 30 usec */
4776  for (i = 0; i < 10; i++) {
4777  val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
4780  break;
4781  udelay(10);
4782  }
4783 
4786  pr_err("Chip reset did not complete\n");
4787  return -EBUSY;
4788  }
4789  }
4790 
4791  /* Make sure byte swapping is properly configured. */
4792  val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
4793  if (val != 0x01020304) {
4794  pr_err("Chip not in correct endian mode\n");
4795  return -ENODEV;
4796  }
4797 
4798  /* Wait for the firmware to finish its initialization. */
4799  rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
4800  if (rc)
4801  return rc;
4802 
4803  spin_lock_bh(&bp->phy_lock);
4804  old_port = bp->phy_port;
4805  bnx2_init_fw_cap(bp);
4807  old_port != bp->phy_port)
4808  bnx2_set_default_remote_link(bp);
4809  spin_unlock_bh(&bp->phy_lock);
4810 
4811  if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4812  /* Adjust the voltage regular to two steps lower. The default
4813  * of this register is 0x0000000e. */
4814  REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
4815 
4816  /* Remove bad rbuf memory from the free pool. */
4817  rc = bnx2_alloc_bad_rbuf(bp);
4818  }
4819 
4820  if (bp->flags & BNX2_FLAG_USING_MSIX) {
4821  bnx2_setup_msix_tbl(bp);
4822  /* Prevent MSIX table reads and write from timing out */
4825  }
4826 
4827  return rc;
4828 }
4829 
4830 static int
4831 bnx2_init_chip(struct bnx2 *bp)
4832 {
4833  u32 val, mtu;
4834  int rc, i;
4835 
4836  /* Make sure the interrupt is not active. */
4838 
4841 #ifdef __BIG_ENDIAN
4843 #endif
4845  DMA_READ_CHANS << 12 |
4846  DMA_WRITE_CHANS << 16;
4847 
4848  val |= (0x2 << 20) | (1 << 11);
4849 
4850  if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
4851  val |= (1 << 23);
4852 
4853  if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
4854  (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
4856 
4857  REG_WR(bp, BNX2_DMA_CONFIG, val);
4858 
4859  if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4860  val = REG_RD(bp, BNX2_TDMA_CONFIG);
4861  val |= BNX2_TDMA_CONFIG_ONE_DMA;
4862  REG_WR(bp, BNX2_TDMA_CONFIG, val);
4863  }
4864 
4865  if (bp->flags & BNX2_FLAG_PCIX) {
4866  u16 val16;
4867 
4868  pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4869  &val16);
4870  pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4871  val16 & ~PCI_X_CMD_ERO);
4872  }
4873 
4878 
4879  /* Initialize context mapping and zero out the quick contexts. The
4880  * context block must have already been enabled. */
4881  if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4882  rc = bnx2_init_5709_context(bp);
4883  if (rc)
4884  return rc;
4885  } else
4886  bnx2_init_context(bp);
4887 
4888  if ((rc = bnx2_init_cpus(bp)) != 0)
4889  return rc;
4890 
4891  bnx2_init_nvram(bp);
4892 
4893  bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
4894 
4895  val = REG_RD(bp, BNX2_MQ_CONFIG);
4898  if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4900  if (CHIP_REV(bp) == CHIP_REV_Ax)
4901  val |= BNX2_MQ_CONFIG_HALT_DIS;
4902  }
4903 
4904  REG_WR(bp, BNX2_MQ_CONFIG, val);
4905 
4906  val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4908  REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4909 
4910  val = (BCM_PAGE_BITS - 8) << 24;
4911  REG_WR(bp, BNX2_RV2P_CONFIG, val);
4912 
4913  /* Configure page size. */
4914  val = REG_RD(bp, BNX2_TBDR_CONFIG);
4916  val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
4917  REG_WR(bp, BNX2_TBDR_CONFIG, val);
4918 
4919  val = bp->mac_addr[0] +
4920  (bp->mac_addr[1] << 8) +
4921  (bp->mac_addr[2] << 16) +
4922  bp->mac_addr[3] +
4923  (bp->mac_addr[4] << 8) +
4924  (bp->mac_addr[5] << 16);
4925  REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4926 
4927  /* Program the MTU. Also include 4 bytes for CRC32. */
4928  mtu = bp->dev->mtu;
4929  val = mtu + ETH_HLEN + ETH_FCS_LEN;
4930  if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4932  REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4933 
4934  if (mtu < 1500)
4935  mtu = 1500;
4936 
4937  bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
4938  bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
4939  bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));
4940 
4941  memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
4942  for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
4943  bp->bnx2_napi[i].last_status_idx = 0;
4944 
4945  bp->idle_chk_status_idx = 0xffff;
4946 
4948 
4949  /* Set up how to generate a link change interrupt. */
4951 
4953  (u64) bp->status_blk_mapping & 0xffffffff);
4955 
4957  (u64) bp->stats_blk_mapping & 0xffffffff);
4959  (u64) bp->stats_blk_mapping >> 32);
4960 
4962  (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4963 
4965  (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4966 
4968  (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4969 
4970  REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4971 
4972  REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4973 
4975  (bp->com_ticks_int << 16) | bp->com_ticks);
4976 
4978  (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4979 
4980  if (bp->flags & BNX2_FLAG_BROKEN_STATS)
4981  REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
4982  else
4984  REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
4985 
4986  if (CHIP_ID(bp) == CHIP_ID_5706_A1)
4988  else {
4991  }
4992 
4993  if (bp->flags & BNX2_FLAG_USING_MSIX) {
4996 
4998  }
4999 
5000  if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
5002 
5003  REG_WR(bp, BNX2_HC_CONFIG, val);
5004 
5005  if (bp->rx_ticks < 25)
5006  bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 1);
5007  else
5008  bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 0);
5009 
5010  for (i = 1; i < bp->irq_nvecs; i++) {
5011  u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
5013 
5014  REG_WR(bp, base,
5018 
5020  (bp->tx_quick_cons_trip_int << 16) |
5021  bp->tx_quick_cons_trip);
5022 
5023  REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
5024  (bp->tx_ticks_int << 16) | bp->tx_ticks);
5025 
5027  (bp->rx_quick_cons_trip_int << 16) |
5028  bp->rx_quick_cons_trip);
5029 
5030  REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
5031  (bp->rx_ticks_int << 16) | bp->rx_ticks);
5032  }
5033 
5034  /* Clear internal stats counters. */
5036 
5037  REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
5038 
5039  /* Initialize the receive filter. */
5040  bnx2_set_rx_mode(bp->dev);
5041 
5042  if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5043  val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
5045  REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
5046  }
5047  rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
5048  1, 0);
5049 
5052 
5053  udelay(20);
5054 
5055  bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
5056 
5057  return rc;
5058 }
5059 
5060 static void
5061 bnx2_clear_ring_states(struct bnx2 *bp)
5062 {
5063  struct bnx2_napi *bnapi;
5064  struct bnx2_tx_ring_info *txr;
5065  struct bnx2_rx_ring_info *rxr;
5066  int i;
5067 
5068  for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5069  bnapi = &bp->bnx2_napi[i];
5070  txr = &bnapi->tx_ring;
5071  rxr = &bnapi->rx_ring;
5072 
5073  txr->tx_cons = 0;
5074  txr->hw_tx_cons = 0;
5075  rxr->rx_prod_bseq = 0;
5076  rxr->rx_prod = 0;
5077  rxr->rx_cons = 0;
5078  rxr->rx_pg_prod = 0;
5079  rxr->rx_pg_cons = 0;
5080  }
5081 }
5082 
5083 static void
5084 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
5085 {
5086  u32 val, offset0, offset1, offset2, offset3;
5087  u32 cid_addr = GET_CID_ADDR(cid);
5088 
5089  if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5090  offset0 = BNX2_L2CTX_TYPE_XI;
5091  offset1 = BNX2_L2CTX_CMD_TYPE_XI;
5092  offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
5093  offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
5094  } else {
5095  offset0 = BNX2_L2CTX_TYPE;
5096  offset1 = BNX2_L2CTX_CMD_TYPE;
5097  offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
5098  offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
5099  }
5101  bnx2_ctx_wr(bp, cid_addr, offset0, val);
5102 
5103  val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
5104  bnx2_ctx_wr(bp, cid_addr, offset1, val);
5105 
5106  val = (u64) txr->tx_desc_mapping >> 32;
5107  bnx2_ctx_wr(bp, cid_addr, offset2, val);
5108 
5109  val = (u64) txr->tx_desc_mapping & 0xffffffff;
5110  bnx2_ctx_wr(bp, cid_addr, offset3, val);
5111 }
5112 
5113 static void
5114 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
5115 {
5116  struct tx_bd *txbd;
5117  u32 cid = TX_CID;
5118  struct bnx2_napi *bnapi;
5119  struct bnx2_tx_ring_info *txr;
5120 
5121  bnapi = &bp->bnx2_napi[ring_num];
5122  txr = &bnapi->tx_ring;
5123 
5124  if (ring_num == 0)
5125  cid = TX_CID;
5126  else
5127  cid = TX_TSS_CID + ring_num - 1;
5128 
5129  bp->tx_wake_thresh = bp->tx_ring_size / 2;
5130 
5131  txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];
5132 
5133  txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
5134  txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
5135 
5136  txr->tx_prod = 0;
5137  txr->tx_prod_bseq = 0;
5138 
5141 
5142  bnx2_init_tx_context(bp, cid, txr);
5143 }
5144 
5145 static void
5146 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
5147  int num_rings)
5148 {
5149  int i;
5150  struct rx_bd *rxbd;
5151 
5152  for (i = 0; i < num_rings; i++) {
5153  int j;
5154 
5155  rxbd = &rx_ring[i][0];
5156  for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
5157  rxbd->rx_bd_len = buf_size;
5159  }
5160  if (i == (num_rings - 1))
5161  j = 0;
5162  else
5163  j = i + 1;
5164  rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5165  rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
5166  }
5167 }
5168 
5169 static void
5170 bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
5171 {
5172  int i;
5173  u16 prod, ring_prod;
5174  u32 cid, rx_cid_addr, val;
5175  struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
5176  struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5177 
5178  if (ring_num == 0)
5179  cid = RX_CID;
5180  else
5181  cid = RX_RSS_CID + ring_num - 1;
5182 
5183  rx_cid_addr = GET_CID_ADDR(cid);
5184 
5185  bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
5186  bp->rx_buf_use_size, bp->rx_max_ring);
5187 
5188  bnx2_init_rx_context(bp, cid);
5189 
5190  if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5191  val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
5193  }
5194 
5195  bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
5196  if (bp->rx_pg_ring_size) {
5197  bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
5198  rxr->rx_pg_desc_mapping,
5199  PAGE_SIZE, bp->rx_max_pg_ring);
5200  val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
5201  bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
5202  bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
5203  BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);
5204 
5205  val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
5206  bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
5207 
5208  val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
5209  bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
5210 
5211  if (CHIP_NUM(bp) == CHIP_NUM_5709)
5213  }
5214 
5215  val = (u64) rxr->rx_desc_mapping[0] >> 32;
5216  bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
5217 
5218  val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
5219  bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
5220 
5221  ring_prod = prod = rxr->rx_pg_prod;
5222  for (i = 0; i < bp->rx_pg_ring_size; i++) {
5223  if (bnx2_alloc_rx_page(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
5224  netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
5225  ring_num, i, bp->rx_pg_ring_size);
5226  break;
5227  }
5228  prod = NEXT_RX_BD(prod);
5229  ring_prod = RX_PG_RING_IDX(prod);
5230  }
5231  rxr->rx_pg_prod = prod;
5232 
5233  ring_prod = prod = rxr->rx_prod;
5234  for (i = 0; i < bp->rx_ring_size; i++) {
5235  if (bnx2_alloc_rx_data(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
5236  netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
5237  ring_num, i, bp->rx_ring_size);
5238  break;
5239  }
5240  prod = NEXT_RX_BD(prod);
5241  ring_prod = RX_RING_IDX(prod);
5242  }
5243  rxr->rx_prod = prod;
5244 
5248 
5249  REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
5250  REG_WR16(bp, rxr->rx_bidx_addr, prod);
5251 
5252  REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
5253 }
5254 
5255 static void
5256 bnx2_init_all_rings(struct bnx2 *bp)
5257 {
5258  int i;
5259  u32 val;
5260 
5261  bnx2_clear_ring_states(bp);
5262 
5263  REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
5264  for (i = 0; i < bp->num_tx_rings; i++)
5265  bnx2_init_tx_ring(bp, i);
5266 
5267  if (bp->num_tx_rings > 1)
5268  REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
5269  (TX_TSS_CID << 7));
5270 
5271  REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
5272  bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);
5273 
5274  for (i = 0; i < bp->num_rx_rings; i++)
5275  bnx2_init_rx_ring(bp, i);
5276 
5277  if (bp->num_rx_rings > 1) {
5278  u32 tbl_32 = 0;
5279 
5280  for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
5281  int shift = (i % 8) << 2;
5282 
5283  tbl_32 |= (i % (bp->num_rx_rings - 1)) << shift;
5284  if ((i % 8) == 7) {
5285  REG_WR(bp, BNX2_RLUP_RSS_DATA, tbl_32);
5286  REG_WR(bp, BNX2_RLUP_RSS_COMMAND, (i >> 3) |
5290  tbl_32 = 0;
5291  }
5292  }
5293 
5296 
5297  REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);
5298 
5299  }
5300 }
5301 
5302 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5303 {
5304  u32 max, num_rings = 1;
5305 
5306  while (ring_size > MAX_RX_DESC_CNT) {
5307  ring_size -= MAX_RX_DESC_CNT;
5308  num_rings++;
5309  }
5310  /* round to next power of 2 */
5311  max = max_size;
5312  while ((max & num_rings) == 0)
5313  max >>= 1;
5314 
5315  if (num_rings != max)
5316  max <<= 1;
5317 
5318  return max;
5319 }
5320 
5321 static void
5322 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
5323 {
5324  u32 rx_size, rx_space, jumbo_size;
5325 
5326  /* 8 for CRC and VLAN */
5327  rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
5328 
5329  rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
5330  SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5331 
5333  bp->rx_pg_ring_size = 0;
5334  bp->rx_max_pg_ring = 0;
5335  bp->rx_max_pg_ring_idx = 0;
5336  if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
5337  int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
5338 
5339  jumbo_size = size * pages;
5340  if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
5341  jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;
5342 
5343  bp->rx_pg_ring_size = jumbo_size;
5344  bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
5345  MAX_RX_PG_RINGS);
5346  bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
5347  rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
5348  bp->rx_copy_thresh = 0;
5349  }
5350 
5351  bp->rx_buf_use_size = rx_size;
5352  /* hw alignment + build_skb() overhead*/
5354  NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5355  bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
5356  bp->rx_ring_size = size;
5357  bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
5358  bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
5359 }
5360 
5361 static void
5362 bnx2_free_tx_skbs(struct bnx2 *bp)
5363 {
5364  int i;
5365 
5366  for (i = 0; i < bp->num_tx_rings; i++) {
5367  struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5368  struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5369  int j;
5370 
5371  if (txr->tx_buf_ring == NULL)
5372  continue;
5373 
5374  for (j = 0; j < TX_DESC_CNT; ) {
5375  struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
5376  struct sk_buff *skb = tx_buf->skb;
5377  int k, last;
5378 
5379  if (skb == NULL) {
5380  j = NEXT_TX_BD(j);
5381  continue;
5382  }
5383 
5384  dma_unmap_single(&bp->pdev->dev,
5385  dma_unmap_addr(tx_buf, mapping),
5386  skb_headlen(skb),
5388 
5389  tx_buf->skb = NULL;
5390 
5391  last = tx_buf->nr_frags;
5392  j = NEXT_TX_BD(j);
5393  for (k = 0; k < last; k++, j = NEXT_TX_BD(j)) {
5394  tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
5395  dma_unmap_page(&bp->pdev->dev,
5396  dma_unmap_addr(tx_buf, mapping),
5397  skb_frag_size(&skb_shinfo(skb)->frags[k]),
5399  }
5400  dev_kfree_skb(skb);
5401  }
5402  netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
5403  }
5404 }
5405 
5406 static void
5407 bnx2_free_rx_skbs(struct bnx2 *bp)
5408 {
5409  int i;
5410 
5411  for (i = 0; i < bp->num_rx_rings; i++) {
5412  struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5413  struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5414  int j;
5415 
5416  if (rxr->rx_buf_ring == NULL)
5417  return;
5418 
5419  for (j = 0; j < bp->rx_max_ring_idx; j++) {
5420  struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5421  u8 *data = rx_buf->data;
5422 
5423  if (data == NULL)
5424  continue;
5425 
5426  dma_unmap_single(&bp->pdev->dev,
5427  dma_unmap_addr(rx_buf, mapping),
5428  bp->rx_buf_use_size,
5430 
5431  rx_buf->data = NULL;
5432 
5433  kfree(data);
5434  }
5435  for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5436  bnx2_free_rx_page(bp, rxr, j);
5437  }
5438 }
5439 
5440 static void
5441 bnx2_free_skbs(struct bnx2 *bp)
5442 {
5443  bnx2_free_tx_skbs(bp);
5444  bnx2_free_rx_skbs(bp);
5445 }
5446 
5447 static int
5448 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5449 {
5450  int rc;
5451 
5452  rc = bnx2_reset_chip(bp, reset_code);
5453  bnx2_free_skbs(bp);
5454  if (rc)
5455  return rc;
5456 
5457  if ((rc = bnx2_init_chip(bp)) != 0)
5458  return rc;
5459 
5460  bnx2_init_all_rings(bp);
5461  return 0;
5462 }
5463 
5464 static int
5465 bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5466 {
5467  int rc;
5468 
5469  if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5470  return rc;
5471 
5472  spin_lock_bh(&bp->phy_lock);
5473  bnx2_init_phy(bp, reset_phy);
5474  bnx2_set_link(bp);
5476  bnx2_remote_phy_event(bp);
5477  spin_unlock_bh(&bp->phy_lock);
5478  return 0;
5479 }
5480 
5481 static int
5482 bnx2_shutdown_chip(struct bnx2 *bp)
5483 {
5484  u32 reset_code;
5485 
5486  if (bp->flags & BNX2_FLAG_NO_WOL)
5487  reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5488  else if (bp->wol)
5489  reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5490  else
5491  reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5492 
5493  return bnx2_reset_chip(bp, reset_code);
5494 }
5495 
5496 static int
5497 bnx2_test_registers(struct bnx2 *bp)
5498 {
5499  int ret;
5500  int i, is_5709;
5501  static const struct {
5502  u16 offset;
5503  u16 flags;
5504 #define BNX2_FL_NOT_5709 1
5505  u32 rw_mask;
5506  u32 ro_mask;
5507  } reg_tbl[] = {
5508  { 0x006c, 0, 0x00000000, 0x0000003f },
5509  { 0x0090, 0, 0xffffffff, 0x00000000 },
5510  { 0x0094, 0, 0x00000000, 0x00000000 },
5511 
5512  { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
5513  { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5514  { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5515  { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
5516  { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
5517  { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5518  { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
5519  { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5520  { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5521 
5522  { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5523  { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5524  { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5525  { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5526  { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5527  { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5528 
5529  { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5530  { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
5531  { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
5532 
5533  { 0x1000, 0, 0x00000000, 0x00000001 },
5534  { 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },
5535 
5536  { 0x1408, 0, 0x01c00800, 0x00000000 },
5537  { 0x149c, 0, 0x8000ffff, 0x00000000 },
5538  { 0x14a8, 0, 0x00000000, 0x000001ff },
5539  { 0x14ac, 0, 0x0fffffff, 0x10000000 },
5540  { 0x14b0, 0, 0x00000002, 0x00000001 },
5541  { 0x14b8, 0, 0x00000000, 0x00000000 },
5542  { 0x14c0, 0, 0x00000000, 0x00000009 },
5543  { 0x14c4, 0, 0x00003fff, 0x00000000 },
5544  { 0x14cc, 0, 0x00000000, 0x00000001 },
5545  { 0x14d0, 0, 0xffffffff, 0x00000000 },
5546 
5547  { 0x1800, 0, 0x00000000, 0x00000001 },
5548  { 0x1804, 0, 0x00000000, 0x00000003 },
5549 
5550  { 0x2800, 0, 0x00000000, 0x00000001 },
5551  { 0x2804, 0, 0x00000000, 0x00003f01 },
5552  { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
5553  { 0x2810, 0, 0xffff0000, 0x00000000 },
5554  { 0x2814, 0, 0xffff0000, 0x00000000 },
5555  { 0x2818, 0, 0xffff0000, 0x00000000 },
5556  { 0x281c, 0, 0xffff0000, 0x00000000 },
5557  { 0x2834, 0, 0xffffffff, 0x00000000 },
5558  { 0x2840, 0, 0x00000000, 0xffffffff },
5559  { 0x2844, 0, 0x00000000, 0xffffffff },
5560  { 0x2848, 0, 0xffffffff, 0x00000000 },
5561  { 0x284c, 0, 0xf800f800, 0x07ff07ff },
5562 
5563  { 0x2c00, 0, 0x00000000, 0x00000011 },
5564  { 0x2c04, 0, 0x00000000, 0x00030007 },
5565 
5566  { 0x3c00, 0, 0x00000000, 0x00000001 },
5567  { 0x3c04, 0, 0x00000000, 0x00070000 },
5568  { 0x3c08, 0, 0x00007f71, 0x07f00000 },
5569  { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
5570  { 0x3c10, 0, 0xffffffff, 0x00000000 },
5571  { 0x3c14, 0, 0x00000000, 0xffffffff },
5572  { 0x3c18, 0, 0x00000000, 0xffffffff },
5573  { 0x3c1c, 0, 0xfffff000, 0x00000000 },
5574  { 0x3c20, 0, 0xffffff00, 0x00000000 },
5575 
5576  { 0x5004, 0, 0x00000000, 0x0000007f },
5577  { 0x5008, 0, 0x0f0007ff, 0x00000000 },
5578 
5579  { 0x5c00, 0, 0x00000000, 0x00000001 },
5580  { 0x5c04, 0, 0x00000000, 0x0003000f },
5581  { 0x5c08, 0, 0x00000003, 0x00000000 },
5582  { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
5583  { 0x5c10, 0, 0x00000000, 0xffffffff },
5584  { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
5585  { 0x5c84, 0, 0x00000000, 0x0000f333 },
5586  { 0x5c88, 0, 0x00000000, 0x00077373 },
5587  { 0x5c8c, 0, 0x00000000, 0x0007f737 },
5588 
5589  { 0x6808, 0, 0x0000ff7f, 0x00000000 },
5590  { 0x680c, 0, 0xffffffff, 0x00000000 },
5591  { 0x6810, 0, 0xffffffff, 0x00000000 },
5592  { 0x6814, 0, 0xffffffff, 0x00000000 },
5593  { 0x6818, 0, 0xffffffff, 0x00000000 },
5594  { 0x681c, 0, 0xffffffff, 0x00000000 },
5595  { 0x6820, 0, 0x00ff00ff, 0x00000000 },
5596  { 0x6824, 0, 0x00ff00ff, 0x00000000 },
5597  { 0x6828, 0, 0x00ff00ff, 0x00000000 },
5598  { 0x682c, 0, 0x03ff03ff, 0x00000000 },
5599  { 0x6830, 0, 0x03ff03ff, 0x00000000 },
5600  { 0x6834, 0, 0x03ff03ff, 0x00000000 },
5601  { 0x6838, 0, 0x03ff03ff, 0x00000000 },
5602  { 0x683c, 0, 0x0000ffff, 0x00000000 },
5603  { 0x6840, 0, 0x00000ff0, 0x00000000 },
5604  { 0x6844, 0, 0x00ffff00, 0x00000000 },
5605  { 0x684c, 0, 0xffffffff, 0x00000000 },
5606  { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
5607  { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
5608  { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
5609  { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
5610  { 0x6908, 0, 0x00000000, 0x0001ff0f },
5611  { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
5612 
5613  { 0xffff, 0, 0x00000000, 0x00000000 },
5614  };
5615 
5616  ret = 0;
5617  is_5709 = 0;
5618  if (CHIP_NUM(bp) == CHIP_NUM_5709)
5619  is_5709 = 1;
5620 
5621  for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
5622  u32 offset, rw_mask, ro_mask, save_val, val;
5623  u16 flags = reg_tbl[i].flags;
5624 
5625  if (is_5709 && (flags & BNX2_FL_NOT_5709))
5626  continue;
5627 
5628  offset = (u32) reg_tbl[i].offset;
5629  rw_mask = reg_tbl[i].rw_mask;
5630  ro_mask = reg_tbl[i].ro_mask;
5631 
5632  save_val = readl(bp->regview + offset);
5633 
5634  writel(0, bp->regview + offset);
5635 
5636  val = readl(bp->regview + offset);
5637  if ((val & rw_mask) != 0) {
5638  goto reg_test_err;
5639  }
5640 
5641  if ((val & ro_mask) != (save_val & ro_mask)) {
5642  goto reg_test_err;
5643  }
5644 
5645  writel(0xffffffff, bp->regview + offset);
5646 
5647  val = readl(bp->regview + offset);
5648  if ((val & rw_mask) != rw_mask) {
5649  goto reg_test_err;
5650  }
5651 
5652  if ((val & ro_mask) != (save_val & ro_mask)) {
5653  goto reg_test_err;
5654  }
5655 
5656  writel(save_val, bp->regview + offset);
5657  continue;
5658 
5659 reg_test_err:
5660  writel(save_val, bp->regview + offset);
5661  ret = -ENODEV;
5662  break;
5663  }
5664  return ret;
5665 }
5666 
5667 static int
5668 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5669 {
5670  static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5671  0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5672  int i;
5673 
5674  for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5675  u32 offset;
5676 
5677  for (offset = 0; offset < size; offset += 4) {
5678 
5679  bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5680 
5681  if (bnx2_reg_rd_ind(bp, start + offset) !=
5682  test_pattern[i]) {
5683  return -ENODEV;
5684  }
5685  }
5686  }
5687  return 0;
5688 }
5689 
5690 static int
5691 bnx2_test_memory(struct bnx2 *bp)
5692 {
5693  int ret = 0;
5694  int i;
5695  static struct mem_entry {
5696  u32 offset;
5697  u32 len;
5698  } mem_tbl_5706[] = {
5699  { 0x60000, 0x4000 },
5700  { 0xa0000, 0x3000 },
5701  { 0xe0000, 0x4000 },
5702  { 0x120000, 0x4000 },
5703  { 0x1a0000, 0x4000 },
5704  { 0x160000, 0x4000 },
5705  { 0xffffffff, 0 },
5706  },
5707  mem_tbl_5709[] = {
5708  { 0x60000, 0x4000 },
5709  { 0xa0000, 0x3000 },
5710  { 0xe0000, 0x4000 },
5711  { 0x120000, 0x4000 },
5712  { 0x1a0000, 0x4000 },
5713  { 0xffffffff, 0 },
5714  };
5715  struct mem_entry *mem_tbl;
5716 
5717  if (CHIP_NUM(bp) == CHIP_NUM_5709)
5718  mem_tbl = mem_tbl_5709;
5719  else
5720  mem_tbl = mem_tbl_5706;
5721 
5722  for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5723  if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5724  mem_tbl[i].len)) != 0) {
5725  return ret;
5726  }
5727  }
5728 
5729  return ret;
5730 }
5731 
5732 #define BNX2_MAC_LOOPBACK 0
5733 #define BNX2_PHY_LOOPBACK 1
5734 
5735 static int
5736 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5737 {
5738  unsigned int pkt_size, num_pkts, i;
5739  struct sk_buff *skb;
5740  u8 *data;
5741  unsigned char *packet;
5742  u16 rx_start_idx, rx_idx;
5743  dma_addr_t map;
5744  struct tx_bd *txbd;
5745  struct sw_bd *rx_buf;
5746  struct l2_fhdr *rx_hdr;
5747  int ret = -ENODEV;
5748  struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
5749  struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5750  struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5751 
5752  tx_napi = bnapi;
5753 
5754  txr = &tx_napi->tx_ring;
5755  rxr = &bnapi->rx_ring;
5756  if (loopback_mode == BNX2_MAC_LOOPBACK) {
5757  bp->loopback = MAC_LOOPBACK;
5758  bnx2_set_mac_loopback(bp);
5759  }
5760  else if (loopback_mode == BNX2_PHY_LOOPBACK) {
5762  return 0;
5763 
5764  bp->loopback = PHY_LOOPBACK;
5765  bnx2_set_phy_loopback(bp);
5766  }
5767  else
5768  return -EINVAL;
5769 
5770  pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
5771  skb = netdev_alloc_skb(bp->dev, pkt_size);
5772  if (!skb)
5773  return -ENOMEM;
5774  packet = skb_put(skb, pkt_size);
5775  memcpy(packet, bp->dev->dev_addr, 6);
5776  memset(packet + 6, 0x0, 8);
5777  for (i = 14; i < pkt_size; i++)
5778  packet[i] = (unsigned char) (i & 0xff);
5779 
5780  map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
5782  if (dma_mapping_error(&bp->pdev->dev, map)) {
5783  dev_kfree_skb(skb);
5784  return -EIO;
5785  }
5786 
5787  REG_WR(bp, BNX2_HC_COMMAND,
5789 
5790  REG_RD(bp, BNX2_HC_COMMAND);
5791 
5792  udelay(5);
5793  rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
5794 
5795  num_pkts = 0;
5796 
5797  txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];
5798 
5799  txbd->tx_bd_haddr_hi = (u64) map >> 32;
5800  txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
5801  txbd->tx_bd_mss_nbytes = pkt_size;
5803 
5804  num_pkts++;
5805  txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
5806  txr->tx_prod_bseq += pkt_size;
5807 
5808  REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
5809  REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
5810 
5811  udelay(100);
5812 
5813  REG_WR(bp, BNX2_HC_COMMAND,
5815 
5816  REG_RD(bp, BNX2_HC_COMMAND);
5817 
5818  udelay(5);
5819 
5820  dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE);
5821  dev_kfree_skb(skb);
5822 
5823  if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
5824  goto loopback_test_done;
5825 
5826  rx_idx = bnx2_get_hw_rx_cons(bnapi);
5827  if (rx_idx != rx_start_idx + num_pkts) {
5828  goto loopback_test_done;
5829  }
5830 
5831  rx_buf = &rxr->rx_buf_ring[rx_start_idx];
5832  data = rx_buf->data;
5833 
5834  rx_hdr = get_l2_fhdr(data);
5835  data = (u8 *)rx_hdr + BNX2_RX_OFFSET;
5836 
5837  dma_sync_single_for_cpu(&bp->pdev->dev,
5838  dma_unmap_addr(rx_buf, mapping),
5840 
5841  if (rx_hdr->l2_fhdr_status &
5847 
5848  goto loopback_test_done;
5849  }
5850 
5851  if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
5852  goto loopback_test_done;
5853  }
5854 
5855  for (i = 14; i < pkt_size; i++) {
5856  if (*(data + i) != (unsigned char) (i & 0xff)) {
5857  goto loopback_test_done;
5858  }
5859  }
5860 
5861  ret = 0;
5862 
5863 loopback_test_done:
5864  bp->loopback = 0;
5865  return ret;
5866 }
5867 
5868 #define BNX2_MAC_LOOPBACK_FAILED 1
5869 #define BNX2_PHY_LOOPBACK_FAILED 2
5870 #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
5871  BNX2_PHY_LOOPBACK_FAILED)
5872 
5873 static int
5874 bnx2_test_loopback(struct bnx2 *bp)
5875 {
5876  int rc = 0;
5877 
5878  if (!netif_running(bp->dev))
5879  return BNX2_LOOPBACK_FAILED;
5880 
5881  bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5882  spin_lock_bh(&bp->phy_lock);
5883  bnx2_init_phy(bp, 1);
5884  spin_unlock_bh(&bp->phy_lock);
5885  if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5887  if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5889  return rc;
5890 }
5891 
5892 #define NVRAM_SIZE 0x200
5893 #define CRC32_RESIDUAL 0xdebb20e3
5894 
5895 static int
5896 bnx2_test_nvram(struct bnx2 *bp)
5897 {
5898  __be32 buf[NVRAM_SIZE / 4];
5899  u8 *data = (u8 *) buf;
5900  int rc = 0;
5901  u32 magic, csum;
5902 
5903  if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5904  goto test_nvram_done;
5905 
5906  magic = be32_to_cpu(buf[0]);
5907  if (magic != 0x669955aa) {
5908  rc = -ENODEV;
5909  goto test_nvram_done;
5910  }
5911 
5912  if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5913  goto test_nvram_done;
5914 
5915  csum = ether_crc_le(0x100, data);
5916  if (csum != CRC32_RESIDUAL) {
5917  rc = -ENODEV;
5918  goto test_nvram_done;
5919  }
5920 
5921  csum = ether_crc_le(0x100, data + 0x100);
5922  if (csum != CRC32_RESIDUAL) {
5923  rc = -ENODEV;
5924  }
5925 
5926 test_nvram_done:
5927  return rc;
5928 }
5929 
5930 static int
5931 bnx2_test_link(struct bnx2 *bp)
5932 {
5933  u32 bmsr;
5934 
5935  if (!netif_running(bp->dev))
5936  return -ENODEV;
5937 
5939  if (bp->link_up)
5940  return 0;
5941  return -ENODEV;
5942  }
5943  spin_lock_bh(&bp->phy_lock);
5944  bnx2_enable_bmsr1(bp);
5945  bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5946  bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5947  bnx2_disable_bmsr1(bp);
5948  spin_unlock_bh(&bp->phy_lock);
5949 
5950  if (bmsr & BMSR_LSTATUS) {
5951  return 0;
5952  }
5953  return -ENODEV;
5954 }
5955 
5956 static int
5957 bnx2_test_intr(struct bnx2 *bp)
5958 {
5959  int i;
5960  u16 status_idx;
5961 
5962  if (!netif_running(bp->dev))
5963  return -ENODEV;
5964 
5965  status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5966 
5967  /* This register is not touched during run-time. */
5969  REG_RD(bp, BNX2_HC_COMMAND);
5970 
5971  for (i = 0; i < 10; i++) {
5972  if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5973  status_idx) {
5974 
5975  break;
5976  }
5977 
5979  }
5980  if (i < 10)
5981  return 0;
5982 
5983  return -ENODEV;
5984 }
5985 
5986 /* Determining link for parallel detection. */
5987 static int
5988 bnx2_5706_serdes_has_link(struct bnx2 *bp)
5989 {
5990  u32 mode_ctl, an_dbg, exp;
5991 
5993  return 0;
5994 
5995  bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
5996  bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);
5997 
5998  if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
5999  return 0;
6000 
6001  bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
6002  bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
6003  bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
6004 
6006  return 0;
6007 
6008  bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
6009  bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
6010  bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
6011 
6012  if (exp & MII_EXPAND_REG1_RUDI_C) /* receiving CONFIG */
6013  return 0;
6014 
6015  return 1;
6016 }
6017 
6018 static void
6019 bnx2_5706_serdes_timer(struct bnx2 *bp)
6020 {
6021  int check_link = 1;
6022 
6023  spin_lock(&bp->phy_lock);
6024  if (bp->serdes_an_pending) {
6025  bp->serdes_an_pending--;
6026  check_link = 0;
6027  } else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
6028  u32 bmcr;
6029 
6031 
6032  bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6033 
6034  if (bmcr & BMCR_ANENABLE) {
6035  if (bnx2_5706_serdes_has_link(bp)) {
6036  bmcr &= ~BMCR_ANENABLE;
6037  bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6038  bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
6040  }
6041  }
6042  }
6043  else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
6045  u32 phy2;
6046 
6047  bnx2_write_phy(bp, 0x17, 0x0f01);
6048  bnx2_read_phy(bp, 0x15, &phy2);
6049  if (phy2 & 0x20) {
6050  u32 bmcr;
6051 
6052  bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6053  bmcr |= BMCR_ANENABLE;
6054  bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
6055 
6057  }
6058  } else
6060 
6061  if (check_link) {
6062  u32 val;
6063 
6064  bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
6065  bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
6066  bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
6067 
6068  if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
6069  if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
6070  bnx2_5706s_force_link_dn(bp, 1);
6072  } else
6073  bnx2_set_link(bp);
6074  } else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
6075  bnx2_set_link(bp);
6076  }
6077  spin_unlock(&bp->phy_lock);
6078 }
6079 
6080 static void
6081 bnx2_5708_serdes_timer(struct bnx2 *bp)
6082 {
6084  return;
6085 
6086  if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
6087  bp->serdes_an_pending = 0;
6088  return;
6089  }
6090 
6091  spin_lock(&bp->phy_lock);
6092  if (bp->serdes_an_pending)
6093  bp->serdes_an_pending--;
6094  else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
6095  u32 bmcr;
6096 
6097  bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6098  if (bmcr & BMCR_ANENABLE) {
6099  bnx2_enable_forced_2g5(bp);
6101  } else {
6102  bnx2_disable_forced_2g5(bp);
6103  bp->serdes_an_pending = 2;
6105  }
6106 
6107  } else
6109 
6110  spin_unlock(&bp->phy_lock);
6111 }
6112 
6113 static void
6114 bnx2_timer(unsigned long data)
6115 {
6116  struct bnx2 *bp = (struct bnx2 *) data;
6117 
6118  if (!netif_running(bp->dev))
6119  return;
6120 
6121  if (atomic_read(&bp->intr_sem) != 0)
6122  goto bnx2_restart_timer;
6123 
6126  bnx2_chk_missed_msi(bp);
6127 
6128  bnx2_send_heart_beat(bp);
6129 
6130  bp->stats_blk->stat_FwRxDrop =
6131  bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);
6132 
6133  /* workaround occasional corrupted counters */
6134  if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
6135  REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
6137 
6138  if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
6139  if (CHIP_NUM(bp) == CHIP_NUM_5706)
6140  bnx2_5706_serdes_timer(bp);
6141  else
6142  bnx2_5708_serdes_timer(bp);
6143  }
6144 
6145 bnx2_restart_timer:
6146  mod_timer(&bp->timer, jiffies + bp->current_interval);
6147 }
6148 
6149 static int
6150 bnx2_request_irq(struct bnx2 *bp)
6151 {
6152  unsigned long flags;
6153  struct bnx2_irq *irq;
6154  int rc = 0, i;
6155 
6157  flags = 0;
6158  else
6159  flags = IRQF_SHARED;
6160 
6161  for (i = 0; i < bp->irq_nvecs; i++) {
6162  irq = &bp->irq_tbl[i];
6163  rc = request_irq(irq->vector, irq->handler, flags, irq->name,
6164  &bp->bnx2_napi[i]);
6165  if (rc)
6166  break;
6167  irq->requested = 1;
6168  }
6169  return rc;
6170 }
6171 
6172 static void
6173 __bnx2_free_irq(struct bnx2 *bp)
6174 {
6175  struct bnx2_irq *irq;
6176  int i;
6177 
6178  for (i = 0; i < bp->irq_nvecs; i++) {
6179  irq = &bp->irq_tbl[i];
6180  if (irq->requested)
6181  free_irq(irq->vector, &bp->bnx2_napi[i]);
6182  irq->requested = 0;
6183  }
6184 }
6185 
6186 static void
6187 bnx2_free_irq(struct bnx2 *bp)
6188 {
6189 
6190  __bnx2_free_irq(bp);
6191  if (bp->flags & BNX2_FLAG_USING_MSI)
6192  pci_disable_msi(bp->pdev);
6193  else if (bp->flags & BNX2_FLAG_USING_MSIX)
6194  pci_disable_msix(bp->pdev);
6195 
6197 }
6198 
6199 static void
6200 bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
6201 {
6202  int i, total_vecs, rc;
6203  struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
6204  struct net_device *dev = bp->dev;
6205  const int len = sizeof(bp->irq_tbl[0].name);
6206 
6207  bnx2_setup_msix_tbl(bp);
6211 
6212  /* Need to flush the previous three writes to ensure MSI-X
6213  * is setup properly */
6215 
6216  for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
6217  msix_ent[i].entry = i;
6218  msix_ent[i].vector = 0;
6219  }
6220 
6221  total_vecs = msix_vecs;
6222 #ifdef BCM_CNIC
6223  total_vecs++;
6224 #endif
6225  rc = -ENOSPC;
6226  while (total_vecs >= BNX2_MIN_MSIX_VEC) {
6227  rc = pci_enable_msix(bp->pdev, msix_ent, total_vecs);
6228  if (rc <= 0)
6229  break;
6230  if (rc > 0)
6231  total_vecs = rc;
6232  }
6233 
6234  if (rc != 0)
6235  return;
6236 
6237  msix_vecs = total_vecs;
6238 #ifdef BCM_CNIC
6239  msix_vecs--;
6240 #endif
6241  bp->irq_nvecs = msix_vecs;
6243  for (i = 0; i < total_vecs; i++) {
6244  bp->irq_tbl[i].vector = msix_ent[i].vector;
6245  snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
6246  bp->irq_tbl[i].handler = bnx2_msi_1shot;
6247  }
6248 }
6249 
6250 static int
6251 bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
6252 {
6254  int msix_vecs;
6255 
6256  if (!bp->num_req_rx_rings)
6257  msix_vecs = max(cpus + 1, bp->num_req_tx_rings);
6258  else if (!bp->num_req_tx_rings)
6259  msix_vecs = max(cpus, bp->num_req_rx_rings);
6260  else
6261  msix_vecs = max(bp->num_req_rx_rings, bp->num_req_tx_rings);
6262 
6263  msix_vecs = min(msix_vecs, RX_MAX_RINGS);
6264 
6265  bp->irq_tbl[0].handler = bnx2_interrupt;
6266  strcpy(bp->irq_tbl[0].name, bp->dev->name);
6267  bp->irq_nvecs = 1;
6268  bp->irq_tbl[0].vector = bp->pdev->irq;
6269 
6270  if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
6271  bnx2_enable_msix(bp, msix_vecs);
6272 
6273  if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
6274  !(bp->flags & BNX2_FLAG_USING_MSIX)) {
6275  if (pci_enable_msi(bp->pdev) == 0) {
6276  bp->flags |= BNX2_FLAG_USING_MSI;
6277  if (CHIP_NUM(bp) == CHIP_NUM_5709) {
6279  bp->irq_tbl[0].handler = bnx2_msi_1shot;
6280  } else
6281  bp->irq_tbl[0].handler = bnx2_msi;
6282 
6283  bp->irq_tbl[0].vector = bp->pdev->irq;
6284  }
6285  }
6286 
6287  if (!bp->num_req_tx_rings)
6289  else
6290  bp->num_tx_rings = min(bp->irq_nvecs, bp->num_req_tx_rings);
6291 
6292  if (!bp->num_req_rx_rings)
6293  bp->num_rx_rings = bp->irq_nvecs;
6294  else
6295  bp->num_rx_rings = min(bp->irq_nvecs, bp->num_req_rx_rings);
6296 
6298 
6299  return netif_set_real_num_rx_queues(bp->dev, bp->num_rx_rings);
6300 }
6301 
6302 /* Called with rtnl_lock */
6303 static int
6304 bnx2_open(struct net_device *dev)
6305 {
6306  struct bnx2 *bp = netdev_priv(dev);
6307  int rc;
6308 
6309  rc = bnx2_request_firmware(bp);
6310  if (rc < 0)
6311  goto out;
6312 
6313  netif_carrier_off(dev);
6314 
6315  bnx2_set_power_state(bp, PCI_D0);
6316  bnx2_disable_int(bp);
6317 
6318  rc = bnx2_setup_int_mode(bp, disable_msi);
6319  if (rc)
6320  goto open_err;
6321  bnx2_init_napi(bp);
6322  bnx2_napi_enable(bp);
6323  rc = bnx2_alloc_mem(bp);
6324  if (rc)
6325  goto open_err;
6326 
6327  rc = bnx2_request_irq(bp);
6328  if (rc)
6329  goto open_err;
6330 
6331  rc = bnx2_init_nic(bp, 1);
6332  if (rc)
6333  goto open_err;
6334 
6335  mod_timer(&bp->timer, jiffies + bp->current_interval);
6336 
6337  atomic_set(&bp->intr_sem, 0);
6338 
6339  memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));
6340 
6341  bnx2_enable_int(bp);
6342 
6343  if (bp->flags & BNX2_FLAG_USING_MSI) {
6344  /* Test MSI to make sure it is working
6345  * If MSI test fails, go back to INTx mode
6346  */
6347  if (bnx2_test_intr(bp) != 0) {
6348  netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n");
6349 
6350  bnx2_disable_int(bp);
6351  bnx2_free_irq(bp);
6352 
6353  bnx2_setup_int_mode(bp, 1);
6354 
6355  rc = bnx2_init_nic(bp, 0);
6356 
6357  if (!rc)
6358  rc = bnx2_request_irq(bp);
6359 
6360  if (rc) {
6361  del_timer_sync(&bp->timer);
6362  goto open_err;
6363  }
6364  bnx2_enable_int(bp);
6365  }
6366  }
6367  if (bp->flags & BNX2_FLAG_USING_MSI)
6368  netdev_info(dev, "using MSI\n");
6369  else if (bp->flags & BNX2_FLAG_USING_MSIX)
6370  netdev_info(dev, "using MSIX\n");
6371 
6372  netif_tx_start_all_queues(dev);
6373 out:
6374  return rc;
6375 
6376 open_err:
6377  bnx2_napi_disable(bp);
6378  bnx2_free_skbs(bp);
6379  bnx2_free_irq(bp);
6380  bnx2_free_mem(bp);
6381  bnx2_del_napi(bp);
6382  bnx2_release_firmware(bp);
6383  goto out;
6384 }
6385 
6386 static void
6387 bnx2_reset_task(struct work_struct *work)
6388 {
6389  struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
6390  int rc;
6391  u16 pcicmd;
6392 
6393  rtnl_lock();
6394  if (!netif_running(bp->dev)) {
6395  rtnl_unlock();
6396  return;
6397  }
6398 
6399  bnx2_netif_stop(bp, true);
6400 
6401  pci_read_config_word(bp->pdev, PCI_COMMAND, &pcicmd);
6402  if (!(pcicmd & PCI_COMMAND_MEMORY)) {
6403  /* in case PCI block has reset */
6404  pci_restore_state(bp->pdev);
6405  pci_save_state(bp->pdev);
6406  }
6407  rc = bnx2_init_nic(bp, 1);
6408  if (rc) {
6409  netdev_err(bp->dev, "failed to reset NIC, closing\n");
6410  bnx2_napi_enable(bp);
6411  dev_close(bp->dev);
6412  rtnl_unlock();
6413  return;
6414  }
6415 
6416  atomic_set(&bp->intr_sem, 1);
6417  bnx2_netif_start(bp, true);
6418  rtnl_unlock();
6419 }
6420 
6421 #define BNX2_FTQ_ENTRY(ftq) { __stringify(ftq##FTQ_CTL), BNX2_##ftq##FTQ_CTL }
6422 
6423 static void
6424 bnx2_dump_ftq(struct bnx2 *bp)
6425 {
6426  int i;
6427  u32 reg, bdidx, cid, valid;
6428  struct net_device *dev = bp->dev;
6429  static const struct ftq_reg {
6430  char *name;
6431  u32 off;
6432  } ftq_arr[] = {
6433  BNX2_FTQ_ENTRY(RV2P_P),
6434  BNX2_FTQ_ENTRY(RV2P_T),
6435  BNX2_FTQ_ENTRY(RV2P_M),
6436  BNX2_FTQ_ENTRY(TBDR_),
6437  BNX2_FTQ_ENTRY(TDMA_),
6438  BNX2_FTQ_ENTRY(TXP_),
6439  BNX2_FTQ_ENTRY(TXP_),
6440  BNX2_FTQ_ENTRY(TPAT_),
6441  BNX2_FTQ_ENTRY(RXP_C),
6442  BNX2_FTQ_ENTRY(RXP_),
6443  BNX2_FTQ_ENTRY(COM_COMXQ_),
6444  BNX2_FTQ_ENTRY(COM_COMTQ_),
6445  BNX2_FTQ_ENTRY(COM_COMQ_),
6446  BNX2_FTQ_ENTRY(CP_CPQ_),
6447  };
6448 
6449  netdev_err(dev, "<--- start FTQ dump --->\n");
6450  for (i = 0; i < ARRAY_SIZE(ftq_arr); i++)
6451  netdev_err(dev, "%s %08x\n", ftq_arr[i].name,
6452  bnx2_reg_rd_ind(bp, ftq_arr[i].off));
6453 
6454  netdev_err(dev, "CPU states:\n");
6455  for (reg = BNX2_TXP_CPU_MODE; reg <= BNX2_CP_CPU_MODE; reg += 0x40000)
6456  netdev_err(dev, "%06x mode %x state %x evt_mask %x pc %x pc %x instr %x\n",
6457  reg, bnx2_reg_rd_ind(bp, reg),
6458  bnx2_reg_rd_ind(bp, reg + 4),
6459  bnx2_reg_rd_ind(bp, reg + 8),
6460  bnx2_reg_rd_ind(bp, reg + 0x1c),
6461  bnx2_reg_rd_ind(bp, reg + 0x1c),
6462  bnx2_reg_rd_ind(bp, reg + 0x20));
6463 
6464  netdev_err(dev, "<--- end FTQ dump --->\n");
6465  netdev_err(dev, "<--- start TBDC dump --->\n");
6466  netdev_err(dev, "TBDC free cnt: %ld\n",
6468  netdev_err(dev, "LINE CID BIDX CMD VALIDS\n");
6469  for (i = 0; i < 0x20; i++) {
6470  int j = 0;
6471 
6472  REG_WR(bp, BNX2_TBDC_BD_ADDR, i);
6476  while ((REG_RD(bp, BNX2_TBDC_COMMAND) &
6477  BNX2_TBDC_COMMAND_CMD_REG_ARB) && j < 100)
6478  j++;
6479 
6480  cid = REG_RD(bp, BNX2_TBDC_CID);
6481  bdidx = REG_RD(bp, BNX2_TBDC_BIDX);
6482  valid = REG_RD(bp, BNX2_TBDC_CAM_OPCODE);
6483  netdev_err(dev, "%02x %06x %04lx %02x [%x]\n",
6484  i, cid, bdidx & BNX2_TBDC_BDIDX_BDIDX,
6485  bdidx >> 24, (valid >> 8) & 0x0ff);
6486  }
6487  netdev_err(dev, "<--- end TBDC dump --->\n");
6488 }
6489 
6490 static void
6491 bnx2_dump_state(struct bnx2 *bp)
6492 {
6493  struct net_device *dev = bp->dev;
6494  u32 val1, val2;
6495 
6496  pci_read_config_dword(bp->pdev, PCI_COMMAND, &val1);
6497  netdev_err(dev, "DEBUG: intr_sem[%x] PCI_CMD[%08x]\n",
6498  atomic_read(&bp->intr_sem), val1);
6499  pci_read_config_dword(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &val1);
6500  pci_read_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, &val2);
6501  netdev_err(dev, "DEBUG: PCI_PM[%08x] PCI_MISC_CFG[%08x]\n", val1, val2);
6502  netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] EMAC_RX_STATUS[%08x]\n",
6505  netdev_err(dev, "DEBUG: RPM_MGMT_PKT_CTRL[%08x]\n",
6507  netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
6509  if (bp->flags & BNX2_FLAG_USING_MSIX)
6510  netdev_err(dev, "DEBUG: PBA[%08x]\n",
6512 }
6513 
6514 static void
6515 bnx2_tx_timeout(struct net_device *dev)
6516 {
6517  struct bnx2 *bp = netdev_priv(dev);
6518 
6519  bnx2_dump_ftq(bp);
6520  bnx2_dump_state(bp);
6521  bnx2_dump_mcp_state(bp);
6522 
6523  /* This allows the netif to be shutdown gracefully before resetting */
6524  schedule_work(&bp->reset_task);
6525 }
6526 
6527 /* Called with netif_tx_lock.
6528  * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
6529  * netif_wake_queue().
6530  */
6531 static netdev_tx_t
6532 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
6533 {
6534  struct bnx2 *bp = netdev_priv(dev);
6536  struct tx_bd *txbd;
6537  struct sw_tx_bd *tx_buf;
6538  u32 len, vlan_tag_flags, last_frag, mss;
6539  u16 prod, ring_prod;
6540  int i;
6541  struct bnx2_napi *bnapi;
6542  struct bnx2_tx_ring_info *txr;
6543  struct netdev_queue *txq;
6544 
6545  /* Determine which tx ring we will be placed on */
6546  i = skb_get_queue_mapping(skb);
6547  bnapi = &bp->bnx2_napi[i];
6548  txr = &bnapi->tx_ring;
6549  txq = netdev_get_tx_queue(dev, i);
6550 
6551  if (unlikely(bnx2_tx_avail(bp, txr) <
6552  (skb_shinfo(skb)->nr_frags + 1))) {
6553  netif_tx_stop_queue(txq);
6554  netdev_err(dev, "BUG! Tx ring full when queue awake!\n");
6555 
6556  return NETDEV_TX_BUSY;
6557  }
6558  len = skb_headlen(skb);
6559  prod = txr->tx_prod;
6560  ring_prod = TX_RING_IDX(prod);
6561 
6562  vlan_tag_flags = 0;
6563  if (skb->ip_summed == CHECKSUM_PARTIAL) {
6564  vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
6565  }
6566 
6567  if (vlan_tx_tag_present(skb)) {
6568  vlan_tag_flags |=
6569  (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
6570  }
6571 
6572  if ((mss = skb_shinfo(skb)->gso_size)) {
6573  u32 tcp_opt_len;
6574  struct iphdr *iph;
6575 
6576  vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
6577 
6578  tcp_opt_len = tcp_optlen(skb);
6579 
6580  if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
6581  u32 tcp_off = skb_transport_offset(skb) -
6582  sizeof(struct ipv6hdr) - ETH_HLEN;
6583 
6584  vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
6586  if (likely(tcp_off == 0))
6587  vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
6588  else {
6589  tcp_off >>= 3;
6590  vlan_tag_flags |= ((tcp_off & 0x3) <<
6592  ((tcp_off & 0x10) <<
6594  mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
6595  }
6596  } else {
6597  iph = ip_hdr(skb);
6598  if (tcp_opt_len || (iph->ihl > 5)) {
6599  vlan_tag_flags |= ((iph->ihl - 5) +
6600  (tcp_opt_len >> 2)) << 8;
6601  }
6602  }
6603  } else
6604  mss = 0;
6605 
6606  mapping = dma_map_single(&bp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
6607  if (dma_mapping_error(&bp->pdev->dev, mapping)) {
6608  dev_kfree_skb(skb);
6609  return NETDEV_TX_OK;
6610  }
6611 
6612  tx_buf = &txr->tx_buf_ring[ring_prod];
6613  tx_buf->skb = skb;
6614  dma_unmap_addr_set(tx_buf, mapping, mapping);
6615 
6616  txbd = &txr->tx_desc_ring[ring_prod];
6617 
6618  txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6619  txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6620  txbd->tx_bd_mss_nbytes = len | (mss << 16);
6621  txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
6622 
6623  last_frag = skb_shinfo(skb)->nr_frags;
6624  tx_buf->nr_frags = last_frag;
6625  tx_buf->is_gso = skb_is_gso(skb);
6626 
6627  for (i = 0; i < last_frag; i++) {
6628  const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6629 
6630  prod = NEXT_TX_BD(prod);
6631  ring_prod = TX_RING_IDX(prod);
6632  txbd = &txr->tx_desc_ring[ring_prod];
6633 
6634  len = skb_frag_size(frag);
6635  mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0, len,
6636  DMA_TO_DEVICE);
6637  if (dma_mapping_error(&bp->pdev->dev, mapping))
6638  goto dma_error;
6639  dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
6640  mapping);
6641 
6642  txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6643  txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6644  txbd->tx_bd_mss_nbytes = len | (mss << 16);
6645  txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
6646 
6647  }
6649 
6650  /* Sync BD data before updating TX mailbox */
6651  wmb();
6652 
6653  netdev_tx_sent_queue(txq, skb->len);
6654 
6655  prod = NEXT_TX_BD(prod);
6656  txr->tx_prod_bseq += skb->len;
6657 
6658  REG_WR16(bp, txr->tx_bidx_addr, prod);
6659  REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
6660 
6661  mmiowb();
6662 
6663  txr->tx_prod = prod;
6664 
6665  if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
6666  netif_tx_stop_queue(txq);
6667 
6668  /* netif_tx_stop_queue() must be done before checking
6669  * tx index in bnx2_tx_avail() below, because in
6670  * bnx2_tx_int(), we update tx index before checking for
6671  * netif_tx_queue_stopped().
6672  */
6673  smp_mb();
6674  if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
6675  netif_tx_wake_queue(txq);
6676  }
6677 
6678  return NETDEV_TX_OK;
6679 dma_error:
6680  /* save value of frag that failed */
6681  last_frag = i;
6682 
6683  /* start back at beginning and unmap skb */
6684  prod = txr->tx_prod;
6685  ring_prod = TX_RING_IDX(prod);
6686  tx_buf = &txr->tx_buf_ring[ring_prod];
6687  tx_buf->skb = NULL;
6688  dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
6689  skb_headlen(skb), PCI_DMA_TODEVICE);
6690 
6691  /* unmap remaining mapped pages */
6692  for (i = 0; i < last_frag; i++) {
6693  prod = NEXT_TX_BD(prod);
6694  ring_prod = TX_RING_IDX(prod);
6695  tx_buf = &txr->tx_buf_ring[ring_prod];
6696  dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
6697  skb_frag_size(&skb_shinfo(skb)->frags[i]),
6699  }
6700 
6701  dev_kfree_skb(skb);
6702  return NETDEV_TX_OK;
6703 }
6704 
6705 /* Called with rtnl_lock */
6706 static int
6707 bnx2_close(struct net_device *dev)
6708 {
6709  struct bnx2 *bp = netdev_priv(dev);
6710 
6711  bnx2_disable_int_sync(bp);
6712  bnx2_napi_disable(bp);
6713  netif_tx_disable(dev);
6714  del_timer_sync(&bp->timer);
6715  bnx2_shutdown_chip(bp);
6716  bnx2_free_irq(bp);
6717  bnx2_free_skbs(bp);
6718  bnx2_free_mem(bp);
6719  bnx2_del_napi(bp);
6720  bp->link_up = 0;
6721  netif_carrier_off(bp->dev);
6722  bnx2_set_power_state(bp, PCI_D3hot);
6723  return 0;
6724 }
6725 
6726 static void
6727 bnx2_save_stats(struct bnx2 *bp)
6728 {
6729  u32 *hw_stats = (u32 *) bp->stats_blk;
6730  u32 *temp_stats = (u32 *) bp->temp_stats_blk;
6731  int i;
6732 
6733  /* The 1st 10 counters are 64-bit counters */
6734  for (i = 0; i < 20; i += 2) {
6735  u32 hi;
6736  u64 lo;
6737 
6738  hi = temp_stats[i] + hw_stats[i];
6739  lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1];
6740  if (lo > 0xffffffff)
6741  hi++;
6742  temp_stats[i] = hi;
6743  temp_stats[i + 1] = lo & 0xffffffff;
6744  }
6745 
6746  for ( ; i < sizeof(struct statistics_block) / 4; i++)
6747  temp_stats[i] += hw_stats[i];
6748 }
6749 
6750 #define GET_64BIT_NET_STATS64(ctr) \
6751  (((u64) (ctr##_hi) << 32) + (u64) (ctr##_lo))
6752 
6753 #define GET_64BIT_NET_STATS(ctr) \
6754  GET_64BIT_NET_STATS64(bp->stats_blk->ctr) + \
6755  GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)
6756 
6757 #define GET_32BIT_NET_STATS(ctr) \
6758  (unsigned long) (bp->stats_blk->ctr + \
6759  bp->temp_stats_blk->ctr)
6760 
6761 static struct rtnl_link_stats64 *
6762 bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
6763 {
6764  struct bnx2 *bp = netdev_priv(dev);
6765 
6766  if (bp->stats_blk == NULL)
6767  return net_stats;
6768 
6769  net_stats->rx_packets =
6770  GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
6771  GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
6772  GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);
6773 
6774  net_stats->tx_packets =
6775  GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
6776  GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
6777  GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);
6778 
6779  net_stats->rx_bytes =
6780  GET_64BIT_NET_STATS(stat_IfHCInOctets);
6781 
6782  net_stats->tx_bytes =
6783  GET_64BIT_NET_STATS(stat_IfHCOutOctets);
6784 
6785  net_stats->multicast =
6786  GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts);
6787 
6788  net_stats->collisions =
6789  GET_32BIT_NET_STATS(stat_EtherStatsCollisions);
6790 
6791  net_stats->rx_length_errors =
6792  GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
6793  GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);
6794 
6795  net_stats->rx_over_errors =
6796  GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
6797  GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);
6798 
6799  net_stats->rx_frame_errors =
6800  GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);
6801 
6802  net_stats->rx_crc_errors =
6803  GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);
6804 
6805  net_stats->rx_errors = net_stats->rx_length_errors +
6806  net_stats->rx_over_errors + net_stats->rx_frame_errors +
6807  net_stats->rx_crc_errors;
6808 
6809  net_stats->tx_aborted_errors =
6810  GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
6811  GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);
6812 
6813  if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
6814  (CHIP_ID(bp) == CHIP_ID_5708_A0))
6815  net_stats->tx_carrier_errors = 0;
6816  else {
6817  net_stats->tx_carrier_errors =
6818  GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
6819  }
6820 
6821  net_stats->tx_errors =
6822  GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
6823  net_stats->tx_aborted_errors +
6824  net_stats->tx_carrier_errors;
6825 
6826  net_stats->rx_missed_errors =
6827  GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
6828  GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
6829  GET_32BIT_NET_STATS(stat_FwRxDrop);
6830 
6831  return net_stats;
6832 }
6833 
6834 /* All ethtool functions called with rtnl_lock */
6835 
6836 static int
6837 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6838 {
6839  struct bnx2 *bp = netdev_priv(dev);
6840  int support_serdes = 0, support_copper = 0;
6841 
6842  cmd->supported = SUPPORTED_Autoneg;
6843  if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6844  support_serdes = 1;
6845  support_copper = 1;
6846  } else if (bp->phy_port == PORT_FIBRE)
6847  support_serdes = 1;
6848  else
6849  support_copper = 1;
6850 
6851  if (support_serdes) {
6852  cmd->supported |= SUPPORTED_1000baseT_Full |
6853  SUPPORTED_FIBRE;
6854  if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
6855  cmd->supported |= SUPPORTED_2500baseX_Full;
6856 
6857  }
6858  if (support_copper) {
6859  cmd->supported |= SUPPORTED_10baseT_Half |
6860  SUPPORTED_10baseT_Full |
6861  SUPPORTED_100baseT_Half |
6862  SUPPORTED_100baseT_Full |
6863  SUPPORTED_1000baseT_Full |
6864  SUPPORTED_TP;
6865 
6866  }
6867 
6868  spin_lock_bh(&bp->phy_lock);
6869  cmd->port = bp->phy_port;
6870  cmd->advertising = bp->advertising;
6871 
6872  if (bp->autoneg & AUTONEG_SPEED) {
6873  cmd->autoneg = AUTONEG_ENABLE;
6874  } else {
6875  cmd->autoneg = AUTONEG_DISABLE;
6876  }
6877 
6878  if (netif_carrier_ok(dev)) {
6879  ethtool_cmd_speed_set(cmd, bp->line_speed);
6880  cmd->duplex = bp->duplex;
6881  }
6882  else {
6883  ethtool_cmd_speed_set(cmd, -1);
6884  cmd->duplex = -1;
6885  }
6886  spin_unlock_bh(&bp->phy_lock);
6887 
6888  cmd->transceiver = XCVR_INTERNAL;
6889  cmd->phy_address = bp->phy_addr;
6890 
6891  return 0;
6892 }
6893 
6894 static int
6895 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6896 {
6897  struct bnx2 *bp = netdev_priv(dev);
6898  u8 autoneg = bp->autoneg;
6899  u8 req_duplex = bp->req_duplex;
6900  u16 req_line_speed = bp->req_line_speed;
6901  u32 advertising = bp->advertising;
6902  int err = -EINVAL;
6903 
6904  spin_lock_bh(&bp->phy_lock);
6905 
6906  if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
6907  goto err_out_unlock;
6908 
6909  if (cmd->port != bp->phy_port &&
6910  !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
6911  goto err_out_unlock;
6912 
6913  /* If device is down, we can store the settings only if the user
6914  * is setting the currently active port.
6915  */
6916  if (!netif_running(dev) && cmd->port != bp->phy_port)
6917  goto err_out_unlock;
6918 
6919  if (cmd->autoneg == AUTONEG_ENABLE) {
6920  autoneg |= AUTONEG_SPEED;
6921 
6922  advertising = cmd->advertising;
6923  if (cmd->port == PORT_TP) {
6924  advertising &= ETHTOOL_ALL_COPPER_SPEED;
6925  if (!advertising)
6926  advertising = ETHTOOL_ALL_COPPER_SPEED;
6927  } else {
6928  advertising &= ETHTOOL_ALL_FIBRE_SPEED;
6929  if (!advertising)
6930  advertising = ETHTOOL_ALL_FIBRE_SPEED;
6931  }
6932  advertising |= ADVERTISED_Autoneg;
6933  }
6934  else {
6935  u32 speed = ethtool_cmd_speed(cmd);
6936  if (cmd->port == PORT_FIBRE) {
6937  if ((speed != SPEED_1000 &&
6938  speed != SPEED_2500) ||
6939  (cmd->duplex != DUPLEX_FULL))
6940  goto err_out_unlock;
6941 
6942  if (speed == SPEED_2500 &&
6943  !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
6944  goto err_out_unlock;
6945  } else if (speed == SPEED_1000 || speed == SPEED_2500)
6946  goto err_out_unlock;
6947 
6948  autoneg &= ~AUTONEG_SPEED;
6949  req_line_speed = speed;
6950  req_duplex = cmd->duplex;
6951  advertising = 0;
6952  }
6953 
6954  bp->autoneg = autoneg;
6955  bp->advertising = advertising;
6956  bp->req_line_speed = req_line_speed;
6957  bp->req_duplex = req_duplex;
6958 
6959  err = 0;
6960  /* If device is down, the new settings will be picked up when it is
6961  * brought up.
6962  */
6963  if (netif_running(dev))
6964  err = bnx2_setup_phy(bp, cmd->port);
6965 
6966 err_out_unlock:
6967  spin_unlock_bh(&bp->phy_lock);
6968 
6969  return err;
6970 }
6971 
6972 static void
6973 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6974 {
6975  struct bnx2 *bp = netdev_priv(dev);
6976 
6977  strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
6978  strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
6979  strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
6980  strlcpy(info->fw_version, bp->fw_version, sizeof(info->fw_version));
6981 }
6982 
6983 #define BNX2_REGDUMP_LEN (32 * 1024)
6984 
6985 static int
6986 bnx2_get_regs_len(struct net_device *dev)
6987 {
6988  return BNX2_REGDUMP_LEN;
6989 }
6990 
6991 static void
6992 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
6993 {
6994  u32 *p = _p, i, offset;
6995  u8 *orig_p = _p;
6996  struct bnx2 *bp = netdev_priv(dev);
6997  static const u32 reg_boundaries[] = {
6998  0x0000, 0x0098, 0x0400, 0x045c,
6999  0x0800, 0x0880, 0x0c00, 0x0c10,
7000  0x0c30, 0x0d08, 0x1000, 0x101c,
7001  0x1040, 0x1048, 0x1080, 0x10a4,
7002  0x1400, 0x1490, 0x1498, 0x14f0,
7003  0x1500, 0x155c, 0x1580, 0x15dc,
7004  0x1600, 0x1658, 0x1680, 0x16d8,
7005  0x1800, 0x1820, 0x1840, 0x1854,
7006  0x1880, 0x1894, 0x1900, 0x1984,
7007  0x1c00, 0x1c0c, 0x1c40, 0x1c54,
7008  0x1c80, 0x1c94, 0x1d00, 0x1d84,
7009  0x2000, 0x2030, 0x23c0, 0x2400,
7010  0x2800, 0x2820, 0x2830, 0x2850,
7011  0x2b40, 0x2c10, 0x2fc0, 0x3058,
7012  0x3c00, 0x3c94, 0x4000, 0x4010,
7013  0x4080, 0x4090, 0x43c0, 0x4458,
7014  0x4c00, 0x4c18, 0x4c40, 0x4c54,
7015  0x4fc0, 0x5010, 0x53c0, 0x5444,
7016  0x5c00, 0x5c18, 0x5c80, 0x5c90,
7017  0x5fc0, 0x6000, 0x6400, 0x6428,
7018  0x6800, 0x6848, 0x684c, 0x6860,
7019  0x6888, 0x6910, 0x8000
7020  };
7021 
7022  regs->version = 0;
7023 
7024  memset(p, 0, BNX2_REGDUMP_LEN);
7025 
7026  if (!netif_running(bp->dev))
7027  return;
7028 
7029  i = 0;
7030  offset = reg_boundaries[0];
7031  p += offset;
7032  while (offset < BNX2_REGDUMP_LEN) {
7033  *p++ = REG_RD(bp, offset);
7034  offset += 4;
7035  if (offset == reg_boundaries[i + 1]) {
7036  offset = reg_boundaries[i + 2];
7037  p = (u32 *) (orig_p + offset);
7038  i += 2;
7039  }
7040  }
7041 }
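/* Editor's note: an illustrative standalone rendering of the dump loop in
 * bnx2_get_regs() above.  reg_boundaries[] is consumed as (start, end)
 * pairs; offsets inside a pair are read from the chip, and the unreadable
 * gaps between pairs are left zeroed in the output buffer.  Hypothetical
 * userspace sketch, assuming a caller-supplied read32() accessor.
 */
#include <stdint.h>
#include <stddef.h>
#include <string.h>

static void dump_ranges(const uint32_t *bounds, size_t npairs, size_t out_len,
			uint32_t (*read32)(uint32_t off), uint8_t *out)
{
	size_t i;
	uint32_t off;

	memset(out, 0, out_len);	/* gaps between ranges stay zeroed */
	for (i = 0; i < npairs; i++) {
		for (off = bounds[2 * i]; off < bounds[2 * i + 1]; off += 4) {
			uint32_t v = read32(off);

			memcpy(out + off, &v, 4); /* no alignment assumption */
		}
	}
}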
7042 
7043 static void
7044 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7045 {
7046  struct bnx2 *bp = netdev_priv(dev);
7047 
7048  if (bp->flags & BNX2_FLAG_NO_WOL) {
7049  wol->supported = 0;
7050  wol->wolopts = 0;
7051  }
7052  else {
7053  wol->supported = WAKE_MAGIC;
7054  if (bp->wol)
7055  wol->wolopts = WAKE_MAGIC;
7056  else
7057  wol->wolopts = 0;
7058  }
7059  memset(&wol->sopass, 0, sizeof(wol->sopass));
7060 }
7061 
7062 static int
7063 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7064 {
7065  struct bnx2 *bp = netdev_priv(dev);
7066 
7067  if (wol->wolopts & ~WAKE_MAGIC)
7068  return -EINVAL;
7069 
7070  if (wol->wolopts & WAKE_MAGIC) {
7071  if (bp->flags & BNX2_FLAG_NO_WOL)
7072  return -EINVAL;
7073 
7074  bp->wol = 1;
7075  }
7076  else {
7077  bp->wol = 0;
7078  }
7079  return 0;
7080 }
7081 
7082 static int
7083 bnx2_nway_reset(struct net_device *dev)
7084 {
7085  struct bnx2 *bp = netdev_priv(dev);
7086  u32 bmcr;
7087 
7088  if (!netif_running(dev))
7089  return -EAGAIN;
7090 
7091  if (!(bp->autoneg & AUTONEG_SPEED)) {
7092  return -EINVAL;
7093  }
7094 
7095  spin_lock_bh(&bp->phy_lock);
7096 
7097  if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
7098  int rc;
7099 
7100  rc = bnx2_setup_remote_phy(bp, bp->phy_port);
7101  spin_unlock_bh(&bp->phy_lock);
7102  return rc;
7103  }
7104 
7105  /* Force a link down visible on the other side */
7106  if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
7107  bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
7108  spin_unlock_bh(&bp->phy_lock);
7109 
7110  msleep(20);
7111 
7112  spin_lock_bh(&bp->phy_lock);
7113 
7114  bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
7115  bp->serdes_an_pending = 1;
7116  mod_timer(&bp->timer, jiffies + bp->current_interval);
7117  }
7118 
7119  bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
7120  bmcr &= ~BMCR_LOOPBACK;
7121  bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
7122 
7123  spin_unlock_bh(&bp->phy_lock);
7124 
7125  return 0;
7126 }
7127 
7128 static u32
7129 bnx2_get_link(struct net_device *dev)
7130 {
7131  struct bnx2 *bp = netdev_priv(dev);
7132 
7133  return bp->link_up;
7134 }
7135 
7136 static int
7137 bnx2_get_eeprom_len(struct net_device *dev)
7138 {
7139  struct bnx2 *bp = netdev_priv(dev);
7140 
7141  if (bp->flash_info == NULL)
7142  return 0;
7143 
7144  return (int) bp->flash_size;
7145 }
7146 
7147 static int
7148 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7149  u8 *eebuf)
7150 {
7151  struct bnx2 *bp = netdev_priv(dev);
7152  int rc;
7153 
7154  if (!netif_running(dev))
7155  return -EAGAIN;
7156 
7157  /* parameters already validated in ethtool_get_eeprom */
7158 
7159  rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
7160 
7161  return rc;
7162 }
7163 
7164 static int
7165 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7166  u8 *eebuf)
7167 {
7168  struct bnx2 *bp = netdev_priv(dev);
7169  int rc;
7170 
7171  if (!netif_running(dev))
7172  return -EAGAIN;
7173 
7174  /* parameters already validated in ethtool_set_eeprom */
7175 
7176  rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
7177 
7178  return rc;
7179 }
7180 
7181 static int
7182 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7183 {
7184  struct bnx2 *bp = netdev_priv(dev);
7185 
7186  memset(coal, 0, sizeof(struct ethtool_coalesce));
7187 
7188  coal->rx_coalesce_usecs = bp->rx_ticks;
7189  coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
7190  coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
7191  coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
7192 
7193  coal->tx_coalesce_usecs = bp->tx_ticks;
7194  coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
7195  coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
7196  coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
7197 
7198  coal->stats_block_coalesce_usecs = bp->stats_ticks;
7199 
7200  return 0;
7201 }
7202 
7203 static int
7204 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7205 {
7206  struct bnx2 *bp = netdev_priv(dev);
7207 
7208  bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
7209  if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
7210 
7211  bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
7212  if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
7213 
7214  bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
7215  if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
7216 
7217  bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
7218  if (bp->rx_quick_cons_trip_int > 0xff)
7219  bp->rx_quick_cons_trip_int = 0xff;
7220 
7221  bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
7222  if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
7223 
7224  bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
7225  if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
7226 
7227  bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
7228  if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
7229 
7230  bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
7231  if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
7232  0xff;
7233 
7234  bp->stats_ticks = coal->stats_block_coalesce_usecs;
7235  if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
7236  if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
7237  bp->stats_ticks = USEC_PER_SEC;
7238  }
7239  if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
7240  bp->stats_ticks &= 0xffff00;
7241 
7242 
7243  if (netif_running(bp->dev)) {
7244  bnx2_netif_stop(bp, true);
7245  bnx2_init_nic(bp, 0);
7246  bnx2_netif_start(bp, true);
7247  }
7248 
7249  return 0;
7250 }
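/* Editor's note: the clamps above suggest the widths of the host coalescing
 * fields -- tick values saturate at 0x3ff (10 bits) and frame counts at
 * 0xff (8 bits); this is an inference from the masks, not from a datasheet.
 * A generic saturating helper expressing the same idea (illustrative only):
 */
#include <stdint.h>

static inline uint16_t clamp_to_field(uint32_t v, uint16_t max)
{
	return v > max ? max : (uint16_t) v;	/* saturate to field width */
}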
7251 
7252 static void
7253 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7254 {
7255  struct bnx2 *bp = netdev_priv(dev);
7256 
7257  ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
7258  ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
7259 
7260  ering->rx_pending = bp->rx_ring_size;
7261  ering->rx_jumbo_pending = bp->rx_pg_ring_size;
7262 
7263  ering->tx_max_pending = MAX_TX_DESC_CNT;
7264  ering->tx_pending = bp->tx_ring_size;
7265 }
7266 
7267 static int
7268 bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx, bool reset_irq)
7269 {
7270  if (netif_running(bp->dev)) {
7271  /* Reset will erase chipset stats; save them */
7272  bnx2_save_stats(bp);
7273 
7274  bnx2_netif_stop(bp, true);
7275  bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
7276  if (reset_irq) {
7277  bnx2_free_irq(bp);
7278  bnx2_del_napi(bp);
7279  } else {
7280  __bnx2_free_irq(bp);
7281  }
7282  bnx2_free_skbs(bp);
7283  bnx2_free_mem(bp);
7284  }
7285 
7286  bnx2_set_rx_ring_size(bp, rx);
7287  bp->tx_ring_size = tx;
7288 
7289  if (netif_running(bp->dev)) {
7290  int rc = 0;
7291 
7292  if (reset_irq) {
7293  rc = bnx2_setup_int_mode(bp, disable_msi);
7294  bnx2_init_napi(bp);
7295  }
7296 
7297  if (!rc)
7298  rc = bnx2_alloc_mem(bp);
7299 
7300  if (!rc)
7301  rc = bnx2_request_irq(bp);
7302 
7303  if (!rc)
7304  rc = bnx2_init_nic(bp, 0);
7305 
7306  if (rc) {
7307  bnx2_napi_enable(bp);
7308  dev_close(bp->dev);
7309  return rc;
7310  }
7311 #ifdef BCM_CNIC
7312  mutex_lock(&bp->cnic_lock);
7313  /* Let cnic know about the new status block. */
7314  if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD)
7315  bnx2_setup_cnic_irq_info(bp);
7316  mutex_unlock(&bp->cnic_lock);
7317 #endif
7318  bnx2_netif_start(bp, true);
7319  }
7320  return 0;
7321 }
7322 
7323 static int
7324 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7325 {
7326  struct bnx2 *bp = netdev_priv(dev);
7327  int rc;
7328 
7329  if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
7330  (ering->tx_pending > MAX_TX_DESC_CNT) ||
7331  (ering->tx_pending <= MAX_SKB_FRAGS)) {
7332 
7333  return -EINVAL;
7334  }
7335  rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending,
7336  false);
7337  return rc;
7338 }
7339 
7340 static void
7341 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7342 {
7343  struct bnx2 *bp = netdev_priv(dev);
7344 
7345  epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
7346  epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
7347  epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
7348 }
7349 
7350 static int
7351 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7352 {
7353  struct bnx2 *bp = netdev_priv(dev);
7354 
7355  bp->req_flow_ctrl = 0;
7356  if (epause->rx_pause)
7357  bp->req_flow_ctrl |= FLOW_CTRL_RX;
7358  if (epause->tx_pause)
7359  bp->req_flow_ctrl |= FLOW_CTRL_TX;
7360 
7361  if (epause->autoneg) {
7362  bp->autoneg |= AUTONEG_FLOW_CTRL;
7363  }
7364  else {
7365  bp->autoneg &= ~AUTONEG_FLOW_CTRL;
7366  }
7367 
7368  if (netif_running(dev)) {
7369  spin_lock_bh(&bp->phy_lock);
7370  bnx2_setup_phy(bp, bp->phy_port);
7371  spin_unlock_bh(&bp->phy_lock);
7372  }
7373 
7374  return 0;
7375 }
7376 
7377 static struct {
7378  char string[ETH_GSTRING_LEN];
7379 } bnx2_stats_str_arr[] = {
7380  { "rx_bytes" },
7381  { "rx_error_bytes" },
7382  { "tx_bytes" },
7383  { "tx_error_bytes" },
7384  { "rx_ucast_packets" },
7385  { "rx_mcast_packets" },
7386  { "rx_bcast_packets" },
7387  { "tx_ucast_packets" },
7388  { "tx_mcast_packets" },
7389  { "tx_bcast_packets" },
7390  { "tx_mac_errors" },
7391  { "tx_carrier_errors" },
7392  { "rx_crc_errors" },
7393  { "rx_align_errors" },
7394  { "tx_single_collisions" },
7395  { "tx_multi_collisions" },
7396  { "tx_deferred" },
7397  { "tx_excess_collisions" },
7398  { "tx_late_collisions" },
7399  { "tx_total_collisions" },
7400  { "rx_fragments" },
7401  { "rx_jabbers" },
7402  { "rx_undersize_packets" },
7403  { "rx_oversize_packets" },
7404  { "rx_64_byte_packets" },
7405  { "rx_65_to_127_byte_packets" },
7406  { "rx_128_to_255_byte_packets" },
7407  { "rx_256_to_511_byte_packets" },
7408  { "rx_512_to_1023_byte_packets" },
7409  { "rx_1024_to_1522_byte_packets" },
7410  { "rx_1523_to_9022_byte_packets" },
7411  { "tx_64_byte_packets" },
7412  { "tx_65_to_127_byte_packets" },
7413  { "tx_128_to_255_byte_packets" },
7414  { "tx_256_to_511_byte_packets" },
7415  { "tx_512_to_1023_byte_packets" },
7416  { "tx_1024_to_1522_byte_packets" },
7417  { "tx_1523_to_9022_byte_packets" },
7418  { "rx_xon_frames" },
7419  { "rx_xoff_frames" },
7420  { "tx_xon_frames" },
7421  { "tx_xoff_frames" },
7422  { "rx_mac_ctrl_frames" },
7423  { "rx_filtered_packets" },
7424  { "rx_ftq_discards" },
7425  { "rx_discards" },
7426  { "rx_fw_discards" },
7427 };
7428 
7429 #define BNX2_NUM_STATS ARRAY_SIZE(bnx2_stats_str_arr)
7430 
7431 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
7432 
7433 static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
7434  STATS_OFFSET32(stat_IfHCInOctets_hi),
7435  STATS_OFFSET32(stat_IfHCInBadOctets_hi),
7436  STATS_OFFSET32(stat_IfHCOutOctets_hi),
7437  STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
7438  STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
7439  STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
7440  STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
7441  STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
7442  STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
7443  STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
7444  STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
7445  STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
7446  STATS_OFFSET32(stat_Dot3StatsFCSErrors),
7447  STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
7448  STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
7449  STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
7450  STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
7451  STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
7452  STATS_OFFSET32(stat_Dot3StatsLateCollisions),
7453  STATS_OFFSET32(stat_EtherStatsCollisions),
7454  STATS_OFFSET32(stat_EtherStatsFragments),
7455  STATS_OFFSET32(stat_EtherStatsJabbers),
7456  STATS_OFFSET32(stat_EtherStatsUndersizePkts),
7457  STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
7458  STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
7459  STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
7460  STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
7461  STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
7462  STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
7463  STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
7464  STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
7465  STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
7466  STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
7467  STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
7468  STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
7469  STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
7470  STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
7471  STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
7472  STATS_OFFSET32(stat_XonPauseFramesReceived),
7473  STATS_OFFSET32(stat_XoffPauseFramesReceived),
7474  STATS_OFFSET32(stat_OutXonSent),
7475  STATS_OFFSET32(stat_OutXoffSent),
7476  STATS_OFFSET32(stat_MacControlFramesReceived),
7477  STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
7478  STATS_OFFSET32(stat_IfInFTQDiscards),
7479  STATS_OFFSET32(stat_IfInMBUFDiscards),
7480  STATS_OFFSET32(stat_FwRxDrop),
7481 };
7482 
7483 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
7484  * skipped because of errata.
7485  */
7486 static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
7487  8,0,8,8,8,8,8,8,8,8,
7488  4,0,4,4,4,4,4,4,4,4,
7489  4,4,4,4,4,4,4,4,4,4,
7490  4,4,4,4,4,4,4,4,4,4,
7491  4,4,4,4,4,4,4,
7492 };
7493 
7494 static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
7495  8,0,8,8,8,8,8,8,8,8,
7496  4,4,4,4,4,4,4,4,4,4,
7497  4,4,4,4,4,4,4,4,4,4,
7498  4,4,4,4,4,4,4,4,4,4,
7499  4,4,4,4,4,4,4,
7500 };
7501 
7502 #define BNX2_NUM_TESTS 6
7503 
7504 static struct {
7505  char string[ETH_GSTRING_LEN];
7506 } bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
7507  { "register_test (offline)" },
7508  { "memory_test (offline)" },
7509  { "loopback_test (offline)" },
7510  { "nvram_test (online)" },
7511  { "interrupt_test (online)" },
7512  { "link_test (online)" },
7513 };
7514 
7515 static int
7516 bnx2_get_sset_count(struct net_device *dev, int sset)
7517 {
7518  switch (sset) {
7519  case ETH_SS_TEST:
7520  return BNX2_NUM_TESTS;
7521  case ETH_SS_STATS:
7522  return BNX2_NUM_STATS;
7523  default:
7524  return -EOPNOTSUPP;
7525  }
7526 }
7527 
7528 static void
7529 bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
7530 {
7531  struct bnx2 *bp = netdev_priv(dev);
7532 
7533  bnx2_set_power_state(bp, PCI_D0);
7534 
7535  memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
7536  if (etest->flags & ETH_TEST_FL_OFFLINE) {
7537  int i;
7538 
7539  bnx2_netif_stop(bp, true);
7540  bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
7541  bnx2_free_skbs(bp);
7542 
7543  if (bnx2_test_registers(bp) != 0) {
7544  buf[0] = 1;
7545  etest->flags |= ETH_TEST_FL_FAILED;
7546  }
7547  if (bnx2_test_memory(bp) != 0) {
7548  buf[1] = 1;
7549  etest->flags |= ETH_TEST_FL_FAILED;
7550  }
7551  if ((buf[2] = bnx2_test_loopback(bp)) != 0)
7552  etest->flags |= ETH_TEST_FL_FAILED;
7553 
7554  if (!netif_running(bp->dev))
7555  bnx2_shutdown_chip(bp);
7556  else {
7557  bnx2_init_nic(bp, 1);
7558  bnx2_netif_start(bp, true);
7559  }
7560 
7561  /* wait for link up */
7562  for (i = 0; i < 7; i++) {
7563  if (bp->link_up)
7564  break;
7565  msleep_interruptible(1000);
7566  }
7567  }
7568 
7569  if (bnx2_test_nvram(bp) != 0) {
7570  buf[3] = 1;
7571  etest->flags |= ETH_TEST_FL_FAILED;
7572  }
7573  if (bnx2_test_intr(bp) != 0) {
7574  buf[4] = 1;
7575  etest->flags |= ETH_TEST_FL_FAILED;
7576  }
7577 
7578  if (bnx2_test_link(bp) != 0) {
7579  buf[5] = 1;
7580  etest->flags |= ETH_TEST_FL_FAILED;
7581 
7582  }
7583  if (!netif_running(bp->dev))
7584  bnx2_set_power_state(bp, PCI_D3hot);
7585 }
7586 
7587 static void
7588 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7589 {
7590  switch (stringset) {
7591  case ETH_SS_STATS:
7592  memcpy(buf, bnx2_stats_str_arr,
7593  sizeof(bnx2_stats_str_arr));
7594  break;
7595  case ETH_SS_TEST:
7596  memcpy(buf, bnx2_tests_str_arr,
7597  sizeof(bnx2_tests_str_arr));
7598  break;
7599  }
7600 }
7601 
7602 static void
7603 bnx2_get_ethtool_stats(struct net_device *dev,
7604  struct ethtool_stats *stats, u64 *buf)
7605 {
7606  struct bnx2 *bp = netdev_priv(dev);
7607  int i;
7608  u32 *hw_stats = (u32 *) bp->stats_blk;
7609  u32 *temp_stats = (u32 *) bp->temp_stats_blk;
7610  u8 *stats_len_arr = NULL;
7611 
7612  if (hw_stats == NULL) {
7613  memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
7614  return;
7615  }
7616 
7617  if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
7618  (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
7619  (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
7620  (CHIP_ID(bp) == CHIP_ID_5708_A0))
7621  stats_len_arr = bnx2_5706_stats_len_arr;
7622  else
7623  stats_len_arr = bnx2_5708_stats_len_arr;
7624 
7625  for (i = 0; i < BNX2_NUM_STATS; i++) {
7626  unsigned long offset;
7627 
7628  if (stats_len_arr[i] == 0) {
7629  /* skip this counter */
7630  buf[i] = 0;
7631  continue;
7632  }
7633 
7634  offset = bnx2_stats_offset_arr[i];
7635  if (stats_len_arr[i] == 4) {
7636  /* 4-byte counter */
7637  buf[i] = (u64) *(hw_stats + offset) +
7638  *(temp_stats + offset);
7639  continue;
7640  }
7641  /* 8-byte counter */
7642  buf[i] = (((u64) *(hw_stats + offset)) << 32) +
7643  *(hw_stats + offset + 1) +
7644  (((u64) *(temp_stats + offset)) << 32) +
7645  *(temp_stats + offset + 1);
7646  }
7647 }
7648 
7649 static int
7650 bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
7651 {
7652  struct bnx2 *bp = netdev_priv(dev);
7653 
7654  switch (state) {
7655  case ETHTOOL_ID_ACTIVE:
7656  bnx2_set_power_state(bp, PCI_D0);
7657 
7658  bp->leds_save = REG_RD(bp, BNX2_MISC_CFG);
7659  REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
7660  return 1; /* cycle on/off once per second */
7661 
7662  case ETHTOOL_ID_ON:
7663  REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
7664  BNX2_EMAC_LED_1000MB_OVERRIDE |
7665  BNX2_EMAC_LED_100MB_OVERRIDE |
7666  BNX2_EMAC_LED_10MB_OVERRIDE |
7667  BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
7668  BNX2_EMAC_LED_TRAFFIC);
7669  break;
7670 
7671  case ETHTOOL_ID_OFF:
7672  REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
7673  break;
7674 
7675  case ETHTOOL_ID_INACTIVE:
7676  REG_WR(bp, BNX2_EMAC_LED, 0);
7677  REG_WR(bp, BNX2_MISC_CFG, bp->leds_save);
7678 
7679  if (!netif_running(dev))
7680  bnx2_set_power_state(bp, PCI_D3hot);
7681  break;
7682  }
7683 
7684  return 0;
7685 }
7686 
7687 static netdev_features_t
7688 bnx2_fix_features(struct net_device *dev, netdev_features_t features)
7689 {
7690  struct bnx2 *bp = netdev_priv(dev);
7691 
7692  if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
7693  features |= NETIF_F_HW_VLAN_RX;
7694 
7695  return features;
7696 }
7697 
7698 static int
7699 bnx2_set_features(struct net_device *dev, netdev_features_t features)
7700 {
7701  struct bnx2 *bp = netdev_priv(dev);
7702 
7703  /* TSO with VLAN tag won't work with current firmware */
7704  if (features & NETIF_F_HW_VLAN_TX)
7705  dev->vlan_features |= (dev->hw_features & NETIF_F_ALL_TSO);
7706  else
7707  dev->vlan_features &= ~NETIF_F_ALL_TSO;
7708 
7709  if ((!!(features & NETIF_F_HW_VLAN_RX) !=
7710  !!(dev->features & NETIF_F_HW_VLAN_RX)) &&
7711  netif_running(dev)) {
7712  bnx2_netif_stop(bp, false);
7713  dev->features = features;
7714  bnx2_set_rx_mode(dev);
7715  bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
7716  bnx2_netif_start(bp, false);
7717  return 1;
7718  }
7719 
7720  return 0;
7721 }
7722 
7723 static void bnx2_get_channels(struct net_device *dev,
7724  struct ethtool_channels *channels)
7725 {
7726  struct bnx2 *bp = netdev_priv(dev);
7727  u32 max_rx_rings = 1;
7728  u32 max_tx_rings = 1;
7729 
7730  if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
7731  max_rx_rings = RX_MAX_RINGS;
7732  max_tx_rings = TX_MAX_RINGS;
7733  }
7734 
7735  channels->max_rx = max_rx_rings;
7736  channels->max_tx = max_tx_rings;
7737  channels->max_other = 0;
7738  channels->max_combined = 0;
7739  channels->rx_count = bp->num_rx_rings;
7740  channels->tx_count = bp->num_tx_rings;
7741  channels->other_count = 0;
7742  channels->combined_count = 0;
7743 }
7744 
7745 static int bnx2_set_channels(struct net_device *dev,
7746  struct ethtool_channels *channels)
7747 {
7748  struct bnx2 *bp = netdev_priv(dev);
7749  u32 max_rx_rings = 1;
7750  u32 max_tx_rings = 1;
7751  int rc = 0;
7752 
7753  if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
7754  max_rx_rings = RX_MAX_RINGS;
7755  max_tx_rings = TX_MAX_RINGS;
7756  }
7757  if (channels->rx_count > max_rx_rings ||
7758  channels->tx_count > max_tx_rings)
7759  return -EINVAL;
7760 
7761  bp->num_req_rx_rings = channels->rx_count;
7762  bp->num_req_tx_rings = channels->tx_count;
7763 
7764  if (netif_running(dev))
7765  rc = bnx2_change_ring_size(bp, bp->rx_ring_size,
7766  bp->tx_ring_size, true);
7767 
7768  return rc;
7769 }
7770 
7771 static const struct ethtool_ops bnx2_ethtool_ops = {
7772  .get_settings = bnx2_get_settings,
7773  .set_settings = bnx2_set_settings,
7774  .get_drvinfo = bnx2_get_drvinfo,
7775  .get_regs_len = bnx2_get_regs_len,
7776  .get_regs = bnx2_get_regs,
7777  .get_wol = bnx2_get_wol,
7778  .set_wol = bnx2_set_wol,
7779  .nway_reset = bnx2_nway_reset,
7780  .get_link = bnx2_get_link,
7781  .get_eeprom_len = bnx2_get_eeprom_len,
7782  .get_eeprom = bnx2_get_eeprom,
7783  .set_eeprom = bnx2_set_eeprom,
7784  .get_coalesce = bnx2_get_coalesce,
7785  .set_coalesce = bnx2_set_coalesce,
7786  .get_ringparam = bnx2_get_ringparam,
7787  .set_ringparam = bnx2_set_ringparam,
7788  .get_pauseparam = bnx2_get_pauseparam,
7789  .set_pauseparam = bnx2_set_pauseparam,
7790  .self_test = bnx2_self_test,
7791  .get_strings = bnx2_get_strings,
7792  .set_phys_id = bnx2_set_phys_id,
7793  .get_ethtool_stats = bnx2_get_ethtool_stats,
7794  .get_sset_count = bnx2_get_sset_count,
7795  .get_channels = bnx2_get_channels,
7796  .set_channels = bnx2_set_channels,
7797 };
7798 
7799 /* Called with rtnl_lock */
7800 static int
7801 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7802 {
7803  struct mii_ioctl_data *data = if_mii(ifr);
7804  struct bnx2 *bp = netdev_priv(dev);
7805  int err;
7806 
7807  switch(cmd) {
7808  case SIOCGMIIPHY:
7809  data->phy_id = bp->phy_addr;
7810 
7811  /* fallthru */
7812  case SIOCGMIIREG: {
7813  u32 mii_regval;
7814 
7815  if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7816  return -EOPNOTSUPP;
7817 
7818  if (!netif_running(dev))
7819  return -EAGAIN;
7820 
7821  spin_lock_bh(&bp->phy_lock);
7822  err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
7823  spin_unlock_bh(&bp->phy_lock);
7824 
7825  data->val_out = mii_regval;
7826 
7827  return err;
7828  }
7829 
7830  case SIOCSMIIREG:
7831  if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7832  return -EOPNOTSUPP;
7833 
7834  if (!netif_running(dev))
7835  return -EAGAIN;
7836 
7837  spin_lock_bh(&bp->phy_lock);
7838  err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
7839  spin_unlock_bh(&bp->phy_lock);
7840 
7841  return err;
7842 
7843  default:
7844  /* do nothing */
7845  break;
7846  }
7847  return -EOPNOTSUPP;
7848 }
7849 
7850 /* Called with rtnl_lock */
7851 static int
7852 bnx2_change_mac_addr(struct net_device *dev, void *p)
7853 {
7854  struct sockaddr *addr = p;
7855  struct bnx2 *bp = netdev_priv(dev);
7856 
7857  if (!is_valid_ether_addr(addr->sa_data))
7858  return -EADDRNOTAVAIL;
7859 
7860  memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7861  if (netif_running(dev))
7862  bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
7863 
7864  return 0;
7865 }
7866 
7867 /* Called with rtnl_lock */
7868 static int
7869 bnx2_change_mtu(struct net_device *dev, int new_mtu)
7870 {
7871  struct bnx2 *bp = netdev_priv(dev);
7872 
7873  if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7874  ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7875  return -EINVAL;
7876 
7877  dev->mtu = new_mtu;
7878  return bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size,
7879  false);
7880 }
7881 
7882 #ifdef CONFIG_NET_POLL_CONTROLLER
7883 static void
7884 poll_bnx2(struct net_device *dev)
7885 {
7886  struct bnx2 *bp = netdev_priv(dev);
7887  int i;
7888 
7889  for (i = 0; i < bp->irq_nvecs; i++) {
7890  struct bnx2_irq *irq = &bp->irq_tbl[i];
7891 
7892  disable_irq(irq->vector);
7893  irq->handler(irq->vector, &bp->bnx2_napi[i]);
7894  enable_irq(irq->vector);
7895  }
7896 }
7897 #endif
7898 
7899 static void __devinit
7900 bnx2_get_5709_media(struct bnx2 *bp)
7901 {
7902  u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
7903  u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
7904  u32 strap;
7905 
7906  if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
7907  return;
7908  else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
7909  bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7910  return;
7911  }
7912 
7913  if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
7914  strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
7915  else
7916  strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
7917 
7918  if (bp->func == 0) {
7919  switch (strap) {
7920  case 0x4:
7921  case 0x5:
7922  case 0x6:
7923  bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7924  return;
7925  }
7926  } else {
7927  switch (strap) {
7928  case 0x1:
7929  case 0x2:
7930  case 0x4:
7931  bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7932  return;
7933  }
7934  }
7935 }
7936 
7937 static void __devinit
7938 bnx2_get_pci_speed(struct bnx2 *bp)
7939 {
7940  u32 reg;
7941 
7942  reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
7943  if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
7944  u32 clkreg;
7945 
7946  bp->flags |= BNX2_FLAG_PCIX;
7947 
7948  clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
7949 
7950  clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
7951  switch (clkreg) {
7952  case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
7953  bp->bus_speed_mhz = 133;
7954  break;
7955 
7956  case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
7957  bp->bus_speed_mhz = 100;
7958  break;
7959 
7960  case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
7961  case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
7962  bp->bus_speed_mhz = 66;
7963  break;
7964 
7965  case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
7966  case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
7967  bp->bus_speed_mhz = 50;
7968  break;
7969 
7970  case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
7971  case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
7972  case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
7973  bp->bus_speed_mhz = 33;
7974  break;
7975  }
7976  }
7977  else {
7978  if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
7979  bp->bus_speed_mhz = 66;
7980  else
7981  bp->bus_speed_mhz = 33;
7982  }
7983 
7984  if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
7985  bp->flags |= BNX2_FLAG_PCI_32BIT;
7986 
7987 }
7988 
7989 static void __devinit
7990 bnx2_read_vpd_fw_ver(struct bnx2 *bp)
7991 {
7992  int rc, i, j;
7993  u8 *data;
7994  unsigned int block_end, rosize, len;
7995 
7996 #define BNX2_VPD_NVRAM_OFFSET 0x300
7997 #define BNX2_VPD_LEN 128
7998 #define BNX2_MAX_VER_SLEN 30
7999 
8000  data = kmalloc(256, GFP_KERNEL);
8001  if (!data)
8002  return;
8003 
8004  rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN,
8005  BNX2_VPD_LEN);
8006  if (rc)
8007  goto vpd_done;
8008 
8009  for (i = 0; i < BNX2_VPD_LEN; i += 4) {
8010  data[i] = data[i + BNX2_VPD_LEN + 3];
8011  data[i + 1] = data[i + BNX2_VPD_LEN + 2];
8012  data[i + 2] = data[i + BNX2_VPD_LEN + 1];
8013  data[i + 3] = data[i + BNX2_VPD_LEN];
8014  }
8015 
8016  i = pci_vpd_find_tag(data, 0, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
8017  if (i < 0)
8018  goto vpd_done;
8019 
8020  rosize = pci_vpd_lrdt_size(&data[i]);
8021  i += PCI_VPD_LRDT_TAG_SIZE;
8022  block_end = i + rosize;
8023 
8024  if (block_end > BNX2_VPD_LEN)
8025  goto vpd_done;
8026 
8027  j = pci_vpd_find_info_keyword(data, i, rosize,
8028  PCI_VPD_RO_KEYWORD_MFR_ID);
8029  if (j < 0)
8030  goto vpd_done;
8031 
8032  len = pci_vpd_info_field_size(&data[j]);
8033 
8034  j += PCI_VPD_INFO_FLD_HDR_SIZE;
8035  if (j + len > block_end || len != 4 ||
8036  memcmp(&data[j], "1028", 4))
8037  goto vpd_done;
8038 
8039  j = pci_vpd_find_info_keyword(data, i, rosize,
8040  PCI_VPD_RO_KEYWORD_VENDOR0);
8041  if (j < 0)
8042  goto vpd_done;
8043 
8044  len = pci_vpd_info_field_size(&data[j]);
8045 
8046  j += PCI_VPD_INFO_FLD_HDR_SIZE;
8047  if (j + len > block_end || len > BNX2_MAX_VER_SLEN)
8048  goto vpd_done;
8049 
8050  memcpy(bp->fw_version, &data[j], len);
8051  bp->fw_version[len] = ' ';
8052 
8053 vpd_done:
8054  kfree(data);
8055 }
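/* Editor's note: the copy loop near the top of bnx2_read_vpd_fw_ver()
 * reverses each 4-byte group because the NVRAM interface returns the VPD
 * area word-swapped.  A standalone sketch of the same transform, with
 * hypothetical names (len is assumed to be a multiple of 4):
 */
#include <stdint.h>
#include <stddef.h>

static void swab32_block(uint8_t *dst, const uint8_t *src, size_t len)
{
	size_t i;

	for (i = 0; i + 3 < len; i += 4) {
		dst[i]     = src[i + 3];	/* reverse bytes within */
		dst[i + 1] = src[i + 2];	/* each 32-bit group */
		dst[i + 2] = src[i + 1];
		dst[i + 3] = src[i];
	}
}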
8056 
8057 static int __devinit
8058 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
8059 {
8060  struct bnx2 *bp;
8061  int rc, i, j;
8062  u32 reg;
8063  u64 dma_mask, persist_dma_mask;
8064  int err;
8065 
8066  SET_NETDEV_DEV(dev, &pdev->dev);
8067  bp = netdev_priv(dev);
8068 
8069  bp->flags = 0;
8070  bp->phy_flags = 0;
8071 
8072  bp->temp_stats_blk =
8073  kzalloc(sizeof(struct statistics_block), GFP_KERNEL);
8074 
8075  if (bp->temp_stats_blk == NULL) {
8076  rc = -ENOMEM;
8077  goto err_out;
8078  }
8079 
8080  /* enable device (incl. PCI PM wakeup), and bus-mastering */
8081  rc = pci_enable_device(pdev);
8082  if (rc) {
8083  dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
8084  goto err_out;
8085  }
8086 
8087  if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
8088  dev_err(&pdev->dev,
8089  "Cannot find PCI device base address, aborting\n");
8090  rc = -ENODEV;
8091  goto err_out_disable;
8092  }
8093 
8094  rc = pci_request_regions(pdev, DRV_MODULE_NAME);
8095  if (rc) {
8096  dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
8097  goto err_out_disable;
8098  }
8099 
8100  pci_set_master(pdev);
8101 
8102  bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
8103  if (bp->pm_cap == 0) {
8104  dev_err(&pdev->dev,
8105  "Cannot find power management capability, aborting\n");
8106  rc = -EIO;
8107  goto err_out_release;
8108  }
8109 
8110  bp->dev = dev;
8111  bp->pdev = pdev;
8112 
8113  spin_lock_init(&bp->phy_lock);
8114  spin_lock_init(&bp->indirect_lock);
8115 #ifdef BCM_CNIC
8116  mutex_init(&bp->cnic_lock);
8117 #endif
8118  INIT_WORK(&bp->reset_task, bnx2_reset_task);
8119 
8120  bp->regview = pci_iomap(pdev, 0, MB_GET_CID_ADDR(TX_TSS_CID +
8121  TX_MAX_TSS_RINGS + 1));
8122  if (!bp->regview) {
8123  dev_err(&pdev->dev, "Cannot map register space, aborting\n");
8124  rc = -ENOMEM;
8125  goto err_out_release;
8126  }
8127 
8128  bnx2_set_power_state(bp, PCI_D0);
8129 
8130  /* Configure byte swap and enable write to the reg_window registers.
8131  * Rely on CPU to do target byte swapping on big endian systems
8132  * The chip's target access swapping will not swap all accesses
8133  */
8134  REG_WR(bp, BNX2_PCICFG_MISC_CONFIG,
8135  BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
8136  BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
8137 
8138  bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
8139 
8140  if (CHIP_NUM(bp) == CHIP_NUM_5709) {
8141  if (!pci_is_pcie(pdev)) {
8142  dev_err(&pdev->dev, "Not PCIE, aborting\n");
8143  rc = -EIO;
8144  goto err_out_unmap;
8145  }
8146  bp->flags |= BNX2_FLAG_PCIE;
8147  if (CHIP_REV(bp) == CHIP_REV_Ax)
8148  bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
8149 
8150  /* AER (Advanced Error Reporting) hooks */
8151  err = pci_enable_pcie_error_reporting(pdev);
8152  if (!err)
8153  bp->flags |= BNX2_FLAG_AER_ENABLED;
8154 
8155  } else {
8156  bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
8157  if (bp->pcix_cap == 0) {
8158  dev_err(&pdev->dev,
8159  "Cannot find PCIX capability, aborting\n");
8160  rc = -EIO;
8161  goto err_out_unmap;
8162  }
8163  bp->flags |= BNX2_FLAG_BROKEN_STATS;
8164  }
8165 
8166  if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
8167  if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
8168  bp->flags |= BNX2_FLAG_MSIX_CAP;
8169  }
8170 
8171  if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
8172  if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
8173  bp->flags |= BNX2_FLAG_MSI_CAP;
8174  }
8175 
8176  /* 5708 cannot support DMA addresses > 40-bit. */
8177  if (CHIP_NUM(bp) == CHIP_NUM_5708)
8178  persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
8179  else
8180  persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
8181 
8182  /* Configure DMA attributes. */
8183  if (pci_set_dma_mask(pdev, dma_mask) == 0) {
8184  dev->features |= NETIF_F_HIGHDMA;
8185  rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
8186  if (rc) {
8187  dev_err(&pdev->dev,
8188  "pci_set_consistent_dma_mask failed, aborting\n");
8189  goto err_out_unmap;
8190  }
8191  } else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
8192  dev_err(&pdev->dev, "System does not support DMA, aborting\n");
8193  goto err_out_unmap;
8194  }
8195 
8196  if (!(bp->flags & BNX2_FLAG_PCIE))
8197  bnx2_get_pci_speed(bp);
8198 
8199  /* 5706A0 may falsely detect SERR and PERR. */
8200  if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
8201  reg = REG_RD(bp, PCI_COMMAND);
8202  reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
8203  REG_WR(bp, PCI_COMMAND, reg);
8204  }
8205  else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
8206  !(bp->flags & BNX2_FLAG_PCIX)) {
8207 
8208  dev_err(&pdev->dev,
8209  "5706 A1 can only be used in a PCIX bus, aborting\n");
8210  goto err_out_unmap;
8211  }
8212 
8213  bnx2_init_nvram(bp);
8214 
8215  reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
8216 
8217  if (bnx2_reg_rd_ind(bp, BNX2_MCP_TOE_ID) & BNX2_MCP_TOE_ID_FUNCTION_ID)
8218  bp->func = 1;
8219 
8220  if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
8221  BNX2_SHM_HDR_SIGNATURE_SIG) {
8222  u32 off = bp->func << 2;
8223 
8224  bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
8225  } else
8226  bp->shmem_base = HOST_VIEW_SHMEM_BASE;
8227 
8228  /* Get the permanent MAC address. First we need to make sure the
8229  * firmware is actually running.
8230  */
8231  reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
8232 
8233  if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
8234  BNX2_DEV_INFO_SIGNATURE_MAGIC) {
8235  dev_err(&pdev->dev, "Firmware not running, aborting\n");
8236  rc = -ENODEV;
8237  goto err_out_unmap;
8238  }
8239 
8240  bnx2_read_vpd_fw_ver(bp);
8241 
8242  j = strlen(bp->fw_version);
8243  reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
8244  for (i = 0; i < 3 && j < 24; i++) {
8245  u8 num, k, skip0;
8246 
8247  if (i == 0) {
8248  bp->fw_version[j++] = 'b';
8249  bp->fw_version[j++] = 'c';
8250  bp->fw_version[j++] = ' ';
8251  }
8252  num = (u8) (reg >> (24 - (i * 8)));
8253  for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
8254  if (num >= k || !skip0 || k == 1) {
8255  bp->fw_version[j++] = (num / k) + '0';
8256  skip0 = 0;
8257  }
8258  }
8259  if (i != 2)
8260  bp->fw_version[j++] = '.';
8261  }
8262  reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
8263  if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
8264  bp->wol = 1;
8265 
8266  if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
8267  bp->flags |= BNX2_FLAG_ASF_ENABLE;
8268 
8269  for (i = 0; i < 30; i++) {
8270  reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8271  if (reg & BNX2_CONDITION_MFW_RUN_MASK)
8272  break;
8273  msleep(10);
8274  }
8275  }
8276  reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8277  reg &= BNX2_CONDITION_MFW_RUN_MASK;
8278  if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
8279  reg != BNX2_CONDITION_MFW_RUN_NONE) {
8280  u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
8281 
8282  if (j < 32)
8283  bp->fw_version[j++] = ' ';
8284  for (i = 0; i < 3 && j < 28; i++) {
8285  reg = bnx2_reg_rd_ind(bp, addr + i * 4);
8286  reg = be32_to_cpu(reg);
8287  memcpy(&bp->fw_version[j], &reg, 4);
8288  j += 4;
8289  }
8290  }
8291 
8292  reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
8293  bp->mac_addr[0] = (u8) (reg >> 8);
8294  bp->mac_addr[1] = (u8) reg;
8295 
8296  reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
8297  bp->mac_addr[2] = (u8) (reg >> 24);
8298  bp->mac_addr[3] = (u8) (reg >> 16);
8299  bp->mac_addr[4] = (u8) (reg >> 8);
8300  bp->mac_addr[5] = (u8) reg;
8301 
8302  bp->tx_ring_size = MAX_TX_DESC_CNT;
8303  bnx2_set_rx_ring_size(bp, 255);
8304 
8305  bp->tx_quick_cons_trip_int = 2;
8306  bp->tx_quick_cons_trip = 20;
8307  bp->tx_ticks_int = 18;
8308  bp->tx_ticks = 80;
8309 
8310  bp->rx_quick_cons_trip_int = 2;
8311  bp->rx_quick_cons_trip = 12;
8312  bp->rx_ticks_int = 18;
8313  bp->rx_ticks = 18;
8314 
8315  bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
8316 
8317  bp->current_interval = BNX2_TIMER_INTERVAL;
8318 
8319  bp->phy_addr = 1;
8320 
8321  /* Disable WOL support if we are running on a SERDES chip. */
8322  if (CHIP_NUM(bp) == CHIP_NUM_5709)
8323  bnx2_get_5709_media(bp);
8324  else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
8325  bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
8326 
8327  bp->phy_port = PORT_TP;
8328  if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
8329  bp->phy_port = PORT_FIBRE;
8330  reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
8331  if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
8332  bp->flags |= BNX2_FLAG_NO_WOL;
8333  bp->wol = 0;
8334  }
8335  if (CHIP_NUM(bp) == CHIP_NUM_5706) {
8336  /* Don't do parallel detect on this board because of
8337  * some board problems. The link will not go down
8338  * if we do parallel detect.
8339  */
8340  if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
8341  pdev->subsystem_device == 0x310c)
8342  bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
8343  } else {
8344  bp->phy_addr = 2;
8345  if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
8346  bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
8347  }
8348  } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
8349  CHIP_NUM(bp) == CHIP_NUM_5708)
8350  bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
8351  else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
8352  (CHIP_REV(bp) == CHIP_REV_Ax ||
8353  CHIP_REV(bp) == CHIP_REV_Bx))
8354  bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
8355 
8356  bnx2_init_fw_cap(bp);
8357 
8358  if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
8359  (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
8360  (CHIP_ID(bp) == CHIP_ID_5708_B1) ||
8361  !(REG_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
8362  bp->flags |= BNX2_FLAG_NO_WOL;
8363  bp->wol = 0;
8364  }
8365 
8366  if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
8367  bp->tx_quick_cons_trip_int =
8368  bp->tx_quick_cons_trip;
8369  bp->tx_ticks_int = bp->tx_ticks;
8370  bp->rx_quick_cons_trip_int =
8371  bp->rx_quick_cons_trip;
8372  bp->rx_ticks_int = bp->rx_ticks;
8373  bp->comp_prod_trip_int = bp->comp_prod_trip;
8374  bp->com_ticks_int = bp->com_ticks;
8375  bp->cmd_ticks_int = bp->cmd_ticks;
8376  }
8377 
8378  /* Disable MSI on 5706 if AMD 8132 bridge is found.
8379  *
8380  * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes
8381  * with byte enables disabled on the unused 32-bit word. This is legal
8382  * but causes problems on the AMD 8132 which will eventually stop
8383  * responding after a while.
8384  *
8385  * AMD believes this incompatibility is unique to the 5706, and
8386  * prefers to locally disable MSI rather than globally disabling it.
8387  */
8388  if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
8389  struct pci_dev *amd_8132 = NULL;
8390 
8391  while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
8392  PCI_DEVICE_ID_AMD_8132_BRIDGE,
8393  amd_8132))) {
8394 
8395  if (amd_8132->revision >= 0x10 &&
8396  amd_8132->revision <= 0x13) {
8397  disable_msi = 1;
8398  pci_dev_put(amd_8132);
8399  break;
8400  }
8401  }
8402  }
8403 
8404  bnx2_set_default_link(bp);
8405  bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
8406 
8407  init_timer(&bp->timer);
8408  bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
8409  bp->timer.data = (unsigned long) bp;
8410  bp->timer.function = bnx2_timer;
8411 
8412 #ifdef BCM_CNIC
8413  if (bnx2_shmem_rd(bp, BNX2_ISCSI_INITIATOR) & BNX2_ISCSI_INITIATOR_EN)
8414  bp->cnic_eth_dev.max_iscsi_conn =
8415  (bnx2_shmem_rd(bp, BNX2_ISCSI_MAX_CONN) &
8416  BNX2_ISCSI_MAX_CONN_MASK) >> BNX2_ISCSI_MAX_CONN_SHIFT;
8417 #endif
8418  pci_save_state(pdev);
8419 
8420  return 0;
8421 
8422 err_out_unmap:
8423  if (bp->flags & BNX2_FLAG_AER_ENABLED) {
8424  pci_disable_pcie_error_reporting(pdev);
8425  bp->flags &= ~BNX2_FLAG_AER_ENABLED;
8426  }
8427 
8428  pci_iounmap(pdev, bp->regview);
8429  bp->regview = NULL;
8430 
8431 err_out_release:
8432  pci_release_regions(pdev);
8433 
8434 err_out_disable:
8435  pci_disable_device(pdev);
8436  pci_set_drvdata(pdev, NULL);
8437 
8438 err_out:
8439  return rc;
8440 }
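/* Editor's note: a standalone sketch of the digit loop bnx2_init_board()
 * uses to render each byte of the bootcode revision as decimal without
 * leading zeros (e.g. 6 -> "6", 33 -> "33").  Hypothetical userspace form:
 */
static int format_u8_decimal(char *out, unsigned char num)
{
	int j = 0, skip0 = 1;
	unsigned int k;

	for (k = 100; k >= 1; num %= k, k /= 10) {
		if (num >= k || !skip0 || k == 1) {
			out[j++] = (num / k) + '0';	/* emit significant digit */
			skip0 = 0;
		}
	}
	out[j] = '\0';
	return j;		/* number of digits written */
}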
8441 
8442 static char * __devinit
8443 bnx2_bus_string(struct bnx2 *bp, char *str)
8444 {
8445  char *s = str;
8446 
8447  if (bp->flags & BNX2_FLAG_PCIE) {
8448  s += sprintf(s, "PCI Express");
8449  } else {
8450  s += sprintf(s, "PCI");
8451  if (bp->flags & BNX2_FLAG_PCIX)
8452  s += sprintf(s, "-X");
8453  if (bp->flags & BNX2_FLAG_PCI_32BIT)
8454  s += sprintf(s, " 32-bit");
8455  else
8456  s += sprintf(s, " 64-bit");
8457  s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
8458  }
8459  return str;
8460 }
8461 
8462 static void
8463 bnx2_del_napi(struct bnx2 *bp)
8464 {
8465  int i;
8466 
8467  for (i = 0; i < bp->irq_nvecs; i++)
8468  netif_napi_del(&bp->bnx2_napi[i].napi);
8469 }
8470 
8471 static void
8472 bnx2_init_napi(struct bnx2 *bp)
8473 {
8474  int i;
8475 
8476  for (i = 0; i < bp->irq_nvecs; i++) {
8477  struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
8478  int (*poll)(struct napi_struct *, int);
8479 
8480  if (i == 0)
8481  poll = bnx2_poll;
8482  else
8483  poll = bnx2_poll_msix;
8484 
8485  netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
8486  bnapi->bp = bp;
8487  }
8488 }
8489 
8490 static const struct net_device_ops bnx2_netdev_ops = {
8491  .ndo_open = bnx2_open,
8492  .ndo_start_xmit = bnx2_start_xmit,
8493  .ndo_stop = bnx2_close,
8494  .ndo_get_stats64 = bnx2_get_stats64,
8495  .ndo_set_rx_mode = bnx2_set_rx_mode,
8496  .ndo_do_ioctl = bnx2_ioctl,
8497  .ndo_validate_addr = eth_validate_addr,
8498  .ndo_set_mac_address = bnx2_change_mac_addr,
8499  .ndo_change_mtu = bnx2_change_mtu,
8500  .ndo_fix_features = bnx2_fix_features,
8501  .ndo_set_features = bnx2_set_features,
8502  .ndo_tx_timeout = bnx2_tx_timeout,
8503 #ifdef CONFIG_NET_POLL_CONTROLLER
8504  .ndo_poll_controller = poll_bnx2,
8505 #endif
8506 };
8507 
8508 static int __devinit
8509 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8510 {
8511  static int version_printed = 0;
8512  struct net_device *dev;
8513  struct bnx2 *bp;
8514  int rc;
8515  char str[40];
8516 
8517  if (version_printed++ == 0)
8518  pr_info("%s", version);
8519 
8520  /* dev zeroed in init_etherdev */
8521  dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
8522  if (!dev)
8523  return -ENOMEM;
8524 
8525  rc = bnx2_init_board(pdev, dev);
8526  if (rc < 0)
8527  goto err_free;
8528 
8529  dev->netdev_ops = &bnx2_netdev_ops;
8530  dev->watchdog_timeo = TX_TIMEOUT;
8531  dev->ethtool_ops = &bnx2_ethtool_ops;
8532 
8533  bp = netdev_priv(dev);
8534 
8535  pci_set_drvdata(pdev, dev);
8536 
8537  memcpy(dev->dev_addr, bp->mac_addr, 6);
8538  memcpy(dev->perm_addr, bp->mac_addr, 6);
8539 
8540  dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
8541  NETIF_F_TSO | NETIF_F_TSO_ECN |
8542  NETIF_F_RXHASH | NETIF_F_RXCSUM;
8543 
8544  if (CHIP_NUM(bp) == CHIP_NUM_5709)
8545  dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
8546 
8547  dev->vlan_features = dev->hw_features;
8548  dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
8549  dev->features |= dev->hw_features;
8550  dev->priv_flags |= IFF_UNICAST_FLT;
8551 
8552  if ((rc = register_netdev(dev))) {
8553  dev_err(&pdev->dev, "Cannot register net device\n");
8554  goto error;
8555  }
8556 
8557  netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, "
8558  "node addr %pM\n", board_info[ent->driver_data].name,
8559  ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
8560  ((CHIP_ID(bp) & 0x0ff0) >> 4),
8561  bnx2_bus_string(bp, str), (long)pci_resource_start(pdev, 0),
8562  pdev->irq, dev->dev_addr);
8563 
8564  return 0;
8565 
8566 error:
8567  pci_iounmap(pdev, bp->regview);
8568  pci_release_regions(pdev);
8569  pci_disable_device(pdev);
8570  pci_set_drvdata(pdev, NULL);
8571 err_free:
8572  free_netdev(dev);
8573  return rc;
8574 }
8575 
8576 static void __devexit
8577 bnx2_remove_one(struct pci_dev *pdev)
8578 {
8579  struct net_device *dev = pci_get_drvdata(pdev);
8580  struct bnx2 *bp = netdev_priv(dev);
8581 
8582  unregister_netdev(dev);
8583 
8584  del_timer_sync(&bp->timer);
8585  cancel_work_sync(&bp->reset_task);
8586 
8587  pci_iounmap(bp->pdev, bp->regview);
8588 
8589  kfree(bp->temp_stats_blk);
8590 
8591  if (bp->flags & BNX2_FLAG_AER_ENABLED) {
8592  pci_disable_pcie_error_reporting(pdev);
8593  bp->flags &= ~BNX2_FLAG_AER_ENABLED;
8594  }
8595 
8596  bnx2_release_firmware(bp);
8597 
8598  free_netdev(dev);
8599 
8600  pci_release_regions(pdev);
8601  pci_disable_device(pdev);
8602  pci_set_drvdata(pdev, NULL);
8603 }
8604 
8605 static int
8606 bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
8607 {
8608  struct net_device *dev = pci_get_drvdata(pdev);
8609  struct bnx2 *bp = netdev_priv(dev);
8610 
8611  /* PCI register 4 needs to be saved whether netif_running() or not.
8612  * MSI address and data need to be saved if using MSI and
8613  * netif_running().
8614  */
8615  pci_save_state(pdev);
8616  if (!netif_running(dev))
8617  return 0;
8618 
8619  cancel_work_sync(&bp->reset_task);
8620  bnx2_netif_stop(bp, true);
8621  netif_device_detach(dev);
8622  del_timer_sync(&bp->timer);
8623  bnx2_shutdown_chip(bp);
8624  bnx2_free_skbs(bp);
8625  bnx2_set_power_state(bp, pci_choose_state(pdev, state));
8626  return 0;
8627 }
8628 
8629 static int
8630 bnx2_resume(struct pci_dev *pdev)
8631 {
8632  struct net_device *dev = pci_get_drvdata(pdev);
8633  struct bnx2 *bp = netdev_priv(dev);
8634 
8635  pci_restore_state(pdev);
8636  if (!netif_running(dev))
8637  return 0;
8638 
8639  bnx2_set_power_state(bp, PCI_D0);
8640  netif_device_attach(dev);
8641  bnx2_init_nic(bp, 1);
8642  bnx2_netif_start(bp, true);
8643  return 0;
8644 }
8645 
8646 /**
8647  * bnx2_io_error_detected - called when PCI error is detected
8648  * @pdev: Pointer to PCI device
8649  * @state: The current pci connection state
8650  *
8651  * This function is called after a PCI bus error affecting
8652  * this device has been detected.
8653  */
8654 static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
8655  pci_channel_state_t state)
8656 {
8657  struct net_device *dev = pci_get_drvdata(pdev);
8658  struct bnx2 *bp = netdev_priv(dev);
8659 
8660  rtnl_lock();
8661  netif_device_detach(dev);
8662 
8663  if (state == pci_channel_io_perm_failure) {
8664  rtnl_unlock();
8665  return PCI_ERS_RESULT_DISCONNECT;
8666  }
8667 
8668  if (netif_running(dev)) {
8669  bnx2_netif_stop(bp, true);
8670  del_timer_sync(&bp->timer);
8671  bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
8672  }
8673 
8674  pci_disable_device(pdev);
8675  rtnl_unlock();
8676 
8677  /* Request a slot reset. */
8678  return PCI_ERS_RESULT_NEED_RESET;
8679 }
8680 
8681 /**
8682  * bnx2_io_slot_reset - called after the pci bus has been reset.
8683  * @pdev: Pointer to PCI device
8684  *
8685  * Restart the card from scratch, as if from a cold-boot.
8686  */
8687 static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
8688 {
8689  struct net_device *dev = pci_get_drvdata(pdev);
8690  struct bnx2 *bp = netdev_priv(dev);
8691  pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
8692  int err;
8693 
8694  rtnl_lock();
8695  if (pci_enable_device(pdev)) {
8696  dev_err(&pdev->dev,
8697  "Cannot re-enable PCI device after reset\n");
8698  result = PCI_ERS_RESULT_DISCONNECT;
8699  } else {
8700  pci_set_master(pdev);
8701  pci_restore_state(pdev);
8702  pci_save_state(pdev);
8703 
8704  if (netif_running(dev)) {
8705  bnx2_set_power_state(bp, PCI_D0);
8706  bnx2_init_nic(bp, 1);
8707  }
8708  result = PCI_ERS_RESULT_RECOVERED;
8709  }
8710  rtnl_unlock();
8711 
8712  if (!(bp->flags & BNX2_FLAG_AER_ENABLED))
8713  return result;
8714 
8715  err = pci_cleanup_aer_uncorrect_error_status(pdev);
8716  if (err) {
8717  dev_err(&pdev->dev,
8718  "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
8719  err); /* non-fatal, continue */
8720  }
8721 
8722  return result;
8723 }
8724 
8725 /**
8726  * bnx2_io_resume - called when traffic can start flowing again.
8727  * @pdev: Pointer to PCI device
8728  *
8729  * This callback is called when the error recovery driver tells us that
8730  * its OK to resume normal operation.
8731  */
8732 static void bnx2_io_resume(struct pci_dev *pdev)
8733 {
8734  struct net_device *dev = pci_get_drvdata(pdev);
8735  struct bnx2 *bp = netdev_priv(dev);
8736 
8737  rtnl_lock();
8738  if (netif_running(dev))
8739  bnx2_netif_start(bp, true);
8740 
8741  netif_device_attach(dev);
8742  rtnl_unlock();
8743 }
8744 
8745 static const struct pci_error_handlers bnx2_err_handler = {
8746  .error_detected = bnx2_io_error_detected,
8747  .slot_reset = bnx2_io_slot_reset,
8748  .resume = bnx2_io_resume,
8749 };
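/* Editor's note: a summary of how the PCI error-recovery core drives the
 * hooks above -- error_detected() quiesces the device and asks for a slot
 * reset, slot_reset() reinitializes it after the link reset, and resume()
 * restarts traffic.  See Documentation/PCI/pci-error-recovery.txt in the
 * kernel tree for the full state machine.
 */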
8750 
8751 static struct pci_driver bnx2_pci_driver = {
8752  .name = DRV_MODULE_NAME,
8753  .id_table = bnx2_pci_tbl,
8754  .probe = bnx2_init_one,
8755  .remove = __devexit_p(bnx2_remove_one),
8756  .suspend = bnx2_suspend,
8757  .resume = bnx2_resume,
8758  .err_handler = &bnx2_err_handler,
8759 };
8760 
8761 static int __init bnx2_init(void)
8762 {
8763  return pci_register_driver(&bnx2_pci_driver);
8764 }
8765 
8766 static void __exit bnx2_cleanup(void)
8767 {
8768  pci_unregister_driver(&bnx2_pci_driver);
8769 }
8770 
8771 module_init(bnx2_init);
8772 module_exit(bnx2_cleanup);
8773 
8774 
8775