Linux Kernel  3.7.1
bnx2.c
1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004-2011 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan ([email protected])
10  */
11 
12 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13 
14 #include <linux/module.h>
15 #include <linux/moduleparam.h>
16 
17 #include <linux/stringify.h>
18 #include <linux/kernel.h>
19 #include <linux/timer.h>
20 #include <linux/errno.h>
21 #include <linux/ioport.h>
22 #include <linux/slab.h>
23 #include <linux/vmalloc.h>
24 #include <linux/interrupt.h>
25 #include <linux/pci.h>
26 #include <linux/init.h>
27 #include <linux/netdevice.h>
28 #include <linux/etherdevice.h>
29 #include <linux/skbuff.h>
30 #include <linux/dma-mapping.h>
31 #include <linux/bitops.h>
32 #include <asm/io.h>
33 #include <asm/irq.h>
34 #include <linux/delay.h>
35 #include <asm/byteorder.h>
36 #include <asm/page.h>
37 #include <linux/time.h>
38 #include <linux/ethtool.h>
39 #include <linux/mii.h>
40 #include <linux/if.h>
41 #include <linux/if_vlan.h>
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/firmware.h>
50 #include <linux/log2.h>
51 #include <linux/aer.h>
52 
53 #if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
54 #define BCM_CNIC 1
55 #include "cnic_if.h"
56 #endif
57 #include "bnx2.h"
58 #include "bnx2_fw.h"
59 
60 #define DRV_MODULE_NAME "bnx2"
61 #define DRV_MODULE_VERSION "2.2.3"
62 #define DRV_MODULE_RELDATE "June 27, 2012"
63 #define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-6.2.3.fw"
64 #define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-6.0.15.fw"
65 #define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-6.2.1b.fw"
66 #define FW_RV2P_FILE_09_Ax "bnx2/bnx2-rv2p-09ax-6.0.17.fw"
67 #define FW_RV2P_FILE_09 "bnx2/bnx2-rv2p-09-6.0.17.fw"
68 
69 #define RUN_AT(x) (jiffies + (x))
70 
71 /* Time in jiffies before concluding the transmitter is hung. */
72 #define TX_TIMEOUT (5*HZ)
73 
74 static char version[] __devinitdata =
75  "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
76 
77 MODULE_AUTHOR("Michael Chan <[email protected]>");
78 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
79 MODULE_LICENSE("GPL");
80 MODULE_VERSION(DRV_MODULE_VERSION);
81 MODULE_FIRMWARE(FW_MIPS_FILE_06);
82 MODULE_FIRMWARE(FW_RV2P_FILE_06);
83 MODULE_FIRMWARE(FW_MIPS_FILE_09);
84 MODULE_FIRMWARE(FW_RV2P_FILE_09);
85 MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);
86 
87 static int disable_msi = 0;
88 
89 module_param(disable_msi, int, 0);
90 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
91 
92 typedef enum {
93  BCM5706 = 0,
94  NC370T,
95  NC370I,
96  BCM5706S,
97  NC370F,
98  BCM5708,
99  BCM5708S,
100  BCM5709,
101  BCM5709S,
102  BCM5716,
103  BCM5716S,
104 } board_t;
105 
106 /* indexed by board_t, above */
107 static struct {
108  char *name;
109 } board_info[] __devinitdata = {
110  { "Broadcom NetXtreme II BCM5706 1000Base-T" },
111  { "HP NC370T Multifunction Gigabit Server Adapter" },
112  { "HP NC370i Multifunction Gigabit Server Adapter" },
113  { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
114  { "HP NC370F Multifunction Gigabit Server Adapter" },
115  { "Broadcom NetXtreme II BCM5708 1000Base-T" },
116  { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
117  { "Broadcom NetXtreme II BCM5709 1000Base-T" },
118  { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
119  { "Broadcom NetXtreme II BCM5716 1000Base-T" },
120  { "Broadcom NetXtreme II BCM5716 1000Base-SX" },
121  };
122 
123 static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
124  { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
125  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
126  { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
127  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
128  { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
129  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
130  { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
131  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
132  { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
133  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
134  { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
135  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
136  { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
137  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
138  { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
139  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
140  { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
141  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
142  { PCI_VENDOR_ID_BROADCOM, 0x163b,
143  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
144  { PCI_VENDOR_ID_BROADCOM, 0x163c,
145  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
146  { 0, }
147 };
148 
149 static const struct flash_spec flash_table[] =
150 {
151 #define BUFFERED_FLAGS (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
152 #define NONBUFFERED_FLAGS (BNX2_NV_WREN)
153  /* Slow EEPROM */
154  {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
155  BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
156  SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
157  "EEPROM - slow"},
158  /* Expansion entry 0001 */
159  {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
160  NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
161  SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
162  "Entry 0001"},
163  /* Saifun SA25F010 (non-buffered flash) */
164  /* strap, cfg1, & write1 need updates */
165  {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
166  NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
167  SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
168  "Non-buffered flash (128kB)"},
169  /* Saifun SA25F020 (non-buffered flash) */
170  /* strap, cfg1, & write1 need updates */
171  {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
172  NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
173  SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
174  "Non-buffered flash (256kB)"},
175  /* Expansion entry 0100 */
176  {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
177  NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
178  SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
179  "Entry 0100"},
180  /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
181  {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
182  NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
183  ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
184  "Entry 0101: ST M45PE10 (128kB non-buffered)"},
185  /* Entry 0110: ST M45PE20 (non-buffered flash)*/
186  {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
187  NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
188  ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
189  "Entry 0110: ST M45PE20 (256kB non-buffered)"},
190  /* Saifun SA25F005 (non-buffered flash) */
191  /* strap, cfg1, & write1 need updates */
192  {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
193  NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
194  SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
195  "Non-buffered flash (64kB)"},
196  /* Fast EEPROM */
197  {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
198  BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
199  SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
200  "EEPROM - fast"},
201  /* Expansion entry 1001 */
202  {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
203  NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
204  SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
205  "Entry 1001"},
206  /* Expansion entry 1010 */
207  {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
208  NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
209  SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
210  "Entry 1010"},
211  /* ATMEL AT45DB011B (buffered flash) */
212  {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
213  BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
214  BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
215  "Buffered flash (128kB)"},
216  /* Expansion entry 1100 */
217  {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
218  NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
219  SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
220  "Entry 1100"},
221  /* Expansion entry 1101 */
222  {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
223  NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
224  SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
225  "Entry 1101"},
226  /* Atmel Expansion entry 1110 */
227  {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
228  BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
229  BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
230  "Entry 1110 (Atmel)"},
231  /* ATMEL AT45DB021B (buffered flash) */
232  {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
233  BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
234  BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
235  "Buffered flash (256kB)"},
236 };
237 
238 static const struct flash_spec flash_5709 = {
239  .flags = BNX2_NV_BUFFERED,
240  .page_bits = BCM5709_FLASH_PAGE_BITS,
241  .page_size = BCM5709_FLASH_PAGE_SIZE,
242  .addr_mask = BCM5709_FLASH_BYTE_ADDR_MASK,
243  .total_size = BUFFERED_FLASH_TOTAL_SIZE*2,
244  .name = "5709 Buffered flash (256kB)",
245 };
246 
247 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
248 
249 static void bnx2_init_napi(struct bnx2 *bp);
250 static void bnx2_del_napi(struct bnx2 *bp);
251 
252 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
253 {
254  u32 diff;
255 
256  /* Tell compiler to fetch tx_prod and tx_cons from memory. */
257  barrier();
258 
259  /* The ring uses 256 indices for 255 entries, one of them
260  * needs to be skipped.
261  */
262  diff = txr->tx_prod - txr->tx_cons;
263  if (unlikely(diff >= TX_DESC_CNT)) {
264  diff &= 0xffff;
265  if (diff == TX_DESC_CNT)
266  diff = MAX_TX_DESC_CNT;
267  }
268  return bp->tx_ring_size - diff;
269 }
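/* Worked example of the accounting in bnx2_tx_avail(), assuming the usual
 * 4K page so that TX_DESC_CNT == 256 and MAX_TX_DESC_CNT == 255 (values
 * from bnx2.h): the indices are free-running 16-bit counters, so with
 * tx_prod == 10 and tx_cons == 65530 the subtraction yields -65520, which
 * trips the (diff >= TX_DESC_CNT) test and is masked with 0xffff to 16,
 * the number of descriptors actually in flight across the wrap.  The
 * (diff == TX_DESC_CNT) special case covers the one index per ring page
 * that is never backed by a real buffer.
 */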
270 
271 static u32
272 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
273 {
274  u32 val;
275 
276  spin_lock_bh(&bp->indirect_lock);
277  REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
278  val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
279  spin_unlock_bh(&bp->indirect_lock);
280  return val;
281 }
282 
283 static void
284 bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
285 {
286  spin_lock_bh(&bp->indirect_lock);
287  REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
288  REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
289  spin_unlock_bh(&bp->indirect_lock);
290 }
291 
292 static void
293 bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
294 {
295  bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
296 }
297 
298 static u32
299 bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
300 {
301  return bnx2_reg_rd_ind(bp, bp->shmem_base + offset);
302 }
303 
304 static void
305 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
306 {
307  offset += cid_addr;
308  spin_lock_bh(&bp->indirect_lock);
309  if (CHIP_NUM(bp) == CHIP_NUM_5709) {
310  int i;
311 
312  REG_WR(bp, BNX2_CTX_CTX_DATA, val);
313  REG_WR(bp, BNX2_CTX_CTX_CTRL,
314  offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
315  for (i = 0; i < 5; i++) {
316  val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
317  if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
318  break;
319  udelay(5);
320  }
321  } else {
322  REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
323  REG_WR(bp, BNX2_CTX_DATA, val);
324  }
325  spin_unlock_bh(&bp->indirect_lock);
326 }
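/* On the 5709, context memory is written through the CTX_CTX_DATA /
 * CTX_CTX_CTRL register pair and the driver polls up to 5 times for the
 * WRITE_REQ bit to clear (the hardware acknowledges the write by clearing
 * it).  Older chips take the simpler address/data path through
 * CTX_DATA_ADR and CTX_DATA with no completion polling.
 */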
327 
328 #ifdef BCM_CNIC
329 static int
330 bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
331 {
332  struct bnx2 *bp = netdev_priv(dev);
333  struct drv_ctl_io *io = &info->data.io;
334 
335  switch (info->cmd) {
336  case DRV_CTL_IO_WR_CMD:
337  bnx2_reg_wr_ind(bp, io->offset, io->data);
338  break;
339  case DRV_CTL_IO_RD_CMD:
340  io->data = bnx2_reg_rd_ind(bp, io->offset);
341  break;
342  case DRV_CTL_CTX_WR_CMD:
343  bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
344  break;
345  default:
346  return -EINVAL;
347  }
348  return 0;
349 }
350 
351 static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
352 {
353  struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
354  struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
355  int sb_id;
356 
357  if (bp->flags & BNX2_FLAG_USING_MSIX) {
358  cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
359  bnapi->cnic_present = 0;
360  sb_id = bp->irq_nvecs;
361  cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
362  } else {
363  cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
364  bnapi->cnic_tag = bnapi->last_status_idx;
365  bnapi->cnic_present = 1;
366  sb_id = 0;
367  cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
368  }
369 
370  cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
371  cp->irq_arr[0].status_blk = (void *)
372  ((unsigned long) bnapi->status_blk.msi +
373  (BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
374  cp->irq_arr[0].status_blk_num = sb_id;
375  cp->num_irq = 1;
376 }
377 
378 static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
379  void *data)
380 {
381  struct bnx2 *bp = netdev_priv(dev);
382  struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
383 
384  if (ops == NULL)
385  return -EINVAL;
386 
387  if (cp->drv_state & CNIC_DRV_STATE_REGD)
388  return -EBUSY;
389 
390  if (!bnx2_reg_rd_ind(bp, BNX2_FW_MAX_ISCSI_CONN))
391  return -ENODEV;
392 
393  bp->cnic_data = data;
394  rcu_assign_pointer(bp->cnic_ops, ops);
395 
396  cp->num_irq = 0;
397  cp->drv_state = CNIC_DRV_STATE_REGD;
398 
399  bnx2_setup_cnic_irq_info(bp);
400 
401  return 0;
402 }
403 
404 static int bnx2_unregister_cnic(struct net_device *dev)
405 {
406  struct bnx2 *bp = netdev_priv(dev);
407  struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
408  struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
409 
410  mutex_lock(&bp->cnic_lock);
411  cp->drv_state = 0;
412  bnapi->cnic_present = 0;
413  RCU_INIT_POINTER(bp->cnic_ops, NULL);
414  mutex_unlock(&bp->cnic_lock);
415  synchronize_rcu();
416  return 0;
417 }
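/* Teardown order matters in bnx2_unregister_cnic(): the ops pointer is
 * cleared under cnic_lock so no new cnic calls can start, then
 * synchronize_rcu() waits out any reader that already fetched the old
 * pointer before the caller may free the cnic module state.
 */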
418 
419 struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
420 {
421  struct bnx2 *bp = netdev_priv(dev);
422  struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
423 
424  if (!cp->max_iscsi_conn)
425  return NULL;
426 
427  cp->drv_owner = THIS_MODULE;
428  cp->chip_id = bp->chip_id;
429  cp->pdev = bp->pdev;
430  cp->io_base = bp->regview;
431  cp->drv_ctl = bnx2_drv_ctl;
432  cp->drv_register_cnic = bnx2_register_cnic;
433  cp->drv_unregister_cnic = bnx2_unregister_cnic;
434 
435  return cp;
436 }
437 EXPORT_SYMBOL(bnx2_cnic_probe);
438 
439 static void
440 bnx2_cnic_stop(struct bnx2 *bp)
441 {
442  struct cnic_ops *c_ops;
443  struct cnic_ctl_info info;
444 
445  mutex_lock(&bp->cnic_lock);
446  c_ops = rcu_dereference_protected(bp->cnic_ops,
447  lockdep_is_held(&bp->cnic_lock));
448  if (c_ops) {
449  info.cmd = CNIC_CTL_STOP_CMD;
450  c_ops->cnic_ctl(bp->cnic_data, &info);
451  }
452  mutex_unlock(&bp->cnic_lock);
453 }
454 
455 static void
456 bnx2_cnic_start(struct bnx2 *bp)
457 {
458  struct cnic_ops *c_ops;
459  struct cnic_ctl_info info;
460 
461  mutex_lock(&bp->cnic_lock);
462  c_ops = rcu_dereference_protected(bp->cnic_ops,
463  lockdep_is_held(&bp->cnic_lock));
464  if (c_ops) {
465  if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
466  struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
467 
468  bnapi->cnic_tag = bnapi->last_status_idx;
469  }
470  info.cmd = CNIC_CTL_START_CMD;
471  c_ops->cnic_ctl(bp->cnic_data, &info);
472  }
473  mutex_unlock(&bp->cnic_lock);
474 }
475 
476 #else
477 
478 static void
479 bnx2_cnic_stop(struct bnx2 *bp)
480 {
481 }
482 
483 static void
484 bnx2_cnic_start(struct bnx2 *bp)
485 {
486 }
487 
488 #endif
489 
490 static int
491 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
492 {
493  u32 val1;
494  int i, ret;
495 
496  if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
497  val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
498  val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
499 
500  REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
501  REG_RD(bp, BNX2_EMAC_MDIO_MODE);
502 
503  udelay(40);
504  }
505 
506  val1 = (bp->phy_addr << 21) | (reg << 16) |
507  BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
508  BNX2_EMAC_MDIO_COMM_START_BUSY;
509  REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
510 
511  for (i = 0; i < 50; i++) {
512  udelay(10);
513 
514  val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
515  if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
516  udelay(5);
517 
518  val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
519  val1 &= BNX2_EMAC_MDIO_COMM_DATA;
520 
521  break;
522  }
523  }
524 
525  if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
526  *val = 0x0;
527  ret = -EBUSY;
528  }
529  else {
530  *val = val1;
531  ret = 0;
532  }
533 
534  if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
535  val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
536  val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
537 
538  REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
539  REG_RD(bp, BNX2_EMAC_MDIO_MODE);
540 
541  udelay(40);
542  }
543 
544  return ret;
545 }
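/* The MDIO accesses above are clause-22 style: PHY address, register and
 * the START_BUSY command are packed into EMAC_MDIO_COMM, then the driver
 * polls up to 50 x 10us for START_BUSY to clear.  When the chip's
 * automatic PHY polling is enabled it is paused around the manual access
 * (the MDIO_MODE adjustments at entry and exit); otherwise the two users
 * would collide on the MDIO bus.
 */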
546 
547 static int
548 bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
549 {
550  u32 val1;
551  int i, ret;
552 
553  if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
554  val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
555  val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
556 
557  REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
558  REG_RD(bp, BNX2_EMAC_MDIO_MODE);
559 
560  udelay(40);
561  }
562 
563  val1 = (bp->phy_addr << 21) | (reg << 16) | val |
564  BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
565  BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
566  REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
567 
568  for (i = 0; i < 50; i++) {
569  udelay(10);
570 
571  val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
572  if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
573  udelay(5);
574  break;
575  }
576  }
577 
578  if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
579  ret = -EBUSY;
580  else
581  ret = 0;
582 
583  if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
584  val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
585  val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
586 
587  REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
588  REG_RD(bp, BNX2_EMAC_MDIO_MODE);
589 
590  udelay(40);
591  }
592 
593  return ret;
594 }
595 
596 static void
597 bnx2_disable_int(struct bnx2 *bp)
598 {
599  int i;
600  struct bnx2_napi *bnapi;
601 
602  for (i = 0; i < bp->irq_nvecs; i++) {
603  bnapi = &bp->bnx2_napi[i];
604  REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
605  BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
606  }
607  REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
608 }
609 
610 static void
611 bnx2_enable_int(struct bnx2 *bp)
612 {
613  int i;
614  struct bnx2_napi *bnapi;
615 
616  for (i = 0; i < bp->irq_nvecs; i++) {
617  bnapi = &bp->bnx2_napi[i];
618 
619  REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
620  BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
621  BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
622  bnapi->last_status_idx);
623 
624  REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
625  BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
626  bnapi->last_status_idx);
627  }
628  REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
629 }
630 
631 static void
632 bnx2_disable_int_sync(struct bnx2 *bp)
633 {
634  int i;
635 
636  atomic_inc(&bp->intr_sem);
637  if (!netif_running(bp->dev))
638  return;
639 
640  bnx2_disable_int(bp);
641  for (i = 0; i < bp->irq_nvecs; i++)
642  synchronize_irq(bp->irq_tbl[i].vector);
643 }
644 
645 static void
646 bnx2_napi_disable(struct bnx2 *bp)
647 {
648  int i;
649 
650  for (i = 0; i < bp->irq_nvecs; i++)
651  napi_disable(&bp->bnx2_napi[i].napi);
652 }
653 
654 static void
655 bnx2_napi_enable(struct bnx2 *bp)
656 {
657  int i;
658 
659  for (i = 0; i < bp->irq_nvecs; i++)
660  napi_enable(&bp->bnx2_napi[i].napi);
661 }
662 
663 static void
664 bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
665 {
666  if (stop_cnic)
667  bnx2_cnic_stop(bp);
668  if (netif_running(bp->dev)) {
669  bnx2_napi_disable(bp);
670  netif_tx_disable(bp->dev);
671  }
672  bnx2_disable_int_sync(bp);
673  netif_carrier_off(bp->dev); /* prevent tx timeout */
674 }
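/* bp->intr_sem acts as a nesting counter for this stop/start pair:
 * bnx2_disable_int_sync() bumps it before quiescing the IRQs, and
 * bnx2_netif_start() below only re-enables NAPI and interrupts when its
 * atomic_dec_and_test() brings the count back to zero, so nested stops
 * are safe.
 */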
675 
676 static void
677 bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
678 {
679  if (atomic_dec_and_test(&bp->intr_sem)) {
680  if (netif_running(bp->dev)) {
681  netif_tx_wake_all_queues(bp->dev);
682  spin_lock_bh(&bp->phy_lock);
683  if (bp->link_up)
684  netif_carrier_on(bp->dev);
685  spin_unlock_bh(&bp->phy_lock);
686  bnx2_napi_enable(bp);
687  bnx2_enable_int(bp);
688  if (start_cnic)
689  bnx2_cnic_start(bp);
690  }
691  }
692 }
693 
694 static void
695 bnx2_free_tx_mem(struct bnx2 *bp)
696 {
697  int i;
698 
699  for (i = 0; i < bp->num_tx_rings; i++) {
700  struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
701  struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
702 
703  if (txr->tx_desc_ring) {
704  dma_free_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
705  txr->tx_desc_ring,
706  txr->tx_desc_mapping);
707  txr->tx_desc_ring = NULL;
708  }
709  kfree(txr->tx_buf_ring);
710  txr->tx_buf_ring = NULL;
711  }
712 }
713 
714 static void
715 bnx2_free_rx_mem(struct bnx2 *bp)
716 {
717  int i;
718 
719  for (i = 0; i < bp->num_rx_rings; i++) {
720  struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
721  struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
722  int j;
723 
724  for (j = 0; j < bp->rx_max_ring; j++) {
725  if (rxr->rx_desc_ring[j])
726  dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
727  rxr->rx_desc_ring[j],
728  rxr->rx_desc_mapping[j]);
729  rxr->rx_desc_ring[j] = NULL;
730  }
731  vfree(rxr->rx_buf_ring);
732  rxr->rx_buf_ring = NULL;
733 
734  for (j = 0; j < bp->rx_max_pg_ring; j++) {
735  if (rxr->rx_pg_desc_ring[j])
736  dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
737  rxr->rx_pg_desc_ring[j],
738  rxr->rx_pg_desc_mapping[j]);
739  rxr->rx_pg_desc_ring[j] = NULL;
740  }
741  vfree(rxr->rx_pg_ring);
742  rxr->rx_pg_ring = NULL;
743  }
744 }
745 
746 static int
747 bnx2_alloc_tx_mem(struct bnx2 *bp)
748 {
749  int i;
750 
751  for (i = 0; i < bp->num_tx_rings; i++) {
752  struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
753  struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
754 
755  txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
756  if (txr->tx_buf_ring == NULL)
757  return -ENOMEM;
758 
759  txr->tx_desc_ring =
760  dma_alloc_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
761  &txr->tx_desc_mapping, GFP_KERNEL);
762  if (txr->tx_desc_ring == NULL)
763  return -ENOMEM;
764  }
765  return 0;
766 }
767 
768 static int
769 bnx2_alloc_rx_mem(struct bnx2 *bp)
770 {
771  int i;
772 
773  for (i = 0; i < bp->num_rx_rings; i++) {
774  struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
775  struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
776  int j;
777 
778  rxr->rx_buf_ring =
779  vzalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
780  if (rxr->rx_buf_ring == NULL)
781  return -ENOMEM;
782 
783  for (j = 0; j < bp->rx_max_ring; j++) {
784  rxr->rx_desc_ring[j] =
785  dma_alloc_coherent(&bp->pdev->dev,
786  RXBD_RING_SIZE,
787  &rxr->rx_desc_mapping[j],
788  GFP_KERNEL);
789  if (rxr->rx_desc_ring[j] == NULL)
790  return -ENOMEM;
791 
792  }
793 
794  if (bp->rx_pg_ring_size) {
795  rxr->rx_pg_ring = vzalloc(SW_RXPG_RING_SIZE *
796  bp->rx_max_pg_ring);
797  if (rxr->rx_pg_ring == NULL)
798  return -ENOMEM;
799 
800  }
801 
802  for (j = 0; j < bp->rx_max_pg_ring; j++) {
803  rxr->rx_pg_desc_ring[j] =
804  dma_alloc_coherent(&bp->pdev->dev,
805  RXBD_RING_SIZE,
806  &rxr->rx_pg_desc_mapping[j],
807  GFP_KERNEL);
808  if (rxr->rx_pg_desc_ring[j] == NULL)
809  return -ENOMEM;
810 
811  }
812  }
813  return 0;
814 }
815 
816 static void
817 bnx2_free_mem(struct bnx2 *bp)
818 {
819  int i;
820  struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
821 
822  bnx2_free_tx_mem(bp);
823  bnx2_free_rx_mem(bp);
824 
825  for (i = 0; i < bp->ctx_pages; i++) {
826  if (bp->ctx_blk[i]) {
827  dma_free_coherent(&bp->pdev->dev, BCM_PAGE_SIZE,
828  bp->ctx_blk[i],
829  bp->ctx_blk_mapping[i]);
830  bp->ctx_blk[i] = NULL;
831  }
832  }
833  if (bnapi->status_blk.msi) {
834  dma_free_coherent(&bp->pdev->dev, bp->status_stats_size,
835  bnapi->status_blk.msi,
836  bp->status_blk_mapping);
837  bnapi->status_blk.msi = NULL;
838  bp->stats_blk = NULL;
839  }
840 }
841 
842 static int
843 bnx2_alloc_mem(struct bnx2 *bp)
844 {
845  int i, status_blk_size, err;
846  struct bnx2_napi *bnapi;
847  void *status_blk;
848 
849  /* Combine status and statistics blocks into one allocation. */
850  status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
851  if (bp->flags & BNX2_FLAG_MSIX_CAP)
852  status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
853  BNX2_SBLK_MSIX_ALIGN_SIZE);
854  bp->status_stats_size = status_blk_size +
855  sizeof(struct statistics_block);
856 
857  status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size,
858  &bp->status_blk_mapping, GFP_KERNEL);
859  if (status_blk == NULL)
860  goto alloc_mem_err;
861 
862  memset(status_blk, 0, bp->status_stats_size);
863 
864  bnapi = &bp->bnx2_napi[0];
865  bnapi->status_blk.msi = status_blk;
866  bnapi->hw_tx_cons_ptr =
867  &bnapi->status_blk.msi->status_tx_quick_consumer_index0;
868  bnapi->hw_rx_cons_ptr =
869  &bnapi->status_blk.msi->status_rx_quick_consumer_index0;
870  if (bp->flags & BNX2_FLAG_MSIX_CAP) {
871  for (i = 1; i < bp->irq_nvecs; i++) {
872  struct status_block_msix *sblk;
873 
874  bnapi = &bp->bnx2_napi[i];
875 
876  sblk = (status_blk + BNX2_SBLK_MSIX_ALIGN_SIZE * i);
877  bnapi->status_blk.msix = sblk;
878  bnapi->hw_tx_cons_ptr =
879  &sblk->status_tx_quick_consumer_index;
880  bnapi->hw_rx_cons_ptr =
881  &sblk->status_rx_quick_consumer_index;
882  bnapi->int_num = i << 24;
883  }
884  }
885 
886  bp->stats_blk = status_blk + status_blk_size;
887 
888  bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
889 
890  if (CHIP_NUM(bp) == CHIP_NUM_5709) {
891  bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
892  if (bp->ctx_pages == 0)
893  bp->ctx_pages = 1;
894  for (i = 0; i < bp->ctx_pages; i++) {
895  bp->ctx_blk[i] = dma_alloc_coherent(&bp->pdev->dev,
896  BCM_PAGE_SIZE,
897  &bp->ctx_blk_mapping[i],
898  GFP_KERNEL);
899  if (bp->ctx_blk[i] == NULL)
900  goto alloc_mem_err;
901  }
902  }
903 
904  err = bnx2_alloc_rx_mem(bp);
905  if (err)
906  goto alloc_mem_err;
907 
908  err = bnx2_alloc_tx_mem(bp);
909  if (err)
910  goto alloc_mem_err;
911 
912  return 0;
913 
914 alloc_mem_err:
915  bnx2_free_mem(bp);
916  return -ENOMEM;
917 }
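/* Layout of the single DMA allocation made in bnx2_alloc_mem() for the
 * MSI-X case, with BNX2_SBLK_MSIX_ALIGN_SIZE strides:
 *
 *   status_blk + 0*align           vector 0 status block (also MSI block)
 *   status_blk + 1*align           vector 1 status block
 *   ...
 *   status_blk + status_blk_size   statistics_block (bp->stats_blk)
 *
 * Keeping per-vector blocks in separate cache-aligned slots lets each IRQ
 * handler read its own indices without false sharing.
 */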
918 
919 static void
920 bnx2_report_fw_link(struct bnx2 *bp)
921 {
922  u32 fw_link_status = 0;
923 
924  if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
925  return;
926 
927  if (bp->link_up) {
928  u32 bmsr;
929 
930  switch (bp->line_speed) {
931  case SPEED_10:
932  if (bp->duplex == DUPLEX_HALF)
933  fw_link_status = BNX2_LINK_STATUS_10HALF;
934  else
935  fw_link_status = BNX2_LINK_STATUS_10FULL;
936  break;
937  case SPEED_100:
938  if (bp->duplex == DUPLEX_HALF)
939  fw_link_status = BNX2_LINK_STATUS_100HALF;
940  else
941  fw_link_status = BNX2_LINK_STATUS_100FULL;
942  break;
943  case SPEED_1000:
944  if (bp->duplex == DUPLEX_HALF)
945  fw_link_status = BNX2_LINK_STATUS_1000HALF;
946  else
947  fw_link_status = BNX2_LINK_STATUS_1000FULL;
948  break;
949  case SPEED_2500:
950  if (bp->duplex == DUPLEX_HALF)
951  fw_link_status = BNX2_LINK_STATUS_2500HALF;
952  else
953  fw_link_status = BNX2_LINK_STATUS_2500FULL;
954  break;
955  }
956 
957  fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
958 
959  if (bp->autoneg) {
960  fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
961 
962  bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
963  bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
964 
965  if (!(bmsr & BMSR_ANEGCOMPLETE) ||
966  bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
967  fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
968  else
969  fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
970  }
971  }
972  else
973  fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
974 
975  bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
976 }
977 
978 static char *
979 bnx2_xceiver_str(struct bnx2 *bp)
980 {
981  return (bp->phy_port == PORT_FIBRE) ? "SerDes" :
982  ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
983  "Copper");
984 }
985 
986 static void
987 bnx2_report_link(struct bnx2 *bp)
988 {
989  if (bp->link_up) {
990  netif_carrier_on(bp->dev);
991  netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
992  bnx2_xceiver_str(bp),
993  bp->line_speed,
994  bp->duplex == DUPLEX_FULL ? "full" : "half");
995 
996  if (bp->flow_ctrl) {
997  if (bp->flow_ctrl & FLOW_CTRL_RX) {
998  pr_cont(", receive ");
999  if (bp->flow_ctrl & FLOW_CTRL_TX)
1000  pr_cont("& transmit ");
1001  }
1002  else {
1003  pr_cont(", transmit ");
1004  }
1005  pr_cont("flow control ON");
1006  }
1007  pr_cont("\n");
1008  } else {
1009  netif_carrier_off(bp->dev);
1010  netdev_err(bp->dev, "NIC %s Link is Down\n",
1011  bnx2_xceiver_str(bp));
1012  }
1013 
1014  bnx2_report_fw_link(bp);
1015 }
1016 
1017 static void
1018 bnx2_resolve_flow_ctrl(struct bnx2 *bp)
1019 {
1020  u32 local_adv, remote_adv;
1021 
1022  bp->flow_ctrl = 0;
1023  if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
1024  (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
1025 
1026  if (bp->duplex == DUPLEX_FULL) {
1027  bp->flow_ctrl = bp->req_flow_ctrl;
1028  }
1029  return;
1030  }
1031 
1032  if (bp->duplex != DUPLEX_FULL) {
1033  return;
1034  }
1035 
1036  if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1037  (CHIP_NUM(bp) == CHIP_NUM_5708)) {
1038  u32 val;
1039 
1040  bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1041  if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
1042  bp->flow_ctrl |= FLOW_CTRL_TX;
1043  if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
1044  bp->flow_ctrl |= FLOW_CTRL_RX;
1045  return;
1046  }
1047 
1048  bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1049  bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1050 
1051  if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1052  u32 new_local_adv = 0;
1053  u32 new_remote_adv = 0;
1054 
1055  if (local_adv & ADVERTISE_1000XPAUSE)
1056  new_local_adv |= ADVERTISE_PAUSE_CAP;
1057  if (local_adv & ADVERTISE_1000XPSE_ASYM)
1058  new_local_adv |= ADVERTISE_PAUSE_ASYM;
1059  if (remote_adv & ADVERTISE_1000XPAUSE)
1060  new_remote_adv |= ADVERTISE_PAUSE_CAP;
1061  if (remote_adv & ADVERTISE_1000XPSE_ASYM)
1062  new_remote_adv |= ADVERTISE_PAUSE_ASYM;
1063 
1064  local_adv = new_local_adv;
1065  remote_adv = new_remote_adv;
1066  }
1067 
1068  /* See Table 28B-3 of 802.3ab-1999 spec. */
1069  if (local_adv & ADVERTISE_PAUSE_CAP) {
1070  if(local_adv & ADVERTISE_PAUSE_ASYM) {
1071  if (remote_adv & ADVERTISE_PAUSE_CAP) {
1072  bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
1073  }
1074  else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
1075  bp->flow_ctrl = FLOW_CTRL_RX;
1076  }
1077  }
1078  else {
1079  if (remote_adv & ADVERTISE_PAUSE_CAP) {
1080  bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
1081  }
1082  }
1083  }
1084  else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1085  if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
1086  (remote_adv & ADVERTISE_PAUSE_ASYM)) {
1087 
1088  bp->flow_ctrl = FLOW_CTRL_TX;
1089  }
1090  }
1091 }
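/* Summary of the 802.3 Table 28B-3 resolution implemented above, after
 * the 1000X pause bits are mapped onto PAUSE_CAP/PAUSE_ASYM:
 *
 *   local CAP  local ASYM  remote CAP  remote ASYM   result
 *       1          0           1           x         TX | RX
 *       1          1           1           x         TX | RX
 *       1          1           0           1         RX only
 *       0          1           1           1         TX only
 *
 * Any other combination leaves bp->flow_ctrl at 0 (no pause in either
 * direction).
 */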
1092 
1093 static int
1094 bnx2_5709s_linkup(struct bnx2 *bp)
1095 {
1096  u32 val, speed;
1097 
1098  bp->link_up = 1;
1099 
1100  bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
1101  bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
1102  bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1103 
1104  if ((bp->autoneg & AUTONEG_SPEED) == 0) {
1105  bp->line_speed = bp->req_line_speed;
1106  bp->duplex = bp->req_duplex;
1107  return 0;
1108  }
1109  speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
1110  switch (speed) {
1111  case MII_BNX2_GP_TOP_AN_SPEED_10:
1112  bp->line_speed = SPEED_10;
1113  break;
1114  case MII_BNX2_GP_TOP_AN_SPEED_100:
1115  bp->line_speed = SPEED_100;
1116  break;
1117  case MII_BNX2_GP_TOP_AN_SPEED_1G:
1118  case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
1119  bp->line_speed = SPEED_1000;
1120  break;
1121  case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
1122  bp->line_speed = SPEED_2500;
1123  break;
1124  }
1125  if (val & MII_BNX2_GP_TOP_AN_FD)
1126  bp->duplex = DUPLEX_FULL;
1127  else
1128  bp->duplex = DUPLEX_HALF;
1129  return 0;
1130 }
1131 
1132 static int
1133 bnx2_5708s_linkup(struct bnx2 *bp)
1134 {
1135  u32 val;
1136 
1137  bp->link_up = 1;
1138  bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1139  switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
1140  case BCM5708S_1000X_STAT1_SPEED_10:
1141  bp->line_speed = SPEED_10;
1142  break;
1143  case BCM5708S_1000X_STAT1_SPEED_100:
1144  bp->line_speed = SPEED_100;
1145  break;
1146  case BCM5708S_1000X_STAT1_SPEED_1G:
1147  bp->line_speed = SPEED_1000;
1148  break;
1149  case BCM5708S_1000X_STAT1_SPEED_2G5:
1150  bp->line_speed = SPEED_2500;
1151  break;
1152  }
1153  if (val & BCM5708S_1000X_STAT1_FD)
1154  bp->duplex = DUPLEX_FULL;
1155  else
1156  bp->duplex = DUPLEX_HALF;
1157 
1158  return 0;
1159 }
1160 
1161 static int
1162 bnx2_5706s_linkup(struct bnx2 *bp)
1163 {
1164  u32 bmcr, local_adv, remote_adv, common;
1165 
1166  bp->link_up = 1;
1167  bp->line_speed = SPEED_1000;
1168 
1169  bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1170  if (bmcr & BMCR_FULLDPLX) {
1171  bp->duplex = DUPLEX_FULL;
1172  }
1173  else {
1174  bp->duplex = DUPLEX_HALF;
1175  }
1176 
1177  if (!(bmcr & BMCR_ANENABLE)) {
1178  return 0;
1179  }
1180 
1181  bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1182  bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1183 
1184  common = local_adv & remote_adv;
1185  if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
1186 
1187  if (common & ADVERTISE_1000XFULL) {
1188  bp->duplex = DUPLEX_FULL;
1189  }
1190  else {
1191  bp->duplex = DUPLEX_HALF;
1192  }
1193  }
1194 
1195  return 0;
1196 }
1197 
1198 static int
1199 bnx2_copper_linkup(struct bnx2 *bp)
1200 {
1201  u32 bmcr;
1202 
1203  bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1204  if (bmcr & BMCR_ANENABLE) {
1205  u32 local_adv, remote_adv, common;
1206 
1207  bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
1208  bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
1209 
1210  common = local_adv & (remote_adv >> 2);
1211  if (common & ADVERTISE_1000FULL) {
1212  bp->line_speed = SPEED_1000;
1213  bp->duplex = DUPLEX_FULL;
1214  }
1215  else if (common & ADVERTISE_1000HALF) {
1216  bp->line_speed = SPEED_1000;
1217  bp->duplex = DUPLEX_HALF;
1218  }
1219  else {
1220  bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1221  bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1222 
1223  common = local_adv & remote_adv;
1224  if (common & ADVERTISE_100FULL) {
1225  bp->line_speed = SPEED_100;
1226  bp->duplex = DUPLEX_FULL;
1227  }
1228  else if (common & ADVERTISE_100HALF) {
1229  bp->line_speed = SPEED_100;
1230  bp->duplex = DUPLEX_HALF;
1231  }
1232  else if (common & ADVERTISE_10FULL) {
1233  bp->line_speed = SPEED_10;
1234  bp->duplex = DUPLEX_FULL;
1235  }
1236  else if (common & ADVERTISE_10HALF) {
1237  bp->line_speed = SPEED_10;
1238  bp->duplex = DUPLEX_HALF;
1239  }
1240  else {
1241  bp->line_speed = 0;
1242  bp->link_up = 0;
1243  }
1244  }
1245  }
1246  else {
1247  if (bmcr & BMCR_SPEED100) {
1248  bp->line_speed = SPEED_100;
1249  }
1250  else {
1251  bp->line_speed = SPEED_10;
1252  }
1253  if (bmcr & BMCR_FULLDPLX) {
1254  bp->duplex = DUPLEX_FULL;
1255  }
1256  else {
1257  bp->duplex = DUPLEX_HALF;
1258  }
1259  }
1260 
1261  return 0;
1262 }
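/* The "remote_adv >> 2" in bnx2_copper_linkup() relies on the MII
 * register layout: the partner's 1000BASE-T abilities in MII_STAT1000
 * (LPA_1000FULL/LPA_1000HALF, bits 11/10) sit exactly two bits above the
 * local advertisement bits in MII_CTRL1000 (ADVERTISE_1000FULL/
 * ADVERTISE_1000HALF, bits 9/8), so a single shift lines them up for the
 * common-capability AND.
 */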
1263 
1264 static void
1265 bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
1266 {
1267  u32 val, rx_cid_addr = GET_CID_ADDR(cid);
1268 
1269  val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
1270  val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
1271  val |= 0x02 << 8;
1272 
1273  if (bp->flow_ctrl & FLOW_CTRL_TX)
1274  val |= BNX2_L2CTX_FLOW_CTRL_ENABLE;
1275 
1276  bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
1277 }
1278 
1279 static void
1280 bnx2_init_all_rx_contexts(struct bnx2 *bp)
1281 {
1282  int i;
1283  u32 cid;
1284 
1285  for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1286  if (i == 1)
1287  cid = RX_RSS_CID;
1288  bnx2_init_rx_context(bp, cid);
1289  }
1290 }
1291 
1292 static void
1293 bnx2_set_mac_link(struct bnx2 *bp)
1294 {
1295  u32 val;
1296 
1297  REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
1298  if (bp->link_up && (bp->line_speed == SPEED_1000) &&
1299  (bp->duplex == DUPLEX_HALF)) {
1300  REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
1301  }
1302 
1303  /* Configure the EMAC mode register. */
1304  val = REG_RD(bp, BNX2_EMAC_MODE);
1305 
1306  val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1307  BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
1308  BNX2_EMAC_MODE_25G_MODE);
1309 
1310  if (bp->link_up) {
1311  switch (bp->line_speed) {
1312  case SPEED_10:
1313  if (CHIP_NUM(bp) != CHIP_NUM_5706) {
1314  val |= BNX2_EMAC_MODE_PORT_MII_10M;
1315  break;
1316  }
1317  /* fall through */
1318  case SPEED_100:
1319  val |= BNX2_EMAC_MODE_PORT_MII;
1320  break;
1321  case SPEED_2500:
1322  val |= BNX2_EMAC_MODE_25G_MODE;
1323  /* fall through */
1324  case SPEED_1000:
1325  val |= BNX2_EMAC_MODE_PORT_GMII;
1326  break;
1327  }
1328  }
1329  else {
1330  val |= BNX2_EMAC_MODE_PORT_GMII;
1331  }
1332 
1333  /* Set the MAC to operate in the appropriate duplex mode. */
1334  if (bp->duplex == DUPLEX_HALF)
1335  val |= BNX2_EMAC_MODE_HALF_DUPLEX;
1336  REG_WR(bp, BNX2_EMAC_MODE, val);
1337 
1338  /* Enable/disable rx PAUSE. */
1339  bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
1340 
1341  if (bp->flow_ctrl & FLOW_CTRL_RX)
1342  bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
1343  REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
1344 
1345  /* Enable/disable tx PAUSE. */
1346  val = REG_RD(bp, BNX2_EMAC_TX_MODE);
1347  val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
1348 
1349  if (bp->flow_ctrl & FLOW_CTRL_TX)
1350  val |= BNX2_EMAC_TX_MODE_FLOW_EN;
1351  REG_WR(bp, BNX2_EMAC_TX_MODE, val);
1352 
1353  /* Acknowledge the interrupt. */
1354  REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
1355 
1356  bnx2_init_all_rx_contexts(bp);
1357 }
1358 
1359 static void
1360 bnx2_enable_bmsr1(struct bnx2 *bp)
1361 {
1362  if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1363  (CHIP_NUM(bp) == CHIP_NUM_5709))
1364  bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1365  MII_BNX2_BLK_ADDR_GP_STATUS);
1366 }
1367 
1368 static void
1369 bnx2_disable_bmsr1(struct bnx2 *bp)
1370 {
1371  if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1372  (CHIP_NUM(bp) == CHIP_NUM_5709))
1373  bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1374  MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1375 }
1376 
1377 static int
1378 bnx2_test_and_enable_2g5(struct bnx2 *bp)
1379 {
1380  u32 up1;
1381  int ret = 1;
1382 
1383  if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1384  return 0;
1385 
1386  if (bp->autoneg & AUTONEG_SPEED)
1387  bp->advertising |= ADVERTISED_2500baseX_Full;
1388 
1389  if (CHIP_NUM(bp) == CHIP_NUM_5709)
1390  bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1391 
1392  bnx2_read_phy(bp, bp->mii_up1, &up1);
1393  if (!(up1 & BCM5708S_UP1_2G5)) {
1394  up1 |= BCM5708S_UP1_2G5;
1395  bnx2_write_phy(bp, bp->mii_up1, up1);
1396  ret = 0;
1397  }
1398 
1399  if (CHIP_NUM(bp) == CHIP_NUM_5709)
1400  bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1401  MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1402 
1403  return ret;
1404 }
1405 
1406 static int
1407 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1408 {
1409  u32 up1;
1410  int ret = 0;
1411 
1412  if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1413  return 0;
1414 
1415  if (CHIP_NUM(bp) == CHIP_NUM_5709)
1416  bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1417 
1418  bnx2_read_phy(bp, bp->mii_up1, &up1);
1419  if (up1 & BCM5708S_UP1_2G5) {
1420  up1 &= ~BCM5708S_UP1_2G5;
1421  bnx2_write_phy(bp, bp->mii_up1, up1);
1422  ret = 1;
1423  }
1424 
1425  if (CHIP_NUM(bp) == CHIP_NUM_5709)
1426  bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1427  MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1428 
1429  return ret;
1430 }
1431 
1432 static void
1433 bnx2_enable_forced_2g5(struct bnx2 *bp)
1434 {
1435  u32 uninitialized_var(bmcr);
1436  int err;
1437 
1438  if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1439  return;
1440 
1441  if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1442  u32 val;
1443 
1444  bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1445  MII_BNX2_BLK_ADDR_SERDES_DIG);
1446  if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
1447  val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1448  val |= MII_BNX2_SD_MISC1_FORCE |
1449  MII_BNX2_SD_MISC1_FORCE_2_5G;
1450  bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1451  }
1452 
1453  bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1454  MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1455  err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1456 
1457  } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1458  err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1459  if (!err)
1460  bmcr |= BCM5708S_BMCR_FORCE_2500;
1461  } else {
1462  return;
1463  }
1464 
1465  if (err)
1466  return;
1467 
1468  if (bp->autoneg & AUTONEG_SPEED) {
1469  bmcr &= ~BMCR_ANENABLE;
1470  if (bp->req_duplex == DUPLEX_FULL)
1471  bmcr |= BMCR_FULLDPLX;
1472  }
1473  bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1474 }
1475 
1476 static void
1477 bnx2_disable_forced_2g5(struct bnx2 *bp)
1478 {
1479  u32 uninitialized_var(bmcr);
1480  int err;
1481 
1482  if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1483  return;
1484 
1485  if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1486  u32 val;
1487 
1488  bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1489  MII_BNX2_BLK_ADDR_SERDES_DIG);
1490  if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
1491  val &= ~MII_BNX2_SD_MISC1_FORCE;
1492  bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1493  }
1494 
1495  bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1496  MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1497  err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1498 
1499  } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1500  err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1501  if (!err)
1502  bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1503  } else {
1504  return;
1505  }
1506 
1507  if (err)
1508  return;
1509 
1510  if (bp->autoneg & AUTONEG_SPEED)
1511  bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1512  bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1513 }
1514 
1515 static void
1516 bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
1517 {
1518  u32 val;
1519 
1520  bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
1521  bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1522  if (start)
1523  bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
1524  else
1525  bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
1526 }
1527 
1528 static int
1529 bnx2_set_link(struct bnx2 *bp)
1530 {
1531  u32 bmsr;
1532  u8 link_up;
1533 
1534  if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
1535  bp->link_up = 1;
1536  return 0;
1537  }
1538 
1539  if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
1540  return 0;
1541 
1542  link_up = bp->link_up;
1543 
1544  bnx2_enable_bmsr1(bp);
1545  bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1546  bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1547  bnx2_disable_bmsr1(bp);
1548 
1549  if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1550  (CHIP_NUM(bp) == CHIP_NUM_5706)) {
1551  u32 val, an_dbg;
1552 
1553  if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
1554  bnx2_5706s_force_link_dn(bp, 0);
1555  bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
1556  }
1557  val = REG_RD(bp, BNX2_EMAC_STATUS);
1558 
1559  bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
1560  bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
1561  bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
1562 
1563  if ((val & BNX2_EMAC_STATUS_LINK) &&
1564  !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
1565  bmsr |= BMSR_LSTATUS;
1566  else
1567  bmsr &= ~BMSR_LSTATUS;
1568  }
1569 
1570  if (bmsr & BMSR_LSTATUS) {
1571  bp->link_up = 1;
1572 
1573  if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1574  if (CHIP_NUM(bp) == CHIP_NUM_5706)
1575  bnx2_5706s_linkup(bp);
1576  else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1577  bnx2_5708s_linkup(bp);
1578  else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1579  bnx2_5709s_linkup(bp);
1580  }
1581  else {
1582  bnx2_copper_linkup(bp);
1583  }
1584  bnx2_resolve_flow_ctrl(bp);
1585  }
1586  else {
1587  if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1588  (bp->autoneg & AUTONEG_SPEED))
1589  bnx2_disable_forced_2g5(bp);
1590 
1591  if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
1592  u32 bmcr;
1593 
1594  bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1595  bmcr |= BMCR_ANENABLE;
1596  bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1597 
1598  bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
1599  }
1600  bp->link_up = 0;
1601  }
1602 
1603  if (bp->link_up != link_up) {
1604  bnx2_report_link(bp);
1605  }
1606 
1607  bnx2_set_mac_link(bp);
1608 
1609  return 0;
1610 }
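/* Note the 5706 SerDes special case in bnx2_set_link() above: on that
 * chip the BMSR link bit is not trusted on its own, so the driver
 * cross-checks the EMAC_STATUS link indication against the AN_DBG
 * "no sync" shadow register (read twice to get a latched value) and
 * rewrites BMSR_LSTATUS accordingly before the normal link-up handling
 * runs.
 */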
1611 
1612 static int
1613 bnx2_reset_phy(struct bnx2 *bp)
1614 {
1615  int i;
1616  u32 reg;
1617 
1618  bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1619 
1620 #define PHY_RESET_MAX_WAIT 100
1621  for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1622  udelay(10);
1623 
1624  bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1625  if (!(reg & BMCR_RESET)) {
1626  udelay(20);
1627  break;
1628  }
1629  }
1630  if (i == PHY_RESET_MAX_WAIT) {
1631  return -EBUSY;
1632  }
1633  return 0;
1634 }
1635 
1636 static u32
1637 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1638 {
1639  u32 adv = 0;
1640 
1641  if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1642  (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1643 
1644  if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1645  adv = ADVERTISE_1000XPAUSE;
1646  }
1647  else {
1648  adv = ADVERTISE_PAUSE_CAP;
1649  }
1650  }
1651  else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1652  if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1653  adv = ADVERTISE_1000XPSE_ASYM;
1654  }
1655  else {
1656  adv = ADVERTISE_PAUSE_ASYM;
1657  }
1658  }
1659  else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1660  if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1661  adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1662  }
1663  else {
1664  adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1665  }
1666  }
1667  return adv;
1668 }
1669 
1670 static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
1671 
1672 static int
1673 bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1674 __releases(&bp->phy_lock)
1675 __acquires(&bp->phy_lock)
1676 {
1677  u32 speed_arg = 0, pause_adv;
1678 
1679  pause_adv = bnx2_phy_get_pause_adv(bp);
1680 
1681  if (bp->autoneg & AUTONEG_SPEED) {
1682  speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1683  if (bp->advertising & ADVERTISED_10baseT_Half)
1684  speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1685  if (bp->advertising & ADVERTISED_10baseT_Full)
1686  speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1687  if (bp->advertising & ADVERTISED_100baseT_Half)
1688  speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1689  if (bp->advertising & ADVERTISED_100baseT_Full)
1690  speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1691  if (bp->advertising & ADVERTISED_1000baseT_Full)
1692  speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1693  if (bp->advertising & ADVERTISED_2500baseX_Full)
1694  speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1695  } else {
1696  if (bp->req_line_speed == SPEED_2500)
1697  speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1698  else if (bp->req_line_speed == SPEED_1000)
1699  speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1700  else if (bp->req_line_speed == SPEED_100) {
1701  if (bp->req_duplex == DUPLEX_FULL)
1702  speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1703  else
1704  speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1705  } else if (bp->req_line_speed == SPEED_10) {
1706  if (bp->req_duplex == DUPLEX_FULL)
1707  speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1708  else
1709  speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1710  }
1711  }
1712 
1713  if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1714  speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1715  if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
1716  speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1717 
1718  if (port == PORT_TP)
1719  speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1720  BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1721 
1722  bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);
1723 
1724  spin_unlock_bh(&bp->phy_lock);
1725  bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
1726  spin_lock_bh(&bp->phy_lock);
1727 
1728  return 0;
1729 }
1730 
1731 static int
1732 bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
1733 __releases(&bp->phy_lock)
1734 __acquires(&bp->phy_lock)
1735 {
1736  u32 adv, bmcr;
1737  u32 new_adv = 0;
1738 
1739  if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
1740  return bnx2_setup_remote_phy(bp, port);
1741 
1742  if (!(bp->autoneg & AUTONEG_SPEED)) {
1743  u32 new_bmcr;
1744  int force_link_down = 0;
1745 
1746  if (bp->req_line_speed == SPEED_2500) {
1747  if (!bnx2_test_and_enable_2g5(bp))
1748  force_link_down = 1;
1749  } else if (bp->req_line_speed == SPEED_1000) {
1750  if (bnx2_test_and_disable_2g5(bp))
1751  force_link_down = 1;
1752  }
1753  bnx2_read_phy(bp, bp->mii_adv, &adv);
1754  adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
1755 
1756  bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1757  new_bmcr = bmcr & ~BMCR_ANENABLE;
1758  new_bmcr |= BMCR_SPEED1000;
1759 
1760  if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1761  if (bp->req_line_speed == SPEED_2500)
1762  bnx2_enable_forced_2g5(bp);
1763  else if (bp->req_line_speed == SPEED_1000) {
1764  bnx2_disable_forced_2g5(bp);
1765  new_bmcr &= ~0x2000;
1766  }
1767 
1768  } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1769  if (bp->req_line_speed == SPEED_2500)
1770  new_bmcr |= BCM5708S_BMCR_FORCE_2500;
1771  else
1772  new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
1773  }
1774 
1775  if (bp->req_duplex == DUPLEX_FULL) {
1776  adv |= ADVERTISE_1000XFULL;
1777  new_bmcr |= BMCR_FULLDPLX;
1778  }
1779  else {
1780  adv |= ADVERTISE_1000XHALF;
1781  new_bmcr &= ~BMCR_FULLDPLX;
1782  }
1783  if ((new_bmcr != bmcr) || (force_link_down)) {
1784  /* Force a link down visible on the other side */
1785  if (bp->link_up) {
1786  bnx2_write_phy(bp, bp->mii_adv, adv &
1787  ~(ADVERTISE_1000XFULL |
1788  ADVERTISE_1000XHALF));
1789  bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
1790  BMCR_ANRESTART | BMCR_ANENABLE);
1791 
1792  bp->link_up = 0;
1793  netif_carrier_off(bp->dev);
1794  bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1795  bnx2_report_link(bp);
1796  }
1797  bnx2_write_phy(bp, bp->mii_adv, adv);
1798  bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1799  } else {
1800  bnx2_resolve_flow_ctrl(bp);
1801  bnx2_set_mac_link(bp);
1802  }
1803  return 0;
1804  }
1805 
1806  bnx2_test_and_enable_2g5(bp);
1807 
1808  if (bp->advertising & ADVERTISED_1000baseT_Full)
1809  new_adv |= ADVERTISE_1000XFULL;
1810 
1811  new_adv |= bnx2_phy_get_pause_adv(bp);
1812 
1813  bnx2_read_phy(bp, bp->mii_adv, &adv);
1814  bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1815 
1816  bp->serdes_an_pending = 0;
1817  if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1818  /* Force a link down visible on the other side */
1819  if (bp->link_up) {
1820  bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
1821  spin_unlock_bh(&bp->phy_lock);
1822  msleep(20);
1823  spin_lock_bh(&bp->phy_lock);
1824  }
1825 
1826  bnx2_write_phy(bp, bp->mii_adv, new_adv);
1827  bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
1828  BMCR_ANENABLE);
1829  /* Speed up link-up time when the link partner
1830  * does not autonegotiate which is very common
1831  * in blade servers. Some blade servers use
1832  * IPMI for keyboard input and it's important
1833  * to minimize link disruptions. Autoneg. involves
1834  * exchanging base pages plus 3 next pages and
1835  * normally completes in about 120 msec.
1836  */
1837  bp->current_interval = SERDES_AN_TIMEOUT;
1838  bp->serdes_an_pending = 1;
1839  mod_timer(&bp->timer, jiffies + bp->current_interval);
1840  } else {
1841  bnx2_resolve_flow_ctrl(bp);
1842  bnx2_set_mac_link(bp);
1843  }
1844 
1845  return 0;
1846 }
1847 
1848 #define ETHTOOL_ALL_FIBRE_SPEED \
1849  (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ? \
1850  (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
1851  (ADVERTISED_1000baseT_Full)
1852 
1853 #define ETHTOOL_ALL_COPPER_SPEED \
1854  (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1855  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1856  ADVERTISED_1000baseT_Full)
1857 
1858 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1859  ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1860 
1861 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1862 
1863 static void
1864 bnx2_set_default_remote_link(struct bnx2 *bp)
1865 {
1866  u32 link;
1867 
1868  if (bp->phy_port == PORT_TP)
1869  link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
1870  else
1871  link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
1872 
1873  if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1874  bp->req_line_speed = 0;
1875  bp->autoneg |= AUTONEG_SPEED;
1876  bp->advertising = ADVERTISED_Autoneg;
1877  if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1878  bp->advertising |= ADVERTISED_10baseT_Half;
1879  if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1880  bp->advertising |= ADVERTISED_10baseT_Full;
1881  if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1882  bp->advertising |= ADVERTISED_100baseT_Half;
1883  if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1884  bp->advertising |= ADVERTISED_100baseT_Full;
1885  if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1886  bp->advertising |= ADVERTISED_1000baseT_Full;
1887  if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1888  bp->advertising |= ADVERTISED_2500baseX_Full;
1889  } else {
1890  bp->autoneg = 0;
1891  bp->advertising = 0;
1892  bp->req_duplex = DUPLEX_FULL;
1893  if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1894  bp->req_line_speed = SPEED_10;
1895  if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1896  bp->req_duplex = DUPLEX_HALF;
1897  }
1898  if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1899  bp->req_line_speed = SPEED_100;
1900  if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1901  bp->req_duplex = DUPLEX_HALF;
1902  }
1903  if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1904  bp->req_line_speed = SPEED_1000;
1905  if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1906  bp->req_line_speed = SPEED_2500;
1907  }
1908 }
1909 
1910 static void
1911 bnx2_set_default_link(struct bnx2 *bp)
1912 {
1913  if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1914  bnx2_set_default_remote_link(bp);
1915  return;
1916  }
1917 
1918  bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1919  bp->req_line_speed = 0;
1920  if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1921  u32 reg;
1922 
1923  bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1924 
1925  reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1926  reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1927  if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1928  bp->autoneg = 0;
1929  bp->req_line_speed = bp->line_speed = SPEED_1000;
1930  bp->req_duplex = DUPLEX_FULL;
1931  }
1932  } else
1933  bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1934 }
1935 
1936 static void
1937 bnx2_send_heart_beat(struct bnx2 *bp)
1938 {
1939  u32 msg;
1940  u32 addr;
1941 
1942  spin_lock(&bp->indirect_lock);
1943  msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1944  addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1945  REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1946  REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1947  spin_unlock(&bp->indirect_lock);
1948 }
1949 
1950 static void
1951 bnx2_remote_phy_event(struct bnx2 *bp)
1952 {
1953  u32 msg;
1954  u8 link_up = bp->link_up;
1955  u8 old_port;
1956 
1957  msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
1958 
1959  if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
1960  bnx2_send_heart_beat(bp);
1961 
1962  msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;
1963 
1964  if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
1965  bp->link_up = 0;
1966  else {
1967  u32 speed;
1968 
1969  bp->link_up = 1;
1970  speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
1971  bp->duplex = DUPLEX_FULL;
1972  switch (speed) {
1973  case BNX2_LINK_STATUS_10HALF:
1974  bp->duplex = DUPLEX_HALF;
1975  /* fall through */
1976  case BNX2_LINK_STATUS_10FULL:
1977  bp->line_speed = SPEED_10;
1978  break;
1979  case BNX2_LINK_STATUS_100HALF:
1980  bp->duplex = DUPLEX_HALF;
1981  /* fall through */
1982  case BNX2_LINK_STATUS_100BASE_T4:
1983  case BNX2_LINK_STATUS_100FULL:
1984  bp->line_speed = SPEED_100;
1985  break;
1986  case BNX2_LINK_STATUS_1000HALF:
1987  bp->duplex = DUPLEX_HALF;
1988  /* fall through */
1989  case BNX2_LINK_STATUS_1000FULL:
1990  bp->line_speed = SPEED_1000;
1991  break;
1992  case BNX2_LINK_STATUS_2500HALF:
1993  bp->duplex = DUPLEX_HALF;
1994  /* fall through */
1995  case BNX2_LINK_STATUS_2500FULL:
1996  bp->line_speed = SPEED_2500;
1997  break;
1998  default:
1999  bp->line_speed = 0;
2000  break;
2001  }
2002 
2003  bp->flow_ctrl = 0;
2004  if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
2005  (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
2006  if (bp->duplex == DUPLEX_FULL)
2007  bp->flow_ctrl = bp->req_flow_ctrl;
2008  } else {
2009  if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
2010  bp->flow_ctrl |= FLOW_CTRL_TX;
2011  if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
2012  bp->flow_ctrl |= FLOW_CTRL_RX;
2013  }
2014 
2015  old_port = bp->phy_port;
2016  if (msg & BNX2_LINK_STATUS_SERDES_LINK)
2017  bp->phy_port = PORT_FIBRE;
2018  else
2019  bp->phy_port = PORT_TP;
2020 
2021  if (old_port != bp->phy_port)
2022  bnx2_set_default_link(bp);
2023 
2024  }
2025  if (bp->link_up != link_up)
2026  bnx2_report_link(bp);
2027 
2028  bnx2_set_mac_link(bp);
2029 }
2030 
2031 static int
2032 bnx2_set_remote_link(struct bnx2 *bp)
2033 {
2034  u32 evt_code;
2035 
2036  evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
2037  switch (evt_code) {
2038  case BNX2_FW_EVT_CODE_LINK_EVENT:
2039  bnx2_remote_phy_event(bp);
2040  break;
2041  case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
2042  default:
2043  bnx2_send_heart_beat(bp);
2044  break;
2045  }
2046  return 0;
2047 }
2048 
2049 static int
2050 bnx2_setup_copper_phy(struct bnx2 *bp)
2051 __releases(&bp->phy_lock)
2052 __acquires(&bp->phy_lock)
2053 {
2054  u32 bmcr;
2055  u32 new_bmcr;
2056 
2057  bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
2058 
2059  if (bp->autoneg & AUTONEG_SPEED) {
2060  u32 adv_reg, adv1000_reg;
2061  u32 new_adv = 0;
2062  u32 new_adv1000 = 0;
2063 
2064  bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
2065  adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
2066  ADVERTISE_PAUSE_ASYM);
2067 
2068  bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
2069  adv1000_reg &= PHY_ALL_1000_SPEED;
2070 
2071  new_adv = ethtool_adv_to_mii_adv_t(bp->advertising);
2072  new_adv |= ADVERTISE_CSMA;
2073  new_adv |= bnx2_phy_get_pause_adv(bp);
2074 
2075  new_adv1000 |= ethtool_adv_to_mii_ctrl1000_t(bp->advertising);
2076 
2077  if ((adv1000_reg != new_adv1000) ||
2078  (adv_reg != new_adv) ||
2079  ((bmcr & BMCR_ANENABLE) == 0)) {
2080 
2081  bnx2_write_phy(bp, bp->mii_adv, new_adv);
2082  bnx2_write_phy(bp, MII_CTRL1000, new_adv1000);
2083  bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
2084  BMCR_ANENABLE);
2085  }
2086  else if (bp->link_up) {
2087  /* Flow ctrl may have changed from auto to forced */
2088  /* or vice-versa. */
2089 
2090  bnx2_resolve_flow_ctrl(bp);
2091  bnx2_set_mac_link(bp);
2092  }
2093  return 0;
2094  }
2095 
2096  new_bmcr = 0;
2097  if (bp->req_line_speed == SPEED_100) {
2098  new_bmcr |= BMCR_SPEED100;
2099  }
2100  if (bp->req_duplex == DUPLEX_FULL) {
2101  new_bmcr |= BMCR_FULLDPLX;
2102  }
2103  if (new_bmcr != bmcr) {
2104  u32 bmsr;
2105 
2106  bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2107  bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2108 
2109  if (bmsr & BMSR_LSTATUS) {
2110  /* Force link down */
2111  bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
2112  spin_unlock_bh(&bp->phy_lock);
2113  msleep(50);
2114  spin_lock_bh(&bp->phy_lock);
2115 
2116  bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2117  bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2118  }
2119 
2120  bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
2121 
2122  /* Normally, the new speed is setup after the link has
2123  * gone down and up again. In some cases, link will not go
2124  * down so we need to set up the new speed here.
2125  */
2126  if (bmsr & BMSR_LSTATUS) {
2127  bp->line_speed = bp->req_line_speed;
2128  bp->duplex = bp->req_duplex;
2129  bnx2_resolve_flow_ctrl(bp);
2130  bnx2_set_mac_link(bp);
2131  }
2132  } else {
2133  bnx2_resolve_flow_ctrl(bp);
2134  bnx2_set_mac_link(bp);
2135  }
2136  return 0;
2137 }
2138 
2139 static int
2140 bnx2_setup_phy(struct bnx2 *bp, u8 port)
2141 __releases(&bp->phy_lock)
2142 __acquires(&bp->phy_lock)
2143 {
2144  if (bp->loopback == MAC_LOOPBACK)
2145  return 0;
2146 
2147  if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2148  return bnx2_setup_serdes_phy(bp, port);
2149  }
2150  else {
2151  return bnx2_setup_copper_phy(bp);
2152  }
2153 }
2154 
2155 static int
2156 bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
2157 {
2158  u32 val;
2159 
2160  bp->mii_bmcr = MII_BMCR + 0x10;
2161  bp->mii_bmsr = MII_BMSR + 0x10;
2162  bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
2163  bp->mii_adv = MII_ADVERTISE + 0x10;
2164  bp->mii_lpa = MII_LPA + 0x10;
2165  bp->mii_up1 = MII_BNX2_OVER1G_UP1;
2166 
2167  bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
2168  bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
2169 
2170  bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2171  if (reset_phy)
2172  bnx2_reset_phy(bp);
2173 
2174  bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
2175 
2176  bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
2177  val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
2178  val |= MII_BNX2_SD_1000XCTL1_FIBER;
2179  bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
2180 
2181  bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
2182  bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
2183  if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
2184  val |= BCM5708S_UP1_2G5;
2185  else
2186  val &= ~BCM5708S_UP1_2G5;
2187  bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
2188 
2189  bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
2190  bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
2191  val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
2192  bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
2193 
2194  bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
2195 
2196  bnx2_read_phy(bp, MII_BNX2_CL73_BAM_CTL1, &val);
2197  val |= MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN | MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
2198  bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
2199 
2200  bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2201 
2202  return 0;
2203 }
2204 
2205 static int
2206 bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
2207 {
2208  u32 val;
2209 
2210  if (reset_phy)
2211  bnx2_reset_phy(bp);
2212 
2213  bp->mii_up1 = BCM5708S_UP1;
2214 
2215  bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
2216  bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
2217  bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2218 
2219  bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
2220  val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
2221  bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
2222 
2223  bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
2224  val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
2225  bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
2226 
2227  if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
2228  bnx2_read_phy(bp, BCM5708S_UP1, &val);
2229  val |= BCM5708S_UP1_2G5;
2230  bnx2_write_phy(bp, BCM5708S_UP1, val);
2231  }
2232 
2233  if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
2234  (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
2235  (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
2236  /* increase tx signal amplitude */
2237  bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2238  BCM5708S_BLK_ADDR_TX_MISC);
2239  bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
2240  val |= BCM5708S_TX_ACTL1_DRIVER_VCM;
2241  bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
2242  bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2243  }
2244 
2245  val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
2246  BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
2247 
2248  if (val) {
2249  u32 is_backplane;
2250 
2251  is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
2252  if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
2253  bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2254  BCM5708S_BLK_ADDR_TX_MISC);
2255  bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
2256  bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2257  BCM5708S_BLK_ADDR_DIG);
2258  }
2259  }
2260  return 0;
2261 }
2262 
2263 static int
2264 bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
2265 {
2266  if (reset_phy)
2267  bnx2_reset_phy(bp);
2268 
2269  bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
2270 
2271  if (CHIP_NUM(bp) == CHIP_NUM_5706)
2272  REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
2273 
2274  if (bp->dev->mtu > 1500) {
2275  u32 val;
2276 
2277  /* Set extended packet length bit */
2278  bnx2_write_phy(bp, 0x18, 0x7);
2279  bnx2_read_phy(bp, 0x18, &val);
2280  bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
2281 
2282  bnx2_write_phy(bp, 0x1c, 0x6c00);
2283  bnx2_read_phy(bp, 0x1c, &val);
2284  bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
2285  }
2286  else {
2287  u32 val;
2288 
2289  bnx2_write_phy(bp, 0x18, 0x7);
2290  bnx2_read_phy(bp, 0x18, &val);
2291  bnx2_write_phy(bp, 0x18, val & ~0x4007);
2292 
2293  bnx2_write_phy(bp, 0x1c, 0x6c00);
2294  bnx2_read_phy(bp, 0x1c, &val);
2295  bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
2296  }
2297 
2298  return 0;
2299 }
2300 
2301 static int
2302 bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
2303 {
2304  u32 val;
2305 
2306  if (reset_phy)
2307  bnx2_reset_phy(bp);
2308 
2309  if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
2310  bnx2_write_phy(bp, 0x18, 0x0c00);
2311  bnx2_write_phy(bp, 0x17, 0x000a);
2312  bnx2_write_phy(bp, 0x15, 0x310b);
2313  bnx2_write_phy(bp, 0x17, 0x201f);
2314  bnx2_write_phy(bp, 0x15, 0x9506);
2315  bnx2_write_phy(bp, 0x17, 0x401f);
2316  bnx2_write_phy(bp, 0x15, 0x14e2);
2317  bnx2_write_phy(bp, 0x18, 0x0400);
2318  }
2319 
2320  if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
2321  bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
2322  MII_BNX2_DSP_EXPAND_REG | 0x8);
2323  bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
2324  val &= ~(1 << 8);
2325  bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
2326  }
2327 
2328  if (bp->dev->mtu > 1500) {
2329  /* Set extended packet length bit */
2330  bnx2_write_phy(bp, 0x18, 0x7);
2331  bnx2_read_phy(bp, 0x18, &val);
2332  bnx2_write_phy(bp, 0x18, val | 0x4000);
2333 
2334  bnx2_read_phy(bp, 0x10, &val);
2335  bnx2_write_phy(bp, 0x10, val | 0x1);
2336  }
2337  else {
2338  bnx2_write_phy(bp, 0x18, 0x7);
2339  bnx2_read_phy(bp, 0x18, &val);
2340  bnx2_write_phy(bp, 0x18, val & ~0x4007);
2341 
2342  bnx2_read_phy(bp, 0x10, &val);
2343  bnx2_write_phy(bp, 0x10, val & ~0x1);
2344  }
2345 
2346  /* ethernet@wirespeed */
2347  bnx2_write_phy(bp, 0x18, 0x7007);
2348  bnx2_read_phy(bp, 0x18, &val);
2349  bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
2350  return 0;
2351 }
2352 
2353 
2354 static int
2355 bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2356 __releases(&bp->phy_lock)
2357 __acquires(&bp->phy_lock)
2358 {
2359  u32 val;
2360  int rc = 0;
2361 
2362  bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2363  bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2364 
2365  bp->mii_bmcr = MII_BMCR;
2366  bp->mii_bmsr = MII_BMSR;
2367  bp->mii_bmsr1 = MII_BMSR;
2368  bp->mii_adv = MII_ADVERTISE;
2369  bp->mii_lpa = MII_LPA;
2370 
2371  REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2372 
2373  if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2374  goto setup_phy;
2375 
2376  bnx2_read_phy(bp, MII_PHYSID1, &val);
2377  bp->phy_id = val << 16;
2378  bnx2_read_phy(bp, MII_PHYSID2, &val);
2379  bp->phy_id |= val & 0xffff;
2380 
2381  if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2382  if (CHIP_NUM(bp) == CHIP_NUM_5706)
2383  rc = bnx2_init_5706s_phy(bp, reset_phy);
2384  else if (CHIP_NUM(bp) == CHIP_NUM_5708)
2385  rc = bnx2_init_5708s_phy(bp, reset_phy);
2386  else if (CHIP_NUM(bp) == CHIP_NUM_5709)
2387  rc = bnx2_init_5709s_phy(bp, reset_phy);
2388  }
2389  else {
2390  rc = bnx2_init_copper_phy(bp, reset_phy);
2391  }
2392 
2393 setup_phy:
2394  if (!rc)
2395  rc = bnx2_setup_phy(bp, bp->phy_port);
2396 
2397  return rc;
2398 }
2399 
2400 static int
2401 bnx2_set_mac_loopback(struct bnx2 *bp)
2402 {
2403  u32 mac_mode;
2404 
2405  mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2406  mac_mode &= ~BNX2_EMAC_MODE_PORT;
2407  mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2408  REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2409  bp->link_up = 1;
2410  return 0;
2411 }
2412 
2413 static int bnx2_test_link(struct bnx2 *);
2414 
2415 static int
2416 bnx2_set_phy_loopback(struct bnx2 *bp)
2417 {
2418  u32 mac_mode;
2419  int rc, i;
2420 
2421  spin_lock_bh(&bp->phy_lock);
2422  rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2423  BMCR_SPEED1000);
2424  spin_unlock_bh(&bp->phy_lock);
2425  if (rc)
2426  return rc;
2427 
2428  for (i = 0; i < 10; i++) {
2429  if (bnx2_test_link(bp) == 0)
2430  break;
2431  msleep(100);
2432  }
2433 
2434  mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2435  mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2436  BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2437  BNX2_EMAC_MODE_25G_MODE);
2438 
2439  mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2440  REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2441  bp->link_up = 1;
2442  return 0;
2443 }
2444 
2445 static void
2446 bnx2_dump_mcp_state(struct bnx2 *bp)
2447 {
2448  struct net_device *dev = bp->dev;
2449  u32 mcp_p0, mcp_p1;
2450 
2451  netdev_err(dev, "<--- start MCP states dump --->\n");
2452  if (CHIP_NUM(bp) == CHIP_NUM_5709) {
2453  mcp_p0 = BNX2_MCP_STATE_P0;
2454  mcp_p1 = BNX2_MCP_STATE_P1;
2455  } else {
2456  mcp_p0 = BNX2_MCP_STATE_P0_5708;
2457  mcp_p1 = BNX2_MCP_STATE_P1_5708;
2458  }
2459  netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
2460  bnx2_reg_rd_ind(bp, mcp_p0), bnx2_reg_rd_ind(bp, mcp_p1));
2461  netdev_err(dev, "DEBUG: MCP mode[%08x] state[%08x] evt_mask[%08x]\n",
2462  bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_MODE),
2463  bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_STATE),
2464  bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_EVENT_MASK));
2465  netdev_err(dev, "DEBUG: pc[%08x] pc[%08x] instr[%08x]\n",
2466  bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
2467  bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
2468  bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_INSTRUCTION));
2469  netdev_err(dev, "DEBUG: shmem states:\n");
2470  netdev_err(dev, "DEBUG: drv_mb[%08x] fw_mb[%08x] link_status[%08x]",
2471  bnx2_shmem_rd(bp, BNX2_DRV_MB),
2472  bnx2_shmem_rd(bp, BNX2_FW_MB),
2473  bnx2_shmem_rd(bp, BNX2_LINK_STATUS));
2474  pr_cont(" drv_pulse_mb[%08x]\n", bnx2_shmem_rd(bp, BNX2_DRV_PULSE_MB));
2475  netdev_err(dev, "DEBUG: dev_info_signature[%08x] reset_type[%08x]",
2476  bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE),
2477  bnx2_shmem_rd(bp, BNX2_BC_STATE_RESET_TYPE));
2478  pr_cont(" condition[%08x]\n",
2479  bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION));
2480  DP_SHMEM_LINE(bp, BNX2_BC_STATE_RESET_TYPE);
2481  DP_SHMEM_LINE(bp, 0x3cc);
2482  DP_SHMEM_LINE(bp, 0x3dc);
2483  DP_SHMEM_LINE(bp, 0x3ec);
2484  netdev_err(dev, "DEBUG: 0x3fc[%08x]\n", bnx2_shmem_rd(bp, 0x3fc));
2485  netdev_err(dev, "<--- end MCP states dump --->\n");
2486 }
2487 
2488 static int
2489 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
2490 {
2491  int i;
2492  u32 val;
2493 
2494  bp->fw_wr_seq++;
2495  msg_data |= bp->fw_wr_seq;
2496 
2497  bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2498 
2499  if (!ack)
2500  return 0;
2501 
2502  /* wait for an acknowledgement. */
2503  for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
2504  msleep(10);
2505 
2506  val = bnx2_shmem_rd(bp, BNX2_FW_MB);
2507 
2508  if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
2509  break;
2510  }
2511  if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
2512  return 0;
2513 
2514  /* If we timed out, inform the firmware that this is the case. */
2515  if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2516  msg_data &= ~BNX2_DRV_MSG_CODE;
2517  msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2518 
2519  bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2520  if (!silent) {
2521  pr_err("fw sync timeout, reset code = %x\n", msg_data);
2522  bnx2_dump_mcp_state(bp);
2523  }
2524 
2525  return -EBUSY;
2526  }
2527 
2528  if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
2529  return -EIO;
2530 
2531  return 0;
2532 }
2533 
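/* [Editor's sketch -- not part of bnx2.c] bnx2_fw_sync() above is a
 * mailbox handshake: each request is stamped with an incrementing
 * sequence number and the driver polls until the firmware echoes that
 * sequence back. The same idea in miniature (standalone C, all names
 * hypothetical; polling delay elided):
 */
#if 0
#include <stdint.h>

#define SEQ_MASK 0xffffu

/* Returns 0 once *fw_mb echoes our sequence, -1 on timeout. */
static int mbox_sync(volatile uint32_t *drv_mb, volatile uint32_t *fw_mb,
                     uint32_t msg, unsigned int tries)
{
	static uint32_t seq;
	uint32_t stamped = (msg & ~SEQ_MASK) | (++seq & SEQ_MASK);

	*drv_mb = stamped;			/* post the request */
	while (tries--)
		if ((*fw_mb & SEQ_MASK) == (stamped & SEQ_MASK))
			return 0;		/* firmware acked our seq */
	return -1;				/* timed out */
}
#endif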
2534 static int
2535 bnx2_init_5709_context(struct bnx2 *bp)
2536 {
2537  int i, ret = 0;
2538  u32 val;
2539 
2540  val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2541  val |= (BCM_PAGE_BITS - 8) << 16;
2542  REG_WR(bp, BNX2_CTX_COMMAND, val);
2543  for (i = 0; i < 10; i++) {
2544  val = REG_RD(bp, BNX2_CTX_COMMAND);
2545  if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2546  break;
2547  udelay(2);
2548  }
2549  if (val & BNX2_CTX_COMMAND_MEM_INIT)
2550  return -EBUSY;
2551 
2552  for (i = 0; i < bp->ctx_pages; i++) {
2553  int j;
2554 
2555  if (bp->ctx_blk[i])
2556  memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
2557  else
2558  return -ENOMEM;
2559 
2560  REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2561  (bp->ctx_blk_mapping[i] & 0xffffffff) |
2562  BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2563  REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2564  (u64) bp->ctx_blk_mapping[i] >> 32);
2565  REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2566  BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
2567  for (j = 0; j < 10; j++) {
2568 
2569  val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2570  if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2571  break;
2572  udelay(5);
2573  }
2574  if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2575  ret = -EBUSY;
2576  break;
2577  }
2578  }
2579  return ret;
2580 }
2581 
2582 static void
2583 bnx2_init_context(struct bnx2 *bp)
2584 {
2585  u32 vcid;
2586 
2587  vcid = 96;
2588  while (vcid) {
2589  u32 vcid_addr, pcid_addr, offset;
2590  int i;
2591 
2592  vcid--;
2593 
2594  if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
2595  u32 new_vcid;
2596 
2597  vcid_addr = GET_PCID_ADDR(vcid);
2598  if (vcid & 0x8) {
2599  new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2600  }
2601  else {
2602  new_vcid = vcid;
2603  }
2604  pcid_addr = GET_PCID_ADDR(new_vcid);
2605  }
2606  else {
2607  vcid_addr = GET_CID_ADDR(vcid);
2608  pcid_addr = vcid_addr;
2609  }
2610 
2611  for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2612  vcid_addr += (i << PHY_CTX_SHIFT);
2613  pcid_addr += (i << PHY_CTX_SHIFT);
2614 
2615  REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2616  REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2617 
2618  /* Zero out the context. */
2619  for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2620  bnx2_ctx_wr(bp, vcid_addr, offset, 0);
2621  }
2622  }
2623 }
2624 
2625 static int
2626 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2627 {
2628  u16 *good_mbuf;
2629  u32 good_mbuf_cnt;
2630  u32 val;
2631 
2632  good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2633  if (good_mbuf == NULL)
2634  return -ENOMEM;
2635 
2636  bnx2_reg_wr_ind(bp, BNX2_MISC_ENABLE_SET_BITS,
2637  BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2638 
2639  good_mbuf_cnt = 0;
2640 
2641  /* Allocate a bunch of mbufs and save the good ones in an array. */
2642  val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2643  while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2644  bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2645  BNX2_RBUF_COMMAND_ALLOC_REQ);
2646 
2647  val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2648 
2649  val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2650 
2651  /* The addresses with Bit 9 set are bad memory blocks. */
2652  if (!(val & (1 << 9))) {
2653  good_mbuf[good_mbuf_cnt] = (u16) val;
2654  good_mbuf_cnt++;
2655  }
2656 
2657  val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2658  }
2659 
2660  /* Free the good ones back to the mbuf pool, thus discarding
2661  * all the bad ones. */
2662  while (good_mbuf_cnt) {
2663  good_mbuf_cnt--;
2664 
2665  val = good_mbuf[good_mbuf_cnt];
2666  val = (val << 9) | val | 1;
2667 
2668  bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2669  }
2670  kfree(good_mbuf);
2671  return 0;
2672 }
2673 
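/* [Editor's sketch -- not part of bnx2.c] bnx2_alloc_bad_rbuf() above
 * drains the RX mbuf pool, keeps only the handles without the
 * bad-block bit (bit 9) set, then frees the good ones back, leaving
 * the bad blocks stranded. The filtering step in isolation
 * (standalone C, hypothetical names):
 */
#if 0
#include <stdint.h>
#include <stddef.h>

/* Copy only handles whose bit 9 is clear; returns the kept count. */
static size_t keep_good_mbufs(const uint32_t *alloc_vals, size_t n,
                              uint16_t *good, size_t cap)
{
	size_t kept = 0;

	for (size_t i = 0; i < n && kept < cap; i++)
		if (!(alloc_vals[i] & (1u << 9)))
			good[kept++] = (uint16_t)alloc_vals[i];
	return kept;
}
#endif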
2674 static void
2675 bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2676 {
2677  u32 val;
2678 
2679  val = (mac_addr[0] << 8) | mac_addr[1];
2680 
2681  REG_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2682 
2683  val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2684  (mac_addr[4] << 8) | mac_addr[5];
2685 
2686  REG_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2687 }
2688 
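/* [Editor's sketch -- not part of bnx2.c] bnx2_set_mac_addr() above
 * splits a 6-byte MAC across two registers: bytes 0-1 in MATCH0,
 * bytes 2-5 in MATCH1, most-significant byte first. The packing shown
 * standalone:
 */
#if 0
#include <stdint.h>

static void pack_mac(const uint8_t m[6], uint32_t *match0, uint32_t *match1)
{
	*match0 = ((uint32_t)m[0] << 8) | m[1];
	*match1 = ((uint32_t)m[2] << 24) | ((uint32_t)m[3] << 16) |
		  ((uint32_t)m[4] << 8) | m[5];
}
#endif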
2689 static inline int
2690 bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2691 {
2692  dma_addr_t mapping;
2693  struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2694  struct rx_bd *rxbd =
2695  &rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
2696  struct page *page = alloc_page(gfp);
2697 
2698  if (!page)
2699  return -ENOMEM;
2700  mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE,
2701  PCI_DMA_FROMDEVICE);
2702  if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2703  __free_page(page);
2704  return -EIO;
2705  }
2706 
2707  rx_pg->page = page;
2708  dma_unmap_addr_set(rx_pg, mapping, mapping);
2709  rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2710  rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2711  return 0;
2712 }
2713 
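/* [Editor's sketch -- not part of bnx2.c] The buffer descriptors
 * store each DMA address as two 32-bit halves, as bnx2_alloc_rx_page()
 * above does with rx_bd_haddr_hi/lo. The split and the round trip
 * back, standalone:
 */
#if 0
#include <stdint.h>

static void split_addr(uint64_t dma, uint32_t *hi, uint32_t *lo)
{
	*hi = (uint32_t)(dma >> 32);
	*lo = (uint32_t)(dma & 0xffffffffu);
}

static uint64_t join_addr(uint32_t hi, uint32_t lo)
{
	return ((uint64_t)hi << 32) | lo;
}
#endif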
2714 static void
2715 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2716 {
2717  struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2718  struct page *page = rx_pg->page;
2719 
2720  if (!page)
2721  return;
2722 
2723  dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping),
2724  PAGE_SIZE, PCI_DMA_FROMDEVICE);
2725 
2726  __free_page(page);
2727  rx_pg->page = NULL;
2728 }
2729 
2730 static inline int
2731 bnx2_alloc_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2732 {
2733  u8 *data;
2734  struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
2735  dma_addr_t mapping;
2736  struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
2737 
2738  data = kmalloc(bp->rx_buf_size, gfp);
2739  if (!data)
2740  return -ENOMEM;
2741 
2742  mapping = dma_map_single(&bp->pdev->dev,
2743  get_l2_fhdr(data),
2744  bp->rx_buf_use_size,
2745  PCI_DMA_FROMDEVICE);
2746  if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2747  kfree(data);
2748  return -EIO;
2749  }
2750 
2751  rx_buf->data = data;
2752  dma_unmap_addr_set(rx_buf, mapping, mapping);
2753 
2754  rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2755  rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2756 
2757  rxr->rx_prod_bseq += bp->rx_buf_use_size;
2758 
2759  return 0;
2760 }
2761 
2762 static int
2763 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2764 {
2765  struct status_block *sblk = bnapi->status_blk.msi;
2766  u32 new_link_state, old_link_state;
2767  int is_set = 1;
2768 
2769  new_link_state = sblk->status_attn_bits & event;
2770  old_link_state = sblk->status_attn_bits_ack & event;
2771  if (new_link_state != old_link_state) {
2772  if (new_link_state)
2773  REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2774  else
2775  REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2776  } else
2777  is_set = 0;
2778 
2779  return is_set;
2780 }
2781 
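/* [Editor's sketch -- not part of bnx2.c] The attention bits and
 * their "ack" copy act as a pair of latches: an event is pending
 * whenever the two disagree, and acknowledging it means copying the
 * new state into the ack register, as bnx2_phy_event_is_set() does
 * above. The edge detection alone:
 */
#if 0
#include <stdint.h>
#include <stdbool.h>

static bool event_pending(uint32_t attn, uint32_t attn_ack, uint32_t event)
{
	return (attn & event) != (attn_ack & event);
}
#endif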
2782 static void
2783 bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2784 {
2785  spin_lock(&bp->phy_lock);
2786 
2787  if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
2788  bnx2_set_link(bp);
2789  if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2790  bnx2_set_remote_link(bp);
2791 
2792  spin_unlock(&bp->phy_lock);
2793 
2794 }
2795 
2796 static inline u16
2797 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2798 {
2799  u16 cons;
2800 
2801  /* Tell compiler that status block fields can change. */
2802  barrier();
2803  cons = *bnapi->hw_tx_cons_ptr;
2804  barrier();
2805  if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
2806  cons++;
2807  return cons;
2808 }
2809 
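/* [Editor's sketch -- not part of bnx2.c] The ring reserves the last
 * descriptor of each page for a chain pointer, so a raw hardware
 * consumer index that lands on that slot must be bumped past it, as
 * bnx2_get_hw_tx_cons() does above. Equivalent standalone arithmetic,
 * assuming 4 KB pages of 16-byte descriptors (256 per page):
 */
#if 0
#include <stdint.h>

#define DESC_CNT_PER_PAGE 256u
#define MAX_DESC_IDX (DESC_CNT_PER_PAGE - 1)	/* chain-pointer slot */

static uint16_t skip_chain_slot(uint16_t cons)
{
	if ((cons & MAX_DESC_IDX) == MAX_DESC_IDX)
		cons++;				/* hop over the link BD */
	return cons;
}
#endif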
2810 static int
2811 bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2812 {
2813  struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
2814  u16 hw_cons, sw_cons, sw_ring_cons;
2815  int tx_pkt = 0, index;
2816  unsigned int tx_bytes = 0;
2817  struct netdev_queue *txq;
2818 
2819  index = (bnapi - bp->bnx2_napi);
2820  txq = netdev_get_tx_queue(bp->dev, index);
2821 
2822  hw_cons = bnx2_get_hw_tx_cons(bnapi);
2823  sw_cons = txr->tx_cons;
2824 
2825  while (sw_cons != hw_cons) {
2826  struct sw_tx_bd *tx_buf;
2827  struct sk_buff *skb;
2828  int i, last;
2829 
2830  sw_ring_cons = TX_RING_IDX(sw_cons);
2831 
2832  tx_buf = &txr->tx_buf_ring[sw_ring_cons];
2833  skb = tx_buf->skb;
2834 
2835  /* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
2836  prefetch(&skb->end);
2837 
2838  /* partial BD completions possible with TSO packets */
2839  if (tx_buf->is_gso) {
2840  u16 last_idx, last_ring_idx;
2841 
2842  last_idx = sw_cons + tx_buf->nr_frags + 1;
2843  last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
2844  if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
2845  last_idx++;
2846  }
2847  if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2848  break;
2849  }
2850  }
2851 
2852  dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
2853  skb_headlen(skb), PCI_DMA_TODEVICE);
2854 
2855  tx_buf->skb = NULL;
2856  last = tx_buf->nr_frags;
2857 
2858  for (i = 0; i < last; i++) {
2859  sw_cons = NEXT_TX_BD(sw_cons);
2860 
2861  dma_unmap_page(&bp->pdev->dev,
2862  dma_unmap_addr(
2863  &txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
2864  mapping),
2865  skb_frag_size(&skb_shinfo(skb)->frags[i]),
2866  PCI_DMA_TODEVICE);
2867  }
2868 
2869  sw_cons = NEXT_TX_BD(sw_cons);
2870 
2871  tx_bytes += skb->len;
2872  dev_kfree_skb(skb);
2873  tx_pkt++;
2874  if (tx_pkt == budget)
2875  break;
2876 
2877  if (hw_cons == sw_cons)
2878  hw_cons = bnx2_get_hw_tx_cons(bnapi);
2879  }
2880 
2881  netdev_tx_completed_queue(txq, tx_pkt, tx_bytes);
2882  txr->hw_tx_cons = hw_cons;
2883  txr->tx_cons = sw_cons;
2884 
2885  /* Need to make the tx_cons update visible to bnx2_start_xmit()
2886  * before checking for netif_tx_queue_stopped(). Without the
2887  * memory barrier, there is a small possibility that bnx2_start_xmit()
2888  * will miss it and cause the queue to be stopped forever.
2889  */
2890  smp_mb();
2891 
2892  if (unlikely(netif_tx_queue_stopped(txq)) &&
2893  (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
2894  __netif_tx_lock(txq, smp_processor_id());
2895  if ((netif_tx_queue_stopped(txq)) &&
2896  (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
2897  netif_tx_wake_queue(txq);
2898  __netif_tx_unlock(txq);
2899  }
2900 
2901  return tx_pkt;
2902 }
2903 
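/* [Editor's sketch -- not part of bnx2.c] The smp_mb() in bnx2_tx_int()
 * closes a classic producer/consumer race: the completion side must
 * publish the new tx_cons before testing the stopped flag, mirroring
 * the transmit side, which stops the queue before re-checking free
 * space. A minimal model of the protocol using C11 atomics in place
 * of smp_mb() (RING/WAKE_THRESH and the flag are illustrative):
 */
#if 0
#include <stdatomic.h>
#include <stdbool.h>

#define RING 256u
#define WAKE_THRESH 32u

static _Atomic unsigned int prod, cons;
static atomic_bool stopped;

/* completion side: publish cons, full fence, then test 'stopped' */
static void complete_some(unsigned int done)
{
	atomic_fetch_add(&cons, done);
	atomic_thread_fence(memory_order_seq_cst);	/* the smp_mb() */
	if (atomic_load(&stopped) && (RING - (prod - cons)) > WAKE_THRESH)
		atomic_store(&stopped, false);		/* wake queue */
}

/* transmit side: stop first, fence, then re-check free space */
static void maybe_stop(void)
{
	if (RING - (prod - cons) <= 1) {
		atomic_store(&stopped, true);
		atomic_thread_fence(memory_order_seq_cst);
		if (RING - (prod - cons) > WAKE_THRESH)
			atomic_store(&stopped, false);
	}
}
#endif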
2904 static void
2905 bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2906  struct sk_buff *skb, int count)
2907 {
2908  struct sw_pg *cons_rx_pg, *prod_rx_pg;
2909  struct rx_bd *cons_bd, *prod_bd;
2910  int i;
2911  u16 hw_prod, prod;
2912  u16 cons = rxr->rx_pg_cons;
2913 
2914  cons_rx_pg = &rxr->rx_pg_ring[cons];
2915 
2916  /* The caller was unable to allocate a new page to replace the
2917  * last one in the frags array, so we need to recycle that page
2918  * and then free the skb.
2919  */
2920  if (skb) {
2921  struct page *page;
2922  struct skb_shared_info *shinfo;
2923 
2924  shinfo = skb_shinfo(skb);
2925  shinfo->nr_frags--;
2926  page = skb_frag_page(&shinfo->frags[shinfo->nr_frags]);
2927  __skb_frag_set_page(&shinfo->frags[shinfo->nr_frags], NULL);
2928 
2929  cons_rx_pg->page = page;
2930  dev_kfree_skb(skb);
2931  }
2932 
2933  hw_prod = rxr->rx_pg_prod;
2934 
2935  for (i = 0; i < count; i++) {
2936  prod = RX_PG_RING_IDX(hw_prod);
2937 
2938  prod_rx_pg = &rxr->rx_pg_ring[prod];
2939  cons_rx_pg = &rxr->rx_pg_ring[cons];
2940  cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2941  prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2942 
2943  if (prod != cons) {
2944  prod_rx_pg->page = cons_rx_pg->page;
2945  cons_rx_pg->page = NULL;
2946  dma_unmap_addr_set(prod_rx_pg, mapping,
2947  dma_unmap_addr(cons_rx_pg, mapping));
2948 
2949  prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2950  prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2951 
2952  }
2953  cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
2954  hw_prod = NEXT_RX_BD(hw_prod);
2955  }
2956  rxr->rx_pg_prod = hw_prod;
2957  rxr->rx_pg_cons = cons;
2958 }
2959 
2960 static inline void
2961 bnx2_reuse_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2962  u8 *data, u16 cons, u16 prod)
2963 {
2964  struct sw_bd *cons_rx_buf, *prod_rx_buf;
2965  struct rx_bd *cons_bd, *prod_bd;
2966 
2967  cons_rx_buf = &rxr->rx_buf_ring[cons];
2968  prod_rx_buf = &rxr->rx_buf_ring[prod];
2969 
2970  dma_sync_single_for_device(&bp->pdev->dev,
2971  dma_unmap_addr(cons_rx_buf, mapping),
2972  BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2973 
2974  rxr->rx_prod_bseq += bp->rx_buf_use_size;
2975 
2976  prod_rx_buf->data = data;
2977 
2978  if (cons == prod)
2979  return;
2980 
2981  dma_unmap_addr_set(prod_rx_buf, mapping,
2982  dma_unmap_addr(cons_rx_buf, mapping));
2983 
2984  cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2985  prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2986  prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2987  prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2988 }
2989 
2990 static struct sk_buff *
2991 bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u8 *data,
2992  unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
2993  u32 ring_idx)
2994 {
2995  int err;
2996  u16 prod = ring_idx & 0xffff;
2997  struct sk_buff *skb;
2998 
2999  err = bnx2_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
3000  if (unlikely(err)) {
3001  bnx2_reuse_rx_data(bp, rxr, data, (u16) (ring_idx >> 16), prod);
3002 error:
3003  if (hdr_len) {
3004  unsigned int raw_len = len + 4;
3005  int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
3006 
3007  bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
3008  }
3009  return NULL;
3010  }
3011 
3012  dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
3013  PCI_DMA_FROMDEVICE);
3014  skb = build_skb(data, 0);
3015  if (!skb) {
3016  kfree(data);
3017  goto error;
3018  }
3019  skb_reserve(skb, ((u8 *)get_l2_fhdr(data) - data) + BNX2_RX_OFFSET);
3020  if (hdr_len == 0) {
3021  skb_put(skb, len);
3022  return skb;
3023  } else {
3024  unsigned int i, frag_len, frag_size, pages;
3025  struct sw_pg *rx_pg;
3026  u16 pg_cons = rxr->rx_pg_cons;
3027  u16 pg_prod = rxr->rx_pg_prod;
3028 
3029  frag_size = len + 4 - hdr_len;
3030  pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
3031  skb_put(skb, hdr_len);
3032 
3033  for (i = 0; i < pages; i++) {
3034  dma_addr_t mapping_old;
3035 
3036  frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
3037  if (unlikely(frag_len <= 4)) {
3038  unsigned int tail = 4 - frag_len;
3039 
3040  rxr->rx_pg_cons = pg_cons;
3041  rxr->rx_pg_prod = pg_prod;
3042  bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
3043  pages - i);
3044  skb->len -= tail;
3045  if (i == 0) {
3046  skb->tail -= tail;
3047  } else {
3048  skb_frag_t *frag =
3049  &skb_shinfo(skb)->frags[i - 1];
3050  skb_frag_size_sub(frag, tail);
3051  skb->data_len -= tail;
3052  }
3053  return skb;
3054  }
3055  rx_pg = &rxr->rx_pg_ring[pg_cons];
3056 
3057  /* Don't unmap yet. If we're unable to allocate a new
3058  * page, we need to recycle the page and the DMA addr.
3059  */
3060  mapping_old = dma_unmap_addr(rx_pg, mapping);
3061  if (i == pages - 1)
3062  frag_len -= 4;
3063 
3064  skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
3065  rx_pg->page = NULL;
3066 
3067  err = bnx2_alloc_rx_page(bp, rxr,
3068  RX_PG_RING_IDX(pg_prod),
3069  GFP_ATOMIC);
3070  if (unlikely(err)) {
3071  rxr->rx_pg_cons = pg_cons;
3072  rxr->rx_pg_prod = pg_prod;
3073  bnx2_reuse_rx_skb_pages(bp, rxr, skb,
3074  pages - i);
3075  return NULL;
3076  }
3077 
3078  dma_unmap_page(&bp->pdev->dev, mapping_old,
3079  PAGE_SIZE, PCI_DMA_FROMDEVICE);
3080 
3081  frag_size -= frag_len;
3082  skb->data_len += frag_len;
3083  skb->truesize += PAGE_SIZE;
3084  skb->len += frag_len;
3085 
3086  pg_prod = NEXT_RX_BD(pg_prod);
3087  pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
3088  }
3089  rxr->rx_pg_prod = pg_prod;
3090  rxr->rx_pg_cons = pg_cons;
3091  }
3092  return skb;
3093 }
3094 
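/* [Editor's sketch -- not part of bnx2.c] For split (jumbo) packets,
 * bnx2_rx_skb() above keeps hdr_len bytes in the linear area and
 * spreads the remainder, plus the 4-byte CRC trimmed from the last
 * fragment, across full pages. The page-count arithmetic on its own,
 * assuming a 4 KB page:
 */
#if 0
#include <stdint.h>

#define PAGE_SZ 4096u	/* assumed page size */

static unsigned int frag_pages(unsigned int pkt_len, unsigned int hdr_len)
{
	unsigned int frag_size = pkt_len + 4 - hdr_len;	/* +4 for CRC */

	return (frag_size + PAGE_SZ - 1) / PAGE_SZ;	/* PAGE_ALIGN >> SHIFT */
}
#endif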
3095 static inline u16
3096 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
3097 {
3098  u16 cons;
3099 
3100  /* Tell compiler that status block fields can change. */
3101  barrier();
3102  cons = *bnapi->hw_rx_cons_ptr;
3103  barrier();
3104  if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
3105  cons++;
3106  return cons;
3107 }
3108 
3109 static int
3110 bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
3111 {
3112  struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3113  u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
3114  struct l2_fhdr *rx_hdr;
3115  int rx_pkt = 0, pg_ring_used = 0;
3116 
3117  hw_cons = bnx2_get_hw_rx_cons(bnapi);
3118  sw_cons = rxr->rx_cons;
3119  sw_prod = rxr->rx_prod;
3120 
3121  /* Memory barrier necessary as speculative reads of the rx
3122  * buffer can be ahead of the index in the status block
3123  */
3124  rmb();
3125  while (sw_cons != hw_cons) {
3126  unsigned int len, hdr_len;
3127  u32 status;
3128  struct sw_bd *rx_buf, *next_rx_buf;
3129  struct sk_buff *skb;
3130  dma_addr_t dma_addr;
3131  u8 *data;
3132 
3133  sw_ring_cons = RX_RING_IDX(sw_cons);
3134  sw_ring_prod = RX_RING_IDX(sw_prod);
3135 
3136  rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
3137  data = rx_buf->data;
3138  rx_buf->data = NULL;
3139 
3140  rx_hdr = get_l2_fhdr(data);
3141  prefetch(rx_hdr);
3142 
3143  dma_addr = dma_unmap_addr(rx_buf, mapping);
3144 
3145  dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr,
3146  BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
3147  PCI_DMA_FROMDEVICE);
3148 
3149  next_rx_buf =
3150  &rxr->rx_buf_ring[RX_RING_IDX(NEXT_RX_BD(sw_cons))];
3151  prefetch(get_l2_fhdr(next_rx_buf->data));
3152 
3153  len = rx_hdr->l2_fhdr_pkt_len;
3154  status = rx_hdr->l2_fhdr_status;
3155 
3156  hdr_len = 0;
3157  if (status & L2_FHDR_STATUS_SPLIT) {
3158  hdr_len = rx_hdr->l2_fhdr_ip_xsum;
3159  pg_ring_used = 1;
3160  } else if (len > bp->rx_jumbo_thresh) {
3161  hdr_len = bp->rx_jumbo_thresh;
3162  pg_ring_used = 1;
3163  }
3164 
3165  if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
3166  L2_FHDR_ERRORS_PHY_DECODE |
3167  L2_FHDR_ERRORS_ALIGNMENT |
3168  L2_FHDR_ERRORS_TOO_SHORT |
3169  L2_FHDR_ERRORS_GIANT_FRAME))) {
3170 
3171  bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
3172  sw_ring_prod);
3173  if (pg_ring_used) {
3174  int pages;
3175 
3176  pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;
3177 
3178  bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
3179  }
3180  goto next_rx;
3181  }
3182 
3183  len -= 4;
3184 
3185  if (len <= bp->rx_copy_thresh) {
3186  skb = netdev_alloc_skb(bp->dev, len + 6);
3187  if (skb == NULL) {
3188  bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
3189  sw_ring_prod);
3190  goto next_rx;
3191  }
3192 
3193  /* aligned copy */
3194  memcpy(skb->data,
3195  (u8 *)rx_hdr + BNX2_RX_OFFSET - 6,
3196  len + 6);
3197  skb_reserve(skb, 6);
3198  skb_put(skb, len);
3199 
3200  bnx2_reuse_rx_data(bp, rxr, data,
3201  sw_ring_cons, sw_ring_prod);
3202 
3203  } else {
3204  skb = bnx2_rx_skb(bp, rxr, data, len, hdr_len, dma_addr,
3205  (sw_ring_cons << 16) | sw_ring_prod);
3206  if (!skb)
3207  goto next_rx;
3208  }
3209  if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
3210  !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG))
3211  __vlan_hwaccel_put_tag(skb, rx_hdr->l2_fhdr_vlan_tag);
3212 
3213  skb->protocol = eth_type_trans(skb, bp->dev);
3214 
3215  if ((len > (bp->dev->mtu + ETH_HLEN)) &&
3216  (ntohs(skb->protocol) != 0x8100)) {
3217 
3218  dev_kfree_skb(skb);
3219  goto next_rx;
3220 
3221  }
3222 
3223  skb_checksum_none_assert(skb);
3224  if ((bp->dev->features & NETIF_F_RXCSUM) &&
3225  (status & (L2_FHDR_STATUS_TCP_SEGMENT |
3226  L2_FHDR_STATUS_UDP_DATAGRAM))) {
3227 
3228  if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
3229  L2_FHDR_ERRORS_UDP_XSUM)) == 0))
3230  skb->ip_summed = CHECKSUM_UNNECESSARY;
3231  }
3232  if ((bp->dev->features & NETIF_F_RXHASH) &&
3233  ((status & L2_FHDR_STATUS_USE_RXHASH) ==
3234  L2_FHDR_STATUS_USE_RXHASH))
3235  skb->rxhash = rx_hdr->l2_fhdr_hash;
3236 
3237  skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
3238  napi_gro_receive(&bnapi->napi, skb);
3239  rx_pkt++;
3240 
3241 next_rx:
3242  sw_cons = NEXT_RX_BD(sw_cons);
3243  sw_prod = NEXT_RX_BD(sw_prod);
3244 
3245  if ((rx_pkt == budget))
3246  break;
3247 
3248  /* Refresh hw_cons to see if there is new work */
3249  if (sw_cons == hw_cons) {
3250  hw_cons = bnx2_get_hw_rx_cons(bnapi);
3251  rmb();
3252  }
3253  }
3254  rxr->rx_cons = sw_cons;
3255  rxr->rx_prod = sw_prod;
3256 
3257  if (pg_ring_used)
3258  REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
3259 
3260  REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);
3261 
3262  REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
3263 
3264  mmiowb();
3265 
3266  return rx_pkt;
3267 
3268 }
3269 
3270 /* MSI ISR - The only difference between this and the INTx ISR
3271  * is that the MSI interrupt is always serviced.
3272  */
3273 static irqreturn_t
3274 bnx2_msi(int irq, void *dev_instance)
3275 {
3276  struct bnx2_napi *bnapi = dev_instance;
3277  struct bnx2 *bp = bnapi->bp;
3278 
3279  prefetch(bnapi->status_blk.msi);
3280  REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3281  BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3282  BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3283 
3284  /* Return here if interrupt is disabled. */
3285  if (unlikely(atomic_read(&bp->intr_sem) != 0))
3286  return IRQ_HANDLED;
3287 
3288  napi_schedule(&bnapi->napi);
3289 
3290  return IRQ_HANDLED;
3291 }
3292 
3293 static irqreturn_t
3294 bnx2_msi_1shot(int irq, void *dev_instance)
3295 {
3296  struct bnx2_napi *bnapi = dev_instance;
3297  struct bnx2 *bp = bnapi->bp;
3298 
3299  prefetch(bnapi->status_blk.msi);
3300 
3301  /* Return here if interrupt is disabled. */
3302  if (unlikely(atomic_read(&bp->intr_sem) != 0))
3303  return IRQ_HANDLED;
3304 
3305  napi_schedule(&bnapi->napi);
3306 
3307  return IRQ_HANDLED;
3308 }
3309 
3310 static irqreturn_t
3311 bnx2_interrupt(int irq, void *dev_instance)
3312 {
3313  struct bnx2_napi *bnapi = dev_instance;
3314  struct bnx2 *bp = bnapi->bp;
3315  struct status_block *sblk = bnapi->status_blk.msi;
3316 
3317  /* When using INTx, it is possible for the interrupt to arrive
3318  * at the CPU before the status block posted prior to the
3319  * interrupt. Reading a register will flush the status block.
3320  * When using MSI, the MSI message will always complete after
3321  * the status block write.
3322  */
3323  if ((sblk->status_idx == bnapi->last_status_idx) &&
3324  (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
3325  BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
3326  return IRQ_NONE;
3327 
3328  REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3329  BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3330  BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3331 
3332  /* Read back to deassert IRQ immediately to avoid too many
3333  * spurious interrupts.
3334  */
3335  REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
3336 
3337  /* Return here if interrupt is shared and is disabled. */
3338  if (unlikely(atomic_read(&bp->intr_sem) != 0))
3339  return IRQ_HANDLED;
3340 
3341  if (napi_schedule_prep(&bnapi->napi)) {
3342  bnapi->last_status_idx = sblk->status_idx;
3343  __napi_schedule(&bnapi->napi);
3344  }
3345 
3346  return IRQ_HANDLED;
3347 }
3348 
3349 static inline int
3350 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3351 {
3352  struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3353  struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3354 
3355  if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3356  (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3357  return 1;
3358  return 0;
3359 }
3360 
3361 #define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
3362  STATUS_ATTN_BITS_TIMER_ABORT)
3363 
3364 static inline int
3365 bnx2_has_work(struct bnx2_napi *bnapi)
3366 {
3367  struct status_block *sblk = bnapi->status_blk.msi;
3368 
3369  if (bnx2_has_fast_work(bnapi))
3370  return 1;
3371 
3372 #ifdef BCM_CNIC
3373  if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
3374  return 1;
3375 #endif
3376 
3377  if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3378  (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3379  return 1;
3380 
3381  return 0;
3382 }
3383 
3384 static void
3385 bnx2_chk_missed_msi(struct bnx2 *bp)
3386 {
3387  struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
3388  u32 msi_ctrl;
3389 
3390  if (bnx2_has_work(bnapi)) {
3391  msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL);
3392  if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
3393  return;
3394 
3395  if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
3396  REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
3397  ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
3398  REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
3399  bnx2_msi(bp->irq_tbl[0].vector, bnapi);
3400  }
3401  }
3402 
3403  bp->idle_chk_status_idx = bnapi->last_status_idx;
3404 }
3405 
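/* [Editor's sketch -- not part of bnx2.c] bnx2_chk_missed_msi() above
 * runs from the driver's timer: if work is pending but the status
 * index has not moved since the previous idle check, it assumes an
 * MSI was lost, pulses the MSI enable bit, and calls the ISR by hand.
 * The detection predicate in isolation (standalone C):
 */
#if 0
#include <stdint.h>
#include <stdbool.h>

static bool msi_probably_missed(bool has_work, uint32_t last_idx,
                                uint32_t idle_chk_idx)
{
	return has_work && last_idx == idle_chk_idx;
}
#endif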
3406 #ifdef BCM_CNIC
3407 static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
3408 {
3409  struct cnic_ops *c_ops;
3410 
3411  if (!bnapi->cnic_present)
3412  return;
3413 
3414  rcu_read_lock();
3415  c_ops = rcu_dereference(bp->cnic_ops);
3416  if (c_ops)
3417  bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
3418  bnapi->status_blk.msi);
3419  rcu_read_unlock();
3420 }
3421 #endif
3422 
3423 static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
3424 {
3425  struct status_block *sblk = bnapi->status_blk.msi;
3426  u32 status_attn_bits = sblk->status_attn_bits;
3427  u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
3428 
3429  if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
3430  (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
3431 
3432  bnx2_phy_int(bp, bnapi);
3433 
3434  /* This is needed to take care of transient status
3435  * during link changes.
3436  */
3437  REG_WR(bp, BNX2_HC_COMMAND,
3438  bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3439  REG_RD(bp, BNX2_HC_COMMAND);
3440  }
3441 }
3442 
3443 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3444  int work_done, int budget)
3445 {
3446  struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3447  struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3448 
3449  if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3450  bnx2_tx_int(bp, bnapi, 0);
3451 
3452  if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3453  work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3454 
3455  return work_done;
3456 }
3457 
3458 static int bnx2_poll_msix(struct napi_struct *napi, int budget)
3459 {
3460  struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3461  struct bnx2 *bp = bnapi->bp;
3462  int work_done = 0;
3463  struct status_block_msix *sblk = bnapi->status_blk.msix;
3464 
3465  while (1) {
3466  work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3467  if (unlikely(work_done >= budget))
3468  break;
3469 
3470  bnapi->last_status_idx = sblk->status_idx;
3471  /* status idx must be read before checking for more work. */
3472  rmb();
3473  if (likely(!bnx2_has_fast_work(bnapi))) {
3474 
3475  napi_complete(napi);
3476  REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
3477  BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3478  bnapi->last_status_idx);
3479  break;
3480  }
3481  }
3482  return work_done;
3483 }
3484 
3485 static int bnx2_poll(struct napi_struct *napi, int budget)
3486 {
3487  struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3488  struct bnx2 *bp = bnapi->bp;
3489  int work_done = 0;
3490  struct status_block *sblk = bnapi->status_blk.msi;
3491 
3492  while (1) {
3493  bnx2_poll_link(bp, bnapi);
3494 
3495  work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3496 
3497 #ifdef BCM_CNIC
3498  bnx2_poll_cnic(bp, bnapi);
3499 #endif
3500 
3501  /* bnapi->last_status_idx is used below to tell the hw how
3502  * much work has been processed, so we must read it before
3503  * checking for more work.
3504  */
3505  bnapi->last_status_idx = sblk->status_idx;
3506 
3507  if (unlikely(work_done >= budget))
3508  break;
3509 
3510  rmb();
3511  if (likely(!bnx2_has_work(bnapi))) {
3512  napi_complete(napi);
3513  if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
3514  REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3515  BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3516  bnapi->last_status_idx);
3517  break;
3518  }
3519  REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3520  BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3521  BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
3522  bnapi->last_status_idx);
3523 
3524  REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3525  BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3526  bnapi->last_status_idx);
3527  break;
3528  }
3529  }
3530 
3531  return work_done;
3532 }
3533 
3534 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
3535  * from set_multicast.
3536  */
3537 static void
3538 bnx2_set_rx_mode(struct net_device *dev)
3539 {
3540  struct bnx2 *bp = netdev_priv(dev);
3541  u32 rx_mode, sort_mode;
3542  struct netdev_hw_addr *ha;
3543  int i;
3544 
3545  if (!netif_running(dev))
3546  return;
3547 
3548  spin_lock_bh(&bp->phy_lock);
3549 
3550  rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
3551  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
3552  sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
3553  if (!(dev->features & NETIF_F_HW_VLAN_RX) &&
3554  (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
3555  rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3556  if (dev->flags & IFF_PROMISC) {
3557  /* Promiscuous mode. */
3558  rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3559  sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3560  BNX2_RPM_SORT_USER0_PROM_VLAN;
3561  }
3562  else if (dev->flags & IFF_ALLMULTI) {
3563  for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3564  REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3565  0xffffffff);
3566  }
3567  sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
3568  }
3569  else {
3570  /* Accept one or more multicast(s). */
3571  u32 mc_filter[NUM_MC_HASH_REGISTERS];
3572  u32 regidx;
3573  u32 bit;
3574  u32 crc;
3575 
3576  memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
3577 
3578  netdev_for_each_mc_addr(ha, dev) {
3579  crc = ether_crc_le(ETH_ALEN, ha->addr);
3580  bit = crc & 0xff;
3581  regidx = (bit & 0xe0) >> 5;
3582  bit &= 0x1f;
3583  mc_filter[regidx] |= (1 << bit);
3584  }
3585 
3586  for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3587  REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3588  mc_filter[i]);
3589  }
3590 
3591  sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
3592  }
3593 
3594  if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
3595  rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3596  sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3597  BNX2_RPM_SORT_USER0_PROM_VLAN;
3598  } else if (!(dev->flags & IFF_PROMISC)) {
3599  /* Add all entries into the match filter list */
3600  i = 0;
3601  netdev_for_each_uc_addr(ha, dev) {
3602  bnx2_set_mac_addr(bp, ha->addr,
3603  i + BNX2_START_UNICAST_ADDRESS_INDEX);
3604  sort_mode |= (1 <<
3605  (i + BNX2_START_UNICAST_ADDRESS_INDEX));
3606  i++;
3607  }
3608 
3609  }
3610 
3611  if (rx_mode != bp->rx_mode) {
3612  bp->rx_mode = rx_mode;
3613  REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
3614  }
3615 
3616  REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3617  REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
3618  REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
3619 
3620  spin_unlock_bh(&bp->phy_lock);
3621 }
3622 
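/* [Editor's sketch -- not part of bnx2.c] The multicast filter above
 * hashes each address with little-endian CRC32 and uses the low 8
 * bits to pick one of 256 filter bits: bits 7:5 select one of the 8
 * hash registers, bits 4:0 the bit within it. The index math in
 * isolation:
 */
#if 0
#include <stdint.h>

struct mc_slot { uint32_t reg; uint32_t bit; };

static struct mc_slot mc_hash_slot(uint32_t crc_le)
{
	uint32_t bucket = crc_le & 0xff;
	struct mc_slot s = { (bucket & 0xe0) >> 5, bucket & 0x1f };

	return s;
}
#endif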
3623 static int
3624 check_fw_section(const struct firmware *fw,
3625  const struct bnx2_fw_file_section *section,
3626  u32 alignment, bool non_empty)
3627 {
3628  u32 offset = be32_to_cpu(section->offset);
3629  u32 len = be32_to_cpu(section->len);
3630 
3631  if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3632  return -EINVAL;
3633  if ((non_empty && len == 0) || len > fw->size - offset ||
3634  len & (alignment - 1))
3635  return -EINVAL;
3636  return 0;
3637 }
3638 
3639 static int
3640 check_mips_fw_entry(const struct firmware *fw,
3641  const struct bnx2_mips_fw_file_entry *entry)
3642 {
3643  if (check_fw_section(fw, &entry->text, 4, true) ||
3644  check_fw_section(fw, &entry->data, 4, false) ||
3645  check_fw_section(fw, &entry->rodata, 4, false))
3646  return -EINVAL;
3647  return 0;
3648 }
3649 
3650 static void bnx2_release_firmware(struct bnx2 *bp)
3651 {
3652  if (bp->rv2p_firmware) {
3653  release_firmware(bp->mips_firmware);
3654  release_firmware(bp->rv2p_firmware);
3655  bp->rv2p_firmware = NULL;
3656  }
3657 }
3658 
3659 static int bnx2_request_uncached_firmware(struct bnx2 *bp)
3660 {
3661  const char *mips_fw_file, *rv2p_fw_file;
3662  const struct bnx2_mips_fw_file *mips_fw;
3663  const struct bnx2_rv2p_fw_file *rv2p_fw;
3664  int rc;
3665 
3666  if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3667  mips_fw_file = FW_MIPS_FILE_09;
3668  if ((CHIP_ID(bp) == CHIP_ID_5709_A0) ||
3669  (CHIP_ID(bp) == CHIP_ID_5709_A1))
3670  rv2p_fw_file = FW_RV2P_FILE_09_Ax;
3671  else
3672  rv2p_fw_file = FW_RV2P_FILE_09;
3673  } else {
3674  mips_fw_file = FW_MIPS_FILE_06;
3675  rv2p_fw_file = FW_RV2P_FILE_06;
3676  }
3677 
3678  rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
3679  if (rc) {
3680  pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
3681  goto out;
3682  }
3683 
3684  rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
3685  if (rc) {
3686  pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
3687  goto err_release_mips_firmware;
3688  }
3689  mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3690  rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3691  if (bp->mips_firmware->size < sizeof(*mips_fw) ||
3692  check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
3693  check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
3694  check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
3695  check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
3696  check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
3697  pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
3698  rc = -EINVAL;
3699  goto err_release_firmware;
3700  }
3701  if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
3702  check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
3703  check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
3704  pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
3705  rc = -EINVAL;
3706  goto err_release_firmware;
3707  }
3708 out:
3709  return rc;
3710 
3711 err_release_firmware:
3712  release_firmware(bp->rv2p_firmware);
3713  bp->rv2p_firmware = NULL;
3714 err_release_mips_firmware:
3715  release_firmware(bp->mips_firmware);
3716  goto out;
3717 }
3718 
3719 static int bnx2_request_firmware(struct bnx2 *bp)
3720 {
3721  return bp->rv2p_firmware ? 0 : bnx2_request_uncached_firmware(bp);
3722 }
3723 
3724 static u32
3725 rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3726 {
3727  switch (idx) {
3728  case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3729  rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3730  rv2p_code |= RV2P_BD_PAGE_SIZE;
3731  break;
3732  }
3733  return rv2p_code;
3734 }
3735 
3736 static int
3737 load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
3738  const struct bnx2_rv2p_fw_file_entry *fw_entry)
3739 {
3740  u32 rv2p_code_len, file_offset;
3741  __be32 *rv2p_code;
3742  int i;
3743  u32 val, cmd, addr;
3744 
3745  rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
3746  file_offset = be32_to_cpu(fw_entry->rv2p.offset);
3747 
3748  rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3749 
3750  if (rv2p_proc == RV2P_PROC1) {
3751  cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
3752  addr = BNX2_RV2P_PROC1_ADDR_CMD;
3753  } else {
3754  cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
3755  addr = BNX2_RV2P_PROC2_ADDR_CMD;
3756  }
3757 
3758  for (i = 0; i < rv2p_code_len; i += 8) {
3759  REG_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
3760  rv2p_code++;
3761  REG_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
3762  rv2p_code++;
3763 
3764  val = (i / 8) | cmd;
3765  REG_WR(bp, addr, val);
3766  }
3767 
3768  rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3769  for (i = 0; i < 8; i++) {
3770  u32 loc, code;
3771 
3772  loc = be32_to_cpu(fw_entry->fixup[i]);
3773  if (loc && ((loc * 4) < rv2p_code_len)) {
3774  code = be32_to_cpu(*(rv2p_code + loc - 1));
3775  REG_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
3776  code = be32_to_cpu(*(rv2p_code + loc));
3777  code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
3778  REG_WR(bp, BNX2_RV2P_INSTR_LOW, code);
3779 
3780  val = (loc / 2) | cmd;
3781  REG_WR(bp, addr, val);
3782  }
3783  }
3784 
3785  /* Reset the processor, un-stall is done later. */
3786  if (rv2p_proc == RV2P_PROC1) {
3787  REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3788  }
3789  else {
3790  REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3791  }
3792 
3793  return 0;
3794 }
3795 
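/* [Editor's sketch -- not part of bnx2.c] RV2P instructions are 64
 * bits wide and loaded as HIGH/LOW 32-bit halves; a fixup location in
 * the firmware file counts 32-bit words, so words loc-1 and loc form
 * one instruction and loc/2 is its slot in instruction memory, as the
 * fixup loop above applies it. The mapping by itself:
 */
#if 0
#include <stdint.h>

struct rv2p_slot { uint32_t hi_word; uint32_t lo_word; uint32_t instr_idx; };

static struct rv2p_slot fixup_slot(uint32_t loc)
{
	struct rv2p_slot s = { loc - 1, loc, loc / 2 };

	return s;
}
#endif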
3796 static int
3797 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3798  const struct bnx2_mips_fw_file_entry *fw_entry)
3799 {
3800  u32 addr, len, file_offset;
3801  __be32 *data;
3802  u32 offset;
3803  u32 val;
3804 
3805  /* Halt the CPU. */
3806  val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3807  val |= cpu_reg->mode_value_halt;
3808  bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3809  bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3810 
3811  /* Load the Text area. */
3812  addr = be32_to_cpu(fw_entry->text.addr);
3813  len = be32_to_cpu(fw_entry->text.len);
3814  file_offset = be32_to_cpu(fw_entry->text.offset);
3815  data = (__be32 *)(bp->mips_firmware->data + file_offset);
3816 
3817  offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3818  if (len) {
3819  int j;
3820 
3821  for (j = 0; j < (len / 4); j++, offset += 4)
3822  bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3823  }
3824 
3825  /* Load the Data area. */
3826  addr = be32_to_cpu(fw_entry->data.addr);
3827  len = be32_to_cpu(fw_entry->data.len);
3828  file_offset = be32_to_cpu(fw_entry->data.offset);
3829  data = (__be32 *)(bp->mips_firmware->data + file_offset);
3830 
3831  offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3832  if (len) {
3833  int j;
3834 
3835  for (j = 0; j < (len / 4); j++, offset += 4)
3836  bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3837  }
3838 
3839  /* Load the Read-Only area. */
3840  addr = be32_to_cpu(fw_entry->rodata.addr);
3841  len = be32_to_cpu(fw_entry->rodata.len);
3842  file_offset = be32_to_cpu(fw_entry->rodata.offset);
3843  data = (__be32 *)(bp->mips_firmware->data + file_offset);
3844 
3845  offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3846  if (len) {
3847  int j;
3848 
3849  for (j = 0; j < (len / 4); j++, offset += 4)
3850  bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3851  }
3852 
3853  /* Clear the pre-fetch instruction. */
3854  bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3855 
3856  val = be32_to_cpu(fw_entry->start_addr);
3857  bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3858 
3859  /* Start the CPU. */
3860  val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3861  val &= ~cpu_reg->mode_value_halt;
3862  bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3863  bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3864 
3865  return 0;
3866 }
3867 
3868 static int
3869 bnx2_init_cpus(struct bnx2 *bp)
3870 {
3871  const struct bnx2_mips_fw_file *mips_fw =
3872  (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3873  const struct bnx2_rv2p_fw_file *rv2p_fw =
3874  (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3875  int rc;
3876 
3877  /* Initialize the RV2P processor. */
3878  load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
3879  load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
3880 
3881  /* Initialize the RX Processor. */
3882  rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
3883  if (rc)
3884  goto init_cpu_err;
3885 
3886  /* Initialize the TX Processor. */
3887  rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
3888  if (rc)
3889  goto init_cpu_err;
3890 
3891  /* Initialize the TX Patch-up Processor. */
3892  rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
3893  if (rc)
3894  goto init_cpu_err;
3895 
3896  /* Initialize the Completion Processor. */
3897  rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
3898  if (rc)
3899  goto init_cpu_err;
3900 
3901  /* Initialize the Command Processor. */
3902  rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
3903 
3904 init_cpu_err:
3905  return rc;
3906 }
3907 
3908 static int
3909 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
3910 {
3911  u16 pmcsr;
3912 
3913  pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
3914 
3915  switch (state) {
3916  case PCI_D0: {
3917  u32 val;
3918 
3919  pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3920  (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3921  PCI_PM_CTRL_PME_STATUS);
3922 
3923  if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3924  /* delay required during transition out of D3hot */
3925  msleep(20);
3926 
3927  val = REG_RD(bp, BNX2_EMAC_MODE);
3928  val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
3929  val &= ~BNX2_EMAC_MODE_MPKT;
3930  REG_WR(bp, BNX2_EMAC_MODE, val);
3931 
3932  val = REG_RD(bp, BNX2_RPM_CONFIG);
3933  val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3934  REG_WR(bp, BNX2_RPM_CONFIG, val);
3935  break;
3936  }
3937  case PCI_D3hot: {
3938  int i;
3939  u32 val, wol_msg;
3940 
3941  if (bp->wol) {
3942  u32 advertising;
3943  u8 autoneg;
3944 
3945  autoneg = bp->autoneg;
3946  advertising = bp->advertising;
3947 
3948  if (bp->phy_port == PORT_TP) {
3949  bp->autoneg = AUTONEG_SPEED;
3950  bp->advertising = ADVERTISED_10baseT_Half |
3951  ADVERTISED_10baseT_Full |
3952  ADVERTISED_100baseT_Half |
3953  ADVERTISED_100baseT_Full |
3954  ADVERTISED_Autoneg;
3955  }
3956 
3957  spin_lock_bh(&bp->phy_lock);
3958  bnx2_setup_phy(bp, bp->phy_port);
3959  spin_unlock_bh(&bp->phy_lock);
3960 
3961  bp->autoneg = autoneg;
3962  bp->advertising = advertising;
3963 
3964  bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
3965 
3966  val = REG_RD(bp, BNX2_EMAC_MODE);
3967 
3968  /* Enable port mode. */
3969  val &= ~BNX2_EMAC_MODE_PORT;
3970  val |= BNX2_EMAC_MODE_MPKT_RCVD |
3971  BNX2_EMAC_MODE_ACPI_RCVD |
3972  BNX2_EMAC_MODE_MPKT;
3973  if (bp->phy_port == PORT_TP)
3974  val |= BNX2_EMAC_MODE_PORT_MII;
3975  else {
3976  val |= BNX2_EMAC_MODE_PORT_GMII;
3977  if (bp->line_speed == SPEED_2500)
3978  val |= BNX2_EMAC_MODE_25G_MODE;
3979  }
3980 
3981  REG_WR(bp, BNX2_EMAC_MODE, val);
3982 
3983  /* receive all multicast */
3984  for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3985  REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3986  0xffffffff);
3987  }
3988  REG_WR(bp, BNX2_EMAC_RX_MODE,
3989  BNX2_EMAC_RX_MODE_SORT_MODE);
3990 
3991  val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
3992  BNX2_RPM_SORT_USER0_MC_EN;
3993  REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3994  REG_WR(bp, BNX2_RPM_SORT_USER0, val);
3995  REG_WR(bp, BNX2_RPM_SORT_USER0, val |
3996  BNX2_RPM_SORT_USER0_ENA);
3997 
3998  /* Need to enable EMAC and RPM for WOL. */
3999  REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4000  BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
4001  BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
4002  BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
4003 
4004  val = REG_RD(bp, BNX2_RPM_CONFIG);
4005  val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
4006  REG_WR(bp, BNX2_RPM_CONFIG, val);
4007 
4008  wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
4009  }
4010  else {
4011  wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
4012  }
4013 
4014  if (!(bp->flags & BNX2_FLAG_NO_WOL))
4015  bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
4016  1, 0);
4017 
4018  pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
4019  if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
4020  (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
4021 
4022  if (bp->wol)
4023  pmcsr |= 3;
4024  }
4025  else {
4026  pmcsr |= 3;
4027  }
4028  if (bp->wol) {
4029  pmcsr |= PCI_PM_CTRL_PME_ENABLE;
4030  }
4031  pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
4032  pmcsr);
4033 
4034  /* No more memory access after this point until
4035  * device is brought back to D0.
4036  */
4037  udelay(50);
4038  break;
4039  }
4040  default:
4041  return -EINVAL;
4042  }
4043  return 0;
4044 }
4045 
4046 static int
4047 bnx2_acquire_nvram_lock(struct bnx2 *bp)
4048 {
4049  u32 val;
4050  int j;
4051 
4052  /* Request access to the flash interface. */
4053  REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
4054  for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4055  val = REG_RD(bp, BNX2_NVM_SW_ARB);
4056  if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
4057  break;
4058 
4059  udelay(5);
4060  }
4061 
4062  if (j >= NVRAM_TIMEOUT_COUNT)
4063  return -EBUSY;
4064 
4065  return 0;
4066 }
4067 
4068 static int
4069 bnx2_release_nvram_lock(struct bnx2 *bp)
4070 {
4071  int j;
4072  u32 val;
4073 
4074  /* Relinquish nvram interface. */
4075  REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4076 
4077  for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4078  val = REG_RD(bp, BNX2_NVM_SW_ARB);
4079  if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
4080  break;
4081 
4082  udelay(5);
4083  }
4084 
4085  if (j >= NVRAM_TIMEOUT_COUNT)
4086  return -EBUSY;
4087 
4088  return 0;
4089 }
4090 
4091 
4092 static int
4093 bnx2_enable_nvram_write(struct bnx2 *bp)
4094 {
4095  u32 val;
4096 
4097  val = REG_RD(bp, BNX2_MISC_CFG);
4098  REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
4099 
4100  if (bp->flash_info->flags & BNX2_NV_WREN) {
4101  int j;
4102 
4103  REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4104  REG_WR(bp, BNX2_NVM_COMMAND,
4105  BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
4106 
4107  for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4108  udelay(5);
4109 
4110  val = REG_RD(bp, BNX2_NVM_COMMAND);
4111  if (val & BNX2_NVM_COMMAND_DONE)
4112  break;
4113  }
4114 
4115  if (j >= NVRAM_TIMEOUT_COUNT)
4116  return -EBUSY;
4117  }
4118  return 0;
4119 }
4120 
4121 static void
4122 bnx2_disable_nvram_write(struct bnx2 *bp)
4123 {
4124  u32 val;
4125 
4126  val = REG_RD(bp, BNX2_MISC_CFG);
4127  REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4128 }
4129 
4130 
4131 static void
4132 bnx2_enable_nvram_access(struct bnx2 *bp)
4133 {
4134  u32 val;
4135 
4136  val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4137  /* Enable both bits, even on read. */
4138  REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4139  val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4140 }
4141 
4142 static void
4143 bnx2_disable_nvram_access(struct bnx2 *bp)
4144 {
4145  u32 val;
4146 
4147  val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4148  /* Disable both bits, even after read. */
4149  REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4150  val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4151  BNX2_NVM_ACCESS_ENABLE_WR_EN));
4152 }
4153 
4154 static int
4155 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
4156 {
4157  u32 cmd;
4158  int j;
4159 
4160  if (bp->flash_info->flags & BNX2_NV_BUFFERED)
4161  /* Buffered flash, no erase needed */
4162  return 0;
4163 
4164  /* Build an erase command */
4165  cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
4166  BNX2_NVM_COMMAND_DOIT;
4167 
4168  /* Need to clear DONE bit separately. */
4169  REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4170 
4171  /* Address of the NVRAM to read from. */
4172  REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4173 
4174  /* Issue an erase command. */
4175  REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4176 
4177  /* Wait for completion. */
4178  for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4179  u32 val;
4180 
4181  udelay(5);
4182 
4183  val = REG_RD(bp, BNX2_NVM_COMMAND);
4184  if (val & BNX2_NVM_COMMAND_DONE)
4185  break;
4186  }
4187 
4188  if (j >= NVRAM_TIMEOUT_COUNT)
4189  return -EBUSY;
4190 
4191  return 0;
4192 }
4193 
4194 static int
4195 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
4196 {
4197  u32 cmd;
4198  int j;
4199 
4200  /* Build the command word. */
4201  cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
4202 
4203  /* Calculate an offset of a buffered flash, not needed for 5709. */
4204  if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4205  offset = ((offset / bp->flash_info->page_size) <<
4206  bp->flash_info->page_bits) +
4207  (offset % bp->flash_info->page_size);
4208  }
4209 
4210  /* Need to clear DONE bit separately. */
4211  REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4212 
4213  /* Address of the NVRAM to read from. */
4214  REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4215 
4216  /* Issue a read command. */
4217  REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4218 
4219  /* Wait for completion. */
4220  for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4221  u32 val;
4222 
4223  udelay(5);
4224 
4225  val = REG_RD(bp, BNX2_NVM_COMMAND);
4226  if (val & BNX2_NVM_COMMAND_DONE) {
4227  __be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
4228  memcpy(ret_val, &v, 4);
4229  break;
4230  }
4231  }
4232  if (j >= NVRAM_TIMEOUT_COUNT)
4233  return -EBUSY;
4234 
4235  return 0;
4236 }
4237 
4238 
4239 static int
4240 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
4241 {
4242  u32 cmd;
4243  __be32 val32;
4244  int j;
4245 
4246  /* Build the command word. */
4247  cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
4248 
4249  /* Calculate an offset of a buffered flash, not needed for 5709. */
4250  if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4251  offset = ((offset / bp->flash_info->page_size) <<
4252  bp->flash_info->page_bits) +
4253  (offset % bp->flash_info->page_size);
4254  }
4255 
4256  /* Need to clear DONE bit separately. */
4257  REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4258 
4259  memcpy(&val32, val, 4);
4260 
4261  /* Write the data. */
4262  REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
4263 
4264  /* Address of the NVRAM to write to. */
4265  REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4266 
4267  /* Issue the write command. */
4268  REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4269 
4270  /* Wait for completion. */
4271  for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4272  udelay(5);
4273 
4274  if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
4275  break;
4276  }
4277  if (j >= NVRAM_TIMEOUT_COUNT)
4278  return -EBUSY;
4279 
4280  return 0;
4281 }
4282 
4283 static int
4284 bnx2_init_nvram(struct bnx2 *bp)
4285 {
4286  u32 val;
4287  int j, entry_count, rc = 0;
4288  const struct flash_spec *flash;
4289 
4290  if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4291  bp->flash_info = &flash_5709;
4292  goto get_flash_size;
4293  }
4294 
4295  /* Determine the selected interface. */
4296  val = REG_RD(bp, BNX2_NVM_CFG1);
4297 
4298  entry_count = ARRAY_SIZE(flash_table);
4299 
4300  if (val & 0x40000000) {
4301 
4302  /* Flash interface has been reconfigured */
4303  for (j = 0, flash = &flash_table[0]; j < entry_count;
4304  j++, flash++) {
4305  if ((val & FLASH_BACKUP_STRAP_MASK) ==
4306  (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
4307  bp->flash_info = flash;
4308  break;
4309  }
4310  }
4311  }
4312  else {
4313  u32 mask;
4314  /* Not yet been reconfigured */
4315 
4316  if (val & (1 << 23))
4317  mask = FLASH_BACKUP_STRAP_MASK;
4318  else
4319  mask = FLASH_STRAP_MASK;
4320 
4321  for (j = 0, flash = &flash_table[0]; j < entry_count;
4322  j++, flash++) {
4323 
4324  if ((val & mask) == (flash->strapping & mask)) {
4325  bp->flash_info = flash;
4326 
4327  /* Request access to the flash interface. */
4328  if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4329  return rc;
4330 
4331  /* Enable access to flash interface */
4332  bnx2_enable_nvram_access(bp);
4333 
4334  /* Reconfigure the flash interface */
4335  REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
4336  REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
4337  REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
4338  REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
4339 
4340  /* Disable access to flash interface */
4341  bnx2_disable_nvram_access(bp);
4342  bnx2_release_nvram_lock(bp);
4343 
4344  break;
4345  }
4346  }
4347  } /* if (val & 0x40000000) */
4348 
4349  if (j == entry_count) {
4350  bp->flash_info = NULL;
4351  pr_alert("Unknown flash/EEPROM type\n");
4352  return -ENODEV;
4353  }
4354 
4355 get_flash_size:
4356  val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
4357  val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
4358  if (val)
4359  bp->flash_size = val;
4360  else
4361  bp->flash_size = bp->flash_info->total_size;
4362 
4363  return rc;
4364 }
4365 
4366 static int
4367 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4368  int buf_size)
4369 {
4370  int rc = 0;
4371  u32 cmd_flags, offset32, len32, extra;
4372 
4373  if (buf_size == 0)
4374  return 0;
4375 
4376  /* Request access to the flash interface. */
4377  if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4378  return rc;
4379 
4380  /* Enable access to flash interface */
4381  bnx2_enable_nvram_access(bp);
4382 
4383  len32 = buf_size;
4384  offset32 = offset;
4385  extra = 0;
4386 
4387  cmd_flags = 0;
4388 
4389  if (offset32 & 3) {
4390  u8 buf[4];
4391  u32 pre_len;
4392 
4393  offset32 &= ~3;
4394  pre_len = 4 - (offset & 3);
4395 
4396  if (pre_len >= len32) {
4397  pre_len = len32;
4398  cmd_flags = BNX2_NVM_COMMAND_FIRST |
4399  BNX2_NVM_COMMAND_LAST;
4400  }
4401  else {
4402  cmd_flags = BNX2_NVM_COMMAND_FIRST;
4403  }
4404 
4405  rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4406 
4407  if (rc)
4408  return rc;
4409 
4410  memcpy(ret_buf, buf + (offset & 3), pre_len);
4411 
4412  offset32 += 4;
4413  ret_buf += pre_len;
4414  len32 -= pre_len;
4415  }
4416  if (len32 & 3) {
4417  extra = 4 - (len32 & 3);
4418  len32 = (len32 + 4) & ~3;
4419  }
4420 
4421  if (len32 == 4) {
4422  u8 buf[4];
4423 
4424  if (cmd_flags)
4425  cmd_flags = BNX2_NVM_COMMAND_LAST;
4426  else
4427  cmd_flags = BNX2_NVM_COMMAND_FIRST |
4428  BNX2_NVM_COMMAND_LAST;
4429 
4430  rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4431 
4432  memcpy(ret_buf, buf, 4 - extra);
4433  }
4434  else if (len32 > 0) {
4435  u8 buf[4];
4436 
4437  /* Read the first word. */
4438  if (cmd_flags)
4439  cmd_flags = 0;
4440  else
4441  cmd_flags = BNX2_NVM_COMMAND_FIRST;
4442 
4443  rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4444 
4445  /* Advance to the next dword. */
4446  offset32 += 4;
4447  ret_buf += 4;
4448  len32 -= 4;
4449 
4450  while (len32 > 4 && rc == 0) {
4451  rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4452 
4453  /* Advance to the next dword. */
4454  offset32 += 4;
4455  ret_buf += 4;
4456  len32 -= 4;
4457  }
4458 
4459  if (rc)
4460  return rc;
4461 
4462  cmd_flags = BNX2_NVM_COMMAND_LAST;
4463  rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4464 
4465  memcpy(ret_buf, buf, 4 - extra);
4466  }
4467 
4468  /* Disable access to flash interface */
4469  bnx2_disable_nvram_access(bp);
4470 
4471  bnx2_release_nvram_lock(bp);
4472 
4473  return rc;
4474 }
4475 
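/* [Editor's sketch -- not part of bnx2.c] bnx2_nvram_read() widens any
 * request to whole 32-bit words: an unaligned start reads one word and
 * copies only the tail bytes of it, and a ragged end reads a final
 * word and copies only the head. The head-copy step in isolation
 * (standalone C, hypothetical names):
 */
#if 0
#include <stdint.h>
#include <string.h>

/* Copy up to 'len' bytes starting at byte (offset & 3) of word 'buf';
 * returns how many bytes of the request this word satisfied. */
static unsigned int head_bytes(uint32_t offset, unsigned int len,
                               const uint8_t buf[4], uint8_t *out)
{
	unsigned int pre_len = 4 - (offset & 3);

	if (pre_len > len)
		pre_len = len;
	memcpy(out, buf + (offset & 3), pre_len);
	return pre_len;
}
#endif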
4476 static int
4477 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
4478  int buf_size)
4479 {
4480  u32 written, offset32, len32;
4481  u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
4482  int rc = 0;
4483  int align_start, align_end;
4484 
4485  buf = data_buf;
4486  offset32 = offset;
4487  len32 = buf_size;
4488  align_start = align_end = 0;
4489 
4490  if ((align_start = (offset32 & 3))) {
4491  offset32 &= ~3;
4492  len32 += align_start;
4493  if (len32 < 4)
4494  len32 = 4;
4495  if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
4496  return rc;
4497  }
4498 
4499  if (len32 & 3) {
4500  align_end = 4 - (len32 & 3);
4501  len32 += align_end;
4502  if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
4503  return rc;
4504  }
4505 
4506  if (align_start || align_end) {
4507  align_buf = kmalloc(len32, GFP_KERNEL);
4508  if (align_buf == NULL)
4509  return -ENOMEM;
4510  if (align_start) {
4511  memcpy(align_buf, start, 4);
4512  }
4513  if (align_end) {
4514  memcpy(align_buf + len32 - 4, end, 4);
4515  }
4516  memcpy(align_buf + align_start, data_buf, buf_size);
4517  buf = align_buf;
4518  }
4519 
4520  if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4521  flash_buffer = kmalloc(264, GFP_KERNEL);
4522  if (flash_buffer == NULL) {
4523  rc = -ENOMEM;
4524  goto nvram_write_end;
4525  }
4526  }
4527 
4528  written = 0;
4529  while ((written < len32) && (rc == 0)) {
4530  u32 page_start, page_end, data_start, data_end;
4531  u32 addr, cmd_flags;
4532  int i;
4533 
4534  /* Find the page_start addr */
4535  page_start = offset32 + written;
4536  page_start -= (page_start % bp->flash_info->page_size);
4537  /* Find the page_end addr */
4538  page_end = page_start + bp->flash_info->page_size;
4539  /* Find the data_start addr */
4540  data_start = (written == 0) ? offset32 : page_start;
4541  /* Find the data_end addr */
4542  data_end = (page_end > offset32 + len32) ?
4543  (offset32 + len32) : page_end;
4544 
4545  /* Request access to the flash interface. */
4546  if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4547  goto nvram_write_end;
4548 
4549  /* Enable access to flash interface */
4550  bnx2_enable_nvram_access(bp);
4551 
4552  cmd_flags = BNX2_NVM_COMMAND_FIRST;
4553  if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4554  int j;
4555 
4556  /* Read the whole page into the buffer
4557  * (non-buffer flash only) */
4558  for (j = 0; j < bp->flash_info->page_size; j += 4) {
4559  if (j == (bp->flash_info->page_size - 4)) {
4560  cmd_flags |= BNX2_NVM_COMMAND_LAST;
4561  }
4562  rc = bnx2_nvram_read_dword(bp,
4563  page_start + j,
4564  &flash_buffer[j],
4565  cmd_flags);
4566 
4567  if (rc)
4568  goto nvram_write_end;
4569 
4570  cmd_flags = 0;
4571  }
4572  }
4573 
4574  /* Enable writes to flash interface (unlock write-protect) */
4575  if ((rc = bnx2_enable_nvram_write(bp)) != 0)
4576  goto nvram_write_end;
4577 
4578  /* Loop to write back the buffer data from page_start to
4579  * data_start */
4580  i = 0;
4581  if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4582  /* Erase the page */
4583  if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
4584  goto nvram_write_end;
4585 
4586  /* Re-enable the write again for the actual write */
4587  bnx2_enable_nvram_write(bp);
4588 
4589  for (addr = page_start; addr < data_start;
4590  addr += 4, i += 4) {
4591 
4592  rc = bnx2_nvram_write_dword(bp, addr,
4593  &flash_buffer[i], cmd_flags);
4594 
4595  if (rc != 0)
4596  goto nvram_write_end;
4597 
4598  cmd_flags = 0;
4599  }
4600  }
4601 
4602  /* Loop to write the new data from data_start to data_end */
4603  for (addr = data_start; addr < data_end; addr += 4, i += 4) {
4604  if ((addr == page_end - 4) ||
4605  ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
4606  (addr == data_end - 4))) {
4607 
4608  cmd_flags |= BNX2_NVM_COMMAND_LAST;
4609  }
4610  rc = bnx2_nvram_write_dword(bp, addr, buf,
4611  cmd_flags);
4612 
4613  if (rc != 0)
4614  goto nvram_write_end;
4615 
4616  cmd_flags = 0;
4617  buf += 4;
4618  }
4619 
4620  /* Loop to write back the buffer data from data_end
4621  * to page_end */
4622  if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4623  for (addr = data_end; addr < page_end;
4624  addr += 4, i += 4) {
4625 
4626  if (addr == page_end-4) {
4627  cmd_flags = BNX2_NVM_COMMAND_LAST;
4628  }
4629  rc = bnx2_nvram_write_dword(bp, addr,
4630  &flash_buffer[i], cmd_flags);
4631 
4632  if (rc != 0)
4633  goto nvram_write_end;
4634 
4635  cmd_flags = 0;
4636  }
4637  }
4638 
4639  /* Disable writes to flash interface (lock write-protect) */
4640  bnx2_disable_nvram_write(bp);
4641 
4642  /* Disable access to flash interface */
4643  bnx2_disable_nvram_access(bp);
4644  bnx2_release_nvram_lock(bp);
4645 
4646  /* Increment written */
4647  written += data_end - data_start;
4648  }
4649 
4650 nvram_write_end:
4651  kfree(flash_buffer);
4652  kfree(align_buf);
4653  return rc;
4654 }
4655 
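/* bnx2_init_fw_cap() negotiates optional firmware capabilities through
 * shared memory: it reads BNX2_FW_CAP_MB, checks the signature, latches
 * the keep-VLAN and remote-PHY capabilities into bp->flags/phy_flags,
 * and acknowledges the features it will use by writing a signature back
 * to BNX2_DRV_ACK_CAP_MB.
 */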
4656 static void
4657 bnx2_init_fw_cap(struct bnx2 *bp)
4658 {
4659  u32 val, sig = 0;
4660 
4661  bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4662  bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4663 
4664  if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4665  bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4666 
4667  val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4668  if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4669  return;
4670 
4671  if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4672  bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4673  sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4674  }
4675 
4676  if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4677  (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4678  u32 link;
4679 
4680  bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4681 
4682  link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4683  if (link & BNX2_LINK_STATUS_SERDES_LINK)
4684  bp->phy_port = PORT_FIBRE;
4685  else
4686  bp->phy_port = PORT_TP;
4687 
4688  sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4689  BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4690  }
4691 
4692  if (netif_running(bp->dev) && sig)
4693  bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4694 }
4695 
4696 static void
4697 bnx2_setup_msix_tbl(struct bnx2 *bp)
4698 {
4699  REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW2_BASE);
4700 
4701  REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
4702  REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
4703 }
4704 
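/* Reset sequence used by bnx2_reset_chip(): quiesce DMA, handshake with
 * the firmware (DRV_MSG_DATA_WAIT0), deposit a reset signature in shared
 * memory so the firmware treats this as a soft reset, hit the reset
 * register, verify the byte-swap diagnostic word, then wait for the
 * firmware to finish its own initialization (DRV_MSG_DATA_WAIT1).
 */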
4705 static int
4706 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4707 {
4708  u32 val;
4709  int i, rc = 0;
4710  u8 old_port;
4711 
4712  /* Wait for the current PCI transaction to complete before
4713  * issuing a reset. */
4714  if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
4715  (CHIP_NUM(bp) == CHIP_NUM_5708)) {
4716  REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4717  BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4718  BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4719  BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4720  BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4721  val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4722  udelay(5);
4723  } else { /* 5709 */
4724  val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4725  val &= ~BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4726  REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4727  val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4728 
4729  for (i = 0; i < 100; i++) {
4730  msleep(1);
4731  val = REG_RD(bp, BNX2_PCICFG_DEVICE_STATUS);
4732  if (!(val & BNX2_PCICFG_DEVICE_STATUS_NO_PEND))
4733  break;
4734  }
4735  }
4736 
4737  /* Wait for the firmware to tell us it is ok to issue a reset. */
4738  bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);
4739 
4740  /* Deposit a driver reset signature so the firmware knows that
4741  * this is a soft reset. */
4742  bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
4743  BNX2_DRV_RESET_SIGNATURE_MAGIC);
4744 
4745  /* Do a dummy read to force the chip to complete all current transactions
4746  * before we issue a reset. */
4747  val = REG_RD(bp, BNX2_MISC_ID);
4748 
4749  if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4750  REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
4751  REG_RD(bp, BNX2_MISC_COMMAND);
4752  udelay(5);
4753 
4754  val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4755  BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4756 
4757  REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4758 
4759  } else {
4760  val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4761  BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4762  BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4763 
4764  /* Chip reset. */
4765  REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4766 
4767  /* Reading back any register after chip reset will hang the
4768  * bus on 5706 A0 and A1. The msleep below provides plenty
4769  * of margin for write posting.
4770  */
4771  if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
4772  (CHIP_ID(bp) == CHIP_ID_5706_A1))
4773  msleep(20);
4774 
4775  /* Reset takes approximately 30 usec */
4776  for (i = 0; i < 10; i++) {
4777  val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
4778  if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4779  BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
4780  break;
4781  udelay(10);
4782  }
4783 
4784  if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4785  BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
4786  pr_err("Chip reset did not complete\n");
4787  return -EBUSY;
4788  }
4789  }
4790 
4791  /* Make sure byte swapping is properly configured. */
4792  val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
4793  if (val != 0x01020304) {
4794  pr_err("Chip not in correct endian mode\n");
4795  return -ENODEV;
4796  }
4797 
4798  /* Wait for the firmware to finish its initialization. */
4799  rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
4800  if (rc)
4801  return rc;
4802 
4803  spin_lock_bh(&bp->phy_lock);
4804  old_port = bp->phy_port;
4805  bnx2_init_fw_cap(bp);
4806  if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
4807  old_port != bp->phy_port)
4808  bnx2_set_default_remote_link(bp);
4809  spin_unlock_bh(&bp->phy_lock);
4810 
4811  if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4812  /* Adjust the voltage regulator two steps lower. The default
4813  * of this register is 0x0000000e. */
4814  REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
4815 
4816  /* Remove bad rbuf memory from the free pool. */
4817  rc = bnx2_alloc_bad_rbuf(bp);
4818  }
4819 
4820  if (bp->flags & BNX2_FLAG_USING_MSIX) {
4821  bnx2_setup_msix_tbl(bp);
4822  /* Prevent MSIX table reads and writes from timing out */
4823  REG_WR(bp, BNX2_MISC_ECO_HW_CTL,
4824  BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
4825  }
4826 
4827  return rc;
4828 }
4829 
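/* bnx2_init_chip() reprograms the device from a clean post-reset state:
 * DMA byte/word swapping and channel counts, on-chip context memory,
 * the CPU firmware, MAC address, MTU and backoff seed, the host
 * coalescing (HC) trip points and tick values for each vector, and
 * finally the receive filter, ending with a DRV_MSG_DATA_WAIT2
 * firmware handshake.
 */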
4830 static int
4831 bnx2_init_chip(struct bnx2 *bp)
4832 {
4833  u32 val, mtu;
4834  int rc, i;
4835 
4836  /* Make sure the interrupt is not active. */
4837  REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4838 
4839  val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4840  BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4841 #ifdef __BIG_ENDIAN
4842  BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4843 #endif
4844  BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4845  DMA_READ_CHANS << 12 |
4846  DMA_WRITE_CHANS << 16;
4847 
4848  val |= (0x2 << 20) | (1 << 11);
4849 
4850  if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
4851  val |= (1 << 23);
4852 
4853  if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
4854  (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
4855  val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4856 
4857  REG_WR(bp, BNX2_DMA_CONFIG, val);
4858 
4859  if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4860  val = REG_RD(bp, BNX2_TDMA_CONFIG);
4861  val |= BNX2_TDMA_CONFIG_ONE_DMA;
4862  REG_WR(bp, BNX2_TDMA_CONFIG, val);
4863  }
4864 
4865  if (bp->flags & BNX2_FLAG_PCIX) {
4866  u16 val16;
4867 
4868  pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4869  &val16);
4870  pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4871  val16 & ~PCI_X_CMD_ERO);
4872  }
4873 
4874  REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4875  BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4876  BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4877  BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4878 
4879  /* Initialize context mapping and zero out the quick contexts. The
4880  * context block must have already been enabled. */
4881  if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4882  rc = bnx2_init_5709_context(bp);
4883  if (rc)
4884  return rc;
4885  } else
4886  bnx2_init_context(bp);
4887 
4888  if ((rc = bnx2_init_cpus(bp)) != 0)
4889  return rc;
4890 
4891  bnx2_init_nvram(bp);
4892 
4893  bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
4894 
4895  val = REG_RD(bp, BNX2_MQ_CONFIG);
4896  val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4897  val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4898  if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4899  val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
4900  if (CHIP_REV(bp) == CHIP_REV_Ax)
4901  val |= BNX2_MQ_CONFIG_HALT_DIS;
4902  }
4903 
4904  REG_WR(bp, BNX2_MQ_CONFIG, val);
4905 
4906  val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4907  REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4908  REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4909 
4910  val = (BCM_PAGE_BITS - 8) << 24;
4911  REG_WR(bp, BNX2_RV2P_CONFIG, val);
4912 
4913  /* Configure page size. */
4914  val = REG_RD(bp, BNX2_TBDR_CONFIG);
4915  val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4916  val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
4917  REG_WR(bp, BNX2_TBDR_CONFIG, val);
4918 
4919  val = bp->mac_addr[0] +
4920  (bp->mac_addr[1] << 8) +
4921  (bp->mac_addr[2] << 16) +
4922  bp->mac_addr[3] +
4923  (bp->mac_addr[4] << 8) +
4924  (bp->mac_addr[5] << 16);
4925  REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4926 
4927  /* Program the MTU. Also include 4 bytes for CRC32. */
4928  mtu = bp->dev->mtu;
4929  val = mtu + ETH_HLEN + ETH_FCS_LEN;
4930  if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4931  val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4932  REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4933 
4934  if (mtu < 1500)
4935  mtu = 1500;
4936 
4937  bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
4938  bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
4939  bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));
4940 
4941  memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
4942  for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
4943  bp->bnx2_napi[i].last_status_idx = 0;
4944 
4945  bp->idle_chk_status_idx = 0xffff;
4946 
4947  bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4948 
4949  /* Set up how to generate a link change interrupt. */
4950  REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4951 
4952  REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
4953  (u64) bp->status_blk_mapping & 0xffffffff);
4954  REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4955 
4956  REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4957  (u64) bp->stats_blk_mapping & 0xffffffff);
4958  REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4959  (u64) bp->stats_blk_mapping >> 32);
4960 
4961  REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
4962  (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4963 
4964  REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4965  (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4966 
4967  REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4968  (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4969 
4970  REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4971 
4972  REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4973 
4974  REG_WR(bp, BNX2_HC_COM_TICKS,
4975  (bp->com_ticks_int << 16) | bp->com_ticks);
4976 
4977  REG_WR(bp, BNX2_HC_CMD_TICKS,
4978  (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4979 
4980  if (bp->flags & BNX2_FLAG_BROKEN_STATS)
4981  REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
4982  else
4983  REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
4984  REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
4985 
4986  if (CHIP_ID(bp) == CHIP_ID_5706_A1)
4987  val = BNX2_HC_CONFIG_COLLECT_STATS;
4988  else {
4989  val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4990  BNX2_HC_CONFIG_COLLECT_STATS;
4991  }
4992 
4993  if (bp->flags & BNX2_FLAG_USING_MSIX) {
4994  REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
4995  BNX2_HC_MSIX_BIT_VECTOR_VAL);
4996 
4997  val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
4998  }
4999 
5000  if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
5001  val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;
5002 
5003  REG_WR(bp, BNX2_HC_CONFIG, val);
5004 
5005  if (bp->rx_ticks < 25)
5006  bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 1);
5007  else
5008  bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 0);
5009 
5010  for (i = 1; i < bp->irq_nvecs; i++) {
5011  u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
5012  BNX2_HC_SB_CONFIG_1;
5013 
5014  REG_WR(bp, base,
5015  BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
5016  BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
5017  BNX2_HC_SB_CONFIG_1_ONE_SHOT);
5018 
5019  REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
5020  (bp->tx_quick_cons_trip_int << 16) |
5021  bp->tx_quick_cons_trip);
5022 
5023  REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
5024  (bp->tx_ticks_int << 16) | bp->tx_ticks);
5025 
5026  REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
5027  (bp->rx_quick_cons_trip_int << 16) |
5028  bp->rx_quick_cons_trip);
5029 
5030  REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
5031  (bp->rx_ticks_int << 16) | bp->rx_ticks);
5032  }
5033 
5034  /* Clear internal stats counters. */
5035  REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
5036 
5037  REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
5038 
5039  /* Initialize the receive filter. */
5040  bnx2_set_rx_mode(bp->dev);
5041 
5042  if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5043  val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
5044  val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
5045  REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
5046  }
5047  rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
5048  1, 0);
5049 
5050  REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
5051  REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
5052 
5053  udelay(20);
5054 
5055  bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
5056 
5057  return rc;
5058 }
5059 
5060 static void
5061 bnx2_clear_ring_states(struct bnx2 *bp)
5062 {
5063  struct bnx2_napi *bnapi;
5064  struct bnx2_tx_ring_info *txr;
5065  struct bnx2_rx_ring_info *rxr;
5066  int i;
5067 
5068  for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5069  bnapi = &bp->bnx2_napi[i];
5070  txr = &bnapi->tx_ring;
5071  rxr = &bnapi->rx_ring;
5072 
5073  txr->tx_cons = 0;
5074  txr->hw_tx_cons = 0;
5075  rxr->rx_prod_bseq = 0;
5076  rxr->rx_prod = 0;
5077  rxr->rx_cons = 0;
5078  rxr->rx_pg_prod = 0;
5079  rxr->rx_pg_cons = 0;
5080  }
5081 }
5082 
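/* Each L2 connection owns a context in on-chip memory.  For the TX ring
 * the driver writes the context type, the command type (with the BD
 * chain offset of 8 in bits 16-23), and the 64-bit BD ring base address
 * split into HI/LO words.  The 5709 (XI) uses a different context
 * layout, which is why the offsets are chosen per chip below.
 */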
5083 static void
5084 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
5085 {
5086  u32 val, offset0, offset1, offset2, offset3;
5087  u32 cid_addr = GET_CID_ADDR(cid);
5088 
5089  if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5090  offset0 = BNX2_L2CTX_TYPE_XI;
5091  offset1 = BNX2_L2CTX_CMD_TYPE_XI;
5092  offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
5093  offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
5094  } else {
5095  offset0 = BNX2_L2CTX_TYPE;
5096  offset1 = BNX2_L2CTX_CMD_TYPE;
5097  offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
5098  offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
5099  }
5100  val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
5101  bnx2_ctx_wr(bp, cid_addr, offset0, val);
5102 
5103  val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
5104  bnx2_ctx_wr(bp, cid_addr, offset1, val);
5105 
5106  val = (u64) txr->tx_desc_mapping >> 32;
5107  bnx2_ctx_wr(bp, cid_addr, offset2, val);
5108 
5109  val = (u64) txr->tx_desc_mapping & 0xffffffff;
5110  bnx2_ctx_wr(bp, cid_addr, offset3, val);
5111 }
5112 
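/* The TX ring is one block of BDs whose final entry (index
 * MAX_TX_DESC_CNT) is not a packet descriptor but a chain BD pointing
 * back at the ring's own base address, which is what makes the ring
 * circular to the hardware.
 */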
5113 static void
5114 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
5115 {
5116  struct tx_bd *txbd;
5117  u32 cid = TX_CID;
5118  struct bnx2_napi *bnapi;
5119  struct bnx2_tx_ring_info *txr;
5120 
5121  bnapi = &bp->bnx2_napi[ring_num];
5122  txr = &bnapi->tx_ring;
5123 
5124  if (ring_num == 0)
5125  cid = TX_CID;
5126  else
5127  cid = TX_TSS_CID + ring_num - 1;
5128 
5129  bp->tx_wake_thresh = bp->tx_ring_size / 2;
5130 
5131  txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];
5132 
5133  txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
5134  txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
5135 
5136  txr->tx_prod = 0;
5137  txr->tx_prod_bseq = 0;
5138 
5139  txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
5140  txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
5141 
5142  bnx2_init_tx_context(bp, cid, txr);
5143 }
5144 
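/* RX rings may span several pages of BDs.  Every BD in a page gets the
 * same length and flags, and the BD slot after the last entry of each
 * page is chained to the next page's DMA address; the final page links
 * back to page 0, so the pages form one circular ring.
 */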
5145 static void
5146 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
5147  int num_rings)
5148 {
5149  int i;
5150  struct rx_bd *rxbd;
5151 
5152  for (i = 0; i < num_rings; i++) {
5153  int j;
5154 
5155  rxbd = &rx_ring[i][0];
5156  for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
5157  rxbd->rx_bd_len = buf_size;
5158  rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
5159  }
5160  if (i == (num_rings - 1))
5161  j = 0;
5162  else
5163  j = i + 1;
5164  rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5165  rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
5166  }
5167 }
5168 
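/* Per-ring RX setup: program the L2 context with the BD ring (and, for
 * jumbo MTUs, the page ring) base addresses, then pre-fill both rings
 * with buffers.  If allocation falls short, the ring simply starts
 * partially populated, as the netdev_warn messages below note.
 */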
5169 static void
5170 bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
5171 {
5172  int i;
5173  u16 prod, ring_prod;
5174  u32 cid, rx_cid_addr, val;
5175  struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
5176  struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5177 
5178  if (ring_num == 0)
5179  cid = RX_CID;
5180  else
5181  cid = RX_RSS_CID + ring_num - 1;
5182 
5183  rx_cid_addr = GET_CID_ADDR(cid);
5184 
5185  bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
5186  bp->rx_buf_use_size, bp->rx_max_ring);
5187 
5188  bnx2_init_rx_context(bp, cid);
5189 
5190  if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5191  val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
5192  REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
5193  }
5194 
5195  bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
5196  if (bp->rx_pg_ring_size) {
5197  bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
5198  rxr->rx_pg_desc_mapping,
5199  PAGE_SIZE, bp->rx_max_pg_ring);
5200  val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
5201  bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
5202  bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
5203  BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);
5204 
5205  val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
5206  bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
5207 
5208  val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
5209  bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
5210 
5211  if (CHIP_NUM(bp) == CHIP_NUM_5709)
5212  REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
5213  }
5214 
5215  val = (u64) rxr->rx_desc_mapping[0] >> 32;
5216  bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
5217 
5218  val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
5219  bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
5220 
5221  ring_prod = prod = rxr->rx_pg_prod;
5222  for (i = 0; i < bp->rx_pg_ring_size; i++) {
5223  if (bnx2_alloc_rx_page(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
5224  netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
5225  ring_num, i, bp->rx_pg_ring_size);
5226  break;
5227  }
5228  prod = NEXT_RX_BD(prod);
5229  ring_prod = RX_PG_RING_IDX(prod);
5230  }
5231  rxr->rx_pg_prod = prod;
5232 
5233  ring_prod = prod = rxr->rx_prod;
5234  for (i = 0; i < bp->rx_ring_size; i++) {
5235  if (bnx2_alloc_rx_data(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
5236  netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
5237  ring_num, i, bp->rx_ring_size);
5238  break;
5239  }
5240  prod = NEXT_RX_BD(prod);
5241  ring_prod = RX_RING_IDX(prod);
5242  }
5243  rxr->rx_prod = prod;
5244 
5245  rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
5246  rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
5247  rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;
5248 
5249  REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
5250  REG_WR16(bp, rxr->rx_bidx_addr, prod);
5251 
5252  REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
5253 }
5254 
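/* With more than one RX ring, an RSS indirection table is programmed
 * through BNX2_RLUP_RSS_DATA/COMMAND.  Each table entry is a 4-bit ring
 * selector, packed eight to a 32-bit word ((i % 8) << 2 below), and the
 * entries cycle round-robin over the non-default rings via
 * i % (num_rx_rings - 1).
 */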
5255 static void
5256 bnx2_init_all_rings(struct bnx2 *bp)
5257 {
5258  int i;
5259  u32 val;
5260 
5261  bnx2_clear_ring_states(bp);
5262 
5263  REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
5264  for (i = 0; i < bp->num_tx_rings; i++)
5265  bnx2_init_tx_ring(bp, i);
5266 
5267  if (bp->num_tx_rings > 1)
5268  REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
5269  (TX_TSS_CID << 7));
5270 
5271  REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
5272  bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);
5273 
5274  for (i = 0; i < bp->num_rx_rings; i++)
5275  bnx2_init_rx_ring(bp, i);
5276 
5277  if (bp->num_rx_rings > 1) {
5278  u32 tbl_32 = 0;
5279 
5280  for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
5281  int shift = (i % 8) << 2;
5282 
5283  tbl_32 |= (i % (bp->num_rx_rings - 1)) << shift;
5284  if ((i % 8) == 7) {
5285  REG_WR(bp, BNX2_RLUP_RSS_DATA, tbl_32);
5286  REG_WR(bp, BNX2_RLUP_RSS_COMMAND, (i >> 3) |
5287  BNX2_RLUP_RSS_COMMAND_RSS_WRITE_MASK |
5288  BNX2_RLUP_RSS_COMMAND_WRITE |
5289  BNX2_RLUP_RSS_COMMAND_HASH_MASK);
5290  tbl_32 = 0;
5291  }
5292  }
5293 
5294  val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
5295  BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;
5296 
5297  REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);
5298 
5299  }
5300 }
5301 
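/* bnx2_find_max_ring() converts a descriptor count into a power-of-two
 * number of ring pages, capped at max_size.  Worked example with
 * assumed constants (not values taken from this file): if
 * MAX_RX_DESC_CNT were 255 and max_size 8, a request for 600
 * descriptors needs 3 pages and is rounded up to 4.
 */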
5302 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5303 {
5304  u32 max, num_rings = 1;
5305 
5306  while (ring_size > MAX_RX_DESC_CNT) {
5307  ring_size -= MAX_RX_DESC_CNT;
5308  num_rings++;
5309  }
5310  /* round to next power of 2 */
5311  max = max_size;
5312  while ((max & num_rings) == 0)
5313  max >>= 1;
5314 
5315  if (num_rings != max)
5316  max <<= 1;
5317 
5318  return max;
5319 }
5320 
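/* Buffer sizing: rx_size is the largest frame the ring must accept
 * (MTU + Ethernet header + 8 bytes for CRC and VLAN).  If that plus the
 * skb overhead no longer fits in one page and jumbo handling works on
 * this chip, the driver splits buffers: the normal ring then holds only
 * BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET bytes per packet and the
 * remainder lands in the separate page ring sized here as well.
 */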
5321 static void
5322 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
5323 {
5324  u32 rx_size, rx_space, jumbo_size;
5325 
5326  /* 8 for CRC and VLAN */
5327  rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
5328 
5329  rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
5330  SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5331 
5332  bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
5333  bp->rx_pg_ring_size = 0;
5334  bp->rx_max_pg_ring = 0;
5335  bp->rx_max_pg_ring_idx = 0;
5336  if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
5337  int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
5338 
5339  jumbo_size = size * pages;
5340  if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
5341  jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;
5342 
5343  bp->rx_pg_ring_size = jumbo_size;
5344  bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
5345  MAX_RX_PG_RINGS);
5346  bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
5347  rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
5348  bp->rx_copy_thresh = 0;
5349  }
5350 
5351  bp->rx_buf_use_size = rx_size;
5352  /* hw alignment + build_skb() overhead */
5353  bp->rx_buf_size = SKB_DATA_ALIGN(bp->rx_buf_use_size + BNX2_RX_ALIGN) +
5354  NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5355  bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
5356  bp->rx_ring_size = size;
5357  bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
5358  bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
5359 }
5360 
5361 static void
5362 bnx2_free_tx_skbs(struct bnx2 *bp)
5363 {
5364  int i;
5365 
5366  for (i = 0; i < bp->num_tx_rings; i++) {
5367  struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5368  struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5369  int j;
5370 
5371  if (txr->tx_buf_ring == NULL)
5372  continue;
5373 
5374  for (j = 0; j < TX_DESC_CNT; ) {
5375  struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
5376  struct sk_buff *skb = tx_buf->skb;
5377  int k, last;
5378 
5379  if (skb == NULL) {
5380  j = NEXT_TX_BD(j);
5381  continue;
5382  }
5383 
5384  dma_unmap_single(&bp->pdev->dev,
5385  dma_unmap_addr(tx_buf, mapping),
5386  skb_headlen(skb),
5387  PCI_DMA_TODEVICE);
5388 
5389  tx_buf->skb = NULL;
5390 
5391  last = tx_buf->nr_frags;
5392  j = NEXT_TX_BD(j);
5393  for (k = 0; k < last; k++, j = NEXT_TX_BD(j)) {
5394  tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
5395  dma_unmap_page(&bp->pdev->dev,
5396  dma_unmap_addr(tx_buf, mapping),
5397  skb_frag_size(&skb_shinfo(skb)->frags[k]),
5398  PCI_DMA_TODEVICE);
5399  }
5400  dev_kfree_skb(skb);
5401  }
5402  netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
5403  }
5404 }
5405 
5406 static void
5407 bnx2_free_rx_skbs(struct bnx2 *bp)
5408 {
5409  int i;
5410 
5411  for (i = 0; i < bp->num_rx_rings; i++) {
5412  struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5413  struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5414  int j;
5415 
5416  if (rxr->rx_buf_ring == NULL)
5417  return;
5418 
5419  for (j = 0; j < bp->rx_max_ring_idx; j++) {
5420  struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5421  u8 *data = rx_buf->data;
5422 
5423  if (data == NULL)
5424  continue;
5425 
5426  dma_unmap_single(&bp->pdev->dev,
5427  dma_unmap_addr(rx_buf, mapping),
5428  bp->rx_buf_use_size,
5429  PCI_DMA_FROMDEVICE);
5430 
5431  rx_buf->data = NULL;
5432 
5433  kfree(data);
5434  }
5435  for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5436  bnx2_free_rx_page(bp, rxr, j);
5437  }
5438 }
5439 
5440 static void
5441 bnx2_free_skbs(struct bnx2 *bp)
5442 {
5443  bnx2_free_tx_skbs(bp);
5444  bnx2_free_rx_skbs(bp);
5445 }
5446 
5447 static int
5448 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5449 {
5450  int rc;
5451 
5452  rc = bnx2_reset_chip(bp, reset_code);
5453  bnx2_free_skbs(bp);
5454  if (rc)
5455  return rc;
5456 
5457  if ((rc = bnx2_init_chip(bp)) != 0)
5458  return rc;
5459 
5460  bnx2_init_all_rings(bp);
5461  return 0;
5462 }
5463 
5464 static int
5465 bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5466 {
5467  int rc;
5468 
5469  if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5470  return rc;
5471 
5472  spin_lock_bh(&bp->phy_lock);
5473  bnx2_init_phy(bp, reset_phy);
5474  bnx2_set_link(bp);
5475  if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5476  bnx2_remote_phy_event(bp);
5477  spin_unlock_bh(&bp->phy_lock);
5478  return 0;
5479 }
5480 
5481 static int
5482 bnx2_shutdown_chip(struct bnx2 *bp)
5483 {
5484  u32 reset_code;
5485 
5486  if (bp->flags & BNX2_FLAG_NO_WOL)
5487  reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5488  else if (bp->wol)
5489  reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5490  else
5491  reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5492 
5493  return bnx2_reset_chip(bp, reset_code);
5494 }
5495 
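/* Register self-test method: for each table entry, rw_mask marks bits
 * that must be writable (write 0, then all-ones, and read the result
 * back), while ro_mask marks bits that must keep their original value
 * across both writes.  Any mismatch restores the saved value and fails
 * the test with -ENODEV.
 */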
5496 static int
5497 bnx2_test_registers(struct bnx2 *bp)
5498 {
5499  int ret;
5500  int i, is_5709;
5501  static const struct {
5502  u16 offset;
5503  u16 flags;
5504 #define BNX2_FL_NOT_5709 1
5505  u32 rw_mask;
5506  u32 ro_mask;
5507  } reg_tbl[] = {
5508  { 0x006c, 0, 0x00000000, 0x0000003f },
5509  { 0x0090, 0, 0xffffffff, 0x00000000 },
5510  { 0x0094, 0, 0x00000000, 0x00000000 },
5511 
5512  { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
5513  { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5514  { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5515  { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
5516  { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
5517  { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5518  { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
5519  { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5520  { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5521 
5522  { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5523  { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5524  { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5525  { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5526  { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5527  { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5528 
5529  { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5530  { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
5531  { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
5532 
5533  { 0x1000, 0, 0x00000000, 0x00000001 },
5534  { 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },
5535 
5536  { 0x1408, 0, 0x01c00800, 0x00000000 },
5537  { 0x149c, 0, 0x8000ffff, 0x00000000 },
5538  { 0x14a8, 0, 0x00000000, 0x000001ff },
5539  { 0x14ac, 0, 0x0fffffff, 0x10000000 },
5540  { 0x14b0, 0, 0x00000002, 0x00000001 },
5541  { 0x14b8, 0, 0x00000000, 0x00000000 },
5542  { 0x14c0, 0, 0x00000000, 0x00000009 },
5543  { 0x14c4, 0, 0x00003fff, 0x00000000 },
5544  { 0x14cc, 0, 0x00000000, 0x00000001 },
5545  { 0x14d0, 0, 0xffffffff, 0x00000000 },
5546 
5547  { 0x1800, 0, 0x00000000, 0x00000001 },
5548  { 0x1804, 0, 0x00000000, 0x00000003 },
5549 
5550  { 0x2800, 0, 0x00000000, 0x00000001 },
5551  { 0x2804, 0, 0x00000000, 0x00003f01 },
5552  { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
5553  { 0x2810, 0, 0xffff0000, 0x00000000 },
5554  { 0x2814, 0, 0xffff0000, 0x00000000 },
5555  { 0x2818, 0, 0xffff0000, 0x00000000 },
5556  { 0x281c, 0, 0xffff0000, 0x00000000 },
5557  { 0x2834, 0, 0xffffffff, 0x00000000 },
5558  { 0x2840, 0, 0x00000000, 0xffffffff },
5559  { 0x2844, 0, 0x00000000, 0xffffffff },
5560  { 0x2848, 0, 0xffffffff, 0x00000000 },
5561  { 0x284c, 0, 0xf800f800, 0x07ff07ff },
5562 
5563  { 0x2c00, 0, 0x00000000, 0x00000011 },
5564  { 0x2c04, 0, 0x00000000, 0x00030007 },
5565 
5566  { 0x3c00, 0, 0x00000000, 0x00000001 },
5567  { 0x3c04, 0, 0x00000000, 0x00070000 },
5568  { 0x3c08, 0, 0x00007f71, 0x07f00000 },
5569  { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
5570  { 0x3c10, 0, 0xffffffff, 0x00000000 },
5571  { 0x3c14, 0, 0x00000000, 0xffffffff },
5572  { 0x3c18, 0, 0x00000000, 0xffffffff },
5573  { 0x3c1c, 0, 0xfffff000, 0x00000000 },
5574  { 0x3c20, 0, 0xffffff00, 0x00000000 },
5575 
5576  { 0x5004, 0, 0x00000000, 0x0000007f },
5577  { 0x5008, 0, 0x0f0007ff, 0x00000000 },
5578 
5579  { 0x5c00, 0, 0x00000000, 0x00000001 },
5580  { 0x5c04, 0, 0x00000000, 0x0003000f },
5581  { 0x5c08, 0, 0x00000003, 0x00000000 },
5582  { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
5583  { 0x5c10, 0, 0x00000000, 0xffffffff },
5584  { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
5585  { 0x5c84, 0, 0x00000000, 0x0000f333 },
5586  { 0x5c88, 0, 0x00000000, 0x00077373 },
5587  { 0x5c8c, 0, 0x00000000, 0x0007f737 },
5588 
5589  { 0x6808, 0, 0x0000ff7f, 0x00000000 },
5590  { 0x680c, 0, 0xffffffff, 0x00000000 },
5591  { 0x6810, 0, 0xffffffff, 0x00000000 },
5592  { 0x6814, 0, 0xffffffff, 0x00000000 },
5593  { 0x6818, 0, 0xffffffff, 0x00000000 },
5594  { 0x681c, 0, 0xffffffff, 0x00000000 },
5595  { 0x6820, 0, 0x00ff00ff, 0x00000000 },
5596  { 0x6824, 0, 0x00ff00ff, 0x00000000 },
5597  { 0x6828, 0, 0x00ff00ff, 0x00000000 },
5598  { 0x682c, 0, 0x03ff03ff, 0x00000000 },
5599  { 0x6830, 0, 0x03ff03ff, 0x00000000 },
5600  { 0x6834, 0, 0x03ff03ff, 0x00000000 },
5601  { 0x6838, 0, 0x03ff03ff, 0x00000000 },
5602  { 0x683c, 0, 0x0000ffff, 0x00000000 },
5603  { 0x6840, 0, 0x00000ff0, 0x00000000 },
5604  { 0x6844, 0, 0x00ffff00, 0x00000000 },
5605  { 0x684c, 0, 0xffffffff, 0x00000000 },
5606  { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
5607  { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
5608  { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
5609  { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
5610  { 0x6908, 0, 0x00000000, 0x0001ff0f },
5611  { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
5612 
5613  { 0xffff, 0, 0x00000000, 0x00000000 },
5614  };
5615 
5616  ret = 0;
5617  is_5709 = 0;
5618  if (CHIP_NUM(bp) == CHIP_NUM_5709)
5619  is_5709 = 1;
5620 
5621  for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
5622  u32 offset, rw_mask, ro_mask, save_val, val;
5623  u16 flags = reg_tbl[i].flags;
5624 
5625  if (is_5709 && (flags & BNX2_FL_NOT_5709))
5626  continue;
5627 
5628  offset = (u32) reg_tbl[i].offset;
5629  rw_mask = reg_tbl[i].rw_mask;
5630  ro_mask = reg_tbl[i].ro_mask;
5631 
5632  save_val = readl(bp->regview + offset);
5633 
5634  writel(0, bp->regview + offset);
5635 
5636  val = readl(bp->regview + offset);
5637  if ((val & rw_mask) != 0) {
5638  goto reg_test_err;
5639  }
5640 
5641  if ((val & ro_mask) != (save_val & ro_mask)) {
5642  goto reg_test_err;
5643  }
5644 
5645  writel(0xffffffff, bp->regview + offset);
5646 
5647  val = readl(bp->regview + offset);
5648  if ((val & rw_mask) != rw_mask) {
5649  goto reg_test_err;
5650  }
5651 
5652  if ((val & ro_mask) != (save_val & ro_mask)) {
5653  goto reg_test_err;
5654  }
5655 
5656  writel(save_val, bp->regview + offset);
5657  continue;
5658 
5659 reg_test_err:
5660  writel(save_val, bp->regview + offset);
5661  ret = -ENODEV;
5662  break;
5663  }
5664  return ret;
5665 }
5666 
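/* Memory self-test: every word of the window [start, start + size) is
 * written with each pattern through the indirect register interface and
 * immediately read back; a single mismatch fails the whole test.
 */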
5667 static int
5668 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5669 {
5670  static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5671  0xaaaaaaaa, 0xaa55aa55, 0x55aa55aa };
5672  int i;
5673 
5674  for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5675  u32 offset;
5676 
5677  for (offset = 0; offset < size; offset += 4) {
5678 
5679  bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5680 
5681  if (bnx2_reg_rd_ind(bp, start + offset) !=
5682  test_pattern[i]) {
5683  return -ENODEV;
5684  }
5685  }
5686  }
5687  return 0;
5688 }
5689 
5690 static int