Linux Kernel  3.7.1
qla3xxx.c
1 /*
2  * QLogic QLA3xxx NIC HBA Driver
3  * Copyright (c) 2003-2006 QLogic Corporation
4  *
5  * See LICENSE.qla3xxx for copyright and licensing details.
6  */
7 
8 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9 
10 #include <linux/kernel.h>
11 #include <linux/init.h>
12 #include <linux/types.h>
13 #include <linux/module.h>
14 #include <linux/list.h>
15 #include <linux/pci.h>
16 #include <linux/dma-mapping.h>
17 #include <linux/sched.h>
18 #include <linux/slab.h>
19 #include <linux/dmapool.h>
20 #include <linux/mempool.h>
21 #include <linux/spinlock.h>
22 #include <linux/kthread.h>
23 #include <linux/interrupt.h>
24 #include <linux/errno.h>
25 #include <linux/ioport.h>
26 #include <linux/ip.h>
27 #include <linux/in.h>
28 #include <linux/if_arp.h>
29 #include <linux/if_ether.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/ethtool.h>
33 #include <linux/skbuff.h>
34 #include <linux/rtnetlink.h>
35 #include <linux/if_vlan.h>
36 #include <linux/delay.h>
37 #include <linux/mm.h>
38 #include <linux/prefetch.h>
39 
40 #include "qla3xxx.h"
41 
42 #define DRV_NAME "qla3xxx"
43 #define DRV_STRING "QLogic ISP3XXX Network Driver"
44 #define DRV_VERSION "v2.03.00-k5"
45 
46 static const char ql3xxx_driver_name[] = DRV_NAME;
47 static const char ql3xxx_driver_version[] = DRV_VERSION;
48 
49 #define TIMED_OUT_MSG \
50 "Timed out waiting for management port to get free before issuing command\n"
51 
52 MODULE_AUTHOR("QLogic Corporation");
53 MODULE_DESCRIPTION("QLogic ISP3XXX Network Driver " DRV_VERSION " ");
54 MODULE_LICENSE("GPL");
55 MODULE_VERSION(DRV_VERSION);
56 
57 static const u32 default_msg
58     = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
59     | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;
60 
61 static int debug = -1; /* defaults above */
62 module_param(debug, int, 0);
63 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
64 
65 static int msi;
66 module_param(msi, int, 0);
67 MODULE_PARM_DESC(msi, "Turn on Message Signaled Interrupts.");
68 
69 static DEFINE_PCI_DEVICE_TABLE(ql3xxx_pci_tbl) = {
70  {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3022_DEVICE_ID)},
71  {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3032_DEVICE_ID)},
72  /* required last entry */
73  {0,}
74 };
75 
76 MODULE_DEVICE_TABLE(pci, ql3xxx_pci_tbl);
77 
78 /*
79  * These are the known PHY's which are used
80  */
81 enum PHY_DEVICE_TYPE {
82    PHY_TYPE_UNKNOWN   = 0,
83    PHY_VITESSE_VSC8211,
84    PHY_AGERE_ET1011C,
85    MAX_PHY_DEV_TYPES
86 };
87 
88 struct PHY_DEVICE_INFO {
89  const enum PHY_DEVICE_TYPE phyDevice;
90  const u32 phyIdOUI;
91  const u16 phyIdModel;
92  const char *name;
93 };
94 
95 static const struct PHY_DEVICE_INFO PHY_DEVICES[] = {
96  {PHY_TYPE_UNKNOWN, 0x000000, 0x0, "PHY_TYPE_UNKNOWN"},
97  {PHY_VITESSE_VSC8211, 0x0003f1, 0xb, "PHY_VITESSE_VSC8211"},
98  {PHY_AGERE_ET1011C, 0x00a0bc, 0x1, "PHY_AGERE_ET1011C"},
99 };
100 
101 
102 /*
103  * Caller must take hw_lock.
104  */
105 static int ql_sem_spinlock(struct ql3_adapter *qdev,
106  u32 sem_mask, u32 sem_bits)
107 {
108  struct ql3xxx_port_registers __iomem *port_regs =
109  qdev->mem_map_registers;
110  u32 value;
111  unsigned int seconds = 3;
112 
113  do {
114  writel((sem_mask | sem_bits),
115  &port_regs->CommonRegs.semaphoreReg);
116  value = readl(&port_regs->CommonRegs.semaphoreReg);
117  if ((value & (sem_mask >> 16)) == sem_bits)
118  return 0;
119  ssleep(1);
120  } while (--seconds);
121  return -1;
122 }
123 
124 static void ql_sem_unlock(struct ql3_adapter *qdev, u32 sem_mask)
125 {
126  struct ql3xxx_port_registers __iomem *port_regs =
127  qdev->mem_map_registers;
128  writel(sem_mask, &port_regs->CommonRegs.semaphoreReg);
129  readl(&port_regs->CommonRegs.semaphoreReg);
130 }
131 
132 static int ql_sem_lock(struct ql3_adapter *qdev, u32 sem_mask, u32 sem_bits)
133 {
134  struct ql3xxx_port_registers __iomem *port_regs =
135  qdev->mem_map_registers;
136  u32 value;
137 
138  writel((sem_mask | sem_bits), &port_regs->CommonRegs.semaphoreReg);
139  value = readl(&port_regs->CommonRegs.semaphoreReg);
140  return ((value & (sem_mask >> 16)) == sem_bits);
141 }
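The hardware semaphore convention used by ql_sem_spinlock() and ql_sem_lock() above packs a write-enable mask into the upper 16 bits of semaphoreReg and the requested ownership bits into the lower 16 bits; ownership is confirmed by reading the register back and masking with (sem_mask >> 16). A minimal sketch of that check, with a plain u32 standing in for the memory-mapped register (the real device arbitrates between competing writers, which this sketch does not model):

 /* Illustrative sketch only, not part of qla3xxx.c. */
 static int demo_sem_try_lock(u32 *semreg, u32 sem_mask, u32 sem_bits)
 {
 	u32 value;

 	*semreg = sem_mask | sem_bits;		/* request ownership */
 	value = *semreg;			/* read back what was granted */
 	return (value & (sem_mask >> 16)) == sem_bits;
 }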
142 
143 /*
144  * Caller holds hw_lock.
145  */
146 static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev)
147 {
148  int i = 0;
149 
150  while (i < 10) {
151  if (i)
152  ssleep(1);
153 
154  if (ql_sem_lock(qdev,
155  QL_DRVR_SEM_MASK,
156  (QL_RESET_4_CHIP_READY << (qdev->mac_index)
157  * 2) << 1)) {
158  netdev_printk(KERN_DEBUG, qdev->ndev,
159  "driver lock acquired\n");
160  return 1;
161  }
162  }
163 
164  netdev_err(qdev->ndev, "Timed out waiting for driver lock...\n");
165  return 0;
166 }
167 
168 static void ql_set_register_page(struct ql3_adapter *qdev, u32 page)
169 {
170  struct ql3xxx_port_registers __iomem *port_regs =
171  qdev->mem_map_registers;
172 
173  writel(((ISP_CONTROL_NP_MASK << 16) | page),
174  &port_regs->CommonRegs.ispControlStatus);
175  readl(&port_regs->CommonRegs.ispControlStatus);
176  qdev->current_page = page;
177 }
178 
179 static u32 ql_read_common_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg)
180 {
181  u32 value;
182  unsigned long hw_flags;
183 
184  spin_lock_irqsave(&qdev->hw_lock, hw_flags);
185  value = readl(reg);
186  spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
187 
188  return value;
189 }
190 
191 static u32 ql_read_common_reg(struct ql3_adapter *qdev, u32 __iomem *reg)
192 {
193  return readl(reg);
194 }
195 
196 static u32 ql_read_page0_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg)
197 {
198  u32 value;
199  unsigned long hw_flags;
200 
201  spin_lock_irqsave(&qdev->hw_lock, hw_flags);
202 
203  if (qdev->current_page != 0)
204  ql_set_register_page(qdev, 0);
205  value = readl(reg);
206 
207  spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
208  return value;
209 }
210 
211 static u32 ql_read_page0_reg(struct ql3_adapter *qdev, u32 __iomem *reg)
212 {
213  if (qdev->current_page != 0)
214  ql_set_register_page(qdev, 0);
215  return readl(reg);
216 }
217 
218 static void ql_write_common_reg_l(struct ql3_adapter *qdev,
219  u32 __iomem *reg, u32 value)
220 {
221  unsigned long hw_flags;
222 
223  spin_lock_irqsave(&qdev->hw_lock, hw_flags);
224  writel(value, reg);
225  readl(reg);
226  spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
227 }
228 
229 static void ql_write_common_reg(struct ql3_adapter *qdev,
230  u32 __iomem *reg, u32 value)
231 {
232  writel(value, reg);
233  readl(reg);
234 }
235 
236 static void ql_write_nvram_reg(struct ql3_adapter *qdev,
237  u32 __iomem *reg, u32 value)
238 {
239  writel(value, reg);
240  readl(reg);
241  udelay(1);
242 }
243 
244 static void ql_write_page0_reg(struct ql3_adapter *qdev,
245  u32 __iomem *reg, u32 value)
246 {
247  if (qdev->current_page != 0)
248  ql_set_register_page(qdev, 0);
249  writel(value, reg);
250  readl(reg);
251 }
252 
253 /*
254  * Caller holds hw_lock. Only called during init.
255  */
256 static void ql_write_page1_reg(struct ql3_adapter *qdev,
257  u32 __iomem *reg, u32 value)
258 {
259  if (qdev->current_page != 1)
260  ql_set_register_page(qdev, 1);
261  writel(value, reg);
262  readl(reg);
263 }
264 
265 /*
266  * Caller holds hw_lock. Only called during init.
267  */
268 static void ql_write_page2_reg(struct ql3_adapter *qdev,
269  u32 __iomem *reg, u32 value)
270 {
271  if (qdev->current_page != 2)
272  ql_set_register_page(qdev, 2);
273  writel(value, reg);
274  readl(reg);
275 }
276 
277 static void ql_disable_interrupts(struct ql3_adapter *qdev)
278 {
279  struct ql3xxx_port_registers __iomem *port_regs =
280  qdev->mem_map_registers;
281 
282  ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
283  (ISP_IMR_ENABLE_INT << 16));
284 
285 }
286 
287 static void ql_enable_interrupts(struct ql3_adapter *qdev)
288 {
289  struct ql3xxx_port_registers __iomem *port_regs =
290  qdev->mem_map_registers;
291 
292  ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
293  ((0xff << 16) | ISP_IMR_ENABLE_INT));
294 
295 }
296 
297 static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
298  struct ql_rcv_buf_cb *lrg_buf_cb)
299 {
300  dma_addr_t map;
301  int err;
302  lrg_buf_cb->next = NULL;
303 
304  if (qdev->lrg_buf_free_tail == NULL) { /* The list is empty */
305  qdev->lrg_buf_free_head = qdev->lrg_buf_free_tail = lrg_buf_cb;
306  } else {
307  qdev->lrg_buf_free_tail->next = lrg_buf_cb;
308  qdev->lrg_buf_free_tail = lrg_buf_cb;
309  }
310 
311  if (!lrg_buf_cb->skb) {
312  lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev,
313  qdev->lrg_buffer_len);
314  if (unlikely(!lrg_buf_cb->skb)) {
315  netdev_err(qdev->ndev, "failed netdev_alloc_skb()\n");
316  qdev->lrg_buf_skb_check++;
317  } else {
318  /*
319  * We save some space to copy the ethhdr from first
320  * buffer
321  */
322  skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
323  map = pci_map_single(qdev->pdev,
324  lrg_buf_cb->skb->data,
325  qdev->lrg_buffer_len -
326  QL_HEADER_SPACE,
327  PCI_DMA_FROMDEVICE);
328  err = pci_dma_mapping_error(qdev->pdev, map);
329  if (err) {
330  netdev_err(qdev->ndev,
331  "PCI mapping failed with error: %d\n",
332  err);
333  dev_kfree_skb(lrg_buf_cb->skb);
334  lrg_buf_cb->skb = NULL;
335 
336  qdev->lrg_buf_skb_check++;
337  return;
338  }
339 
340  lrg_buf_cb->buf_phy_addr_low =
341  cpu_to_le32(LS_64BITS(map));
342  lrg_buf_cb->buf_phy_addr_high =
343  cpu_to_le32(MS_64BITS(map));
344  dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
345  dma_unmap_len_set(lrg_buf_cb, maplen,
346  qdev->lrg_buffer_len -
347  QL_HEADER_SPACE);
348  }
349  }
350 
351  qdev->lrg_buf_free_count++;
352 }
353 
354 static struct ql_rcv_buf_cb *ql_get_from_lrg_buf_free_list(struct ql3_adapter
355  *qdev)
356 {
357  struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head;
358 
359  if (lrg_buf_cb != NULL) {
360  qdev->lrg_buf_free_head = lrg_buf_cb->next;
361  if (qdev->lrg_buf_free_head == NULL)
362  qdev->lrg_buf_free_tail = NULL;
363  qdev->lrg_buf_free_count--;
364  }
365 
366  return lrg_buf_cb;
367 }
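ql_release_to_lrg_buf_free_list() and ql_get_from_lrg_buf_free_list() above keep the large-buffer free list as a plain singly linked FIFO: released control blocks are appended at lrg_buf_free_tail, reused ones are popped from lrg_buf_free_head, and lrg_buf_free_count tracks the depth. A self-contained sketch of the same head/tail pattern, using a hypothetical node type rather than struct ql_rcv_buf_cb:

 /* Illustrative sketch only, not part of qla3xxx.c. */
 struct demo_node {
 	struct demo_node *next;
 };

 /* Append at the tail; the list is empty when tail == NULL. */
 static void demo_fifo_push(struct demo_node **head, struct demo_node **tail,
 			   struct demo_node *n)
 {
 	n->next = NULL;
 	if (*tail == NULL) {
 		*head = *tail = n;
 	} else {
 		(*tail)->next = n;
 		*tail = n;
 	}
 }

 /* Pop from the head; returns NULL when the list is empty. */
 static struct demo_node *demo_fifo_pop(struct demo_node **head,
 					struct demo_node **tail)
 {
 	struct demo_node *n = *head;

 	if (n) {
 		*head = n->next;
 		if (*head == NULL)
 			*tail = NULL;
 	}
 	return n;
 }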
368 
369 static u32 addrBits = EEPROM_NO_ADDR_BITS;
370 static u32 dataBits = EEPROM_NO_DATA_BITS;
371 
372 static void fm93c56a_deselect(struct ql3_adapter *qdev);
373 static void eeprom_readword(struct ql3_adapter *qdev, u32 eepromAddr,
374  unsigned short *value);
375 
376 /*
377  * Caller holds hw_lock.
378  */
379 static void fm93c56a_select(struct ql3_adapter *qdev)
380 {
381  struct ql3xxx_port_registers __iomem *port_regs =
382  qdev->mem_map_registers;
383  __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
384 
385  qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1;
386  ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
387  ql_write_nvram_reg(qdev, spir,
388  ((ISP_NVRAM_MASK << 16) | qdev->eeprom_cmd_data));
389 }
390 
391 /*
392  * Caller holds hw_lock.
393  */
394 static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr)
395 {
396  int i;
397  u32 mask;
398  u32 dataBit;
399  u32 previousBit;
400  struct ql3xxx_port_registers __iomem *port_regs =
401  qdev->mem_map_registers;
402  __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
403 
404  /* Clock in a zero, then do the start bit */
405  ql_write_nvram_reg(qdev, spir,
406  (ISP_NVRAM_MASK |
407  qdev->eeprom_cmd_data | AUBURN_EEPROM_DO_1));
408  ql_write_nvram_reg(qdev, spir,
409  (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
410  AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_RISE));
411  ql_write_nvram_reg(qdev, spir,
412  (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
413  AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_FALL));
414 
415  mask = 1 << (FM93C56A_CMD_BITS - 1);
416  /* Force the previous data bit to be different */
417  previousBit = 0xffff;
418  for (i = 0; i < FM93C56A_CMD_BITS; i++) {
419  dataBit = (cmd & mask)
420  ? AUBURN_EEPROM_DO_1
421  : AUBURN_EEPROM_DO_0;
422  if (previousBit != dataBit) {
423  /* If the bit changed, change the DO state to match */
424  ql_write_nvram_reg(qdev, spir,
425  (ISP_NVRAM_MASK |
426  qdev->eeprom_cmd_data | dataBit));
427  previousBit = dataBit;
428  }
429  ql_write_nvram_reg(qdev, spir,
430  (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
431  dataBit | AUBURN_EEPROM_CLK_RISE));
432  ql_write_nvram_reg(qdev, spir,
433  (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
434  dataBit | AUBURN_EEPROM_CLK_FALL));
435  cmd = cmd << 1;
436  }
437 
438  mask = 1 << (addrBits - 1);
439  /* Force the previous data bit to be different */
440  previousBit = 0xffff;
441  for (i = 0; i < addrBits; i++) {
442  dataBit = (eepromAddr & mask) ? AUBURN_EEPROM_DO_1
443  : AUBURN_EEPROM_DO_0;
444  if (previousBit != dataBit) {
445  /*
446  * If the bit changed, then change the DO state to
447  * match
448  */
449  ql_write_nvram_reg(qdev, spir,
450  (ISP_NVRAM_MASK |
451  qdev->eeprom_cmd_data | dataBit));
452  previousBit = dataBit;
453  }
454  ql_write_nvram_reg(qdev, spir,
455  (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
456  dataBit | AUBURN_EEPROM_CLK_RISE));
457  ql_write_nvram_reg(qdev, spir,
458  (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
459  dataBit | AUBURN_EEPROM_CLK_FALL));
460  eepromAddr = eepromAddr << 1;
461  }
462 }
463 
464 /*
465  * Caller holds hw_lock.
466  */
467 static void fm93c56a_deselect(struct ql3_adapter *qdev)
468 {
469  struct ql3xxx_port_registers __iomem *port_regs =
470  qdev->mem_map_registers;
471  __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
472 
473  qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0;
474  ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
475 }
476 
477 /*
478  * Caller holds hw_lock.
479  */
480 static void fm93c56a_datain(struct ql3_adapter *qdev, unsigned short *value)
481 {
482  int i;
483  u32 data = 0;
484  u32 dataBit;
485  struct ql3xxx_port_registers __iomem *port_regs =
486  qdev->mem_map_registers;
487  __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
488 
489  /* Read the data bits */
490  /* The first bit is a dummy. Clock right over it. */
491  for (i = 0; i < dataBits; i++) {
492  ql_write_nvram_reg(qdev, spir,
493  ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
494  AUBURN_EEPROM_CLK_RISE);
495  ql_write_nvram_reg(qdev, spir,
496  ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
497  AUBURN_EEPROM_CLK_FALL);
498  dataBit = (ql_read_common_reg(qdev, spir) &
499  AUBURN_EEPROM_DI_1) ? 1 : 0;
500  data = (data << 1) | dataBit;
501  }
502  *value = (u16)data;
503 }
504 
505 /*
506  * Caller holds hw_lock.
507  */
508 static void eeprom_readword(struct ql3_adapter *qdev,
509  u32 eepromAddr, unsigned short *value)
510 {
511  fm93c56a_select(qdev);
512  fm93c56a_cmd(qdev, (int)FM93C56A_READ, eepromAddr);
513  fm93c56a_datain(qdev, value);
514  fm93c56a_deselect(qdev);
515 }
516 
517 static void ql_set_mac_addr(struct net_device *ndev, u16 *addr)
518 {
519  __le16 *p = (__le16 *)ndev->dev_addr;
520  p[0] = cpu_to_le16(addr[0]);
521  p[1] = cpu_to_le16(addr[1]);
522  p[2] = cpu_to_le16(addr[2]);
523 }
524 
525 static int ql_get_nvram_params(struct ql3_adapter *qdev)
526 {
527  u16 *pEEPROMData;
528  u16 checksum = 0;
529  u32 index;
530  unsigned long hw_flags;
531 
532  spin_lock_irqsave(&qdev->hw_lock, hw_flags);
533 
534  pEEPROMData = (u16 *)&qdev->nvram_data;
535  qdev->eeprom_cmd_data = 0;
536  if (ql_sem_spinlock(qdev, QL_NVRAM_SEM_MASK,
537  (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
538  2) << 10)) {
539  pr_err("%s: Failed ql_sem_spinlock()\n", __func__);
540  spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
541  return -1;
542  }
543 
544  for (index = 0; index < EEPROM_SIZE; index++) {
545  eeprom_readword(qdev, index, pEEPROMData);
546  checksum += *pEEPROMData;
547  pEEPROMData++;
548  }
549  ql_sem_unlock(qdev, QL_NVRAM_SEM_MASK);
550 
551  if (checksum != 0) {
552  netdev_err(qdev->ndev, "checksum should be zero, is %x!!\n",
553  checksum);
554  spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
555  return -1;
556  }
557 
558  spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
559  return checksum;
560 }
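ql_get_nvram_params() accepts the EEPROM contents only when the 16-bit words of the whole image, read via eeprom_readword(), sum to zero modulo 2^16, which presumably means the image carries a compensating checksum word written when the board was provisioned. A small sketch of that validation rule over an in-memory buffer (hypothetical helper, not part of the driver):

 /* Illustrative sketch only, not part of qla3xxx.c. */
 static int demo_nvram_image_ok(const u16 *words, unsigned int nwords)
 {
 	u16 sum = 0;
 	unsigned int i;

 	for (i = 0; i < nwords; i++)
 		sum += words[i];	/* wraps modulo 2^16, like the driver's u16 checksum */

 	return sum == 0;		/* any non-zero remainder means a corrupt image */
 }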
561 
562 static const u32 PHYAddr[2] = {
563  PORT0_PHY_ADDRESS, PORT1_PHY_ADDRESS
564 };
565 
566 static int ql_wait_for_mii_ready(struct ql3_adapter *qdev)
567 {
568  struct ql3xxx_port_registers __iomem *port_regs =
569  qdev->mem_map_registers;
570  u32 temp;
571  int count = 1000;
572 
573  while (count) {
574  temp = ql_read_page0_reg(qdev, &port_regs->macMIIStatusReg);
575  if (!(temp & MAC_MII_STATUS_BSY))
576  return 0;
577  udelay(10);
578  count--;
579  }
580  return -1;
581 }
582 
583 static void ql_mii_enable_scan_mode(struct ql3_adapter *qdev)
584 {
585  struct ql3xxx_port_registers __iomem *port_regs =
586  qdev->mem_map_registers;
587  u32 scanControl;
588 
589  if (qdev->numPorts > 1) {
590  /* Auto scan will cycle through multiple ports */
591  scanControl = MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC;
592  } else {
593  scanControl = MAC_MII_CONTROL_SC;
594  }
595 
596  /*
597  * Scan register 1 of PHY/PETBI,
598  * Set up to scan both devices
599  * The autoscan starts from the first register, completes
600  * the last one before rolling over to the first
601  */
602  ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
603  PHYAddr[0] | MII_SCAN_REGISTER);
604 
605  ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
606  (scanControl) |
607  ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS) << 16));
608 }
609 
610 static u8 ql_mii_disable_scan_mode(struct ql3_adapter *qdev)
611 {
612  u8 ret;
613  struct ql3xxx_port_registers __iomem *port_regs =
614  qdev->mem_map_registers;
615 
616  /* See if scan mode is enabled before we turn it off */
617  if (ql_read_page0_reg(qdev, &port_regs->macMIIMgmtControlReg) &
618  (MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC)) {
619  /* Scan is enabled */
620  ret = 1;
621  } else {
622  /* Scan is disabled */
623  ret = 0;
624  }
625 
626  /*
627  * When disabling scan mode you must first change the MII register
628  * address
629  */
630  ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
631  PHYAddr[0] | MII_SCAN_REGISTER);
632 
633  ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
634  ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS |
635  MAC_MII_CONTROL_RC) << 16));
636 
637  return ret;
638 }
639 
640 static int ql_mii_write_reg_ex(struct ql3_adapter *qdev,
641  u16 regAddr, u16 value, u32 phyAddr)
642 {
643  struct ql3xxx_port_registers __iomem *port_regs =
644  qdev->mem_map_registers;
645  u8 scanWasEnabled;
646 
647  scanWasEnabled = ql_mii_disable_scan_mode(qdev);
648 
649  if (ql_wait_for_mii_ready(qdev)) {
650  netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
651  return -1;
652  }
653 
654  ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
655  phyAddr | regAddr);
656 
657  ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);
658 
659  /* Wait for write to complete 9/10/04 SJP */
660  if (ql_wait_for_mii_ready(qdev)) {
661  netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
662  return -1;
663  }
664 
665  if (scanWasEnabled)
666  ql_mii_enable_scan_mode(qdev);
667 
668  return 0;
669 }
670 
671 static int ql_mii_read_reg_ex(struct ql3_adapter *qdev, u16 regAddr,
672  u16 *value, u32 phyAddr)
673 {
674  struct ql3xxx_port_registers __iomem *port_regs =
675  qdev->mem_map_registers;
676  u8 scanWasEnabled;
677  u32 temp;
678 
679  scanWasEnabled = ql_mii_disable_scan_mode(qdev);
680 
681  if (ql_wait_for_mii_ready(qdev)) {
682  netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
683  return -1;
684  }
685 
686  ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
687  phyAddr | regAddr);
688 
689  ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
690  (MAC_MII_CONTROL_RC << 16));
691 
692  ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
693  (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);
694 
695  /* Wait for the read to complete */
696  if (ql_wait_for_mii_ready(qdev)) {
697  netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
698  return -1;
699  }
700 
701  temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
702  *value = (u16) temp;
703 
704  if (scanWasEnabled)
705  ql_mii_enable_scan_mode(qdev);
706 
707  return 0;
708 }
709 
710 static int ql_mii_write_reg(struct ql3_adapter *qdev, u16 regAddr, u16 value)
711 {
712  struct ql3xxx_port_registers __iomem *port_regs =
713  qdev->mem_map_registers;
714 
715  ql_mii_disable_scan_mode(qdev);
716 
717  if (ql_wait_for_mii_ready(qdev)) {
718  netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
719  return -1;
720  }
721 
722  ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
723  qdev->PHYAddr | regAddr);
724 
725  ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);
726 
727  /* Wait for write to complete. */
728  if (ql_wait_for_mii_ready(qdev)) {
729  netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
730  return -1;
731  }
732 
733  ql_mii_enable_scan_mode(qdev);
734 
735  return 0;
736 }
737 
738 static int ql_mii_read_reg(struct ql3_adapter *qdev, u16 regAddr, u16 *value)
739 {
740  u32 temp;
741  struct ql3xxx_port_registers __iomem *port_regs =
742  qdev->mem_map_registers;
743 
744  ql_mii_disable_scan_mode(qdev);
745 
746  if (ql_wait_for_mii_ready(qdev)) {
747  netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
748  return -1;
749  }
750 
751  ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
752  qdev->PHYAddr | regAddr);
753 
754  ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
755  (MAC_MII_CONTROL_RC << 16));
756 
757  ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
758  (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);
759 
760  /* Wait for the read to complete */
761  if (ql_wait_for_mii_ready(qdev)) {
762  netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
763  return -1;
764  }
765 
766  temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
767  *value = (u16) temp;
768 
769  ql_mii_enable_scan_mode(qdev);
770 
771  return 0;
772 }
773 
774 static void ql_petbi_reset(struct ql3_adapter *qdev)
775 {
776  ql_mii_write_reg(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET);
777 }
778 
779 static void ql_petbi_start_neg(struct ql3_adapter *qdev)
780 {
781  u16 reg;
782 
783  /* Enable Auto-negotiation sense */
784  ql_mii_read_reg(qdev, PETBI_TBI_CTRL, &reg);
785  reg |= PETBI_TBI_AUTO_SENSE;
786  ql_mii_write_reg(qdev, PETBI_TBI_CTRL, reg);
787 
788  ql_mii_write_reg(qdev, PETBI_NEG_ADVER,
789  PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX);
790 
791  ql_mii_write_reg(qdev, PETBI_CONTROL_REG,
792  PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
793  PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000);
794 
795 }
796 
797 static void ql_petbi_reset_ex(struct ql3_adapter *qdev)
798 {
799  ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET,
800  PHYAddr[qdev->mac_index]);
801 }
802 
803 static void ql_petbi_start_neg_ex(struct ql3_adapter *qdev)
804 {
805  u16 reg;
806 
807  /* Enable Auto-negotiation sense */
808  ql_mii_read_reg_ex(qdev, PETBI_TBI_CTRL, &reg,
809  PHYAddr[qdev->mac_index]);
810  reg |= PETBI_TBI_AUTO_SENSE;
811  ql_mii_write_reg_ex(qdev, PETBI_TBI_CTRL, reg,
812  PHYAddr[qdev->mac_index]);
813 
814  ql_mii_write_reg_ex(qdev, PETBI_NEG_ADVER,
815  PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX,
816  PHYAddr[qdev->mac_index]);
817 
818  ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG,
819  PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
820  PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000,
821  PHYAddr[qdev->mac_index]);
822 }
823 
824 static void ql_petbi_init(struct ql3_adapter *qdev)
825 {
826  ql_petbi_reset(qdev);
827  ql_petbi_start_neg(qdev);
828 }
829 
830 static void ql_petbi_init_ex(struct ql3_adapter *qdev)
831 {
832  ql_petbi_reset_ex(qdev);
833  ql_petbi_start_neg_ex(qdev);
834 }
835 
836 static int ql_is_petbi_neg_pause(struct ql3_adapter *qdev)
837 {
838  u16 reg;
839 
840  if (ql_mii_read_reg(qdev, PETBI_NEG_PARTNER, &reg) < 0)
841  return 0;
842 
843  return (reg & PETBI_NEG_PAUSE_MASK) == PETBI_NEG_PAUSE;
844 }
845 
846 static void phyAgereSpecificInit(struct ql3_adapter *qdev, u32 miiAddr)
847 {
848  netdev_info(qdev->ndev, "enabling Agere specific PHY\n");
849  /* power down device bit 11 = 1 */
850  ql_mii_write_reg_ex(qdev, 0x00, 0x1940, miiAddr);
851  /* enable diagnostic mode bit 2 = 1 */
852  ql_mii_write_reg_ex(qdev, 0x12, 0x840e, miiAddr);
853  /* 1000MB amplitude adjust (see Agere errata) */
854  ql_mii_write_reg_ex(qdev, 0x10, 0x8805, miiAddr);
855  /* 1000MB amplitude adjust (see Agere errata) */
856  ql_mii_write_reg_ex(qdev, 0x11, 0xf03e, miiAddr);
857  /* 100MB amplitude adjust (see Agere errata) */
858  ql_mii_write_reg_ex(qdev, 0x10, 0x8806, miiAddr);
859  /* 100MB amplitude adjust (see Agere errata) */
860  ql_mii_write_reg_ex(qdev, 0x11, 0x003e, miiAddr);
861  /* 10MB amplitude adjust (see Agere errata) */
862  ql_mii_write_reg_ex(qdev, 0x10, 0x8807, miiAddr);
863  /* 10MB amplitude adjust (see Agere errata) */
864  ql_mii_write_reg_ex(qdev, 0x11, 0x1f00, miiAddr);
865  /* point to hidden reg 0x2806 */
866  ql_mii_write_reg_ex(qdev, 0x10, 0x2806, miiAddr);
867  /* Write new PHYAD w/bit 5 set */
868  ql_mii_write_reg_ex(qdev, 0x11,
869  0x0020 | (PHYAddr[qdev->mac_index] >> 8), miiAddr);
870  /*
871  * Disable diagnostic mode bit 2 = 0
872  * Power up device bit 11 = 0
873  * Link up (on) and activity (blink)
874  */
875  ql_mii_write_reg(qdev, 0x12, 0x840a);
876  ql_mii_write_reg(qdev, 0x00, 0x1140);
877  ql_mii_write_reg(qdev, 0x1c, 0xfaf0);
878 }
879 
880 static enum PHY_DEVICE_TYPE getPhyType(struct ql3_adapter *qdev,
881  u16 phyIdReg0, u16 phyIdReg1)
882 {
883  enum PHY_DEVICE_TYPE result = PHY_TYPE_UNKNOWN;
884  u32 oui;
885  u16 model;
886  int i;
887 
888  if (phyIdReg0 == 0xffff)
889  return result;
890 
891  if (phyIdReg1 == 0xffff)
892  return result;
893 
894  /* oui is split between two registers */
895  oui = (phyIdReg0 << 6) | ((phyIdReg1 & PHY_OUI_1_MASK) >> 10);
896 
897  model = (phyIdReg1 & PHY_MODEL_MASK) >> 4;
898 
899  /* Scan table for this PHY */
900  for (i = 0; i < MAX_PHY_DEV_TYPES; i++) {
901  if ((oui == PHY_DEVICES[i].phyIdOUI) &&
902  (model == PHY_DEVICES[i].phyIdModel)) {
903  netdev_info(qdev->ndev, "Phy: %s\n",
904  PHY_DEVICES[i].name);
905  result = PHY_DEVICES[i].phyDevice;
906  break;
907  }
908  }
909 
910  return result;
911 }
912 
913 static int ql_phy_get_speed(struct ql3_adapter *qdev)
914 {
915  u16 reg;
916 
917  switch (qdev->phyType) {
918  case PHY_AGERE_ET1011C: {
919  if (ql_mii_read_reg(qdev, 0x1A, &reg) < 0)
920  return 0;
921 
922  reg = (reg >> 8) & 3;
923  break;
924  }
925  default:
926  if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
927  return 0;
928 
929  reg = (((reg & 0x18) >> 3) & 3);
930  }
931 
932  switch (reg) {
933  case 2:
934  return SPEED_1000;
935  case 1:
936  return SPEED_100;
937  case 0:
938  return SPEED_10;
939  default:
940  return -1;
941  }
942 }
943 
944 static int ql_is_full_dup(struct ql3_adapter *qdev)
945 {
946  u16 reg;
947 
948  switch (qdev->phyType) {
949  case PHY_AGERE_ET1011C: {
950  if (ql_mii_read_reg(qdev, 0x1A, &reg))
951  return 0;
952 
953  return ((reg & 0x0080) && (reg & 0x1000)) != 0;
954  }
955  case PHY_VITESSE_VSC8211:
956  default: {
957  if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
958  return 0;
959  return (reg & PHY_AUX_DUPLEX_STAT) != 0;
960  }
961  }
962 }
963 
964 static int ql_is_phy_neg_pause(struct ql3_adapter *qdev)
965 {
966  u16 reg;
967 
968  if (ql_mii_read_reg(qdev, PHY_NEG_PARTNER, &reg) < 0)
969  return 0;
970 
971  return (reg & PHY_NEG_PAUSE) != 0;
972 }
973 
974 static int PHY_Setup(struct ql3_adapter *qdev)
975 {
976  u16 reg1;
977  u16 reg2;
978  bool agereAddrChangeNeeded = false;
979  u32 miiAddr = 0;
980  int err;
981 
982  /* Determine the PHY we are using by reading the ID's */
983  err = ql_mii_read_reg(qdev, PHY_ID_0_REG, &reg1);
984  if (err != 0) {
985  netdev_err(qdev->ndev, "Could not read from reg PHY_ID_0_REG\n");
986  return err;
987  }
988 
989  err = ql_mii_read_reg(qdev, PHY_ID_1_REG, &reg2);
990  if (err != 0) {
991  netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG\n");
992  return err;
993  }
994 
995  /* Check if we have a Agere PHY */
996  if ((reg1 == 0xffff) || (reg2 == 0xffff)) {
997 
998  /* Determine which MII address we should be using
999  determined by the index of the card */
1000  if (qdev->mac_index == 0)
1001  miiAddr = MII_AGERE_ADDR_1;
1002  else
1003  miiAddr = MII_AGERE_ADDR_2;
1004 
1005  err = ql_mii_read_reg_ex(qdev, PHY_ID_0_REG, &reg1, miiAddr);
1006  if (err != 0) {
1007  netdev_err(qdev->ndev,
1008  "Could not read from reg PHY_ID_0_REG after Agere detected\n");
1009  return err;
1010  }
1011 
1012  err = ql_mii_read_reg_ex(qdev, PHY_ID_1_REG, &reg2, miiAddr);
1013  if (err != 0) {
1014  netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG after Agere detected\n");
1015  return err;
1016  }
1017 
1018  /* We need to remember to initialize the Agere PHY */
1019  agereAddrChangeNeeded = true;
1020  }
1021 
1022  /* Determine the particular PHY we have on board to apply
1023  PHY specific initializations */
1024  qdev->phyType = getPhyType(qdev, reg1, reg2);
1025 
1026  if ((qdev->phyType == PHY_AGERE_ET1011C) && agereAddrChangeNeeded) {
1027  /* need this here so address gets changed */
1028  phyAgereSpecificInit(qdev, miiAddr);
1029  } else if (qdev->phyType == PHY_TYPE_UNKNOWN) {
1030  netdev_err(qdev->ndev, "PHY is unknown\n");
1031  return -EIO;
1032  }
1033 
1034  return 0;
1035 }
1036 
1037 /*
1038  * Caller holds hw_lock.
1039  */
1040 static void ql_mac_enable(struct ql3_adapter *qdev, u32 enable)
1041 {
1042  struct ql3xxx_port_registers __iomem *port_regs =
1043  qdev->mem_map_registers;
1044  u32 value;
1045 
1046  if (enable)
1047  value = (MAC_CONFIG_REG_PE | (MAC_CONFIG_REG_PE << 16));
1048  else
1049  value = (MAC_CONFIG_REG_PE << 16);
1050 
1051  if (qdev->mac_index)
1052  ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
1053  else
1054  ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
1055 }
1056 
1057 /*
1058  * Caller holds hw_lock.
1059  */
1060 static void ql_mac_cfg_soft_reset(struct ql3_adapter *qdev, u32 enable)
1061 {
1062  struct ql3xxx_port_registers __iomem *port_regs =
1063  qdev->mem_map_registers;
1064  u32 value;
1065 
1066  if (enable)
1067  value = (MAC_CONFIG_REG_SR | (MAC_CONFIG_REG_SR << 16));
1068  else
1069  value = (MAC_CONFIG_REG_SR << 16);
1070 
1071  if (qdev->mac_index)
1072  ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
1073  else
1074  ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
1075 }
1076 
1077 /*
1078  * Caller holds hw_lock.
1079  */
1080 static void ql_mac_cfg_gig(struct ql3_adapter *qdev, u32 enable)
1081 {
1082  struct ql3xxx_port_registers __iomem *port_regs =
1083  qdev->mem_map_registers;
1084  u32 value;
1085 
1086  if (enable)
1087  value = (MAC_CONFIG_REG_GM | (MAC_CONFIG_REG_GM << 16));
1088  else
1089  value = (MAC_CONFIG_REG_GM << 16);
1090 
1091  if (qdev->mac_index)
1092  ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
1093  else
1094  ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
1095 }
1096 
1097 /*
1098  * Caller holds hw_lock.
1099  */
1100 static void ql_mac_cfg_full_dup(struct ql3_adapter *qdev, u32 enable)
1101 {
1102  struct ql3xxx_port_registers __iomem *port_regs =
1103  qdev->mem_map_registers;
1104  u32 value;
1105 
1106  if (enable)
1107  value = (MAC_CONFIG_REG_FD | (MAC_CONFIG_REG_FD << 16));
1108  else
1109  value = (MAC_CONFIG_REG_FD << 16);
1110 
1111  if (qdev->mac_index)
1112  ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
1113  else
1114  ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
1115 }
1116 
1117 /*
1118  * Caller holds hw_lock.
1119  */
1120 static void ql_mac_cfg_pause(struct ql3_adapter *qdev, u32 enable)
1121 {
1122  struct ql3xxx_port_registers __iomem *port_regs =
1123  qdev->mem_map_registers;
1124  u32 value;
1125 
1126  if (enable)
1127  value =
1128  ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) |
1129  ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16));
1130  else
1131  value = ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16);
1132 
1133  if (qdev->mac_index)
1134  ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
1135  else
1136  ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
1137 }
1138 
1139 /*
1140  * Caller holds hw_lock.
1141  */
1142 static int ql_is_fiber(struct ql3_adapter *qdev)
1143 {
1144  struct ql3xxx_port_registers __iomem *port_regs =
1145  qdev->mem_map_registers;
1146  u32 bitToCheck = 0;
1147  u32 temp;
1148 
1149  switch (qdev->mac_index) {
1150  case 0:
1151  bitToCheck = PORT_STATUS_SM0;
1152  break;
1153  case 1:
1154  bitToCheck = PORT_STATUS_SM1;
1155  break;
1156  }
1157 
1158  temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
1159  return (temp & bitToCheck) != 0;
1160 }
1161 
1162 static int ql_is_auto_cfg(struct ql3_adapter *qdev)
1163 {
1164  u16 reg;
1165  ql_mii_read_reg(qdev, 0x00, &reg);
1166  return (reg & 0x1000) != 0;
1167 }
1168 
1169 /*
1170  * Caller holds hw_lock.
1171  */
1172 static int ql_is_auto_neg_complete(struct ql3_adapter *qdev)
1173 {
1174  struct ql3xxx_port_registers __iomem *port_regs =
1175  qdev->mem_map_registers;
1176  u32 bitToCheck = 0;
1177  u32 temp;
1178 
1179  switch (qdev->mac_index) {
1180  case 0:
1181  bitToCheck = PORT_STATUS_AC0;
1182  break;
1183  case 1:
1184  bitToCheck = PORT_STATUS_AC1;
1185  break;
1186  }
1187 
1188  temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
1189  if (temp & bitToCheck) {
1190  netif_info(qdev, link, qdev->ndev, "Auto-Negotiate complete\n");
1191  return 1;
1192  }
1193  netif_info(qdev, link, qdev->ndev, "Auto-Negotiate incomplete\n");
1194  return 0;
1195 }
1196 
1197 /*
1198  * ql_is_neg_pause() returns 1 if pause was negotiated to be on
1199  */
1200 static int ql_is_neg_pause(struct ql3_adapter *qdev)
1201 {
1202  if (ql_is_fiber(qdev))
1203  return ql_is_petbi_neg_pause(qdev);
1204  else
1205  return ql_is_phy_neg_pause(qdev);
1206 }
1207 
1208 static int ql_auto_neg_error(struct ql3_adapter *qdev)
1209 {
1210  struct ql3xxx_port_registers __iomem *port_regs =
1211  qdev->mem_map_registers;
1212  u32 bitToCheck = 0;
1213  u32 temp;
1214 
1215  switch (qdev->mac_index) {
1216  case 0:
1217  bitToCheck = PORT_STATUS_AE0;
1218  break;
1219  case 1:
1220  bitToCheck = PORT_STATUS_AE1;
1221  break;
1222  }
1223  temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
1224  return (temp & bitToCheck) != 0;
1225 }
1226 
1227 static u32 ql_get_link_speed(struct ql3_adapter *qdev)
1228 {
1229  if (ql_is_fiber(qdev))
1230  return SPEED_1000;
1231  else
1232  return ql_phy_get_speed(qdev);
1233 }
1234 
1235 static int ql_is_link_full_dup(struct ql3_adapter *qdev)
1236 {
1237  if (ql_is_fiber(qdev))
1238  return 1;
1239  else
1240  return ql_is_full_dup(qdev);
1241 }
1242 
1243 /*
1244  * Caller holds hw_lock.
1245  */
1246 static int ql_link_down_detect(struct ql3_adapter *qdev)
1247 {
1248  struct ql3xxx_port_registers __iomem *port_regs =
1249  qdev->mem_map_registers;
1250  u32 bitToCheck = 0;
1251  u32 temp;
1252 
1253  switch (qdev->mac_index) {
1254  case 0:
1255  bitToCheck = ISP_CONTROL_LINK_DN_0;
1256  break;
1257  case 1:
1258  bitToCheck = ISP_CONTROL_LINK_DN_1;
1259  break;
1260  }
1261 
1262  temp =
1263  ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
1264  return (temp & bitToCheck) != 0;
1265 }
1266 
1267 /*
1268  * Caller holds hw_lock.
1269  */
1270 static int ql_link_down_detect_clear(struct ql3_adapter *qdev)
1271 {
1272  struct ql3xxx_port_registers __iomem *port_regs =
1273  qdev->mem_map_registers;
1274 
1275  switch (qdev->mac_index) {
1276  case 0:
1277  ql_write_common_reg(qdev,
1278  &port_regs->CommonRegs.ispControlStatus,
1279  (ISP_CONTROL_LINK_DN_0) |
1280  (ISP_CONTROL_LINK_DN_0 << 16));
1281  break;
1282 
1283  case 1:
1284  ql_write_common_reg(qdev,
1285  &port_regs->CommonRegs.ispControlStatus,
1286  (ISP_CONTROL_LINK_DN_1) |
1287  (ISP_CONTROL_LINK_DN_1 << 16));
1288  break;
1289 
1290  default:
1291  return 1;
1292  }
1293 
1294  return 0;
1295 }
1296 
1297 /*
1298  * Caller holds hw_lock.
1299  */
1300 static int ql_this_adapter_controls_port(struct ql3_adapter *qdev)
1301 {
1302  struct ql3xxx_port_registers __iomem *port_regs =
1303  qdev->mem_map_registers;
1304  u32 bitToCheck = 0;
1305  u32 temp;
1306 
1307  switch (qdev->mac_index) {
1308  case 0:
1309  bitToCheck = PORT_STATUS_F1_ENABLED;
1310  break;
1311  case 1:
1312  bitToCheck = PORT_STATUS_F3_ENABLED;
1313  break;
1314  default:
1315  break;
1316  }
1317 
1318  temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
1319  if (temp & bitToCheck) {
1320  netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
1321  "not link master\n");
1322  return 0;
1323  }
1324 
1325  netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, "link master\n");
1326  return 1;
1327 }
1328 
1329 static void ql_phy_reset_ex(struct ql3_adapter *qdev)
1330 {
1331  ql_mii_write_reg_ex(qdev, CONTROL_REG, PHY_CTRL_SOFT_RESET,
1332  PHYAddr[qdev->mac_index]);
1333 }
1334 
1335 static void ql_phy_start_neg_ex(struct ql3_adapter *qdev)
1336 {
1337  u16 reg;
1338  u16 portConfiguration;
1339 
1340  if (qdev->phyType == PHY_AGERE_ET1011C)
1341  ql_mii_write_reg(qdev, 0x13, 0x0000);
1342  /* turn off external loopback */
1343 
1344  if (qdev->mac_index == 0)
1345  portConfiguration =
1346  qdev->nvram_data.macCfg_port0.portConfiguration;
1347  else
1348  portConfiguration =
1349  qdev->nvram_data.macCfg_port1.portConfiguration;
1350 
1351  /* Some HBA's in the field are set to 0 and they need to
1352  be reinterpreted with a default value */
1353  if (portConfiguration == 0)
1354  portConfiguration = PORT_CONFIG_DEFAULT;
1355 
1356  /* Set the 1000 advertisements */
1357  ql_mii_read_reg_ex(qdev, PHY_GIG_CONTROL, &reg,
1358  PHYAddr[qdev->mac_index]);
1359  reg &= ~PHY_GIG_ALL_PARAMS;
1360 
1361  if (portConfiguration & PORT_CONFIG_1000MB_SPEED) {
1362  if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED)
1363  reg |= PHY_GIG_ADV_1000F;
1364  else
1365  reg |= PHY_GIG_ADV_1000H;
1366  }
1367 
1368  ql_mii_write_reg_ex(qdev, PHY_GIG_CONTROL, reg,
1369  PHYAddr[qdev->mac_index]);
1370 
1371  /* Set the 10/100 & pause negotiation advertisements */
1372  ql_mii_read_reg_ex(qdev, PHY_NEG_ADVER, &reg,
1373  PHYAddr[qdev->mac_index]);
1374  reg &= ~PHY_NEG_ALL_PARAMS;
1375 
1376  if (portConfiguration & PORT_CONFIG_SYM_PAUSE_ENABLED)
1377  reg |= PHY_NEG_SYM_PAUSE;
1378 
1379  if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED) {
1380  if (portConfiguration & PORT_CONFIG_100MB_SPEED)
1381  reg |= PHY_NEG_ADV_100F;
1382 
1383  if (portConfiguration & PORT_CONFIG_10MB_SPEED)
1384  reg |= PHY_NEG_ADV_10F;
1385  }
1386 
1387  if (portConfiguration & PORT_CONFIG_HALF_DUPLEX_ENABLED) {
1388  if (portConfiguration & PORT_CONFIG_100MB_SPEED)
1389  reg |= PHY_NEG_ADV_100H;
1390 
1391  if (portConfiguration & PORT_CONFIG_10MB_SPEED)
1392  reg |= PHY_NEG_ADV_10H;
1393  }
1394 
1395  if (portConfiguration & PORT_CONFIG_1000MB_SPEED)
1396  reg |= 1;
1397 
1398  ql_mii_write_reg_ex(qdev, PHY_NEG_ADVER, reg,
1399  PHYAddr[qdev->mac_index]);
1400 
1401  ql_mii_read_reg_ex(qdev, CONTROL_REG, &reg, PHYAddr[qdev->mac_index]);
1402 
1403  ql_mii_write_reg_ex(qdev, CONTROL_REG,
1404  reg | PHY_CTRL_RESTART_NEG | PHY_CTRL_AUTO_NEG,
1405  PHYAddr[qdev->mac_index]);
1406 }
1407 
1408 static void ql_phy_init_ex(struct ql3_adapter *qdev)
1409 {
1410  ql_phy_reset_ex(qdev);
1411  PHY_Setup(qdev);
1412  ql_phy_start_neg_ex(qdev);
1413 }
1414 
1415 /*
1416  * Caller holds hw_lock.
1417  */
1418 static u32 ql_get_link_state(struct ql3_adapter *qdev)
1419 {
1420  struct ql3xxx_port_registers __iomem *port_regs =
1421  qdev->mem_map_registers;
1422  u32 bitToCheck = 0;
1423  u32 temp, linkState;
1424 
1425  switch (qdev->mac_index) {
1426  case 0:
1427  bitToCheck = PORT_STATUS_UP0;
1428  break;
1429  case 1:
1430  bitToCheck = PORT_STATUS_UP1;
1431  break;
1432  }
1433 
1434  temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
1435  if (temp & bitToCheck)
1436  linkState = LS_UP;
1437  else
1438  linkState = LS_DOWN;
1439 
1440  return linkState;
1441 }
1442 
1443 static int ql_port_start(struct ql3_adapter *qdev)
1444 {
1445  if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1446  (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
1447  2) << 7)) {
1448  netdev_err(qdev->ndev, "Could not get hw lock for GIO\n");
1449  return -1;
1450  }
1451 
1452  if (ql_is_fiber(qdev)) {
1453  ql_petbi_init(qdev);
1454  } else {
1455  /* Copper port */
1456  ql_phy_init_ex(qdev);
1457  }
1458 
1459  ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1460  return 0;
1461 }
1462 
1463 static int ql_finish_auto_neg(struct ql3_adapter *qdev)
1464 {
1465 
1466  if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1467  (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
1468  2) << 7))
1469  return -1;
1470 
1471  if (!ql_auto_neg_error(qdev)) {
1472  if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
1473  /* configure the MAC */
1474  netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
1475  "Configuring link\n");
1476  ql_mac_cfg_soft_reset(qdev, 1);
1477  ql_mac_cfg_gig(qdev,
1478  (ql_get_link_speed
1479  (qdev) ==
1480  SPEED_1000));
1481  ql_mac_cfg_full_dup(qdev,
1482  ql_is_link_full_dup
1483  (qdev));
1484  ql_mac_cfg_pause(qdev,
1485  ql_is_neg_pause
1486  (qdev));
1487  ql_mac_cfg_soft_reset(qdev, 0);
1488 
1489  /* enable the MAC */
1490  netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
1491  "Enabling mac\n");
1492  ql_mac_enable(qdev, 1);
1493  }
1494 
1495  qdev->port_link_state = LS_UP;
1496  netif_start_queue(qdev->ndev);
1497  netif_carrier_on(qdev->ndev);
1498  netif_info(qdev, link, qdev->ndev,
1499  "Link is up at %d Mbps, %s duplex\n",
1500  ql_get_link_speed(qdev),
1501  ql_is_link_full_dup(qdev) ? "full" : "half");
1502 
1503  } else { /* Remote error detected */
1504 
1505  if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
1506  netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
1507  "Remote error detected. Calling ql_port_start()\n");
1508  /*
1509  * ql_port_start() is shared code and needs
1510  * to lock the PHY on it's own.
1511  */
1512  ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1513  if (ql_port_start(qdev)) /* Restart port */
1514  return -1;
1515  return 0;
1516  }
1517  }
1518  ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1519  return 0;
1520 }
1521 
1522 static void ql_link_state_machine_work(struct work_struct *work)
1523 {
1524  struct ql3_adapter *qdev =
1525  container_of(work, struct ql3_adapter, link_state_work.work);
1526 
1527  u32 curr_link_state;
1528  unsigned long hw_flags;
1529 
1530  spin_lock_irqsave(&qdev->hw_lock, hw_flags);
1531 
1532  curr_link_state = ql_get_link_state(qdev);
1533 
1534  if (test_bit(QL_RESET_ACTIVE, &qdev->flags)) {
1535  netif_info(qdev, link, qdev->ndev,
1536  "Reset in progress, skip processing link state\n");
1537 
1538  spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1539 
1540  /* Restart timer on 2 second interval. */
1541  mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
1542 
1543  return;
1544  }
1545 
1546  switch (qdev->port_link_state) {
1547  default:
1548  if (test_bit(QL_LINK_MASTER, &qdev->flags))
1549  ql_port_start(qdev);
1550  qdev->port_link_state = LS_DOWN;
1551  /* Fall Through */
1552 
1553  case LS_DOWN:
1554  if (curr_link_state == LS_UP) {
1555  netif_info(qdev, link, qdev->ndev, "Link is up\n");
1556  if (ql_is_auto_neg_complete(qdev))
1557  ql_finish_auto_neg(qdev);
1558 
1559  if (qdev->port_link_state == LS_UP)
1560  ql_link_down_detect_clear(qdev);
1561 
1562  qdev->port_link_state = LS_UP;
1563  }
1564  break;
1565 
1566  case LS_UP:
1567  /*
1568  * See if the link is currently down or went down and came
1569  * back up
1570  */
1571  if (curr_link_state == LS_DOWN) {
1572  netif_info(qdev, link, qdev->ndev, "Link is down\n");
1573  qdev->port_link_state = LS_DOWN;
1574  }
1575  if (ql_link_down_detect(qdev))
1576  qdev->port_link_state = LS_DOWN;
1577  break;
1578  }
1579  spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1580 
1581  /* Restart timer on 2 second interval. */
1582  mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
1583 }
1584 
1585 /*
1586  * Caller must take hw_lock and QL_PHY_GIO_SEM.
1587  */
1588 static void ql_get_phy_owner(struct ql3_adapter *qdev)
1589 {
1590  if (ql_this_adapter_controls_port(qdev))
1591  set_bit(QL_LINK_MASTER, &qdev->flags);
1592  else
1593  clear_bit(QL_LINK_MASTER, &qdev->flags);
1594 }
1595 
1596 /*
1597  * Caller must take hw_lock and QL_PHY_GIO_SEM.
1598  */
1599 static void ql_init_scan_mode(struct ql3_adapter *qdev)
1600 {
1601  ql_mii_enable_scan_mode(qdev);
1602 
1603  if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) {
1604  if (ql_this_adapter_controls_port(qdev))
1605  ql_petbi_init_ex(qdev);
1606  } else {
1607  if (ql_this_adapter_controls_port(qdev))
1608  ql_phy_init_ex(qdev);
1609  }
1610 }
1611 
1612 /*
1613  * MII_Setup needs to be called before taking the PHY out of reset
1614  * so that the management interface clock speed can be set properly.
1615  * It would be better if we had a way to disable MDC until after the
1616  * PHY is out of reset, but we don't have that capability.
1617  */
1618 static int ql_mii_setup(struct ql3_adapter *qdev)
1619 {
1620  u32 reg;
1621  struct ql3xxx_port_registers __iomem *port_regs =
1622  qdev->mem_map_registers;
1623 
1624  if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1625  (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
1626  2) << 7))
1627  return -1;
1628 
1629  if (qdev->device_id == QL3032_DEVICE_ID)
1630  ql_write_page0_reg(qdev,
1631  &port_regs->macMIIMgmtControlReg, 0x0f00000);
1632 
1633  /* Divide 125MHz clock by 28 to meet PHY timing requirements */
1634  reg = MAC_MII_CONTROL_CLK_SEL_DIV28;
1635 
1636  ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
1637  reg | ((MAC_MII_CONTROL_CLK_SEL_MASK) << 16));
1638 
1639  ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1640  return 0;
1641 }
1642 
1643 #define SUPPORTED_OPTICAL_MODES (SUPPORTED_1000baseT_Full | \
1644  SUPPORTED_FIBRE | \
1645  SUPPORTED_Autoneg)
1646 #define SUPPORTED_TP_MODES (SUPPORTED_10baseT_Half | \
1647  SUPPORTED_10baseT_Full | \
1648  SUPPORTED_100baseT_Half | \
1649  SUPPORTED_100baseT_Full | \
1650  SUPPORTED_1000baseT_Half | \
1651  SUPPORTED_1000baseT_Full | \
1652  SUPPORTED_Autoneg | \
1653  SUPPORTED_TP) \
1654 
1655 static u32 ql_supported_modes(struct ql3_adapter *qdev)
1656 {
1657  if (test_bit(QL_LINK_OPTICAL, &qdev->flags))
1658  return SUPPORTED_OPTICAL_MODES;
1659 
1660  return SUPPORTED_TP_MODES;
1661 }
1662 
1663 static int ql_get_auto_cfg_status(struct ql3_adapter *qdev)
1664 {
1665  int status;
1666  unsigned long hw_flags;
1667  spin_lock_irqsave(&qdev->hw_lock, hw_flags);
1668  if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1669  (QL_RESOURCE_BITS_BASE_CODE |
1670  (qdev->mac_index) * 2) << 7)) {
1671  spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1672  return 0;
1673  }
1674  status = ql_is_auto_cfg(qdev);
1675  ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1676  spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1677  return status;
1678 }
1679 
1680 static u32 ql_get_speed(struct ql3_adapter *qdev)
1681 {
1682  u32 status;
1683  unsigned long hw_flags;
1684  spin_lock_irqsave(&qdev->hw_lock, hw_flags);
1685  if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1686  (QL_RESOURCE_BITS_BASE_CODE |
1687  (qdev->mac_index) * 2) << 7)) {
1688  spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1689  return 0;
1690  }
1691  status = ql_get_link_speed(qdev);
1692  ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1693  spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1694  return status;
1695 }
1696 
1697 static int ql_get_full_dup(struct ql3_adapter *qdev)
1698 {
1699  int status;
1700  unsigned long hw_flags;
1701  spin_lock_irqsave(&qdev->hw_lock, hw_flags);
1702  if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1703  (QL_RESOURCE_BITS_BASE_CODE |
1704  (qdev->mac_index) * 2) << 7)) {
1705  spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1706  return 0;
1707  }
1708  status = ql_is_link_full_dup(qdev);
1709  ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1710  spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1711  return status;
1712 }
1713 
1714 static int ql_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
1715 {
1716  struct ql3_adapter *qdev = netdev_priv(ndev);
1717 
1718  ecmd->transceiver = XCVR_INTERNAL;
1719  ecmd->supported = ql_supported_modes(qdev);
1720 
1721  if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) {
1722  ecmd->port = PORT_FIBRE;
1723  } else {
1724  ecmd->port = PORT_TP;
1725  ecmd->phy_address = qdev->PHYAddr;
1726  }
1727  ecmd->advertising = ql_supported_modes(qdev);
1728  ecmd->autoneg = ql_get_auto_cfg_status(qdev);
1729  ethtool_cmd_speed_set(ecmd, ql_get_speed(qdev));
1730  ecmd->duplex = ql_get_full_dup(qdev);
1731  return 0;
1732 }
1733 
1734 static void ql_get_drvinfo(struct net_device *ndev,
1735  struct ethtool_drvinfo *drvinfo)
1736 {
1737  struct ql3_adapter *qdev = netdev_priv(ndev);
1738  strlcpy(drvinfo->driver, ql3xxx_driver_name, sizeof(drvinfo->driver));
1739  strlcpy(drvinfo->version, ql3xxx_driver_version,
1740  sizeof(drvinfo->version));
1741  strlcpy(drvinfo->bus_info, pci_name(qdev->pdev),
1742  sizeof(drvinfo->bus_info));
1743  drvinfo->regdump_len = 0;
1744  drvinfo->eedump_len = 0;
1745 }
1746 
1747 static u32 ql_get_msglevel(struct net_device *ndev)
1748 {
1749  struct ql3_adapter *qdev = netdev_priv(ndev);
1750  return qdev->msg_enable;
1751 }
1752 
1753 static void ql_set_msglevel(struct net_device *ndev, u32 value)
1754 {
1755  struct ql3_adapter *qdev = netdev_priv(ndev);
1756  qdev->msg_enable = value;
1757 }
1758 
1759 static void ql_get_pauseparam(struct net_device *ndev,
1760  struct ethtool_pauseparam *pause)
1761 {
1762  struct ql3_adapter *qdev = netdev_priv(ndev);
1763  struct ql3xxx_port_registers __iomem *port_regs =
1764  qdev->mem_map_registers;
1765 
1766  u32 reg;
1767  if (qdev->mac_index == 0)
1768  reg = ql_read_page0_reg(qdev, &port_regs->mac0ConfigReg);
1769  else
1770  reg = ql_read_page0_reg(qdev, &port_regs->mac1ConfigReg);
1771 
1772  pause->autoneg = ql_get_auto_cfg_status(qdev);
1773  pause->rx_pause = (reg & MAC_CONFIG_REG_RF) >> 2;
1774  pause->tx_pause = (reg & MAC_CONFIG_REG_TF) >> 1;
1775 }
1776 
1777 static const struct ethtool_ops ql3xxx_ethtool_ops = {
1778  .get_settings = ql_get_settings,
1779  .get_drvinfo = ql_get_drvinfo,
1780  .get_link = ethtool_op_get_link,
1781  .get_msglevel = ql_get_msglevel,
1782  .set_msglevel = ql_set_msglevel,
1783  .get_pauseparam = ql_get_pauseparam,
1784 };
1785 
1786 static int ql_populate_free_queue(struct ql3_adapter *qdev)
1787 {
1788  struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head;
1789  dma_addr_t map;
1790  int err;
1791 
1792  while (lrg_buf_cb) {
1793  if (!lrg_buf_cb->skb) {
1794  lrg_buf_cb->skb =
1795  netdev_alloc_skb(qdev->ndev,
1796  qdev->lrg_buffer_len);
1797  if (unlikely(!lrg_buf_cb->skb)) {
1798  netdev_printk(KERN_DEBUG, qdev->ndev,
1799  "Failed netdev_alloc_skb()\n");
1800  break;
1801  } else {
1802  /*
1803  * We save some space to copy the ethhdr from
1804  * first buffer
1805  */
1806  skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
1807  map = pci_map_single(qdev->pdev,
1808  lrg_buf_cb->skb->data,
1809  qdev->lrg_buffer_len -
1810  QL_HEADER_SPACE,
1811  PCI_DMA_FROMDEVICE);
1812 
1813  err = pci_dma_mapping_error(qdev->pdev, map);
1814  if (err) {
1815  netdev_err(qdev->ndev,
1816  "PCI mapping failed with error: %d\n",
1817  err);
1818  dev_kfree_skb(lrg_buf_cb->skb);
1819  lrg_buf_cb->skb = NULL;
1820  break;
1821  }
1822 
1823 
1824  lrg_buf_cb->buf_phy_addr_low =
1825  cpu_to_le32(LS_64BITS(map));
1826  lrg_buf_cb->buf_phy_addr_high =
1827  cpu_to_le32(MS_64BITS(map));
1828  dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
1829  dma_unmap_len_set(lrg_buf_cb, maplen,
1830  qdev->lrg_buffer_len -
1831  QL_HEADER_SPACE);
1832  --qdev->lrg_buf_skb_check;
1833  if (!qdev->lrg_buf_skb_check)
1834  return 1;
1835  }
1836  }
1837  lrg_buf_cb = lrg_buf_cb->next;
1838  }
1839  return 0;
1840 }
1841 
1842 /*
1843  * Caller holds hw_lock.
1844  */
1845 static void ql_update_small_bufq_prod_index(struct ql3_adapter *qdev)
1846 {
1847  struct ql3xxx_port_registers __iomem *port_regs =
1848  qdev->mem_map_registers;
1849 
1850  if (qdev->small_buf_release_cnt >= 16) {
1851  while (qdev->small_buf_release_cnt >= 16) {
1852  qdev->small_buf_q_producer_index++;
1853 
1854  if (qdev->small_buf_q_producer_index ==
1855  NUM_SBUFQ_ENTRIES)
1856  qdev->small_buf_q_producer_index = 0;
1857  qdev->small_buf_release_cnt -= 8;
1858  }
1859  wmb();
1860  writel(qdev->small_buf_q_producer_index,
1861  &port_regs->CommonRegs.rxSmallQProducerIndex);
1862  }
1863 }
1864 
1865 /*
1866  * Caller holds hw_lock.
1867  */
1868 static void ql_update_lrg_bufq_prod_index(struct ql3_adapter *qdev)
1869 {
1870  struct bufq_addr_element *lrg_buf_q_ele;
1871  int i;
1872  struct ql_rcv_buf_cb *lrg_buf_cb;
1873  struct ql3xxx_port_registers __iomem *port_regs =
1874  qdev->mem_map_registers;
1875 
1876  if ((qdev->lrg_buf_free_count >= 8) &&
1877  (qdev->lrg_buf_release_cnt >= 16)) {
1878 
1879  if (qdev->lrg_buf_skb_check)
1880  if (!ql_populate_free_queue(qdev))
1881  return;
1882 
1883  lrg_buf_q_ele = qdev->lrg_buf_next_free;
1884 
1885  while ((qdev->lrg_buf_release_cnt >= 16) &&
1886  (qdev->lrg_buf_free_count >= 8)) {
1887 
1888  for (i = 0; i < 8; i++) {
1889  lrg_buf_cb =
1890  ql_get_from_lrg_buf_free_list(qdev);
1891  lrg_buf_q_ele->addr_high =
1892  lrg_buf_cb->buf_phy_addr_high;
1893  lrg_buf_q_ele->addr_low =
1894  lrg_buf_cb->buf_phy_addr_low;
1895  lrg_buf_q_ele++;
1896 
1897  qdev->lrg_buf_release_cnt--;
1898  }
1899 
1900  qdev->lrg_buf_q_producer_index++;
1901 
1902  if (qdev->lrg_buf_q_producer_index ==
1903  qdev->num_lbufq_entries)
1904  qdev->lrg_buf_q_producer_index = 0;
1905 
1906  if (qdev->lrg_buf_q_producer_index ==
1907  (qdev->num_lbufq_entries - 1)) {
1908  lrg_buf_q_ele = qdev->lrg_buf_q_virt_addr;
1909  }
1910  }
1911  wmb();
1912  qdev->lrg_buf_next_free = lrg_buf_q_ele;
1913  writel(qdev->lrg_buf_q_producer_index,
1914  &port_regs->CommonRegs.rxLargeQProducerIndex);
1915  }
1916 }
1917 
1918 static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
1919  struct ob_mac_iocb_rsp *mac_rsp)
1920 {
1921  struct ql_tx_buf_cb *tx_cb;
1922  int i;
1923  int retval = 0;
1924 
1925  if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
1926  netdev_warn(qdev->ndev,
1927  "Frame too short but it was padded and sent\n");
1928  }
1929 
1930  tx_cb = &qdev->tx_buf[mac_rsp->transaction_id];
1931 
1932  /* Check the transmit response flags for any errors */
1933  if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
1934  netdev_err(qdev->ndev,
1935  "Frame too short to be legal, frame not sent\n");
1936 
1937  qdev->ndev->stats.tx_errors++;
1938  retval = -EIO;
1939  goto frame_not_sent;
1940  }
1941 
1942  if (tx_cb->seg_count == 0) {
1943  netdev_err(qdev->ndev, "tx_cb->seg_count == 0: %d\n",
1944  mac_rsp->transaction_id);
1945 
1946  qdev->ndev->stats.tx_errors++;
1947  retval = -EIO;
1948  goto invalid_seg_count;
1949  }
1950 
1951  pci_unmap_single(qdev->pdev,
1952  dma_unmap_addr(&tx_cb->map[0], mapaddr),
1953  dma_unmap_len(&tx_cb->map[0], maplen),
1954  PCI_DMA_TODEVICE);
1955  tx_cb->seg_count--;
1956  if (tx_cb->seg_count) {
1957  for (i = 1; i < tx_cb->seg_count; i++) {
1958  pci_unmap_page(qdev->pdev,
1959  dma_unmap_addr(&tx_cb->map[i],
1960  mapaddr),
1961  dma_unmap_len(&tx_cb->map[i], maplen),
1962  PCI_DMA_TODEVICE);
1963  }
1964  }
1965  qdev->ndev->stats.tx_packets++;
1966  qdev->ndev->stats.tx_bytes += tx_cb->skb->len;
1967 
1968 frame_not_sent:
1969  dev_kfree_skb_irq(tx_cb->skb);
1970  tx_cb->skb = NULL;
1971 
1972 invalid_seg_count:
1973  atomic_inc(&qdev->tx_count);
1974 }
1975 
1976 static void ql_get_sbuf(struct ql3_adapter *qdev)
1977 {
1978  if (++qdev->small_buf_index == NUM_SMALL_BUFFERS)
1979  qdev->small_buf_index = 0;
1980  qdev->small_buf_release_cnt++;
1981 }
1982 
1983 static struct ql_rcv_buf_cb *ql_get_lbuf(struct ql3_adapter *qdev)
1984 {
1985  struct ql_rcv_buf_cb *lrg_buf_cb = NULL;
1986  lrg_buf_cb = &qdev->lrg_buf[qdev->lrg_buf_index];
1987  qdev->lrg_buf_release_cnt++;
1988  if (++qdev->lrg_buf_index == qdev->num_large_buffers)
1989  qdev->lrg_buf_index = 0;
1990  return lrg_buf_cb;
1991 }
1992 
1993 /*
1994  * The difference between 3022 and 3032 for inbound completions:
1995  * 3022 uses two buffers per completion. The first buffer contains
1996  * (some) header info, the second the remainder of the headers plus
1997  * the data. For this chip we reserve some space at the top of the
1998  * receive buffer so that the header info in buffer one can be
1999  * prepended to the buffer two. Buffer two is the sent up while
2000  * buffer one is returned to the hardware to be reused.
2001  * 3032 receives all of it's data and headers in one buffer for a
2002  * simpler process. 3032 also supports checksum verification as
2003  * can be seen in ql_process_macip_rx_intr().
2004  */
2005 static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
2006  struct ib_mac_iocb_rsp *ib_mac_rsp_ptr)
2007 {
2008  struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
2009  struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
2010  struct sk_buff *skb;
2011  u16 length = le16_to_cpu(ib_mac_rsp_ptr->length);
2012 
2013  /*
2014  * Get the inbound address list (small buffer).
2015  */
2016  ql_get_sbuf(qdev);
2017 
2018  if (qdev->device_id == QL3022_DEVICE_ID)
2019  lrg_buf_cb1 = ql_get_lbuf(qdev);
2020 
2021  /* start of second buffer */
2022  lrg_buf_cb2 = ql_get_lbuf(qdev);
2023  skb = lrg_buf_cb2->skb;
2024 
2025  qdev->ndev->stats.rx_packets++;
2026  qdev->ndev->stats.rx_bytes += length;
2027 
2028  skb_put(skb, length);
2029  pci_unmap_single(qdev->pdev,
2030  dma_unmap_addr(lrg_buf_cb2, mapaddr),
2031  dma_unmap_len(lrg_buf_cb2, maplen),
2032  PCI_DMA_FROMDEVICE);
2033  prefetch(skb->data);
2034  skb_checksum_none_assert(skb);
2035  skb->protocol = eth_type_trans(skb, qdev->ndev);
2036 
2037  netif_receive_skb(skb);
2038  lrg_buf_cb2->skb = NULL;
2039 
2040  if (qdev->device_id == QL3022_DEVICE_ID)
2041  ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
2042  ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
2043 }
2044 
2045 static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
2046  struct ib_ip_iocb_rsp *ib_ip_rsp_ptr)
2047 {
2048  struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
2049  struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
2050  struct sk_buff *skb1 = NULL, *skb2;
2051  struct net_device *ndev = qdev->ndev;
2052  u16 length = le16_to_cpu(ib_ip_rsp_ptr->length);
2053  u16 size = 0;
2054 
2055  /*
2056  * Get the inbound address list (small buffer).
2057  */
2058 
2059  ql_get_sbuf(qdev);
2060 
2061  if (qdev->device_id == QL3022_DEVICE_ID) {
2062  /* start of first buffer on 3022 */
2063  lrg_buf_cb1 = ql_get_lbuf(qdev);
2064  skb1 = lrg_buf_cb1->skb;
2065  size = ETH_HLEN;
2066  if (*((u16 *) skb1->data) != 0xFFFF)
2067  size += VLAN_ETH_HLEN - ETH_HLEN;
2068  }
2069 
2070  /* start of second buffer */
2071  lrg_buf_cb2 = ql_get_lbuf(qdev);
2072  skb2 = lrg_buf_cb2->skb;
2073 
2074  skb_put(skb2, length); /* Just the second buffer length here. */
2075  pci_unmap_single(qdev->pdev,
2076  dma_unmap_addr(lrg_buf_cb2, mapaddr),
2077  dma_unmap_len(lrg_buf_cb2, maplen),
2078  PCI_DMA_FROMDEVICE);
2079  prefetch(skb2->data);
2080 
2081  skb_checksum_none_assert(skb2);
2082  if (qdev->device_id == QL3022_DEVICE_ID) {
2083  /*
2084  * Copy the ethhdr from first buffer to second. This
2085  * is necessary for 3022 IP completions.
2086  */
2087  skb_copy_from_linear_data_offset(skb1, VLAN_ID_LEN,
2088  skb_push(skb2, size), size);
2089  } else {
2090  u16 checksum = le16_to_cpu(ib_ip_rsp_ptr->checksum);
2091  if (checksum &
2092  (IB_IP_IOCB_RSP_3032_ICE |
2093  IB_IP_IOCB_RSP_3032_CE)) {
2094  netdev_err(ndev,
2095  "%s: Bad checksum for this %s packet, checksum = %x\n",
2096  __func__,
2097  ((checksum & IB_IP_IOCB_RSP_3032_TCP) ?
2098  "TCP" : "UDP"), checksum);
2099  } else if ((checksum & IB_IP_IOCB_RSP_3032_TCP) ||
2100  (checksum & IB_IP_IOCB_RSP_3032_UDP &&
2101  !(checksum & IB_IP_IOCB_RSP_3032_NUC))) {
2102  skb2->ip_summed = CHECKSUM_UNNECESSARY;
2103  }
2104  }
2105  skb2->protocol = eth_type_trans(skb2, qdev->ndev);
2106 
2107  netif_receive_skb(skb2);
2108  ndev->stats.rx_packets++;
2109  ndev->stats.rx_bytes += length;
2110  lrg_buf_cb2->skb = NULL;
2111 
2112  if (qdev->device_id == QL3022_DEVICE_ID)
2113  ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
2114  ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
2115 }
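
/*
 * Illustrative note (not part of the original driver source): on the 3022,
 * the number of header bytes prepended from buffer one depends on the check
 * above. size starts at ETH_HLEN (14 bytes) and becomes VLAN_ETH_HLEN
 * (18 bytes) when the leading 16 bits of buffer one are not 0xFFFF.
 * skb_push(skb2, size) then opens that much room at the head of buffer two
 * before the header bytes are copied in from offset VLAN_ID_LEN of buffer
 * one, so buffer two can be handed up as a complete frame.
 */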
2116 
2117 static int ql_tx_rx_clean(struct ql3_adapter *qdev,
2118  int *tx_cleaned, int *rx_cleaned, int work_to_do)
2119 {
2120  struct net_rsp_iocb *net_rsp;
2121  struct net_device *ndev = qdev->ndev;
2122  int work_done = 0;
2123 
2124  /* While there are entries in the completion queue. */
2125  while ((le32_to_cpu(*(qdev->prsp_producer_index)) !=
2126  qdev->rsp_consumer_index) && (work_done < work_to_do)) {
2127 
2128  net_rsp = qdev->rsp_current;
2129  rmb();
2130  /*
2131  * Fix 3032 chip's undocumented "feature" where bit-8 is set
2132  * if the inbound completion is for a VLAN.
2133  */
2134  if (qdev->device_id == QL3032_DEVICE_ID)
2135  net_rsp->opcode &= 0x7f;
2136  switch (net_rsp->opcode) {
2137 
2138  case OPCODE_OB_MAC_IOCB_FN0:
2139  case OPCODE_OB_MAC_IOCB_FN2:
2140  ql_process_mac_tx_intr(qdev, (struct ob_mac_iocb_rsp *)
2141  net_rsp);
2142  (*tx_cleaned)++;
2143  break;
2144 
2145  case OPCODE_IB_MAC_IOCB:
2146  case OPCODE_IB_3032_MAC_IOCB:
2147  ql_process_mac_rx_intr(qdev, (struct ib_mac_iocb_rsp *)
2148  net_rsp);
2149  (*rx_cleaned)++;
2150  break;
2151 
2152  case OPCODE_IB_IP_IOCB:
2153  case OPCODE_IB_3032_IP_IOCB:
2154  ql_process_macip_rx_intr(qdev, (struct ib_ip_iocb_rsp *)
2155  net_rsp);
2156  (*rx_cleaned)++;
2157  break;
2158  default: {
2159  u32 *tmp = (u32 *)net_rsp;
2160  netdev_err(ndev,
2161  "Hit default case, not handled!\n"
2162  " dropping the packet, opcode = %x\n"
2163  "0x%08lx 0x%08lx 0x%08lx 0x%08lx\n",
2164  net_rsp->opcode,
2165  (unsigned long int)tmp[0],
2166  (unsigned long int)tmp[1],
2167  (unsigned long int)tmp[2],
2168  (unsigned long int)tmp[3]);
2169  }
2170  }
2171 
2172  qdev->rsp_consumer_index++;
2173 
2174  if (qdev->rsp_consumer_index == NUM_RSP_Q_ENTRIES) {
2175  qdev->rsp_consumer_index = 0;
2176  qdev->rsp_current = qdev->rsp_q_virt_addr;
2177  } else {
2178  qdev->rsp_current++;
2179  }
2180 
2181  work_done = *tx_cleaned + *rx_cleaned;
2182  }
2183 
2184  return work_done;
2185 }
2186 
2187 static int ql_poll(struct napi_struct *napi, int budget)
2188 {
2189  struct ql3_adapter *qdev = container_of(napi, struct ql3_adapter, napi);
2190  int rx_cleaned = 0, tx_cleaned = 0;
2191  unsigned long hw_flags;
2192  struct ql3xxx_port_registers __iomem *port_regs =
2193  qdev->mem_map_registers;
2194 
2195  ql_tx_rx_clean(qdev, &tx_cleaned, &rx_cleaned, budget);
2196 
2197  if (tx_cleaned + rx_cleaned != budget) {
2198  spin_lock_irqsave(&qdev->hw_lock, hw_flags);
2199  __napi_complete(napi);
2200  ql_update_small_bufq_prod_index(qdev);
2201  ql_update_lrg_bufq_prod_index(qdev);
2202  writel(qdev->rsp_consumer_index,
2203  &port_regs->CommonRegs.rspQConsumerIndex);
2204  spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
2205 
2206  ql_enable_interrupts(qdev);
2207  }
2208  return tx_cleaned + rx_cleaned;
2209 }
2210 
2211 static irqreturn_t ql3xxx_isr(int irq, void *dev_id)
2212 {
2213 
2214  struct net_device *ndev = dev_id;
2215  struct ql3_adapter *qdev = netdev_priv(ndev);
2216  struct ql3xxx_port_registers __iomem *port_regs =
2217  qdev->mem_map_registers;
2218  u32 value;
2219  int handled = 1;
2220  u32 var;
2221 
2222  value = ql_read_common_reg_l(qdev,
2223  &port_regs->CommonRegs.ispControlStatus);
2224 
2225  if (value & (ISP_CONTROL_FE | ISP_CONTROL_RI)) {
2226  spin_lock(&qdev->adapter_lock);
2227  netif_stop_queue(qdev->ndev);
2228  netif_carrier_off(qdev->ndev);
2229  ql_disable_interrupts(qdev);
2230  qdev->port_link_state = LS_DOWN;
2231  set_bit(QL_RESET_ACTIVE, &qdev->flags) ;
2232 
2233  if (value & ISP_CONTROL_FE) {
2234  /*
2235  * Chip Fatal Error.
2236  */
2237  var =
2238  ql_read_page0_reg_l(qdev,
2239  &port_regs->PortFatalErrStatus);
2240  netdev_warn(ndev,
2241  "Resetting chip. PortFatalErrStatus register = 0x%x\n",
2242  var);
2243  set_bit(QL_RESET_START, &qdev->flags) ;
2244  } else {
2245  /*
2246  * Soft Reset Requested.
2247  */
2248  set_bit(QL_RESET_PER_SCSI, &qdev->flags) ;
2249  netdev_err(ndev,
2250  "Another function issued a reset to the chip. ISR value = %x\n",
2251  value);
2252  }
2253  queue_delayed_work(qdev->workqueue, &qdev->reset_work, 0);
2254  spin_unlock(&qdev->adapter_lock);
2255  } else if (value & ISP_IMR_DISABLE_CMPL_INT) {
2256  ql_disable_interrupts(qdev);
2257  if (likely(napi_schedule_prep(&qdev->napi)))
2258  __napi_schedule(&qdev->napi);
2259  } else
2260  return IRQ_NONE;
2261 
2262  return IRQ_RETVAL(handled);
2263 }
2264 
2265 /*
2266  * Get the total number of segments needed for the given number of fragments.
2267  * This is necessary because outbound address lists (OAL) will be used when
2268  * more than two frags are given. Each address list has 5 addr/len pairs.
2269  * The 5th pair in each OAL is used to point to the next OAL if more frags
2270  * are coming. That is why the frags:segment count ratio is not linear.
2271  */
2272 static int ql_get_seg_count(struct ql3_adapter *qdev, unsigned short frags)
2273 {
2274  if (qdev->device_id == QL3022_DEVICE_ID)
2275  return 1;
2276 
2277  if (frags <= 2)
2278  return frags + 1;
2279  else if (frags <= 6)
2280  return frags + 2;
2281  else if (frags <= 10)
2282  return frags + 3;
2283  else if (frags <= 14)
2284  return frags + 4;
2285  else if (frags <= 18)
2286  return frags + 5;
2287  return -1;
2288 }
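
/*
 * Illustrative worked example (not part of the original driver source):
 * applying the mapping above, the segment counts returned for the 3032 are
 *
 *     frags:    1   2   3   6   7  10  11  14  15  18
 *     segments: 2   3   5   8  10  13  15  18  20  23
 *
 * The extra segments are the continuation entries that point at each
 * additional outbound address list (OAL), which is why the frags:segment
 * ratio is not linear. The 3022 path always returns 1 because it only
 * supports a single-segment transmission.
 */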
2289 
2290 static void ql_hw_csum_setup(const struct sk_buff *skb,
2291  struct ob_mac_iocb_req *mac_iocb_ptr)
2292 {
2293  const struct iphdr *ip = ip_hdr(skb);
2294 
2295  mac_iocb_ptr->ip_hdr_off = skb_network_offset(skb);
2296  mac_iocb_ptr->ip_hdr_len = ip->ihl;
2297 
2298  if (ip->protocol == IPPROTO_TCP) {
2299  mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_TC |
2300  OB_3032MAC_IOCB_REQ_IC;
2301  } else {
2302  mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_UC |
2303  OB_3032MAC_IOCB_REQ_IC;
2304  }
2305 
2306 }
2307 
2308 /*
2309  * Map the buffers for this transmit.
2310  * This will return NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
2311  */
2312 static int ql_send_map(struct ql3_adapter *qdev,
2313  struct ob_mac_iocb_req *mac_iocb_ptr,
2314  struct ql_tx_buf_cb *tx_cb,
2315  struct sk_buff *skb)
2316 {
2317  struct oal *oal;
2318  struct oal_entry *oal_entry;
2319  int len = skb_headlen(skb);
2320  dma_addr_t map;
2321  int err;
2322  int completed_segs, i;
2323  int seg_cnt, seg = 0;
2324  int frag_cnt = (int)skb_shinfo(skb)->nr_frags;
2325 
2326  seg_cnt = tx_cb->seg_count;
2327  /*
2328  * Map the skb buffer first.
2329  */
2330  map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
2331 
2332  err = pci_dma_mapping_error(qdev->pdev, map);
2333  if (err) {
2334  netdev_err(qdev->ndev, "PCI mapping failed with error: %d\n",
2335  err);
2336 
2337  return NETDEV_TX_BUSY;
2338  }
2339 
2340  oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;
2341  oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
2342  oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
2343  oal_entry->len = cpu_to_le32(len);
2344  dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
2345  dma_unmap_len_set(&tx_cb->map[seg], maplen, len);
2346  seg++;
2347 
2348  if (seg_cnt == 1) {
2349  /* Terminate the last segment. */
2350  oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY);
2351  return NETDEV_TX_OK;
2352  }
2353  oal = tx_cb->oal;
2354  for (completed_segs = 0;
2355  completed_segs < frag_cnt;
2356  completed_segs++, seg++) {
2357  skb_frag_t *frag = &skb_shinfo(skb)->frags[completed_segs];
2358  oal_entry++;
2359  /*
2360  * Check for continuation requirements.
2361  * It's strange but necessary.
2362  * Continuation entry points to outbound address list.
2363  */
2364  if ((seg == 2 && seg_cnt > 3) ||
2365  (seg == 7 && seg_cnt > 8) ||
2366  (seg == 12 && seg_cnt > 13) ||
2367  (seg == 17 && seg_cnt > 18)) {
2368  map = pci_map_single(qdev->pdev, oal,
2369  sizeof(struct oal),
2370  PCI_DMA_TODEVICE);
2371 
2372  err = pci_dma_mapping_error(qdev->pdev, map);
2373  if (err) {
2374  netdev_err(qdev->ndev,
2375  "PCI mapping outbound address list with error: %d\n",
2376  err);
2377  goto map_error;
2378  }
2379 
2380  oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
2381  oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
2382  oal_entry->len = cpu_to_le32(sizeof(struct oal) |
2383  OAL_CONT_ENTRY);
2384  dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
2385  dma_unmap_len_set(&tx_cb->map[seg], maplen,
2386  sizeof(struct oal));
2387  oal_entry = (struct oal_entry *)oal;
2388  oal++;
2389  seg++;
2390  }
2391 
2392  map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
2393  DMA_TO_DEVICE);
2394 
2395  err = dma_mapping_error(&qdev->pdev->dev, map);
2396  if (err) {
2397  netdev_err(qdev->ndev,
2398  "PCI mapping frags failed with error: %d\n",
2399  err);
2400  goto map_error;
2401  }
2402 
2403  oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
2404  oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
2405  oal_entry->len = cpu_to_le32(skb_frag_size(frag));
2406  dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
2407  dma_unmap_len_set(&tx_cb->map[seg], maplen, skb_frag_size(frag));
2408  }
2409  /* Terminate the last segment. */
2410  oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY);
2411  return NETDEV_TX_OK;
2412 
2413 map_error:
2414  /* A PCI mapping failed, so we need to back out.
2415  * Traverse the OALs and associated pages that have already been
2416  * mapped and unmap them to clean up properly.
2417  */
2418 
2419  seg = 1;
2420  oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;
2421  oal = tx_cb->oal;
2422  for (i = 0; i < completed_segs; i++, seg++) {
2423  oal_entry++;
2424 
2425  /*
2426  * Check for continuation requirements.
2427  * It's strange but necessary.
2428  */
2429 
2430  if ((seg == 2 && seg_cnt > 3) ||
2431  (seg == 7 && seg_cnt > 8) ||
2432  (seg == 12 && seg_cnt > 13) ||
2433  (seg == 17 && seg_cnt > 18)) {
2434  pci_unmap_single(qdev->pdev,
2435  dma_unmap_addr(&tx_cb->map[seg], mapaddr),
2436  dma_unmap_len(&tx_cb->map[seg], maplen),
2437  PCI_DMA_TODEVICE);
2438  oal++;
2439  seg++;
2440  }
2441 
2442  pci_unmap_page(qdev->pdev,
2443  dma_unmap_addr(&tx_cb->map[seg], mapaddr),
2444  dma_unmap_len(&tx_cb->map[seg], maplen),
2445  PCI_DMA_TODEVICE);
2446  }
2447 
2448  pci_unmap_single(qdev->pdev,
2449  dma_unmap_addr(&tx_cb->map[0], mapaddr),
2450  dma_unmap_addr(&tx_cb->map[0], maplen),
2451  PCI_DMA_TODEVICE);
2452 
2453  return NETDEV_TX_BUSY;
2454 
2455 }
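
/*
 * Illustrative sketch of the continuation arithmetic used above (not part of
 * the original driver source): the IOCB holds 3 address/length pairs and each
 * OAL holds 5, with the last pair of the IOCB (3rd) or OAL (5th) reserved as
 * a pointer to the next OAL when more data follows. Counting segments from 0,
 * the slots that must become continuation entries are therefore
 *
 *     seg  2              (3rd IOCB pair,  when seg_cnt > 3)
 *     seg  7 = 2 + 5      (5th pair of OAL #1, when seg_cnt > 8)
 *     seg 12 = 2 + 2*5    (5th pair of OAL #2, when seg_cnt > 13)
 *     seg 17 = 2 + 3*5    (5th pair of OAL #3, when seg_cnt > 18)
 *
 * which matches the (seg == 2/7/12/17, seg_cnt > 3/8/13/18) checks above.
 */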
2456 
2457 /*
2458  * The difference between 3022 and 3032 sends:
2459  * 3022 only supports a simple single segment transmission.
2460  * 3032 supports checksumming and scatter/gather lists (fragments).
2461  * The 3032 supports sglists by using the 3 addr/len pairs (ALP)
2462  * in the IOCB plus a chain of outbound address lists (OAL) that
2463  * each contain 5 ALPs. The last ALP of the IOCB (3rd) or OAL (5th)
2464  * will be used to point to an OAL when more ALP entries are required.
2465  * The IOCB is always the top of the chain followed by one or more
2466  * OALs (when necessary).
2467  */
2468 static netdev_tx_t ql3xxx_send(struct sk_buff *skb,
2469  struct net_device *ndev)
2470 {
2471  struct ql3_adapter *qdev = netdev_priv(ndev);
2472  struct ql3xxx_port_registers __iomem *port_regs =
2473  qdev->mem_map_registers;
2474  struct ql_tx_buf_cb *tx_cb;
2475  u32 tot_len = skb->len;
2476  struct ob_mac_iocb_req *mac_iocb_ptr;
2477 
2478  if (unlikely(atomic_read(&qdev->tx_count) < 2))
2479  return NETDEV_TX_BUSY;
2480 
2481  tx_cb = &qdev->tx_buf[qdev->req_producer_index];
2482  tx_cb->seg_count = ql_get_seg_count(qdev,
2483  skb_shinfo(skb)->nr_frags);
2484  if (tx_cb->seg_count == -1) {
2485  netdev_err(ndev, "%s: invalid segment count!\n", __func__);
2486  return NETDEV_TX_OK;
2487  }
2488 
2489  mac_iocb_ptr = tx_cb->queue_entry;
2490  memset((void *)mac_iocb_ptr, 0, sizeof(struct ob_mac_iocb_req));
2491  mac_iocb_ptr->opcode = qdev->mac_ob_opcode;
2492  mac_iocb_ptr->flags = OB_MAC_IOCB_REQ_X;
2493  mac_iocb_ptr->flags |= qdev->mb_bit_mask;
2494  mac_iocb_ptr->transaction_id = qdev->req_producer_index;
2495  mac_iocb_ptr->data_len = cpu_to_le16((u16) tot_len);
2496  tx_cb->skb = skb;
2497  if (qdev->device_id == QL3032_DEVICE_ID &&
2498  skb->ip_summed == CHECKSUM_PARTIAL)
2499  ql_hw_csum_setup(skb, mac_iocb_ptr);
2500 
2501  if (ql_send_map(qdev, mac_iocb_ptr, tx_cb, skb) != NETDEV_TX_OK) {
2502  netdev_err(ndev, "%s: Could not map the segments!\n", __func__);
2503  return NETDEV_TX_BUSY;
2504  }
2505 
2506  wmb();
2507  qdev->req_producer_index++;
2508  if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES)
2509  qdev->req_producer_index = 0;
2510  wmb();
2511  ql_write_common_reg_l(qdev,
2512  &port_regs->CommonRegs.reqQProducerIndex,
2513  qdev->req_producer_index);
2514 
2515  netif_printk(qdev, tx_queued, KERN_DEBUG, ndev,
2516  "tx queued, slot %d, len %d\n",
2517  qdev->req_producer_index, skb->len);
2518 
2519  atomic_dec(&qdev->tx_count);
2520  return NETDEV_TX_OK;
2521 }
2522 
2523 static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev)
2524 {
2525  qdev->req_q_size =
2526  (u32) (NUM_REQ_Q_ENTRIES * sizeof(struct ob_mac_iocb_req));
2527 
2528  qdev->rsp_q_size = NUM_RSP_Q_ENTRIES * sizeof(struct net_rsp_iocb);
2529 
2530  /* The barrier is required to ensure that the request and response
2531  * queue address writes reach the registers.
2532  */
2533  wmb();
2534 
2535  qdev->req_q_virt_addr =
2536  pci_alloc_consistent(qdev->pdev,
2537  (size_t) qdev->req_q_size,
2538  &qdev->req_q_phy_addr);
2539 
2540  if ((qdev->req_q_virt_addr == NULL) ||
2541  LS_64BITS(qdev->req_q_phy_addr) & (qdev->req_q_size - 1)) {
2542  netdev_err(qdev->ndev, "reqQ failed\n");
2543  return -ENOMEM;
2544  }
2545 
2546  qdev->rsp_q_virt_addr =
2547  pci_alloc_consistent(qdev->pdev,
2548  (size_t) qdev->rsp_q_size,
2549  &qdev->rsp_q_phy_addr);
2550 
2551  if ((qdev->rsp_q_virt_addr == NULL) ||
2552  LS_64BITS(qdev->rsp_q_phy_addr) & (qdev->rsp_q_size - 1)) {
2553  netdev_err(qdev->ndev, "rspQ allocation failed\n");
2554  pci_free_consistent(qdev->pdev, (size_t) qdev->req_q_size,
2555  qdev->req_q_virt_addr,
2556  qdev->req_q_phy_addr);
2557  return -ENOMEM;
2558  }
2559 
2560  set_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags);
2561 
2562  return 0;
2563 }
2564 
2565 static void ql_free_net_req_rsp_queues(struct ql3_adapter *qdev)
2566 {
2567  if (!test_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags)) {
2568  netdev_info(qdev->ndev, "Already done\n");
2569  return;
2570  }
2571 
2572  pci_free_consistent(qdev->pdev,
2573  qdev->req_q_size,
2574  qdev->req_q_virt_addr, qdev->req_q_phy_addr);
2575 
2576  qdev->req_q_virt_addr = NULL;
2577 
2578  pci_free_consistent(qdev->pdev,
2579  qdev->rsp_q_size,
2580  qdev->rsp_q_virt_addr, qdev->rsp_q_phy_addr);
2581 
2582  qdev->rsp_q_virt_addr = NULL;
2583 
2585 }
2586 
2587 static int ql_alloc_buffer_queues(struct ql3_adapter *qdev)
2588 {
2589  /* Create Large Buffer Queue */
2590  qdev->lrg_buf_q_size =
2591  qdev->num_lbufq_entries * sizeof(struct lrg_buf_q_entry);
2592  if (qdev->lrg_buf_q_size < PAGE_SIZE)
2593  qdev->lrg_buf_q_alloc_size = PAGE_SIZE;
2594  else
2595  qdev->lrg_buf_q_alloc_size = qdev->lrg_buf_q_size * 2;
2596 
2597  qdev->lrg_buf =
2598  kmalloc(qdev->num_large_buffers * sizeof(struct ql_rcv_buf_cb),
2599  GFP_KERNEL);
2600  if (qdev->lrg_buf == NULL) {
2601  netdev_err(qdev->ndev, "qdev->lrg_buf alloc failed\n");
2602  return -ENOMEM;
2603  }
2604 
2605  qdev->lrg_buf_q_alloc_virt_addr =
2606  pci_alloc_consistent(qdev->pdev,
2607  qdev->lrg_buf_q_alloc_size,
2608  &qdev->lrg_buf_q_alloc_phy_addr);
2609 
2610  if (qdev->lrg_buf_q_alloc_virt_addr == NULL) {
2611  netdev_err(qdev->ndev, "lBufQ failed\n");
2612  return -ENOMEM;
2613  }
2614  qdev->lrg_buf_q_virt_addr = qdev->lrg_buf_q_alloc_virt_addr;
2615  qdev->lrg_buf_q_phy_addr = qdev->lrg_buf_q_alloc_phy_addr;
2616 
2617  /* Create Small Buffer Queue */
2618  qdev->small_buf_q_size =
2619  NUM_SBUFQ_ENTRIES * sizeof(struct lrg_buf_q_entry);
2620  if (qdev->small_buf_q_size < PAGE_SIZE)
2621  qdev->small_buf_q_alloc_size = PAGE_SIZE;
2622  else
2623  qdev->small_buf_q_alloc_size = qdev->small_buf_q_size * 2;
2624 
2625  qdev->small_buf_q_alloc_virt_addr =
2626  pci_alloc_consistent(qdev->pdev,
2627  qdev->small_buf_q_alloc_size,
2628  &qdev->small_buf_q_alloc_phy_addr);
2629 
2630  if (qdev->small_buf_q_alloc_virt_addr == NULL) {
2631  netdev_err(qdev->ndev, "Small Buffer Queue allocation failed\n");
2632  pci_free_consistent(qdev->pdev, qdev->lrg_buf_q_alloc_size,
2633  qdev->lrg_buf_q_alloc_virt_addr,
2634  qdev->lrg_buf_q_alloc_phy_addr);
2635  return -ENOMEM;
2636  }
2637 
2638  qdev->small_buf_q_virt_addr = qdev->small_buf_q_alloc_virt_addr;
2639  qdev->small_buf_q_phy_addr = qdev->small_buf_q_alloc_phy_addr;
2640  set_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags);
2641  return 0;
2642 }
2643 
2644 static void ql_free_buffer_queues(struct ql3_adapter *qdev)
2645 {
2646  if (!test_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags)) {
2647  netdev_info(qdev->ndev, "Already done\n");
2648  return;
2649  }
2650  kfree(qdev->lrg_buf);
2651  pci_free_consistent(qdev->pdev,
2652  qdev->lrg_buf_q_alloc_size,
2653  qdev->lrg_buf_q_alloc_virt_addr,
2654  qdev->lrg_buf_q_alloc_phy_addr);
2655 
2656  qdev->lrg_buf_q_virt_addr = NULL;
2657 
2658  pci_free_consistent(qdev->pdev,
2659  qdev->small_buf_q_alloc_size,
2660  qdev->small_buf_q_alloc_virt_addr,
2661  qdev->small_buf_q_alloc_phy_addr);
2662 
2663  qdev->small_buf_q_virt_addr = NULL;
2664 
2665  clear_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags);
2666 }
2667 
2668 static int ql_alloc_small_buffers(struct ql3_adapter *qdev)
2669 {
2670  int i;
2671  struct bufq_addr_element *small_buf_q_entry;
2672 
2673  /* Currently we allocate one chunk of memory and use it for small buffers. */
2674  qdev->small_buf_total_size =
2675  (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES *
2676  QL_SMALL_BUFFER_SIZE);
2677 
2678  qdev->small_buf_virt_addr =
2679  pci_alloc_consistent(qdev->pdev,
2680  qdev->small_buf_total_size,
2681  &qdev->small_buf_phy_addr);
2682 
2683  if (qdev->small_buf_virt_addr == NULL) {
2684  netdev_err(qdev->ndev, "Failed to get small buffer memory\n");
2685  return -ENOMEM;
2686  }
2687 
2688  qdev->small_buf_phy_addr_low = LS_64BITS(qdev->small_buf_phy_addr);
2689  qdev->small_buf_phy_addr_high = MS_64BITS(qdev->small_buf_phy_addr);
2690 
2691  small_buf_q_entry = qdev->small_buf_q_virt_addr;
2692 
2693  /* Initialize the small buffer queue. */
2694  for (i = 0; i < (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES); i++) {
2695  small_buf_q_entry->addr_high =
2696  cpu_to_le32(qdev->small_buf_phy_addr_high);
2697  small_buf_q_entry->addr_low =
2698  cpu_to_le32(qdev->small_buf_phy_addr_low +
2699  (i * QL_SMALL_BUFFER_SIZE));
2700  small_buf_q_entry++;
2701  }
2702  qdev->small_buf_index = 0;
2703  set_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags);
2704  return 0;
2705 }
2706 
2707 static void ql_free_small_buffers(struct ql3_adapter *qdev)
2708 {
2709  if (!test_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags)) {
2710  netdev_info(qdev->ndev, "Already done\n");
2711  return;
2712  }
2713  if (qdev->small_buf_virt_addr != NULL) {
2714  pci_free_consistent(qdev->pdev,
2715  qdev->small_buf_total_size,
2716  qdev->small_buf_virt_addr,
2717  qdev->small_buf_phy_addr);
2718 
2719  qdev->small_buf_virt_addr = NULL;
2720  }
2721 }
2722 
2723 static void ql_free_large_buffers(struct ql3_adapter *qdev)
2724 {
2725  int i = 0;
2726  struct ql_rcv_buf_cb *lrg_buf_cb;
2727 
2728  for (i = 0; i < qdev->num_large_buffers; i++) {
2729  lrg_buf_cb = &qdev->lrg_buf[i];
2730  if (lrg_buf_cb->skb) {
2731  dev_kfree_skb(lrg_buf_cb->skb);
2732  pci_unmap_single(qdev->pdev,
2733  dma_unmap_addr(lrg_buf_cb, mapaddr),
2734  dma_unmap_len(lrg_buf_cb, maplen),
2735  PCI_DMA_FROMDEVICE);
2736  memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
2737  } else {
2738  break;
2739  }
2740  }
2741 }
2742 
2743 static void ql_init_large_buffers(struct ql3_adapter *qdev)
2744 {
2745  int i;
2746  struct ql_rcv_buf_cb *lrg_buf_cb;
2747  struct bufq_addr_element *buf_addr_ele = qdev->lrg_buf_q_virt_addr;
2748 
2749  for (i = 0; i < qdev->num_large_buffers; i++) {
2750  lrg_buf_cb = &qdev->lrg_buf[i];
2751  buf_addr_ele->addr_high = lrg_buf_cb->buf_phy_addr_high;
2752  buf_addr_ele->addr_low = lrg_buf_cb->buf_phy_addr_low;
2753  buf_addr_ele++;
2754  }
2755  qdev->lrg_buf_index = 0;
2756  qdev->lrg_buf_skb_check = 0;
2757 }
2758 
2759 static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
2760 {
2761  int i;
2762  struct ql_rcv_buf_cb *lrg_buf_cb;
2763  struct sk_buff *skb;
2764  dma_addr_t map;
2765  int err;
2766 
2767  for (i = 0; i < qdev->num_large_buffers; i++) {
2768  skb = netdev_alloc_skb(qdev->ndev,
2769  qdev->lrg_buffer_len);
2770  if (unlikely(!skb)) {
2771  /* Better luck next round */
2772  netdev_err(qdev->ndev,
2773  "large buff alloc failed for %d bytes at index %d\n",
2774  qdev->lrg_buffer_len * 2, i);
2775  ql_free_large_buffers(qdev);
2776  return -ENOMEM;
2777  } else {
2778 
2779  lrg_buf_cb = &qdev->lrg_buf[i];
2780  memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
2781  lrg_buf_cb->index = i;
2782  lrg_buf_cb->skb = skb;
2783  /*
2784  * We save some space to copy the ethhdr from first
2785  * buffer
2786  */
2787  skb_reserve(skb, QL_HEADER_SPACE);
2788  map = pci_map_single(qdev->pdev,
2789  skb->data,
2790  qdev->lrg_buffer_len -
2791  QL_HEADER_SPACE,
2792  PCI_DMA_FROMDEVICE);
2793 
2794  err = pci_dma_mapping_error(qdev->pdev, map);
2795  if (err) {
2796  netdev_err(qdev->ndev,
2797  "PCI mapping failed with error: %d\n",
2798  err);
2799  ql_free_large_buffers(qdev);
2800  return -ENOMEM;
2801  }
2802 
2803  dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
2804  dma_unmap_len_set(lrg_buf_cb, maplen,
2805  qdev->lrg_buffer_len -
2806  QL_HEADER_SPACE);
2807  lrg_buf_cb->buf_phy_addr_low =
2808  cpu_to_le32(LS_64BITS(map));
2809  lrg_buf_cb->buf_phy_addr_high =
2810  cpu_to_le32(MS_64BITS(map));
2811  }
2812  }
2813  return 0;
2814 }
2815 
2816 static void ql_free_send_free_list(struct ql3_adapter *qdev)
2817 {
2818  struct ql_tx_buf_cb *tx_cb;
2819  int i;
2820 
2821  tx_cb = &qdev->tx_buf[0];
2822  for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
2823  kfree(tx_cb->oal);
2824  tx_cb->oal = NULL;
2825  tx_cb++;
2826  }
2827 }
2828 
2829 static int ql_create_send_free_list(struct ql3_adapter *qdev)
2830 {
2831  struct ql_tx_buf_cb *tx_cb;
2832  int i;
2833  struct ob_mac_iocb_req *req_q_curr = qdev->req_q_virt_addr;
2834 
2835  /* Create free list of transmit buffers */
2836  for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
2837 
2838  tx_cb = &qdev->tx_buf[i];
2839  tx_cb->skb = NULL;
2840  tx_cb->queue_entry = req_q_curr;
2841  req_q_curr++;
2842  tx_cb->oal = kmalloc(512, GFP_KERNEL);
2843  if (tx_cb->oal == NULL)
2844  return -ENOMEM;
2845  }
2846  return 0;
2847 }
2848 
2849 static int ql_alloc_mem_resources(struct ql3_adapter *qdev)
2850 {
2851  if (qdev->ndev->mtu == NORMAL_MTU_SIZE) {
2852  qdev->num_lbufq_entries = NUM_LBUFQ_ENTRIES;
2853  qdev->lrg_buffer_len = NORMAL_MTU_SIZE;
2854  } else if (qdev->ndev->mtu == JUMBO_MTU_SIZE) {
2855  /*
2856  * Bigger buffers, so fewer of them.
2857  */
2858  qdev->num_lbufq_entries = JUMBO_NUM_LBUFQ_ENTRIES;
2859  qdev->lrg_buffer_len = JUMBO_MTU_SIZE;
2860  } else {
2861  netdev_err(qdev->ndev, "Invalid mtu size: %d. Only %d and %d are accepted.\n",
2862  qdev->ndev->mtu, NORMAL_MTU_SIZE, JUMBO_MTU_SIZE);
2863  return -ENOMEM;
2864  }
2865  qdev->num_large_buffers =
2866  qdev->num_lbufq_entries * QL_ADDR_ELE_PER_BUFQ_ENTRY;
2867  qdev->lrg_buffer_len += VLAN_ETH_HLEN + VLAN_ID_LEN + QL_HEADER_SPACE;
2868  qdev->max_frame_size =
2869  (qdev->lrg_buffer_len - QL_HEADER_SPACE) + ETHERNET_CRC_SIZE;
2870 
2871  /*
2872  * First allocate a page of shared memory and use it for shadow
2873  * locations of Network Request Queue Consumer Address Register and
2874  * Network Completion Queue Producer Index Register
2875  */
2876  qdev->shadow_reg_virt_addr =
2877  pci_alloc_consistent(qdev->pdev,
2878  PAGE_SIZE, &qdev->shadow_reg_phy_addr);
2879 
2880  if (qdev->shadow_reg_virt_addr != NULL) {
2881  qdev->preq_consumer_index = qdev->shadow_reg_virt_addr;
2882  qdev->req_consumer_index_phy_addr_high =
2883  MS_64BITS(qdev->shadow_reg_phy_addr);
2884  qdev->req_consumer_index_phy_addr_low =
2885  LS_64BITS(qdev->shadow_reg_phy_addr);
2886 
2887  qdev->prsp_producer_index =
2888  (__le32 *) (((u8 *) qdev->preq_consumer_index) + 8);
2889  qdev->rsp_producer_index_phy_addr_high =
2890  qdev->req_consumer_index_phy_addr_high;
2891  qdev->rsp_producer_index_phy_addr_low =
2892  qdev->req_consumer_index_phy_addr_low + 8;
2893  } else {
2894  netdev_err(qdev->ndev, "shadowReg Alloc failed\n");
2895  return -ENOMEM;
2896  }
2897 
2898  if (ql_alloc_net_req_rsp_queues(qdev) != 0) {
2899  netdev_err(qdev->ndev, "ql_alloc_net_req_rsp_queues failed\n");
2900  goto err_req_rsp;
2901  }
2902 
2903  if (ql_alloc_buffer_queues(qdev) != 0) {
2904  netdev_err(qdev->ndev, "ql_alloc_buffer_queues failed\n");
2905  goto err_buffer_queues;
2906  }
2907 
2908  if (ql_alloc_small_buffers(qdev) != 0) {
2909  netdev_err(qdev->ndev, "ql_alloc_small_buffers failed\n");
2910  goto err_small_buffers;
2911  }
2912 
2913  if (ql_alloc_large_buffers(qdev) != 0) {
2914  netdev_err(qdev->ndev, "ql_alloc_large_buffers failed\n");
2915  goto err_small_buffers;
2916  }
2917 
2918  /* Initialize the large buffer queue. */
2919  ql_init_large_buffers(qdev);
2920  if (ql_create_send_free_list(qdev))
2921  goto err_free_list;
2922 
2923  qdev->rsp_current = qdev->rsp_q_virt_addr;
2924 
2925  return 0;
2926 err_free_list:
2927  ql_free_send_free_list(qdev);
2928 err_small_buffers:
2929  ql_free_buffer_queues(qdev);
2930 err_buffer_queues:
2931  ql_free_net_req_rsp_queues(qdev);
2932 err_req_rsp:
2933  pci_free_consistent(qdev->pdev,
2934  PAGE_SIZE,
2935  qdev->shadow_reg_virt_addr,
2936  qdev->shadow_reg_phy_addr);
2937 
2938  return -ENOMEM;
2939 }
2940 
2941 static void ql_free_mem_resources(struct ql3_adapter *qdev)
2942 {
2943  ql_free_send_free_list(qdev);
2944  ql_free_large_buffers(qdev);
2945  ql_free_small_buffers(qdev);
2946  ql_free_buffer_queues(qdev);
2947  ql_free_net_req_rsp_queues(qdev);
2948  if (qdev->shadow_reg_virt_addr != NULL) {
2949  pci_free_consistent(qdev->pdev,
2950  PAGE_SIZE,
2951  qdev->shadow_reg_virt_addr,
2952  qdev->shadow_reg_phy_addr);
2953  qdev->shadow_reg_virt_addr = NULL;
2954  }
2955 }
2956 
2957 static int ql_init_misc_registers(struct ql3_adapter *qdev)
2958 {
2959  struct ql3xxx_local_ram_registers __iomem *local_ram =
2960  (void __iomem *)qdev->mem_map_registers;
2961 
2962  if (ql_sem_spinlock(qdev, QL_DDR_RAM_SEM_MASK,
2963  (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
2964  2) << 4))
2965  return -1;
2966 
2967  ql_write_page2_reg(qdev,
2968  &local_ram->bufletSize, qdev->nvram_data.bufletSize);
2969 
2970  ql_write_page2_reg(qdev,
2971  &local_ram->maxBufletCount,
2972  qdev->nvram_data.bufletCount);
2973 
2974  ql_write_page2_reg(qdev,
2975  &local_ram->freeBufletThresholdLow,
2976  (qdev->nvram_data.tcpWindowThreshold25 << 16) |
2977  (qdev->nvram_data.tcpWindowThreshold0));
2978 
2979  ql_write_page2_reg(qdev,
2980  &local_ram->freeBufletThresholdHigh,
2981  qdev->nvram_data.tcpWindowThreshold50);
2982 
2983  ql_write_page2_reg(qdev,
2984  &local_ram->ipHashTableBase,
2985  (qdev->nvram_data.ipHashTableBaseHi << 16) |
2986  qdev->nvram_data.ipHashTableBaseLo);
2987  ql_write_page2_reg(qdev,
2988  &local_ram->ipHashTableCount,
2989  qdev->nvram_data.ipHashTableSize);
2990  ql_write_page2_reg(qdev,
2991  &local_ram->tcpHashTableBase,
2992  (qdev->nvram_data.tcpHashTableBaseHi << 16) |
2993  qdev->nvram_data.tcpHashTableBaseLo);
2994  ql_write_page2_reg(qdev,
2995  &local_ram->tcpHashTableCount,
2996  qdev->nvram_data.tcpHashTableSize);
2997  ql_write_page2_reg(qdev,
2998  &local_ram->ncbBase,
2999  (qdev->nvram_data.ncbTableBaseHi << 16) |
3000  qdev->nvram_data.ncbTableBaseLo);
3001  ql_write_page2_reg(qdev,
3002  &local_ram->maxNcbCount,
3003  qdev->nvram_data.ncbTableSize);
3004  ql_write_page2_reg(qdev,
3005  &local_ram->drbBase,
3006  (qdev->nvram_data.drbTableBaseHi << 16) |
3007  qdev->nvram_data.drbTableBaseLo);
3008  ql_write_page2_reg(qdev,
3009  &local_ram->maxDrbCount,
3010  qdev->nvram_data.drbTableSize);
3011  ql_sem_unlock(qdev, QL_DDR_RAM_SEM_MASK);
3012  return 0;
3013 }
3014 
3015 static int ql_adapter_initialize(struct ql3_adapter *qdev)
3016 {
3017  u32 value;
3018  struct ql3xxx_port_registers __iomem *port_regs =
3019  qdev->mem_map_registers;
3020  __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
3021  struct ql3xxx_host_memory_registers __iomem *hmem_regs =
3022  (void __iomem *)port_regs;
3023  u32 delay = 10;
3024  int status = 0;
3025 
3026  if (ql_mii_setup(qdev))
3027  return -1;
3028 
3029  /* Bring the PHY out of reset */
3030  ql_write_common_reg(qdev, spir,
3031  (ISP_SERIAL_PORT_IF_WE |
3032  (ISP_SERIAL_PORT_IF_WE << 16)));
3033  /* Give the PHY time to come out of reset. */
3034  mdelay(100);
3035  qdev->port_link_state = LS_DOWN;
3036  netif_carrier_off(qdev->ndev);
3037 
3038  /* V2 chip fix for ARS-39168. */
3039  ql_write_common_reg(qdev, spir,
3040  (ISP_SERIAL_PORT_IF_SDE |
3041  (ISP_SERIAL_PORT_IF_SDE << 16)));
3042 
3043  /* Request Queue Registers */
3044  *((u32 *)(qdev->preq_consumer_index)) = 0;
3045  atomic_set(&qdev->tx_count, NUM_REQ_Q_ENTRIES);
3046  qdev->req_producer_index = 0;
3047 
3048  ql_write_page1_reg(qdev,
3049  &hmem_regs->reqConsumerIndexAddrHigh,
3050  qdev->req_consumer_index_phy_addr_high);
3051  ql_write_page1_reg(qdev,
3052  &hmem_regs->reqConsumerIndexAddrLow,
3053  qdev->req_consumer_index_phy_addr_low);
3054 
3055  ql_write_page1_reg(qdev,
3056  &hmem_regs->reqBaseAddrHigh,
3057  MS_64BITS(qdev->req_q_phy_addr));
3058  ql_write_page1_reg(qdev,
3059  &hmem_regs->reqBaseAddrLow,
3060  LS_64BITS(qdev->req_q_phy_addr));
3061  ql_write_page1_reg(qdev, &hmem_regs->reqLength, NUM_REQ_Q_ENTRIES);
3062 
3063  /* Response Queue Registers */
3064  *((__le16 *) (qdev->prsp_producer_index)) = 0;
3065  qdev->rsp_consumer_index = 0;
3066  qdev->rsp_current = qdev->rsp_q_virt_addr;
3067 
3068  ql_write_page1_reg(qdev,
3069  &hmem_regs->rspProducerIndexAddrHigh,
3070  qdev->rsp_producer_index_phy_addr_high);
3071 
3072  ql_write_page1_reg(qdev,
3073  &hmem_regs->rspProducerIndexAddrLow,
3074  qdev->rsp_producer_index_phy_addr_low);
3075 
3076  ql_write_page1_reg(qdev,
3077  &hmem_regs->rspBaseAddrHigh,
3078  MS_64BITS(qdev->rsp_q_phy_addr));
3079 
3080  ql_write_page1_reg(qdev,
3081  &hmem_regs->rspBaseAddrLow,
3082  LS_64BITS(qdev->rsp_q_phy_addr));
3083 
3084  ql_write_page1_reg(qdev, &hmem_regs->rspLength, NUM_RSP_Q_ENTRIES);
3085 
3086  /* Large Buffer Queue */
3087  ql_write_page1_reg(qdev,
3088  &hmem_regs->rxLargeQBaseAddrHigh,
3089  MS_64BITS(qdev->lrg_buf_q_phy_addr));
3090 
3091  ql_write_page1_reg(qdev,
3092  &hmem_regs->rxLargeQBaseAddrLow,
3093  LS_64BITS(qdev->lrg_buf_q_phy_addr));
3094 
3095  ql_write_page1_reg(qdev,
3096  &hmem_regs->rxLargeQLength,
3097  qdev->num_lbufq_entries);
3098 
3099  ql_write_page1_reg(qdev,
3100  &hmem_regs->rxLargeBufferLength,
3101  qdev->lrg_buffer_len);
3102 
3103  /* Small Buffer Queue */
3104  ql_write_page1_reg(qdev,
3105  &hmem_regs->rxSmallQBaseAddrHigh,
3106  MS_64BITS(qdev->small_buf_q_phy_addr));
3107 
3108  ql_write_page1_reg(qdev,
3109  &hmem_regs->rxSmallQBaseAddrLow,
3110  LS_64BITS(qdev->small_buf_q_phy_addr));
3111 
3112  ql_write_page1_reg(qdev, &hmem_regs->rxSmallQLength, NUM_SBUFQ_ENTRIES);
3113  ql_write_page1_reg(qdev,
3114  &hmem_regs->rxSmallBufferLength,
3115  QL_SMALL_BUFFER_SIZE);
3116 
3117  qdev->small_buf_q_producer_index = NUM_SBUFQ_ENTRIES - 1;
3118  qdev->small_buf_release_cnt = 8;
3119  qdev->lrg_buf_q_producer_index = qdev->num_lbufq_entries - 1;
3120  qdev->lrg_buf_release_cnt = 8;
3121  qdev->lrg_buf_next_free = qdev->lrg_buf_q_virt_addr;
3122  qdev->small_buf_index = 0;
3123  qdev->lrg_buf_index = 0;
3124  qdev->lrg_buf_free_count = 0;
3125  qdev->lrg_buf_free_head = NULL;
3126  qdev->lrg_buf_free_tail = NULL;
3127 
3128  ql_write_common_reg(qdev,
3129  &port_regs->CommonRegs.
3130  rxSmallQProducerIndex,
3131  qdev->small_buf_q_producer_index);
3132  ql_write_common_reg(qdev,
3133  &port_regs->CommonRegs.
3134  rxLargeQProducerIndex,
3135  qdev->lrg_buf_q_producer_index);
3136 
3137  /*
3138  * Find out if the chip has already been initialized. If it has, then
3139  * we skip some of the initialization.
3140  */
3141  clear_bit(QL_LINK_MASTER, &qdev->flags);
3142  value = ql_read_page0_reg(qdev, &port_regs->portStatus);
3143  if ((value & PORT_STATUS_IC) == 0) {
3144 
3145  /* Chip has not been configured yet, so let it rip. */
3146  if (ql_init_misc_registers(qdev)) {
3147  status = -1;
3148  goto out;
3149  }
3150 
3151  value = qdev->nvram_data.tcpMaxWindowSize;
3152  ql_write_page0_reg(qdev, &port_regs->tcpMaxWindow, value);
3153 
3154  value = (0xFFFF << 16) | qdev->nvram_data.extHwConfig;
3155 
3156  if (ql_sem_spinlock(qdev, QL_FLASH_SEM_MASK,
3157  (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
3158  * 2) << 13)) {
3159  status = -1;
3160  goto out;
3161  }
3162  ql_write_page0_reg(qdev, &port_regs->ExternalHWConfig, value);
3163  ql_write_page0_reg(qdev, &port_regs->InternalChipConfig,
3164  (((INTERNAL_CHIP_SD | INTERNAL_CHIP_WE) <<
3165  16) | (INTERNAL_CHIP_SD |
3166  INTERNAL_CHIP_WE)));
3167  ql_sem_unlock(qdev, QL_FLASH_SEM_MASK);
3168  }
3169 
3170  if (qdev->mac_index)
3171  ql_write_page0_reg(qdev,
3172  &port_regs->mac1MaxFrameLengthReg,
3173  qdev->max_frame_size);
3174  else
3175  ql_write_page0_reg(qdev,
3176  &port_regs->mac0MaxFrameLengthReg,
3177  qdev->max_frame_size);
3178 
3179  if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
3180  (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
3181  2) << 7)) {
3182  status = -1;
3183  goto out;
3184  }
3185 
3186  PHY_Setup(qdev);
3187  ql_init_scan_mode(qdev);
3188  ql_get_phy_owner(qdev);
3189 
3190  /* Load the MAC Configuration */
3191 
3192  /* Program lower 32 bits of the MAC address */
3193  ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
3194  (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16));
3195  ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
3196  ((qdev->ndev->dev_addr[2] << 24)
3197  | (qdev->ndev->dev_addr[3] << 16)
3198  | (qdev->ndev->dev_addr[4] << 8)
3199  | qdev->ndev->dev_addr[5]));
3200 
3201  /* Program top 16 bits of the MAC address */
3202  ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
3203  ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1));
3204  ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
3205  ((qdev->ndev->dev_addr[0] << 8)
3206  | qdev->ndev->dev_addr[1]));
3207 
3208  /* Enable Primary MAC */
3209  ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
3210  ((MAC_ADDR_INDIRECT_PTR_REG_PE << 16) |
3211  MAC_ADDR_INDIRECT_PTR_REG_PE));
3212 
3213  /* Clear Primary and Secondary IP addresses */
3214  ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg,
3215  ((IP_ADDR_INDEX_REG_MASK << 16) |
3216  (qdev->mac_index << 2)));
3217  ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0);
3218 
3219  ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg,
3220  ((IP_ADDR_INDEX_REG_MASK << 16) |
3221  ((qdev->mac_index << 2) + 1)));
3222  ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0);
3223 
3224  ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
3225 
3226  /* Indicate Configuration Complete */
3227  ql_write_page0_reg(qdev,
3228  &port_regs->portControl,
3229  ((PORT_CONTROL_CC << 16) | PORT_CONTROL_CC));
3230 
3231  do {
3232  value = ql_read_page0_reg(qdev, &port_regs->portStatus);
3233  if (value & PORT_STATUS_IC)
3234  break;
3235  spin_unlock_irq(&qdev->hw_lock);
3236  msleep(500);
3237  spin_lock_irq(&qdev->hw_lock);
3238  } while (--delay);
3239 
3240  if (delay == 0) {
3241  netdev_err(qdev->ndev, "Hw Initialization timeout\n");
3242  status = -1;
3243  goto out;
3244  }
3245 
3246  /* Enable Ethernet Function */
3247  if (qdev->device_id == QL3032_DEVICE_ID) {
3248  value =
3249  (QL3032_PORT_CONTROL_EF | QL3032_PORT_CONTROL_KIE |
3250  QL3032_PORT_CONTROL_EIv6 | QL3032_PORT_CONTROL_EIv4 |
3251  QL3032_PORT_CONTROL_ET);
3252  ql_write_page0_reg(qdev, &port_regs->functionControl,
3253  ((value << 16) | value));
3254  } else {
3255  value =
3256  (PORT_CONTROL_EF | PORT_CONTROL_ET | PORT_CONTROL_EI |
3257  PORT_CONTROL_HH);
3258  ql_write_page0_reg(qdev, &port_regs->portControl,
3259  ((value << 16) | value));
3260  }
3261 
3262 
3263 out:
3264  return status;
3265 }
3266 
3267 /*
3268  * Caller holds hw_lock.
3269  */
3270 static int ql_adapter_reset(struct ql3_adapter *qdev)
3271 {
3272  struct ql3xxx_port_registers __iomem *port_regs =
3273  qdev->mem_map_registers;
3274  int status = 0;
3275  u16 value;
3276  int max_wait_time;
3277 
3278  set_bit(QL_RESET_ACTIVE, &qdev->flags);
3279  clear_bit(QL_RESET_DONE, &qdev->flags);
3280 
3281  /*
3282  * Issue soft reset to chip.
3283  */
3284  netdev_printk(KERN_DEBUG, qdev->ndev, "Issue soft reset to chip\n");
3285  ql_write_common_reg(qdev,
3286  &port_regs->CommonRegs.ispControlStatus,
3287  ((ISP_CONTROL_SR << 16) | ISP_CONTROL_SR));
3288 
3289  /* Wait up to 5 seconds for the soft reset to complete. */
3290  netdev_printk(KERN_DEBUG, qdev->ndev,
3291  "Wait 10 milliseconds for reset to complete\n");
3292 
3293  /* Wait until the firmware tells us the Soft Reset is done */
3294  max_wait_time = 5;
3295  do {
3296  value =
3297  ql_read_common_reg(qdev,
3298  &port_regs->CommonRegs.ispControlStatus);
3299  if ((value & ISP_CONTROL_SR) == 0)
3300  break;
3301 
3302  ssleep(1);
3303  } while ((--max_wait_time));
3304 
3305  /*
3306  * Also, make sure that the Network Reset Interrupt bit has been
3307  * cleared after the soft reset has taken place.
3308  */
3309  value =
3310  ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
3311  if (value & ISP_CONTROL_RI) {
3312  netdev_printk(KERN_DEBUG, qdev->ndev,
3313  "clearing RI after reset\n");
3314  ql_write_common_reg(qdev,
3315  &port_regs->CommonRegs.
3316  ispControlStatus,
3317  ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI));
3318  }
3319 
3320  if (max_wait_time == 0) {
3321  /* Issue Force Soft Reset */
3322  ql_write_common_reg(qdev,
3323  &port_regs->CommonRegs.
3324  ispControlStatus,
3325  ((ISP_CONTROL_FSR << 16) |
3326  ISP_CONTROL_FSR));
3327  /*
3328  * Wait until the firmware tells us the Force Soft Reset is
3329  * done
3330  */
3331  max_wait_time = 5;
3332  do {
3333  value = ql_read_common_reg(qdev,
3334  &port_regs->CommonRegs.
3335  ispControlStatus);
3336  if ((value & ISP_CONTROL_FSR) == 0)
3337  break;
3338  ssleep(1);
3339  } while ((--max_wait_time));
3340  }
3341  if (max_wait_time == 0)
3342  status = 1;
3343 
3344  clear_bit(QL_RESET_ACTIVE, &qdev->flags);
3345  set_bit(QL_RESET_DONE, &qdev->flags);
3346  return status;
3347 }
3348 
3349 static void ql_set_mac_info(struct ql3_adapter *qdev)
3350 {
3351  struct ql3xxx_port_registers __iomem *port_regs =
3352  qdev->mem_map_registers;
3353  u32 value, port_status = 0;
3354  u8 func_number;
3355 
3356  /* Get the function number */
3357  value =
3358  ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus);
3359  func_number = (u8) ((value >> 4) & OPCODE_FUNC_ID_MASK);
3360  port_status = ql_read_page0_reg(qdev, &port_regs->portStatus);
3361  switch (value & ISP_CONTROL_FN_MASK) {
3362  case ISP_CONTROL_FN0_NET:
3363  qdev->mac_index = 0;
3364  qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
3365  qdev->mb_bit_mask = FN0_MA_BITS_MASK;
3366  qdev->PHYAddr = PORT0_PHY_ADDRESS;
3367  if (port_status & PORT_STATUS_SM0)
3368  set_bit(QL_LINK_OPTICAL, &qdev->flags);
3369  else
3370  clear_bit(QL_LINK_OPTICAL, &qdev->flags);
3371  break;
3372 
3373  case ISP_CONTROL_FN1_NET:
3374  qdev->mac_index = 1;
3375  qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
3376  qdev->mb_bit_mask = FN1_MA_BITS_MASK;
3377  qdev->PHYAddr = PORT1_PHY_ADDRESS;
3378  if (port_status & PORT_STATUS_SM1)
3379  set_bit(QL_LINK_OPTICAL, &qdev->flags);
3380  else
3381  clear_bit(QL_LINK_OPTICAL, &qdev->flags);
3382  break;
3383 
3384  case ISP_CONTROL_FN0_SCSI:
3385  case ISP_CONTROL_FN1_SCSI:
3386  default:
3387  netdev_printk(KERN_DEBUG, qdev->ndev,
3388  "Invalid function number, ispControlStatus = 0x%x\n",
3389  value);
3390  break;
3391  }
3392  qdev->numPorts = qdev->nvram_data.version_and_numPorts >> 8;
3393 }
3394 
3395 static void ql_display_dev_info(struct net_device *ndev)
3396 {
3397  struct ql3_adapter *qdev = netdev_priv(ndev);
3398  struct pci_dev *pdev = qdev->pdev;
3399 
3400  netdev_info(ndev,
3401  "%s Adapter %d RevisionID %d found %s on PCI slot %d\n",
3402  DRV_NAME, qdev->index, qdev->chip_rev_id,
3403  qdev->device_id == QL3032_DEVICE_ID ? "QLA3032" : "QLA3022",
3404  qdev->pci_slot);
3405  netdev_info(ndev, "%s Interface\n",
3406  test_bit(QL_LINK_OPTICAL, &qdev->flags) ? "OPTICAL" : "COPPER");
3407 
3408  /*
3409  * Print PCI bus width/type.
3410  */
3411  netdev_info(ndev, "Bus interface is %s %s\n",
3412  ((qdev->pci_width == 64) ? "64-bit" : "32-bit"),
3413  ((qdev->pci_x) ? "PCI-X" : "PCI"));
3414 
3415  netdev_info(ndev, "mem IO base address adjusted = 0x%p\n",
3416  qdev->mem_map_registers);
3417  netdev_info(ndev, "Interrupt number = %d\n", pdev->irq);
3418 
3419  netif_info(qdev, probe, ndev, "MAC address %pM\n", ndev->dev_addr);
3420 }
3421 
3422 static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset)
3423 {
3424  struct net_device *ndev = qdev->ndev;
3425  int retval = 0;
3426 
3427  netif_stop_queue(ndev);
3428  netif_carrier_off(ndev);
3429 
3430  clear_bit(QL_ADAPTER_UP, &qdev->flags);
3431  clear_bit(QL_LINK_MASTER, &qdev->flags);
3432 
3433  ql_disable_interrupts(qdev);
3434 
3435  free_irq(qdev->pdev->irq, ndev);
3436 
3437  if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3438  netdev_info(qdev->ndev, "calling pci_disable_msi()\n");
3439  clear_bit(QL_MSI_ENABLED, &qdev->flags);
3440  pci_disable_msi(qdev->pdev);
3441  }
3442 
3443  del_timer_sync(&qdev->adapter_timer);
3444 
3445  napi_disable(&qdev->napi);
3446 
3447  if (do_reset) {
3448  int soft_reset;
3449  unsigned long hw_flags;
3450 
3451  spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3452  if (ql_wait_for_drvr_lock(qdev)) {
3453  soft_reset = ql_adapter_reset(qdev);
3454  if (soft_reset) {
3455  netdev_err(ndev, "ql_adapter_reset(%d) FAILED!\n",
3456  qdev->index);
3457  }
3458  netdev_err(ndev,
3459  "Releasing driver lock via chip reset\n");
3460  } else {
3461  netdev_err(ndev,
3462  "Could not acquire driver lock to do reset!\n");
3463  retval = -1;
3464  }
3465  spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3466  }
3467  ql_free_mem_resources(qdev);
3468  return retval;
3469 }
3470 
3471 static int ql_adapter_up(struct ql3_adapter *qdev)
3472 {
3473  struct net_device *ndev = qdev->ndev;
3474  int err;
3475  unsigned long irq_flags = IRQF_SHARED;
3476  unsigned long hw_flags;
3477 
3478  if (ql_alloc_mem_resources(qdev)) {
3479  netdev_err(ndev, "Unable to allocate buffers\n");
3480  return -ENOMEM;
3481  }
3482 
3483  if (qdev->msi) {
3484  if (pci_enable_msi(qdev->pdev)) {
3485  netdev_err(ndev,
3486  "User requested MSI, but MSI failed to initialize. Continuing without MSI.\n");
3487  qdev->msi = 0;
3488  } else {
3489  netdev_info(ndev, "MSI Enabled...\n");
3490  set_bit(QL_MSI_ENABLED, &qdev->flags);
3491  irq_flags &= ~IRQF_SHARED;
3492  }
3493  }
3494 
3495  err = request_irq(qdev->pdev->irq, ql3xxx_isr,
3496  irq_flags, ndev->name, ndev);
3497  if (err) {
3498  netdev_err(ndev,
3499  "Failed to reserve interrupt %d - already in use\n",
3500  qdev->pdev->irq);
3501  goto err_irq;
3502  }
3503 
3504  spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3505 
3506  err = ql_wait_for_drvr_lock(qdev);
3507  if (err) {
3508  err = ql_adapter_initialize(qdev);
3509  if (err) {
3510  netdev_err(ndev, "Unable to initialize adapter\n");
3511  goto err_init;
3512  }
3513  netdev_err(ndev, "Releasing driver lock\n");
3514  ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
3515  } else {
3516  netdev_err(ndev, "Could not acquire driver lock\n");
3517  goto err_lock;
3518  }
3519 
3520  spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3521 
3522  set_bit(QL_ADAPTER_UP, &qdev->flags);
3523 
3524  mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
3525 
3526  napi_enable(&qdev->napi);
3527  ql_enable_interrupts(qdev);
3528  return 0;
3529 
3530 err_init:
3531  ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
3532 err_lock:
3533  spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3534  free_irq(qdev->pdev->irq, ndev);
3535 err_irq:
3536  if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3537  netdev_info(ndev, "calling pci_disable_msi()\n");
3538  clear_bit(QL_MSI_ENABLED, &qdev->flags);
3539  pci_disable_msi(qdev->pdev);
3540  }
3541  return err;
3542 }
3543 
3544 static int ql_cycle_adapter(struct ql3_adapter *qdev, int reset)
3545 {
3546  if (ql_adapter_down(qdev, reset) || ql_adapter_up(qdev)) {
3547  netdev_err(qdev->ndev,
3548  "Driver up/down cycle failed, closing device\n");
3549  rtnl_lock();
3550  dev_close(qdev->ndev);
3551  rtnl_unlock();
3552  return -1;
3553  }
3554  return 0;
3555 }
3556 
3557 static int ql3xxx_close(struct net_device *ndev)
3558 {
3559  struct ql3_adapter *qdev = netdev_priv(ndev);
3560 
3561  /*
3562  * Wait for device to recover from a reset.
3563  * (Rarely happens, but possible.)
3564  */
3565  while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
3566  msleep(50);
3567 
3568  ql_adapter_down(qdev, QL_DO_RESET);
3569  return 0;
3570 }
3571 
3572 static int ql3xxx_open(struct net_device *ndev)
3573 {
3574  struct ql3_adapter *qdev = netdev_priv(ndev);
3575  return ql_adapter_up(qdev);
3576 }
3577 
3578 static int ql3xxx_set_mac_address(struct net_device *ndev, void *p)
3579 {
3580  struct ql3_adapter *qdev = netdev_priv(ndev);
3581  struct ql3xxx_port_registers __iomem *port_regs =
3582  qdev->mem_map_registers;
3583  struct sockaddr *addr = p;
3584  unsigned long hw_flags;
3585 
3586  if (netif_running(ndev))
3587  return -EBUSY;
3588 
3589  if (!is_valid_ether_addr(addr->sa_data))
3590  return -EADDRNOTAVAIL;
3591 
3592  memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
3593 
3594  spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3595  /* Program lower 32 bits of the MAC address */
3596  ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
3597  (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16));
3598  ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
3599  ((ndev->dev_addr[2] << 24) | (ndev->
3600  dev_addr[3] << 16) |
3601  (ndev->dev_addr[4] << 8) | ndev->dev_addr[5]));
3602 
3603  /* Program top 16 bits of the MAC address */
3604  ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
3605  ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1));
3606  ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
3607  ((ndev->dev_addr[0] << 8) | ndev->dev_addr[1]));
3608  spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3609 
3610  return 0;
3611 }
3612 
3613 static void ql3xxx_tx_timeout(struct net_device *ndev)
3614 {
3615  struct ql3_adapter *qdev = netdev_priv(ndev);
3616 
3617  netdev_err(ndev, "Resetting...\n");
3618  /*
3619  * Stop the queues, we've got a problem.
3620  */
3621  netif_stop_queue(ndev);
3622 
3623  /*
3624  * Wake up the worker to process this event.
3625  */
3626  queue_delayed_work(qdev->workqueue, &qdev->tx_timeout_work, 0);
3627 }
3628 
3629 static void ql_reset_work(struct work_struct *work)
3630 {
3631  struct ql3_adapter *qdev =
3632  container_of(work, struct ql3_adapter, reset_work.work);
3633  struct net_device *ndev = qdev->ndev;
3634  u32 value;
3635  struct ql_tx_buf_cb *tx_cb;
3636  int max_wait_time, i;
3637  struct ql3xxx_port_registers __iomem *port_regs =
3638  qdev->mem_map_registers;
3639  unsigned long hw_flags;
3640 
3641  if (test_bit((QL_RESET_PER_SCSI | QL_RESET_START), &qdev->flags)) {
3642  clear_bit(QL_LINK_MASTER, &qdev->flags);
3643 
3644  /*
3645  * Loop through the active list and return the skb.
3646  */
3647  for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
3648  int j;
3649  tx_cb = &qdev->tx_buf[i];
3650  if (tx_cb->skb) {
3651  netdev_printk(KERN_DEBUG, ndev,
3652  "Freeing lost SKB\n");
3653  pci_unmap_single(qdev->pdev,
3654  dma_unmap_addr(&tx_cb->map[0],
3655  mapaddr),
3656  dma_unmap_len(&tx_cb->map[0], maplen),
3657  PCI_DMA_TODEVICE);
3658  for (j = 1; j < tx_cb->seg_count; j++) {
3659  pci_unmap_page(qdev->pdev,
3660  dma_unmap_addr(&tx_cb->map[j],
3661  mapaddr),
3662  dma_unmap_len(&tx_cb->map[j],
3663  maplen),
3664  PCI_DMA_TODEVICE);
3665  }
3666  dev_kfree_skb(tx_cb->skb);
3667  tx_cb->skb = NULL;
3668  }
3669  }
3670 
3671  netdev_err(ndev, "Clearing NRI after reset\n");
3672  spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3673  ql_write_common_reg(qdev,
3674  &port_regs->CommonRegs.
3675  ispControlStatus,
3676  ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI));
3677  /*
3678  * Wait for the Soft Reset to complete.
3679  */
3680  max_wait_time = 10;
3681  do {
3682  value = ql_read_common_reg(qdev,
3683  &port_regs->CommonRegs.
3684 
3685  ispControlStatus);
3686  if ((value & ISP_CONTROL_SR) == 0) {
3687  netdev_printk(KERN_DEBUG, ndev,
3688  "reset completed\n");
3689  break;
3690  }
3691 
3692  if (value & ISP_CONTROL_RI) {
3693  netdev_printk(KERN_DEBUG, ndev,
3694  "clearing NRI after reset\n");
3695  ql_write_common_reg(qdev,
3696  &port_regs->
3697  CommonRegs.
3698  ispControlStatus,
3699  ((ISP_CONTROL_RI <<
3700  16) | ISP_CONTROL_RI));
3701  }
3702 
3703  spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3704  ssleep(1);
3705  spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3706  } while (--max_wait_time);
3707  spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3708 
3709  if (value & ISP_CONTROL_SR) {
3710 
3711  /*
3712  * Set the reset flags and clear the board again.
3713  * Nothing else to do...
3714  */
3715  netdev_err(ndev,
3716  "Timed out waiting for reset to complete\n");
3717  netdev_err(ndev, "Do a reset\n");
3718  clear_bit(QL_RESET_PER_SCSI, &qdev->flags);
3719  clear_bit(QL_RESET_START, &qdev->flags);
3720  ql_cycle_adapter(qdev, QL_DO_RESET);
3721  return;
3722  }
3723 
3724  clear_bit(QL_RESET_ACTIVE, &qdev->flags);
3725  clear_bit(QL_RESET_PER_SCSI, &qdev->flags);
3726  clear_bit(QL_RESET_START, &qdev->flags);
3727  ql_cycle_adapter(qdev, QL_NO_RESET);
3728  }
3729 }
3730 
3731 static void ql_tx_timeout_work(struct work_struct *work)
3732 {
3733  struct ql3_adapter *qdev =
3734  container_of(work, struct ql3_adapter, tx_timeout_work.work);
3735 
3736  ql_cycle_adapter(qdev, QL_DO_RESET);
3737 }
3738 
3739 static void ql_get_board_info(struct ql3_adapter *qdev)
3740 {
3741  struct ql3xxx_port_registers __iomem *port_regs =
3742  qdev->mem_map_registers;
3743  u32 value;
3744 
3745  value = ql_read_page0_reg_l(qdev, &port_regs->portStatus);
3746 
3747  qdev->chip_rev_id = ((value & PORT_STATUS_REV_ID_MASK) >> 12);
3748  if (value & PORT_STATUS_64)
3749  qdev->pci_width = 64;
3750  else
3751  qdev->pci_width = 32;
3752  if (value & PORT_STATUS_X)
3753  qdev->pci_x = 1;
3754  else
3755  qdev->pci_x = 0;
3756  qdev->pci_slot = (u8) PCI_SLOT(qdev->pdev->devfn);
3757 }
3758 
3759 static void ql3xxx_timer(unsigned long ptr)
3760 {
3761  struct ql3_adapter *qdev = (struct ql3_adapter *)ptr;
3762  queue_delayed_work(qdev->workqueue, &qdev->link_state_work, 0);
3763 }
3764 
3765 static const struct net_device_ops ql3xxx_netdev_ops = {
3766  .ndo_open = ql3xxx_open,
3767  .ndo_start_xmit = ql3xxx_send,
3768  .ndo_stop = ql3xxx_close,
3769  .ndo_change_mtu = eth_change_mtu,
3770  .ndo_validate_addr = eth_validate_addr,
3771  .ndo_set_mac_address = ql3xxx_set_mac_address,
3772  .ndo_tx_timeout = ql3xxx_tx_timeout,
3773 };
3774 
3775 static int __devinit ql3xxx_probe(struct pci_dev *pdev,
3776  const struct pci_device_id *pci_entry)
3777 {
3778  struct net_device *ndev = NULL;
3779  struct ql3_adapter *qdev = NULL;
3780  static int cards_found;
3781  int uninitialized_var(pci_using_dac), err;
3782 
3783  err = pci_enable_device(pdev);
3784  if (err) {
3785  pr_err("%s cannot enable PCI device\n", pci_name(pdev));
3786  goto err_out;
3787  }
3788 
3789  err = pci_request_regions(pdev, DRV_NAME);
3790  if (err) {
3791  pr_err("%s cannot obtain PCI resources\n", pci_name(pdev));
3792  goto err_out_disable_pdev;
3793  }
3794 
3795  pci_set_master(pdev);
3796 
3797  if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
3798  pci_using_dac = 1;
3799  err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
3800  } else if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
3801  pci_using_dac = 0;
3802  err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
3803  }
3804 
3805  if (err) {
3806  pr_err("%s no usable DMA configuration\n", pci_name(pdev));
3807  goto err_out_free_regions;
3808  }
3809 
3810  ndev = alloc_etherdev(sizeof(struct ql3_adapter));
3811  if (!ndev) {
3812  err = -ENOMEM;
3813  goto err_out_free_regions;
3814  }
3815 
3816  SET_NETDEV_DEV(ndev, &pdev->dev);
3817 
3818  pci_set_drvdata(pdev, ndev);
3819 
3820  qdev = netdev_priv(ndev);
3821  qdev->index = cards_found;
3822  qdev->ndev = ndev;
3823  qdev->pdev = pdev;
3824  qdev->device_id = pci_entry->device;
3825  qdev->port_link_state = LS_DOWN;
3826  if (msi)
3827  qdev->msi = 1;
3828 
3829  qdev->msg_enable = netif_msg_init(debug, default_msg);
3830 
3831  if (pci_using_dac)
3832  ndev->features |= NETIF_F_HIGHDMA;
3833  if (qdev->device_id == QL3032_DEVICE_ID)
3834  ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
3835 
3836  qdev->mem_map_registers = pci_ioremap_bar(pdev, 1);
3837  if (!qdev->mem_map_registers) {
3838  pr_err("%s: cannot map device registers\n", pci_name(pdev));
3839  err = -EIO;
3840  goto err_out_free_ndev;
3841  }
3842 
3843  spin_lock_init(&qdev->adapter_lock);
3844  spin_lock_init(&qdev->hw_lock);
3845 
3846  /* Set driver entry points */
3847  ndev->netdev_ops = &ql3xxx_netdev_ops;
3848  SET_ETHTOOL_OPS(ndev, &ql3xxx_ethtool_ops);
3849  ndev->watchdog_timeo = 5 * HZ;
3850 
3851  netif_napi_add(ndev, &qdev->napi, ql_poll, 64);
3852 
3853  ndev->irq = pdev->irq;
3854 
3855  /* make sure the EEPROM is good */
3856  if (ql_get_nvram_params(qdev)) {
3857  pr_alert("%s: Adapter #%d, Invalid NVRAM parameters\n",
3858  __func__, qdev->index);
3859  err = -EIO;
3860  goto err_out_iounmap;
3861  }
3862 
3863  ql_set_mac_info(qdev);
3864 
3865  /* Validate and set parameters */
3866  if (qdev->mac_index) {
3867  ndev->mtu = qdev->nvram_data.macCfg_port1.etherMtu_mac ;
3868  ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn2.macAddress);
3869  } else {
3870  ndev->mtu = qdev->nvram_data.macCfg_port0.etherMtu_mac ;
3871  ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn0.macAddress);
3872  }
3873  memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
3874 
3876 
3877  /* Record PCI bus information. */
3878  ql_get_board_info(qdev);
3879 
3880  /*
3881  * Set the Maximum Memory Read Byte Count value. We do this to handle
3882  * jumbo frames.
3883  */
3884  if (qdev->pci_x)
3885  pci_write_config_word(pdev, (int)0x4e, (u16) 0x0036);
3886 
3887  err = register_netdev(ndev);
3888  if (err) {
3889  pr_err("%s: cannot register net device\n", pci_name(pdev));
3890  goto err_out_iounmap;
3891  }
3892 
3893  /* we're going to reset, so assume we have no link for now */
3894 
3895  netif_carrier_off(ndev);
3896  netif_stop_queue(ndev);
3897  netif_stop_queue(ndev);
3898  qdev->workqueue = create_singlethread_workqueue(ndev->name);
3899  INIT_DELAYED_WORK(&qdev->reset_work, ql_reset_work);
3900  INIT_DELAYED_WORK(&qdev->tx_timeout_work, ql_tx_timeout_work);
3901  INIT_DELAYED_WORK(&qdev->link_state_work, ql_link_state_machine_work);
3902 
3903  init_timer(&qdev->adapter_timer);
3904  qdev->adapter_timer.function = ql3xxx_timer;
3905  qdev->adapter_timer.expires = jiffies + HZ * 2; /* two second delay */
3906  qdev->adapter_timer.data = (unsigned long)qdev;
3907 
3908  if (!cards_found) {
3909  pr_alert("%s\n", DRV_STRING);
3910  pr_alert("Driver name: %s, Version: %s\n",
3911  ql3xxx_driver_name, ql3xxx_driver_version);
3912  }
3913  ql_display_dev_info(ndev);
3914 
3915  cards_found++;
3916  return 0;
3917 
3918 err_out_iounmap:
3919  iounmap(qdev->mem_map_registers);
3920 err_out_free_ndev:
3921  free_netdev(ndev);
3922 err_out_free_regions:
3923  pci_release_regions(pdev);
3924 err_out_disable_pdev:
3925  pci_disable_device(pdev);
3926  pci_set_drvdata(pdev, NULL);
3927 err_out:
3928  return err;
3929 }
3930 
3931 static void __devexit ql3xxx_remove(struct pci_dev *pdev)
3932 {
3933  struct net_device *ndev = pci_get_drvdata(pdev);
3934  struct ql3_adapter *qdev = netdev_priv(ndev);
3935 
3936  unregister_netdev(ndev);
3937 
3938  ql_disable_interrupts(qdev);
3939 
3940  if (qdev->workqueue) {
3941  cancel_delayed_work(&qdev->reset_work);
3942  cancel_delayed_work(&qdev->tx_timeout_work);
3943  destroy_workqueue(qdev->workqueue);
3944  qdev->workqueue = NULL;
3945  }
3946 
3947  iounmap(qdev->mem_map_registers);
3948  pci_release_regions(pdev);
3949  pci_set_drvdata(pdev, NULL);
3950  free_netdev(ndev);
3951 }
3952 
3953 static struct pci_driver ql3xxx_driver = {
3954 
3955  .name = DRV_NAME,
3956  .id_table = ql3xxx_pci_tbl,
3957  .probe = ql3xxx_probe,
3958  .remove = __devexit_p(ql3xxx_remove),
3959 };
3960 
3961 static int __init ql3xxx_init_module(void)
3962 {
3963  return pci_register_driver(&ql3xxx_driver);
3964 }
3965 
3966 static void __exit ql3xxx_exit(void)
3967 {
3968  pci_unregister_driver(&ql3xxx_driver);
3969 }
3970 
3971 module_init(ql3xxx_init_module);
3972 module_exit(ql3xxx_exit);