qlge_main.c
1 /*
2  * QLogic qlge NIC HBA Driver
3  * Copyright (c) 2003-2008 QLogic Corporation
4  * See LICENSE.qlge for copyright and licensing details.
5  * Author: Linux qlge network device driver by
6  * Ron Mercer <[email protected]>
7  */
8 #include <linux/kernel.h>
9 #include <linux/init.h>
10 #include <linux/bitops.h>
11 #include <linux/types.h>
12 #include <linux/module.h>
13 #include <linux/list.h>
14 #include <linux/pci.h>
15 #include <linux/dma-mapping.h>
16 #include <linux/pagemap.h>
17 #include <linux/sched.h>
18 #include <linux/slab.h>
19 #include <linux/dmapool.h>
20 #include <linux/mempool.h>
21 #include <linux/spinlock.h>
22 #include <linux/kthread.h>
23 #include <linux/interrupt.h>
24 #include <linux/errno.h>
25 #include <linux/ioport.h>
26 #include <linux/in.h>
27 #include <linux/ip.h>
28 #include <linux/ipv6.h>
29 #include <net/ipv6.h>
30 #include <linux/tcp.h>
31 #include <linux/udp.h>
32 #include <linux/if_arp.h>
33 #include <linux/if_ether.h>
34 #include <linux/netdevice.h>
35 #include <linux/etherdevice.h>
36 #include <linux/ethtool.h>
37 #include <linux/if_vlan.h>
38 #include <linux/skbuff.h>
39 #include <linux/delay.h>
40 #include <linux/mm.h>
41 #include <linux/vmalloc.h>
42 #include <linux/prefetch.h>
43 #include <net/ip6_checksum.h>
44 
45 #include "qlge.h"
46 
47 char qlge_driver_name[] = DRV_NAME;
48 const char qlge_driver_version[] = DRV_VERSION;
49 
50 MODULE_AUTHOR("Ron Mercer <[email protected]>");
51 MODULE_DESCRIPTION(DRV_STRING " ");
52 MODULE_LICENSE("GPL");
53 MODULE_VERSION(DRV_VERSION);
54 
55 static const u32 default_msg =
56  NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
57 /* NETIF_MSG_TIMER | */
58  NETIF_MSG_IFDOWN |
59  NETIF_MSG_IFUP |
60  NETIF_MSG_RX_ERR |
61  NETIF_MSG_TX_ERR |
62 /* NETIF_MSG_TX_QUEUED | */
63 /* NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
64 /* NETIF_MSG_PKTDATA | */
65  NETIF_MSG_HW | NETIF_MSG_WOL | 0;
66 
67 static int debug = -1; /* defaults above */
68 module_param(debug, int, 0664);
69 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
70 
71 #define MSIX_IRQ 0
72 #define MSI_IRQ 1
73 #define LEG_IRQ 2
74 static int qlge_irq_type = MSIX_IRQ;
75 module_param(qlge_irq_type, int, 0664);
76 MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
77 
78 static int qlge_mpi_coredump;
79 module_param(qlge_mpi_coredump, int, 0);
80 MODULE_PARM_DESC(qlge_mpi_coredump,
81  "Option to enable MPI firmware dump. "
82  "Default is OFF - Do Not allocate memory. ");
83 
84 static int qlge_force_coredump;
85 module_param(qlge_force_coredump, int, 0);
86 MODULE_PARM_DESC(qlge_force_coredump,
87  "Option to allow force of firmware core dump. "
88  "Default is OFF - Do not allow.");
89 
90 static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
91  {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
92  {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
93  /* required last entry */
94  {0,}
95 };
96 
97 MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
98 
99 static int ql_wol(struct ql_adapter *qdev);
100 static void qlge_set_multicast_list(struct net_device *ndev);
101 
102 /* This hardware semaphore causes exclusive access to
103  * resources shared between the NIC driver, MPI firmware,
104  * FCOE firmware and the FC driver.
105  */
106 static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
107 {
108  u32 sem_bits = 0;
109 
110  switch (sem_mask) {
111  case SEM_XGMAC0_MASK:
112  sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
113  break;
114  case SEM_XGMAC1_MASK:
115  sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
116  break;
117  case SEM_ICB_MASK:
118  sem_bits = SEM_SET << SEM_ICB_SHIFT;
119  break;
120  case SEM_MAC_ADDR_MASK:
121  sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
122  break;
123  case SEM_FLASH_MASK:
124  sem_bits = SEM_SET << SEM_FLASH_SHIFT;
125  break;
126  case SEM_PROBE_MASK:
127  sem_bits = SEM_SET << SEM_PROBE_SHIFT;
128  break;
129  case SEM_RT_IDX_MASK:
130  sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
131  break;
132  case SEM_PROC_REG_MASK:
133  sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
134  break;
135  default:
136  netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!.\n");
137  return -EINVAL;
138  }
139 
140  ql_write32(qdev, SEM, sem_bits | sem_mask);
141  return !(ql_read32(qdev, SEM) & sem_bits);
142 }
143 
144 int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
145 {
146  unsigned int wait_count = 30;
147  do {
148  if (!ql_sem_trylock(qdev, sem_mask))
149  return 0;
150  udelay(100);
151  } while (--wait_count);
152  return -ETIMEDOUT;
153 }
154 
155 void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
156 {
157  ql_write32(qdev, SEM, sem_mask);
158  ql_read32(qdev, SEM); /* flush */
159 }
160 
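/* A typical caller brackets its hardware access with these helpers, much as
 * the flash routines later in this file do:
 *
 *	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
 *		return -ETIMEDOUT;
 *	... read or write the flash registers ...
 *	ql_sem_unlock(qdev, SEM_FLASH_MASK);
 *
 * ql_sem_trylock() is the non-blocking variant; ql_sem_spinlock() retries
 * up to 30 times with a 100us delay before giving up with -ETIMEDOUT.
 */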
161 /* This function waits for a specific bit to come ready
162  * in a given register. It is used mostly by the initialize
163  * process, but is also used in kernel thread API such as
164  * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
165  */
166 int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
167 {
168  u32 temp;
169  int count = UDELAY_COUNT;
170 
171  while (count) {
172  temp = ql_read32(qdev, reg);
173 
174  /* check for errors */
175  if (temp & err_bit) {
176  netif_alert(qdev, probe, qdev->ndev,
177  "register 0x%.08x access error, value = 0x%.08x!.\n",
178  reg, temp);
179  return -EIO;
180  } else if (temp & bit)
181  return 0;
182  udelay(UDELAY_DELAY);
183  count--;
184  }
185  netif_alert(qdev, probe, qdev->ndev,
186  "Timed out waiting for reg %x to come ready.\n", reg);
187  return -ETIMEDOUT;
188 }
189 
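/* For example, the MAC address helpers below poll the CAM index register's
 * write-ready and read-ready bits before touching it:
 *
 *	status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
 *
 * Passing a zero err_bit disables the error check, leaving only the
 * ready-bit poll bounded by the UDELAY_COUNT timeout.
 */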
190 /* The CFG register is used to download TX and RX control blocks
191  * to the chip. This function waits for an operation to complete.
192  */
193 static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
194 {
195  int count = UDELAY_COUNT;
196  u32 temp;
197 
198  while (count) {
199  temp = ql_read32(qdev, CFG);
200  if (temp & CFG_LE)
201  return -EIO;
202  if (!(temp & bit))
203  return 0;
204  udelay(UDELAY_DELAY);
205  count--;
206  }
207  return -ETIMEDOUT;
208 }
209 
210 
211 /* Used to issue init control blocks to hw. Maps control block,
212  * sets address, triggers download, waits for completion.
213  */
214 int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
215  u16 q_id)
216 {
217  u64 map;
218  int status = 0;
219  int direction;
220  u32 mask;
221  u32 value;
222 
223  direction =
224  (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
225  PCI_DMA_FROMDEVICE;
226 
227  map = pci_map_single(qdev->pdev, ptr, size, direction);
228  if (pci_dma_mapping_error(qdev->pdev, map)) {
229  netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
230  return -ENOMEM;
231  }
232 
233  status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
234  if (status)
235  return status;
236 
237  status = ql_wait_cfg(qdev, bit);
238  if (status) {
239  netif_err(qdev, ifup, qdev->ndev,
240  "Timed out waiting for CFG to come ready.\n");
241  goto exit;
242  }
243 
244  ql_write32(qdev, ICB_L, (u32) map);
245  ql_write32(qdev, ICB_H, (u32) (map >> 32));
246 
247  mask = CFG_Q_MASK | (bit << 16);
248  value = bit | (q_id << CFG_Q_SHIFT);
249  ql_write32(qdev, CFG, (mask | value));
250 
251  /*
252  * Wait for the bit to clear after signaling hw.
253  */
254  status = ql_wait_cfg(qdev, bit);
255 exit:
256  ql_sem_unlock(qdev, SEM_ICB_MASK); /* does flush too */
257  pci_unmap_single(qdev->pdev, map, size, direction);
258  return status;
259 }
260 
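/* The ring-bringup paths later in this file use this to load init control
 * blocks; loading a transmit work queue ICB looks roughly like:
 *
 *	err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
 *			   (u16) tx_ring->wq_id);
 *
 * where CFG_LRQ selects the "load request queue" operation and q_id steers
 * the ICB to the right ring.
 */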
261 /* Get a specific MAC address from the CAM. Used for debug and reg dump. */
262 int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
263  u32 *value)
264 {
265  u32 offset = 0;
266  int status;
267 
268  switch (type) {
269  case MAC_ADDR_TYPE_MULTI_MAC:
270  case MAC_ADDR_TYPE_CAM_MAC:
271  {
272  status =
273  ql_wait_reg_rdy(qdev,
274  MAC_ADDR_IDX, MAC_ADDR_MW, 0);
275  if (status)
276  goto exit;
277  ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
278  (index << MAC_ADDR_IDX_SHIFT) | /* index */
279  MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
280  status =
281  ql_wait_reg_rdy(qdev,
282  MAC_ADDR_IDX, MAC_ADDR_MR, 0);
283  if (status)
284  goto exit;
285  *value++ = ql_read32(qdev, MAC_ADDR_DATA);
286  status =
287  ql_wait_reg_rdy(qdev,
288  MAC_ADDR_IDX, MAC_ADDR_MW, 0);
289  if (status)
290  goto exit;
291  ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
292  (index << MAC_ADDR_IDX_SHIFT) | /* index */
293  MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
294  status =
295  ql_wait_reg_rdy(qdev,
296  MAC_ADDR_IDX, MAC_ADDR_MR, 0);
297  if (status)
298  goto exit;
299  *value++ = ql_read32(qdev, MAC_ADDR_DATA);
300  if (type == MAC_ADDR_TYPE_CAM_MAC) {
301  status =
302  ql_wait_reg_rdy(qdev,
303  MAC_ADDR_IDX, MAC_ADDR_MW, 0);
304  if (status)
305  goto exit;
306  ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
307  (index << MAC_ADDR_IDX_SHIFT) | /* index */
308  MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
309  status =
310  ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
311  MAC_ADDR_MR, 0);
312  if (status)
313  goto exit;
314  *value++ = ql_read32(qdev, MAC_ADDR_DATA);
315  }
316  break;
317  }
318  case MAC_ADDR_TYPE_VLAN:
319  case MAC_ADDR_TYPE_MULTI_FLTR:
320  default:
321  netif_crit(qdev, ifup, qdev->ndev,
322  "Address type %d not yet supported.\n", type);
323  status = -EPERM;
324  }
325 exit:
326  return status;
327 }
328 
329 /* Set up a MAC, multicast or VLAN address for the
330  * inbound frame matching.
331  */
332 static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
333  u16 index)
334 {
335  u32 offset = 0;
336  int status = 0;
337 
338  switch (type) {
339  case MAC_ADDR_TYPE_MULTI_MAC:
340  {
341  u32 upper = (addr[0] << 8) | addr[1];
342  u32 lower = (addr[2] << 24) | (addr[3] << 16) |
343  (addr[4] << 8) | (addr[5]);
344 
345  status =
346  ql_wait_reg_rdy(qdev,
347  MAC_ADDR_IDX, MAC_ADDR_MW, 0);
348  if (status)
349  goto exit;
350  ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
351  (index << MAC_ADDR_IDX_SHIFT) |
352  type | MAC_ADDR_E);
353  ql_write32(qdev, MAC_ADDR_DATA, lower);
354  status =
355  ql_wait_reg_rdy(qdev,
356  MAC_ADDR_IDX, MAC_ADDR_MW, 0);
357  if (status)
358  goto exit;
359  ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
360  (index << MAC_ADDR_IDX_SHIFT) |
361  type | MAC_ADDR_E);
362 
363  ql_write32(qdev, MAC_ADDR_DATA, upper);
364  status =
365  ql_wait_reg_rdy(qdev,
366  MAC_ADDR_IDX, MAC_ADDR_MW, 0);
367  if (status)
368  goto exit;
369  break;
370  }
371  case MAC_ADDR_TYPE_CAM_MAC:
372  {
373  u32 cam_output;
374  u32 upper = (addr[0] << 8) | addr[1];
375  u32 lower =
376  (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
377  (addr[5]);
378  status =
379  ql_wait_reg_rdy(qdev,
380  MAC_ADDR_IDX, MAC_ADDR_MW, 0);
381  if (status)
382  goto exit;
383  ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
384  (index << MAC_ADDR_IDX_SHIFT) | /* index */
385  type); /* type */
386  ql_write32(qdev, MAC_ADDR_DATA, lower);
387  status =
388  ql_wait_reg_rdy(qdev,
389  MAC_ADDR_IDX, MAC_ADDR_MW, 0);
390  if (status)
391  goto exit;
392  ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
393  (index << MAC_ADDR_IDX_SHIFT) | /* index */
394  type); /* type */
395  ql_write32(qdev, MAC_ADDR_DATA, upper);
396  status =
397  ql_wait_reg_rdy(qdev,
398  MAC_ADDR_IDX, MAC_ADDR_MW, 0);
399  if (status)
400  goto exit;
401  ql_write32(qdev, MAC_ADDR_IDX, (offset) | /* offset */
402  (index << MAC_ADDR_IDX_SHIFT) | /* index */
403  type); /* type */
404  /* This field should also include the queue id
405  and possibly the function id. Right now we hardcode
406  the route field to NIC core.
407  */
408  cam_output = (CAM_OUT_ROUTE_NIC |
409  (qdev->
410  func << CAM_OUT_FUNC_SHIFT) |
411  (0 << CAM_OUT_CQ_ID_SHIFT));
412  if (qdev->ndev->features & NETIF_F_HW_VLAN_RX)
413  cam_output |= CAM_OUT_RV;
414  /* route to NIC core */
415  ql_write32(qdev, MAC_ADDR_DATA, cam_output);
416  break;
417  }
418  case MAC_ADDR_TYPE_VLAN:
419  {
420  u32 enable_bit = *((u32 *) &addr[0]);
421  /* For VLAN, the addr actually holds a bit that
422  * either enables or disables the vlan id we are
423  * addressing. It's either MAC_ADDR_E on or off.
424  * That's bit-27 we're talking about.
425  */
426  status =
427  ql_wait_reg_rdy(qdev,
428  MAC_ADDR_IDX, MAC_ADDR_MW, 0);
429  if (status)
430  goto exit;
431  ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
432  (index << MAC_ADDR_IDX_SHIFT) | /* index */
433  type | /* type */
434  enable_bit); /* enable/disable */
435  break;
436  }
437  case MAC_ADDR_TYPE_MULTI_FLTR:
438  default:
439  netif_crit(qdev, ifup, qdev->ndev,
440  "Address type %d not yet supported.\n", type);
441  status = -EPERM;
442  }
443 exit:
444  return status;
445 }
446 
447 /* Set or clear MAC address in hardware. We sometimes
448  * have to clear it to prevent wrong frame routing
449  * especially in a bonding environment.
450  */
451 static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
452 {
453  int status;
454  char zero_mac_addr[ETH_ALEN];
455  char *addr;
456 
457  if (set) {
458  addr = &qdev->current_mac_addr[0];
459  netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
460  "Set Mac addr %pM\n", addr);
461  } else {
462  memset(zero_mac_addr, 0, ETH_ALEN);
463  addr = &zero_mac_addr[0];
464  netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
465  "Clearing MAC address\n");
466  }
467  status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
468  if (status)
469  return status;
470  status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
471  MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
472  ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
473  if (status)
474  netif_err(qdev, ifup, qdev->ndev,
475  "Failed to init mac address.\n");
476  return status;
477 }
478 
479 void ql_link_on(struct ql_adapter *qdev)
480 {
481  netif_err(qdev, link, qdev->ndev, "Link is up.\n");
482  netif_carrier_on(qdev->ndev);
483  ql_set_mac_addr(qdev, 1);
484 }
485 
486 void ql_link_off(struct ql_adapter *qdev)
487 {
488  netif_err(qdev, link, qdev->ndev, "Link is down.\n");
489  netif_carrier_off(qdev->ndev);
490  ql_set_mac_addr(qdev, 0);
491 }
492 
493 /* Get a specific frame routing value from the CAM.
494  * Used for debug and reg dump.
495  */
496 int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
497 {
498  int status = 0;
499 
500  status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
501  if (status)
502  goto exit;
503 
504  ql_write32(qdev, RT_IDX,
505  RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
506  status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
507  if (status)
508  goto exit;
509  *value = ql_read32(qdev, RT_DATA);
510 exit:
511  return status;
512 }
513 
514 /* The NIC function for this chip has 16 routing indexes. Each one can be used
515  * to route different frame types to various inbound queues. We send broadcast/
516  * multicast/error frames to the default queue for slow handling,
517  * and CAM hit/RSS frames to the fast handling queues.
518  */
519 static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
520  int enable)
521 {
522  int status = -EINVAL; /* Return error if no mask match. */
523  u32 value = 0;
524 
525  switch (mask) {
526  case RT_IDX_CAM_HIT:
527  {
528  value = RT_IDX_DST_CAM_Q | /* dest */
529  RT_IDX_TYPE_NICQ | /* type */
530  (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
531  break;
532  }
533  case RT_IDX_VALID: /* Promiscuous Mode frames. */
534  {
535  value = RT_IDX_DST_DFLT_Q | /* dest */
536  RT_IDX_TYPE_NICQ | /* type */
537  (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
538  break;
539  }
540  case RT_IDX_ERR: /* Pass up MAC,IP,TCP/UDP error frames. */
541  {
542  value = RT_IDX_DST_DFLT_Q | /* dest */
543  RT_IDX_TYPE_NICQ | /* type */
544  (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
545  break;
546  }
547  case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
548  {
549  value = RT_IDX_DST_DFLT_Q | /* dest */
550  RT_IDX_TYPE_NICQ | /* type */
551  (RT_IDX_IP_CSUM_ERR_SLOT <<
552  RT_IDX_IDX_SHIFT); /* index */
553  break;
554  }
555  case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
556  {
557  value = RT_IDX_DST_DFLT_Q | /* dest */
558  RT_IDX_TYPE_NICQ | /* type */
559  (RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
560  RT_IDX_IDX_SHIFT); /* index */
561  break;
562  }
563  case RT_IDX_BCAST: /* Pass up Broadcast frames to default Q. */
564  {
565  value = RT_IDX_DST_DFLT_Q | /* dest */
566  RT_IDX_TYPE_NICQ | /* type */
567  (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
568  break;
569  }
570  case RT_IDX_MCAST: /* Pass up All Multicast frames. */
571  {
572  value = RT_IDX_DST_DFLT_Q | /* dest */
573  RT_IDX_TYPE_NICQ | /* type */
574  (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
575  break;
576  }
577  case RT_IDX_MCAST_MATCH: /* Pass up matched Multicast frames. */
578  {
579  value = RT_IDX_DST_DFLT_Q | /* dest */
580  RT_IDX_TYPE_NICQ | /* type */
581  (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
582  break;
583  }
584  case RT_IDX_RSS_MATCH: /* Pass up matched RSS frames. */
585  {
586  value = RT_IDX_DST_RSS | /* dest */
587  RT_IDX_TYPE_NICQ | /* type */
588  (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
589  break;
590  }
591  case 0: /* Clear the E-bit on an entry. */
592  {
593  value = RT_IDX_DST_DFLT_Q | /* dest */
594  RT_IDX_TYPE_NICQ | /* type */
595  (index << RT_IDX_IDX_SHIFT);/* index */
596  break;
597  }
598  default:
599  netif_err(qdev, ifup, qdev->ndev,
600  "Mask type %d not yet supported.\n", mask);
601  status = -EPERM;
602  goto exit;
603  }
604 
605  if (value) {
606  status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
607  if (status)
608  goto exit;
609  value |= (enable ? RT_IDX_E : 0);
610  ql_write32(qdev, RT_IDX, value);
611  ql_write32(qdev, RT_DATA, enable ? mask : 0);
612  }
613 exit:
614  return status;
615 }
616 
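/* The multicast/promiscuous code later in this file flips individual
 * routing slots with calls along the lines of:
 *
 *	ql_set_routing_reg(qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1);
 *	ql_set_routing_reg(qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1);
 *
 * Passing enable == 0 clears the E-bit for that slot instead.
 */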
617 static void ql_enable_interrupts(struct ql_adapter *qdev)
618 {
619  ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
620 }
621 
622 static void ql_disable_interrupts(struct ql_adapter *qdev)
623 {
624  ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
625 }
626 
627 /* If we're running with multiple MSI-X vectors then we enable on the fly.
628  * Otherwise, we may have multiple outstanding workers and don't want to
629  * enable until the last one finishes. In this case, the irq_cnt gets
630  * incremented every time we queue a worker and decremented every time
631  * a worker finishes. Once it hits zero we enable the interrupt.
632  */
633 u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
634 {
635  u32 var = 0;
636  unsigned long hw_flags = 0;
637  struct intr_context *ctx = qdev->intr_context + intr;
638 
639  if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
640  /* Always enable if we're MSIX multi interrupts and
641  * it's not the default (zeroeth) interrupt.
642  */
643  ql_write32(qdev, INTR_EN,
644  ctx->intr_en_mask);
645  var = ql_read32(qdev, STS);
646  return var;
647  }
648 
649  spin_lock_irqsave(&qdev->hw_lock, hw_flags);
650  if (atomic_dec_and_test(&ctx->irq_cnt)) {
651  ql_write32(qdev, INTR_EN,
652  ctx->intr_en_mask);
653  var = ql_read32(qdev, STS);
654  }
655  spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
656  return var;
657 }
658 
659 static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
660 {
661  u32 var = 0;
662  struct intr_context *ctx;
663 
664  /* HW disables for us if we're MSIX multi interrupts and
665  * it's not the default (zeroeth) interrupt.
666  */
667  if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
668  return 0;
669 
670  ctx = qdev->intr_context + intr;
671  spin_lock(&qdev->hw_lock);
672  if (!atomic_read(&ctx->irq_cnt)) {
673  ql_write32(qdev, INTR_EN,
674  ctx->intr_dis_mask);
675  var = ql_read32(qdev, STS);
676  }
677  atomic_inc(&ctx->irq_cnt);
678  spin_unlock(&qdev->hw_lock);
679  return var;
680 }
681 
682 static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
683 {
684  int i;
685  for (i = 0; i < qdev->intr_count; i++) {
686  /* The enable call does a atomic_dec_and_test
687  * and enables only if the result is zero.
688  * So we precharge it here.
689  */
690  if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
691  i == 0))
692  atomic_set(&qdev->intr_context[i].irq_cnt, 1);
693  ql_enable_completion_interrupt(qdev, i);
694  }
695 
696 }
697 
698 static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
699 {
700  int status, i;
701  u16 csum = 0;
702  __le16 *flash = (__le16 *)&qdev->flash;
703 
704  status = strncmp((char *)&qdev->flash, str, 4);
705  if (status) {
706  netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
707  return status;
708  }
709 
710  for (i = 0; i < size; i++)
711  csum += le16_to_cpu(*flash++);
712 
713  if (csum)
714  netif_err(qdev, ifup, qdev->ndev,
715  "Invalid flash checksum, csum = 0x%.04x.\n", csum);
716 
717  return csum;
718 }
719 
720 static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
721 {
722  int status = 0;
723  /* wait for reg to come ready */
724  status = ql_wait_reg_rdy(qdev,
725  FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
726  if (status)
727  goto exit;
728  /* set up for reg read */
729  ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
730  /* wait for reg to come ready */
731  status = ql_wait_reg_rdy(qdev,
732  FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
733  if (status)
734  goto exit;
735  /* This data is stored on flash as an array of
736  * __le32. Since ql_read32() returns cpu endian
737  * we need to swap it back.
738  */
739  *data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
740 exit:
741  return status;
742 }
743 
744 static int ql_get_8000_flash_params(struct ql_adapter *qdev)
745 {
746  u32 i, size;
747  int status;
748  __le32 *p = (__le32 *)&qdev->flash;
749  u32 offset;
750  u8 mac_addr[6];
751 
752  /* Get flash offset for function and adjust
753  * for dword access.
754  */
755  if (!qdev->port)
756  offset = FUNC0_FLASH_OFFSET / sizeof(u32);
757  else
758  offset = FUNC1_FLASH_OFFSET / sizeof(u32);
759 
760  if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
761  return -ETIMEDOUT;
762 
763  size = sizeof(struct flash_params_8000) / sizeof(u32);
764  for (i = 0; i < size; i++, p++) {
765  status = ql_read_flash_word(qdev, i+offset, p);
766  if (status) {
767  netif_err(qdev, ifup, qdev->ndev,
768  "Error reading flash.\n");
769  goto exit;
770  }
771  }
772 
773  status = ql_validate_flash(qdev,
774  sizeof(struct flash_params_8000) / sizeof(u16),
775  "8000");
776  if (status) {
777  netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
778  status = -EINVAL;
779  goto exit;
780  }
781 
782  /* Extract either manufacturer or BOFM modified
783  * MAC address.
784  */
785  if (qdev->flash.flash_params_8000.data_type1 == 2)
786  memcpy(mac_addr,
787  qdev->flash.flash_params_8000.mac_addr1,
788  qdev->ndev->addr_len);
789  else
790  memcpy(mac_addr,
791  qdev->flash.flash_params_8000.mac_addr,
792  qdev->ndev->addr_len);
793 
794  if (!is_valid_ether_addr(mac_addr)) {
795  netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
796  status = -EINVAL;
797  goto exit;
798  }
799 
800  memcpy(qdev->ndev->dev_addr,
801  mac_addr,
802  qdev->ndev->addr_len);
803 
804 exit:
805  ql_sem_unlock(qdev, SEM_FLASH_MASK);
806  return status;
807 }
808 
809 static int ql_get_8012_flash_params(struct ql_adapter *qdev)
810 {
811  int i;
812  int status;
813  __le32 *p = (__le32 *)&qdev->flash;
814  u32 offset = 0;
815  u32 size = sizeof(struct flash_params_8012) / sizeof(u32);
816 
817  /* Second function's parameters follow the first
818  * function's.
819  */
820  if (qdev->port)
821  offset = size;
822 
823  if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
824  return -ETIMEDOUT;
825 
826  for (i = 0; i < size; i++, p++) {
827  status = ql_read_flash_word(qdev, i+offset, p);
828  if (status) {
829  netif_err(qdev, ifup, qdev->ndev,
830  "Error reading flash.\n");
831  goto exit;
832  }
833 
834  }
835 
836  status = ql_validate_flash(qdev,
837  sizeof(struct flash_params_8012) / sizeof(u16),
838  "8012");
839  if (status) {
840  netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
841  status = -EINVAL;
842  goto exit;
843  }
844 
845  if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
846  status = -EINVAL;
847  goto exit;
848  }
849 
850  memcpy(qdev->ndev->dev_addr,
851  qdev->flash.flash_params_8012.mac_addr,
852  qdev->ndev->addr_len);
853 
854 exit:
855  ql_sem_unlock(qdev, SEM_FLASH_MASK);
856  return status;
857 }
858 
859 /* xgmac register are located behind the xgmac_addr and xgmac_data
860  * register pair. Each read/write requires us to wait for the ready
861  * bit before reading/writing the data.
862  */
863 static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
864 {
865  int status;
866  /* wait for reg to come ready */
867  status = ql_wait_reg_rdy(qdev,
868  XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
869  if (status)
870  return status;
871  /* write the data to the data reg */
872  ql_write32(qdev, XGMAC_DATA, data);
873  /* trigger the write */
874  ql_write32(qdev, XGMAC_ADDR, reg);
875  return status;
876 }
877 
878 /* xgmac register are located behind the xgmac_addr and xgmac_data
879  * register pair. Each read/write requires us to wait for the ready
880  * bit before reading/writing the data.
881  */
882 int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
883 {
884  int status = 0;
885  /* wait for reg to come ready */
886  status = ql_wait_reg_rdy(qdev,
887  XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
888  if (status)
889  goto exit;
890  /* set up for reg read */
891  ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
892  /* wait for reg to come ready */
893  status = ql_wait_reg_rdy(qdev,
894  XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
895  if (status)
896  goto exit;
897  /* get the data */
898  *data = ql_read32(qdev, XGMAC_DATA);
899 exit:
900  return status;
901 }
902 
903 /* This is used for reading the 64-bit statistics regs. */
904 int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
905 {
906  int status = 0;
907  u32 hi = 0;
908  u32 lo = 0;
909 
910  status = ql_read_xgmac_reg(qdev, reg, &lo);
911  if (status)
912  goto exit;
913 
914  status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
915  if (status)
916  goto exit;
917 
918  *data = (u64) lo | ((u64) hi << 32);
919 
920 exit:
921  return status;
922 }
923 
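/* The 64-bit XGMAC counters live in two consecutive 32-bit words: the low
 * word at 'reg' and the high word at 'reg + 4'. So if the low read returns
 * 0x00000005 and the high read returns 0x00000001, the caller sees
 * 0x0000000100000005.
 */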
924 static int ql_8000_port_initialize(struct ql_adapter *qdev)
925 {
926  int status;
927  /*
928  * Get MPI firmware version for driver banner
929  * and ethtool info.
930  */
931  status = ql_mb_about_fw(qdev);
932  if (status)
933  goto exit;
934  status = ql_mb_get_fw_state(qdev);
935  if (status)
936  goto exit;
937  /* Wake up a worker to get/set the TX/RX frame sizes. */
938  queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
939 exit:
940  return status;
941 }
942 
943 /* Take the MAC Core out of reset.
944  * Enable statistics counting.
945  * Take the transmitter/receiver out of reset.
946  * This functionality may be done in the MPI firmware at a
947  * later date.
948  */
949 static int ql_8012_port_initialize(struct ql_adapter *qdev)
950 {
951  int status = 0;
952  u32 data;
953 
954  if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
955  /* Another function has the semaphore, so
956  * wait for the port init bit to come ready.
957  */
958  netif_info(qdev, link, qdev->ndev,
959  "Another function has the semaphore, so wait for the port init bit to come ready.\n");
960  status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
961  if (status) {
962  netif_crit(qdev, link, qdev->ndev,
963  "Port initialize timed out.\n");
964  }
965  return status;
966  }
967 
968  netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!.\n");
969  /* Set the core reset. */
970  status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
971  if (status)
972  goto end;
973  data |= GLOBAL_CFG_RESET;
974  status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
975  if (status)
976  goto end;
977 
978  /* Clear the core reset and turn on jumbo for receiver. */
979  data &= ~GLOBAL_CFG_RESET; /* Clear core reset. */
980  data |= GLOBAL_CFG_JUMBO; /* Turn on jumbo. */
981  data |= GLOBAL_CFG_TX_STAT_EN;
982  data |= GLOBAL_CFG_RX_STAT_EN;
983  status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
984  if (status)
985  goto end;
986 
987  /* Enable transmitter, and clear its reset. */
988  status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
989  if (status)
990  goto end;
991  data &= ~TX_CFG_RESET; /* Clear the TX MAC reset. */
992  data |= TX_CFG_EN; /* Enable the transmitter. */
993  status = ql_write_xgmac_reg(qdev, TX_CFG, data);
994  if (status)
995  goto end;
996 
997  /* Enable receiver and clear its reset. */
998  status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
999  if (status)
1000  goto end;
1001  data &= ~RX_CFG_RESET; /* Clear the RX MAC reset. */
1002  data |= RX_CFG_EN; /* Enable the receiver. */
1003  status = ql_write_xgmac_reg(qdev, RX_CFG, data);
1004  if (status)
1005  goto end;
1006 
1007  /* Turn on jumbo. */
1008  status =
1009  ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
1010  if (status)
1011  goto end;
1012  status =
1013  ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
1014  if (status)
1015  goto end;
1016 
1017  /* Signal to the world that the port is enabled. */
1018  ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
1019 end:
1020  ql_sem_unlock(qdev, qdev->xg_sem_mask);
1021  return status;
1022 }
1023 
1024 static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
1025 {
1026  return PAGE_SIZE << qdev->lbq_buf_order;
1027 }
1028 
1029 /* Get the next large buffer. */
1030 static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
1031 {
1032  struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
1033  rx_ring->lbq_curr_idx++;
1034  if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
1035  rx_ring->lbq_curr_idx = 0;
1036  rx_ring->lbq_free_cnt++;
1037  return lbq_desc;
1038 }
1039 
1040 static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
1041  struct rx_ring *rx_ring)
1042 {
1043  struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);
1044 
1045  pci_dma_sync_single_for_cpu(qdev->pdev,
1046  dma_unmap_addr(lbq_desc, mapaddr),
1047  rx_ring->lbq_buf_size,
1048  PCI_DMA_FROMDEVICE);
1049 
1050  /* If it's the last chunk of our master page then
1051  * we unmap it.
1052  */
1053  if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
1054  == ql_lbq_block_size(qdev))
1055  pci_unmap_page(qdev->pdev,
1056  lbq_desc->p.pg_chunk.map,
1057  ql_lbq_block_size(qdev),
1058  PCI_DMA_FROMDEVICE);
1059  return lbq_desc;
1060 }
1061 
1062 /* Get the next small buffer. */
1063 static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
1064 {
1065  struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
1066  rx_ring->sbq_curr_idx++;
1067  if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
1068  rx_ring->sbq_curr_idx = 0;
1069  rx_ring->sbq_free_cnt++;
1070  return sbq_desc;
1071 }
1072 
1073 /* Update an rx ring index. */
1074 static void ql_update_cq(struct rx_ring *rx_ring)
1075 {
1076  rx_ring->cnsmr_idx++;
1077  rx_ring->curr_entry++;
1078  if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
1079  rx_ring->cnsmr_idx = 0;
1080  rx_ring->curr_entry = rx_ring->cq_base;
1081  }
1082 }
1083 
1084 static void ql_write_cq_idx(struct rx_ring *rx_ring)
1085 {
1086  ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
1087 }
1088 
1089 static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
1090  struct bq_desc *lbq_desc)
1091 {
1092  if (!rx_ring->pg_chunk.page) {
1093  u64 map;
1094  rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
1095  GFP_ATOMIC,
1096  qdev->lbq_buf_order);
1097  if (unlikely(!rx_ring->pg_chunk.page)) {
1098  netif_err(qdev, drv, qdev->ndev,
1099  "page allocation failed.\n");
1100  return -ENOMEM;
1101  }
1102  rx_ring->pg_chunk.offset = 0;
1103  map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
1104  0, ql_lbq_block_size(qdev),
1105  PCI_DMA_FROMDEVICE);
1106  if (pci_dma_mapping_error(qdev->pdev, map)) {
1107  __free_pages(rx_ring->pg_chunk.page,
1108  qdev->lbq_buf_order);
1109  netif_err(qdev, drv, qdev->ndev,
1110  "PCI mapping failed.\n");
1111  return -ENOMEM;
1112  }
1113  rx_ring->pg_chunk.map = map;
1114  rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
1115  }
1116 
1117  /* Copy the current master pg_chunk info
1118  * to the current descriptor.
1119  */
1120  lbq_desc->p.pg_chunk = rx_ring->pg_chunk;
1121 
1122  /* Adjust the master page chunk for next
1123  * buffer get.
1124  */
1125  rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
1126  if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
1127  rx_ring->pg_chunk.page = NULL;
1128  lbq_desc->p.pg_chunk.last_flag = 1;
1129  } else {
1130  rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
1131  get_page(rx_ring->pg_chunk.page);
1132  lbq_desc->p.pg_chunk.last_flag = 0;
1133  }
1134  return 0;
1135 }
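/* Example of the chunk carving above: with 4 KiB pages and
 * lbq_buf_order == 1, ql_lbq_block_size() is 8 KiB. With a 2 KiB
 * lbq_buf_size that master page yields four chunks; get_page() bumps the
 * page reference for every chunk except the last, and the last chunk sets
 * last_flag so the receive path knows when to unmap the whole block
 * (see ql_get_curr_lchunk() above).
 */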
1136 /* Process (refill) a large buffer queue. */
1137 static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1138 {
1139  u32 clean_idx = rx_ring->lbq_clean_idx;
1140  u32 start_idx = clean_idx;
1141  struct bq_desc *lbq_desc;
1142  u64 map;
1143  int i;
1144 
1145  while (rx_ring->lbq_free_cnt > 32) {
1146  for (i = (rx_ring->lbq_clean_idx % 16); i < 16; i++) {
1147  netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1148  "lbq: try cleaning clean_idx = %d.\n",
1149  clean_idx);
1150  lbq_desc = &rx_ring->lbq[clean_idx];
1151  if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
1152  rx_ring->lbq_clean_idx = clean_idx;
1153  netif_err(qdev, ifup, qdev->ndev,
1154  "Could not get a page chunk, i=%d, clean_idx =%d .\n",
1155  i, clean_idx);
1156  return;
1157  }
1158 
1159  map = lbq_desc->p.pg_chunk.map +
1160  lbq_desc->p.pg_chunk.offset;
1161  dma_unmap_addr_set(lbq_desc, mapaddr, map);
1162  dma_unmap_len_set(lbq_desc, maplen,
1163  rx_ring->lbq_buf_size);
1164  *lbq_desc->addr = cpu_to_le64(map);
1165 
1166  pci_dma_sync_single_for_device(qdev->pdev, map,
1167  rx_ring->lbq_buf_size,
1168  PCI_DMA_FROMDEVICE);
1169  clean_idx++;
1170  if (clean_idx == rx_ring->lbq_len)
1171  clean_idx = 0;
1172  }
1173 
1174  rx_ring->lbq_clean_idx = clean_idx;
1175  rx_ring->lbq_prod_idx += 16;
1176  if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
1177  rx_ring->lbq_prod_idx = 0;
1178  rx_ring->lbq_free_cnt -= 16;
1179  }
1180 
1181  if (start_idx != clean_idx) {
1182  netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1183  "lbq: updating prod idx = %d.\n",
1184  rx_ring->lbq_prod_idx);
1185  ql_write_db_reg(rx_ring->lbq_prod_idx,
1186  rx_ring->lbq_prod_idx_db_reg);
1187  }
1188 }
1189 
1190 /* Process (refill) a small buffer queue. */
1191 static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1192 {
1193  u32 clean_idx = rx_ring->sbq_clean_idx;
1194  u32 start_idx = clean_idx;
1195  struct bq_desc *sbq_desc;
1196  u64 map;
1197  int i;
1198 
1199  while (rx_ring->sbq_free_cnt > 16) {
1200  for (i = (rx_ring->sbq_clean_idx % 16); i < 16; i++) {
1201  sbq_desc = &rx_ring->sbq[clean_idx];
1202  netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1203  "sbq: try cleaning clean_idx = %d.\n",
1204  clean_idx);
1205  if (sbq_desc->p.skb == NULL) {
1206  netif_printk(qdev, rx_status, KERN_DEBUG,
1207  qdev->ndev,
1208  "sbq: getting new skb for index %d.\n",
1209  sbq_desc->index);
1210  sbq_desc->p.skb =
1211  netdev_alloc_skb(qdev->ndev,
1212  SMALL_BUFFER_SIZE);
1213  if (sbq_desc->p.skb == NULL) {
1214  netif_err(qdev, probe, qdev->ndev,
1215  "Couldn't get an skb.\n");
1216  rx_ring->sbq_clean_idx = clean_idx;
1217  return;
1218  }
1219  skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
1220  map = pci_map_single(qdev->pdev,
1221  sbq_desc->p.skb->data,
1222  rx_ring->sbq_buf_size,
1223  PCI_DMA_FROMDEVICE);
1224  if (pci_dma_mapping_error(qdev->pdev, map)) {
1225  netif_err(qdev, ifup, qdev->ndev,
1226  "PCI mapping failed.\n");
1227  rx_ring->sbq_clean_idx = clean_idx;
1228  dev_kfree_skb_any(sbq_desc->p.skb);
1229  sbq_desc->p.skb = NULL;
1230  return;
1231  }
1232  dma_unmap_addr_set(sbq_desc, mapaddr, map);
1233  dma_unmap_len_set(sbq_desc, maplen,
1234  rx_ring->sbq_buf_size);
1235  *sbq_desc->addr = cpu_to_le64(map);
1236  }
1237 
1238  clean_idx++;
1239  if (clean_idx == rx_ring->sbq_len)
1240  clean_idx = 0;
1241  }
1242  rx_ring->sbq_clean_idx = clean_idx;
1243  rx_ring->sbq_prod_idx += 16;
1244  if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
1245  rx_ring->sbq_prod_idx = 0;
1246  rx_ring->sbq_free_cnt -= 16;
1247  }
1248 
1249  if (start_idx != clean_idx) {
1250  netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1251  "sbq: updating prod idx = %d.\n",
1252  rx_ring->sbq_prod_idx);
1253  ql_write_db_reg(rx_ring->sbq_prod_idx,
1254  rx_ring->sbq_prod_idx_db_reg);
1255  }
1256 }
1257 
1258 static void ql_update_buffer_queues(struct ql_adapter *qdev,
1259  struct rx_ring *rx_ring)
1260 {
1261  ql_update_sbq(qdev, rx_ring);
1262  ql_update_lbq(qdev, rx_ring);
1263 }
1264 
1265 /* Unmaps tx buffers. Can be called from send() if a pci mapping
1266  * fails at some stage, or from the interrupt when a tx completes.
1267  */
1268 static void ql_unmap_send(struct ql_adapter *qdev,
1269  struct tx_ring_desc *tx_ring_desc, int mapped)
1270 {
1271  int i;
1272  for (i = 0; i < mapped; i++) {
1273  if (i == 0 || (i == 7 && mapped > 7)) {
1274  /*
1275  * Unmap the skb->data area, or the
1276  * external sglist (AKA the Outbound
1277  * Address List (OAL)).
1278  * If its the zeroeth element, then it's
1279  * the skb->data area. If it's the 7th
1280  * element and there is more than 6 frags,
1281  * then its an OAL.
1282  */
1283  if (i == 7) {
1284  netif_printk(qdev, tx_done, KERN_DEBUG,
1285  qdev->ndev,
1286  "unmapping OAL area.\n");
1287  }
1288  pci_unmap_single(qdev->pdev,
1289  dma_unmap_addr(&tx_ring_desc->map[i],
1290  mapaddr),
1291  dma_unmap_len(&tx_ring_desc->map[i],
1292  maplen),
1293  PCI_DMA_TODEVICE);
1294  } else {
1295  netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
1296  "unmapping frag %d.\n", i);
1297  pci_unmap_page(qdev->pdev,
1298  dma_unmap_addr(&tx_ring_desc->map[i],
1299  mapaddr),
1300  dma_unmap_len(&tx_ring_desc->map[i],
1301  maplen), PCI_DMA_TODEVICE);
1302  }
1303  }
1304 
1305 }
1306 
1307 /* Map the buffers for this transmit. This will return
1308  * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
1309  */
1310 static int ql_map_send(struct ql_adapter *qdev,
1311  struct ob_mac_iocb_req *mac_iocb_ptr,
1312  struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
1313 {
1314  int len = skb_headlen(skb);
1315  dma_addr_t map;
1316  int frag_idx, err, map_idx = 0;
1317  struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
1318  int frag_cnt = skb_shinfo(skb)->nr_frags;
1319 
1320  if (frag_cnt) {
1321  netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
1322  "frag_cnt = %d.\n", frag_cnt);
1323  }
1324  /*
1325  * Map the skb buffer first.
1326  */
1327  map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
1328 
1329  err = pci_dma_mapping_error(qdev->pdev, map);
1330  if (err) {
1331  netif_err(qdev, tx_queued, qdev->ndev,
1332  "PCI mapping failed with error: %d\n", err);
1333 
1334  return NETDEV_TX_BUSY;
1335  }
1336 
1337  tbd->len = cpu_to_le32(len);
1338  tbd->addr = cpu_to_le64(map);
1339  dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1340  dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
1341  map_idx++;
1342 
1343  /*
1344  * This loop fills the remainder of the 8 address descriptors
1345  * in the IOCB. If there are more than 7 fragments, then the
1346  * eighth address desc will point to an external list (OAL).
1347  * When this happens, the remainder of the frags will be stored
1348  * in this list.
1349  */
1350  for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
1351  skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
1352  tbd++;
1353  if (frag_idx == 6 && frag_cnt > 7) {
1354  /* Let's tack on an sglist.
1355  * Our control block will now
1356  * look like this:
1357  * iocb->seg[0] = skb->data
1358  * iocb->seg[1] = frag[0]
1359  * iocb->seg[2] = frag[1]
1360  * iocb->seg[3] = frag[2]
1361  * iocb->seg[4] = frag[3]
1362  * iocb->seg[5] = frag[4]
1363  * iocb->seg[6] = frag[5]
1364  * iocb->seg[7] = ptr to OAL (external sglist)
1365  * oal->seg[0] = frag[6]
1366  * oal->seg[1] = frag[7]
1367  * oal->seg[2] = frag[8]
1368  * oal->seg[3] = frag[9]
1369  * oal->seg[4] = frag[10]
1370  * etc...
1371  */
1372  /* Tack on the OAL in the eighth segment of IOCB. */
1373  map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
1374  sizeof(struct oal),
1375  PCI_DMA_TODEVICE);
1376  err = pci_dma_mapping_error(qdev->pdev, map);
1377  if (err) {
1378  netif_err(qdev, tx_queued, qdev->ndev,
1379  "PCI mapping outbound address list with error: %d\n",
1380  err);
1381  goto map_error;
1382  }
1383 
1384  tbd->addr = cpu_to_le64(map);
1385  /*
1386  * The length is the number of fragments
1387  * that remain to be mapped times the length
1388  * of our sglist (OAL).
1389  */
1390  tbd->len =
1391  cpu_to_le32((sizeof(struct tx_buf_desc) *
1392  (frag_cnt - frag_idx)) | TX_DESC_C);
1393  dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
1394  map);
1395  dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1396  sizeof(struct oal));
1397  tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
1398  map_idx++;
1399  }
1400 
1401  map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
1402  DMA_TO_DEVICE);
1403 
1404  err = dma_mapping_error(&qdev->pdev->dev, map);
1405  if (err) {
1406  netif_err(qdev, tx_queued, qdev->ndev,
1407  "PCI mapping frags failed with error: %d.\n",
1408  err);
1409  goto map_error;
1410  }
1411 
1412  tbd->addr = cpu_to_le64(map);
1413  tbd->len = cpu_to_le32(skb_frag_size(frag));
1414  dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1415  dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1416  skb_frag_size(frag));
1417 
1418  }
1419  /* Save the number of segments we've mapped. */
1420  tx_ring_desc->map_cnt = map_idx;
1421  /* Terminate the last segment. */
1422  tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
1423  return NETDEV_TX_OK;
1424 
1425 map_error:
1426  /*
1427  * If the first frag mapping failed, then i will be zero.
1428  * This causes the unmap of the skb->data area. Otherwise
1429  * we pass in the number of frags that mapped successfully
1430  * so they can be umapped.
1431  */
1432  ql_unmap_send(qdev, tx_ring_desc, map_idx);
1433  return NETDEV_TX_BUSY;
1434 }
1435 
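/* Worked example of the OAL handoff above: an skb with 10 fragments uses
 * TBD slots 0-6 for skb->data plus frags 0-5, slot 7 then points at the
 * OAL, and frags 6-9 land in oal->seg[0..3]. The OAL descriptor's length
 * field carries the count of remaining entries (here 4) with TX_DESC_C
 * set, and the very last descriptor written gets TX_DESC_E.
 */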
1436 /* Categorizing receive firmware frame errors */
1437 static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err)
1438 {
1439  struct nic_stats *stats = &qdev->nic_stats;
1440 
1441  stats->rx_err_count++;
1442 
1443  switch (rx_err & IB_MAC_IOCB_RSP_ERR_MASK) {
1444  case IB_MAC_IOCB_RSP_ERR_CODE_ERR:
1445  stats->rx_code_err++;
1446  break;
1447  case IB_MAC_IOCB_RSP_ERR_OVERSIZE:
1448  stats->rx_oversize_err++;
1449  break;
1450  case IB_MAC_IOCB_RSP_ERR_UNDERSIZE:
1451  stats->rx_undersize_err++;
1452  break;
1453  case IB_MAC_IOCB_RSP_ERR_PREAMBLE:
1454  stats->rx_preamble_err++;
1455  break;
1456  case IB_MAC_IOCB_RSP_ERR_FRAME_LEN:
1457  stats->rx_frame_len_err++;
1458  break;
1459  case IB_MAC_IOCB_RSP_ERR_CRC:
1460  stats->rx_crc_err++;
1461  default:
1462  break;
1463  }
1462  break;
1463  }
1464 }
1465 
1466 /* Process an inbound completion from an rx ring. */
1467 static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
1468  struct rx_ring *rx_ring,
1469  struct ib_mac_iocb_rsp *ib_mac_rsp,
1470  u32 length,
1471  u16 vlan_id)
1472 {
1473  struct sk_buff *skb;
1474  struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1475  struct napi_struct *napi = &rx_ring->napi;
1476 
1477  napi->dev = qdev->ndev;
1478 
1479  skb = napi_get_frags(napi);
1480  if (!skb) {
1481  netif_err(qdev, drv, qdev->ndev,
1482  "Couldn't get an skb, exiting.\n");
1483  rx_ring->rx_dropped++;
1484  put_page(lbq_desc->p.pg_chunk.page);
1485  return;
1486  }
1487  prefetch(lbq_desc->p.pg_chunk.va);
1488  __skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
1489  lbq_desc->p.pg_chunk.page,
1490  lbq_desc->p.pg_chunk.offset,
1491  length);
1492 
1493  skb->len += length;
1494  skb->data_len += length;
1495  skb->truesize += length;
1496  skb_shinfo(skb)->nr_frags++;
1497 
1498  rx_ring->rx_packets++;
1499  rx_ring->rx_bytes += length;
1500  skb->ip_summed = CHECKSUM_UNNECESSARY;
1501  skb_record_rx_queue(skb, rx_ring->cq_id);
1502  if (vlan_id != 0xffff)
1503  __vlan_hwaccel_put_tag(skb, vlan_id);
1504  napi_gro_frags(napi);
1505 }
1506 
1507 /* Process an inbound completion from an rx ring. */
1508 static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1509  struct rx_ring *rx_ring,
1510  struct ib_mac_iocb_rsp *ib_mac_rsp,
1511  u32 length,
1512  u16 vlan_id)
1513 {
1514  struct net_device *ndev = qdev->ndev;
1515  struct sk_buff *skb = NULL;
1516  void *addr;
1517  struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1518  struct napi_struct *napi = &rx_ring->napi;
1519 
1520  skb = netdev_alloc_skb(ndev, length);
1521  if (!skb) {
1522  netif_err(qdev, drv, qdev->ndev,
1523  "Couldn't get an skb, need to unwind!.\n");
1524  rx_ring->rx_dropped++;
1525  put_page(lbq_desc->p.pg_chunk.page);
1526  return;
1527  }
1528 
1529  addr = lbq_desc->p.pg_chunk.va;
1530  prefetch(addr);
1531 
1532  /* The max framesize filter on this chip is set higher than
1533  * MTU since FCoE uses 2k frames.
1534  */
1535  if (skb->len > ndev->mtu + ETH_HLEN) {
1536  netif_err(qdev, drv, qdev->ndev,
1537  "Segment too small, dropping.\n");
1538  rx_ring->rx_dropped++;
1539  goto err_out;
1540  }
1541  memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
1542  netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1543  "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1544  length);
1545  skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1546  lbq_desc->p.pg_chunk.offset+ETH_HLEN,
1547  length-ETH_HLEN);
1548  skb->len += length-ETH_HLEN;
1549  skb->data_len += length-ETH_HLEN;
1550  skb->truesize += length-ETH_HLEN;
1551 
1552  rx_ring->rx_packets++;
1553  rx_ring->rx_bytes += skb->len;
1554  skb->protocol = eth_type_trans(skb, ndev);
1555  skb_checksum_none_assert(skb);
1556 
1557  if ((ndev->features & NETIF_F_RXCSUM) &&
1558  !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1559  /* TCP frame. */
1560  if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1561  netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1562  "TCP checksum done!\n");
1563  skb->ip_summed = CHECKSUM_UNNECESSARY;
1564  } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1565  (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1566  /* Unfragmented ipv4 UDP frame. */
1567  struct iphdr *iph =
1568  (struct iphdr *) ((u8 *)addr + ETH_HLEN);
1569  if (!(iph->frag_off &
1570  htons(IP_MF|IP_OFFSET))) {
1571  skb->ip_summed = CHECKSUM_UNNECESSARY;
1572  netif_printk(qdev, rx_status, KERN_DEBUG,
1573  qdev->ndev,
1574  "UDP checksum done!\n");
1575  }
1576  }
1577  }
1578 
1579  skb_record_rx_queue(skb, rx_ring->cq_id);
1580  if (vlan_id != 0xffff)
1581  __vlan_hwaccel_put_tag(skb, vlan_id);
1582  if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1583  napi_gro_receive(napi, skb);
1584  else
1585  netif_receive_skb(skb);
1586  return;
1587 err_out:
1588  dev_kfree_skb_any(skb);
1589  put_page(lbq_desc->p.pg_chunk.page);
1590 }
1591 
1592 /* Process an inbound completion from an rx ring. */
1593 static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1594  struct rx_ring *rx_ring,
1595  struct ib_mac_iocb_rsp *ib_mac_rsp,
1596  u32 length,
1597  u16 vlan_id)
1598 {
1599  struct net_device *ndev = qdev->ndev;
1600  struct sk_buff *skb = NULL;
1601  struct sk_buff *new_skb = NULL;
1602  struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
1603 
1604  skb = sbq_desc->p.skb;
1605  /* Allocate new_skb and copy */
1606  new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
1607  if (new_skb == NULL) {
1608  netif_err(qdev, probe, qdev->ndev,
1609  "No skb available, drop the packet.\n");
1610  rx_ring->rx_dropped++;
1611  return;
1612  }
1613  skb_reserve(new_skb, NET_IP_ALIGN);
1614  memcpy(skb_put(new_skb, length), skb->data, length);
1615  skb = new_skb;
1616 
1617  /* loopback self test for ethtool */
1618  if (test_bit(QL_SELFTEST, &qdev->flags)) {
1619  ql_check_lb_frame(qdev, skb);
1620  dev_kfree_skb_any(skb);
1621  return;
1622  }
1623 
1624  /* The max framesize filter on this chip is set higher than
1625  * MTU since FCoE uses 2k frames.
1626  */
1627  if (skb->len > ndev->mtu + ETH_HLEN) {
1628  dev_kfree_skb_any(skb);
1629  rx_ring->rx_dropped++;
1630  return;
1631  }
1632 
1633  prefetch(skb->data);
1634  if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1635  netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1636  "%s Multicast.\n",
1637  (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1638  IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1639  (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1640  IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1641  (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1642  IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1643  }
1644  if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
1645  netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1646  "Promiscuous Packet.\n");
1647 
1648  rx_ring->rx_packets++;
1649  rx_ring->rx_bytes += skb->len;
1650  skb->protocol = eth_type_trans(skb, ndev);
1651  skb_checksum_none_assert(skb);
1652 
1653  /* If rx checksum is on, and there are no
1654  * csum or frame errors.
1655  */
1656  if ((ndev->features & NETIF_F_RXCSUM) &&
1657  !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1658  /* TCP frame. */
1659  if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1660  netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1661  "TCP checksum done!\n");
1662  skb->ip_summed = CHECKSUM_UNNECESSARY;
1663  } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1664  (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1665  /* Unfragmented ipv4 UDP frame. */
1666  struct iphdr *iph = (struct iphdr *) skb->data;
1667  if (!(iph->frag_off &
1668  htons(IP_MF|IP_OFFSET))) {
1669  skb->ip_summed = CHECKSUM_UNNECESSARY;
1670  netif_printk(qdev, rx_status, KERN_DEBUG,
1671  qdev->ndev,
1672  "UDP checksum done!\n");
1673  }
1674  }
1675  }
1676 
1677  skb_record_rx_queue(skb, rx_ring->cq_id);
1678  if (vlan_id != 0xffff)
1679  __vlan_hwaccel_put_tag(skb, vlan_id);
1680  if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1681  napi_gro_receive(&rx_ring->napi, skb);
1682  else
1683  netif_receive_skb(skb);
1684 }
1685 
1686 static void ql_realign_skb(struct sk_buff *skb, int len)
1687 {
1688  void *temp_addr = skb->data;
1689 
1690  /* Undo the skb_reserve(skb,32) we did before
1691  * giving to hardware, and realign data on
1692  * a 2-byte boundary.
1693  */
1694  skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1695  skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1696  skb_copy_to_linear_data(skb, temp_addr,
1697  (unsigned int)len);
1698 }
1699 
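/* With the usual values (QLGE_SB_PAD == 32 and NET_IP_ALIGN == 2), this
 * shifts skb->data and skb->tail back by 30 bytes, so the copied-in frame
 * starts 2 bytes into the original headroom and the IP header that follows
 * the 14-byte Ethernet header lands on a 4-byte boundary.
 */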
1700 /*
1701  * This function builds an skb for the given inbound
1702  * completion. It will be rewritten for readability in the near
1703  * future, but for now it works well.
1704  */
1705 static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1706  struct rx_ring *rx_ring,
1707  struct ib_mac_iocb_rsp *ib_mac_rsp)
1708 {
1709  struct bq_desc *lbq_desc;
1710  struct bq_desc *sbq_desc;
1711  struct sk_buff *skb = NULL;
1712  u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1713  u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1714 
1715  /*
1716  * Handle the header buffer if present.
1717  */
1718  if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1719  ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1720  netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1721  "Header of %d bytes in small buffer.\n", hdr_len);
1722  /*
1723  * Headers fit nicely into a small buffer.
1724  */
1725  sbq_desc = ql_get_curr_sbuf(rx_ring);
1726  pci_unmap_single(qdev->pdev,
1727  dma_unmap_addr(sbq_desc, mapaddr),
1728  dma_unmap_len(sbq_desc, maplen),
1729  PCI_DMA_FROMDEVICE);
1730  skb = sbq_desc->p.skb;
1731  ql_realign_skb(skb, hdr_len);
1732  skb_put(skb, hdr_len);
1733  sbq_desc->p.skb = NULL;
1734  }
1735 
1736  /*
1737  * Handle the data buffer(s).
1738  */
1739  if (unlikely(!length)) { /* Is there data too? */
1740  netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1741  "No Data buffer in this packet.\n");
1742  return skb;
1743  }
1744 
1745  if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1746  if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1747  netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1748  "Headers in small, data of %d bytes in small, combine them.\n",
1749  length);
1750  /*
1751  * Data is less than small buffer size so it's
1752  * stuffed in a small buffer.
1753  * For this case we append the data
1754  * from the "data" small buffer to the "header" small
1755  * buffer.
1756  */
1757  sbq_desc = ql_get_curr_sbuf(rx_ring);
1758  pci_dma_sync_single_for_cpu(qdev->pdev,
1759  dma_unmap_addr
1760  (sbq_desc, mapaddr),
1761  dma_unmap_len
1762  (sbq_desc, maplen),
1763  PCI_DMA_FROMDEVICE);
1764  memcpy(skb_put(skb, length),
1765  sbq_desc->p.skb->data, length);
1766  pci_dma_sync_single_for_device(qdev->pdev,
1767  dma_unmap_addr
1768  (sbq_desc,
1769  mapaddr),
1770  dma_unmap_len
1771  (sbq_desc,
1772  maplen),
1773  PCI_DMA_FROMDEVICE);
1774  } else {
1775  netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1776  "%d bytes in a single small buffer.\n",
1777  length);
1778  sbq_desc = ql_get_curr_sbuf(rx_ring);
1779  skb = sbq_desc->p.skb;
1780  ql_realign_skb(skb, length);
1781  skb_put(skb, length);
1782  pci_unmap_single(qdev->pdev,
1783  dma_unmap_addr(sbq_desc,
1784  mapaddr),
1785  dma_unmap_len(sbq_desc,
1786  maplen),
1787  PCI_DMA_FROMDEVICE);
1788  sbq_desc->p.skb = NULL;
1789  }
1790  } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1791  if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1792  netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1793  "Header in small, %d bytes in large. Chain large to small!\n",
1794  length);
1795  /*
1796  * The data is in a single large buffer. We
1797  * chain it to the header buffer's skb and let
1798  * it rip.
1799  */
1800  lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1801  netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1802  "Chaining page at offset = %d, for %d bytes to skb.\n",
1803  lbq_desc->p.pg_chunk.offset, length);
1804  skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1805  lbq_desc->p.pg_chunk.offset,
1806  length);
1807  skb->len += length;
1808  skb->data_len += length;
1809  skb->truesize += length;
1810  } else {
1811  /*
1812  * The headers and data are in a single large buffer. We
1813  * copy it to a new skb and let it go. This can happen with
1814  * jumbo mtu on a non-TCP/UDP frame.
1815  */
1816  lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1817  skb = netdev_alloc_skb(qdev->ndev, length);
1818  if (skb == NULL) {
1819  netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
1820  "No skb available, drop the packet.\n");
1821  return NULL;
1822  }
1823  pci_unmap_page(qdev->pdev,
1824  dma_unmap_addr(lbq_desc,
1825  mapaddr),
1826  dma_unmap_len(lbq_desc, maplen),
1827  PCI_DMA_FROMDEVICE);
1828  skb_reserve(skb, NET_IP_ALIGN);
1829  netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1830  "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1831  length);
1832  skb_fill_page_desc(skb, 0,
1833  lbq_desc->p.pg_chunk.page,
1834  lbq_desc->p.pg_chunk.offset,
1835  length);
1836  skb->len += length;
1837  skb->data_len += length;
1838  skb->truesize += length;
1839  length -= length;
1840  __pskb_pull_tail(skb,
1841  (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1842  VLAN_ETH_HLEN : ETH_HLEN);
1843  }
1844  } else {
1845  /*
1846  * The data is in a chain of large buffers
1847  * pointed to by a small buffer. We loop
1848  * thru and chain them to our small header
1849  * buffer's skb.
1850  * frags: There are 18 max frags and our small
1851  * buffer will hold 32 of them. The thing is,
1852  * we'll use 3 max for our 9000 byte jumbo
1853  * frames. If the MTU goes up we could
1854  * eventually be in trouble.
1855  */
1856  int size, i = 0;
1857  sbq_desc = ql_get_curr_sbuf(rx_ring);
1858  pci_unmap_single(qdev->pdev,
1859  dma_unmap_addr(sbq_desc, mapaddr),
1860  dma_unmap_len(sbq_desc, maplen),
1861  PCI_DMA_FROMDEVICE);
1862  if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1863  /*
1864  * This is an non TCP/UDP IP frame, so
1865  * the headers aren't split into a small
1866  * buffer. We have to use the small buffer
1867  * that contains our sg list as our skb to
1868  * send upstairs. Copy the sg list here to
1869  * a local buffer and use it to find the
1870  * pages to chain.
1871  */
1872  netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1873  "%d bytes of headers & data in chain of large.\n",
1874  length);
1875  skb = sbq_desc->p.skb;
1876  sbq_desc->p.skb = NULL;
1877  skb_reserve(skb, NET_IP_ALIGN);
1878  }
1879  while (length > 0) {
1880  lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1881  size = (length < rx_ring->lbq_buf_size) ? length :
1882  rx_ring->lbq_buf_size;
1883 
1884  netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1885  "Adding page %d to skb for %d bytes.\n",
1886  i, size);
1887  skb_fill_page_desc(skb, i,
1888  lbq_desc->p.pg_chunk.page,
1889  lbq_desc->p.pg_chunk.offset,
1890  size);
1891  skb->len += size;
1892  skb->data_len += size;
1893  skb->truesize += size;
1894  length -= size;
1895  i++;
1896  }
1897  __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1898  VLAN_ETH_HLEN : ETH_HLEN);
1899  }
1900  return skb;
1901 }
1902 
1903 /* Process an inbound completion from an rx ring. */
1904 static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
1905  struct rx_ring *rx_ring,
1906  struct ib_mac_iocb_rsp *ib_mac_rsp,
1907  u16 vlan_id)
1908 {
1909  struct net_device *ndev = qdev->ndev;
1910  struct sk_buff *skb = NULL;
1911 
1912  QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1913 
1914  skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1915  if (unlikely(!skb)) {
1916  netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1917  "No skb available, drop packet.\n");
1918  rx_ring->rx_dropped++;
1919  return;
1920  }
1921 
1922  /* The max framesize filter on this chip is set higher than
1923  * MTU since FCoE uses 2k frames.
1924  */
1925  if (skb->len > ndev->mtu + ETH_HLEN) {
1926  dev_kfree_skb_any(skb);
1927  rx_ring->rx_dropped++;
1928  return;
1929  }
1930 
1931  /* loopback self test for ethtool */
1932  if (test_bit(QL_SELFTEST, &qdev->flags)) {
1933  ql_check_lb_frame(qdev, skb);
1934  dev_kfree_skb_any(skb);
1935  return;
1936  }
1937 
1938  prefetch(skb->data);
1939  if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1940  netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
1941  (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1942  IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1943  (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1944  IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1945  (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1946  IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1947  rx_ring->rx_multicast++;
1948  }
1949  if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
1950  netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1951  "Promiscuous Packet.\n");
1952  }
1953 
1954  skb->protocol = eth_type_trans(skb, ndev);
1955  skb_checksum_none_assert(skb);
1956 
1957  /* If rx checksum is on, and there are no
1958  * csum or frame errors.
1959  */
1960  if ((ndev->features & NETIF_F_RXCSUM) &&
1961  !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1962  /* TCP frame. */
1963  if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1964  netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1965  "TCP checksum done!\n");
1967  } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1968  (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1969  /* Unfragmented ipv4 UDP frame. */
1970  struct iphdr *iph = (struct iphdr *) skb->data;
1971  if (!(iph->frag_off &
1972  htons(IP_MF|IP_OFFSET))) {
1973  skb->ip_summed = CHECKSUM_UNNECESSARY;
1974  netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1975  "TCP checksum done!\n");
1976  }
1977  }
1978  }
1979 
1980  rx_ring->rx_packets++;
1981  rx_ring->rx_bytes += skb->len;
1982  skb_record_rx_queue(skb, rx_ring->cq_id);
1983  if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) && (vlan_id != 0))
1984  __vlan_hwaccel_put_tag(skb, vlan_id);
1985  if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1986  napi_gro_receive(&rx_ring->napi, skb);
1987  else
1988  netif_receive_skb(skb);
1989 }
1990 
1991 /* Process an inbound completion from an rx ring. */
1992 static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
1993  struct rx_ring *rx_ring,
1994  struct ib_mac_iocb_rsp *ib_mac_rsp)
1995 {
1996  u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1997  u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1998  ((le16_to_cpu(ib_mac_rsp->vlan_id) &
1999  IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
2000 
2001  QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
2002 
2003  /* Frame error, so drop the packet. */
2004  if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
2005  ql_categorize_rx_err(qdev, ib_mac_rsp->flags2);
2006  return (unsigned long)length;
2007  }
2008 
2009  if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
2010  /* The data and headers are split into
2011  * separate buffers.
2012  */
2013  ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2014  vlan_id);
2015  } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
2016  /* The data fit in a single small buffer.
2017  * Allocate a new skb, copy the data and
2018  * return the buffer to the free pool.
2019  */
2020  ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
2021  length, vlan_id);
2022  } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
2023  !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
2024  (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
2025  /* TCP packet in a page chunk that's been checksummed.
2026  * Tack it on to our GRO skb and let it go.
2027  */
2028  ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
2029  length, vlan_id);
2030  } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
2031  /* Non-TCP packet in a page chunk. Allocate an
2032  * skb, tack it on frags, and send it up.
2033  */
2034  ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
2035  length, vlan_id);
2036  } else {
2037  /* Non-TCP/UDP large frames that span multiple buffers
2038  * can be processed correctly by the split frame logic.
2039  */
2040  ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2041  vlan_id);
2042  }
2043 
2044  return (unsigned long)length;
2045 }
2046 
2047 /* Process an outbound completion from an rx ring. */
2048 static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
2049  struct ob_mac_iocb_rsp *mac_rsp)
2050 {
2051  struct tx_ring *tx_ring;
2052  struct tx_ring_desc *tx_ring_desc;
2053 
2054  QL_DUMP_OB_MAC_RSP(mac_rsp);
2055  tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
2056  tx_ring_desc = &tx_ring->q[mac_rsp->tid];
2057  ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
2058  tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
2059  tx_ring->tx_packets++;
2060  dev_kfree_skb(tx_ring_desc->skb);
2061  tx_ring_desc->skb = NULL;
2062 
2063  if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
2064  OB_MAC_IOCB_RSP_S |
2065  OB_MAC_IOCB_RSP_L |
2066  OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
2067  if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
2068  netif_warn(qdev, tx_done, qdev->ndev,
2069  "Total descriptor length did not match transfer length.\n");
2070  }
2071  if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
2072  netif_warn(qdev, tx_done, qdev->ndev,
2073  "Frame too short to be valid, not sent.\n");
2074  }
2075  if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
2076  netif_warn(qdev, tx_done, qdev->ndev,
2077  "Frame too long, but sent anyway.\n");
2078  }
2079  if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
2080  netif_warn(qdev, tx_done, qdev->ndev,
2081  "PCI backplane error. Frame not sent.\n");
2082  }
2083  }
2084  atomic_inc(&tx_ring->tx_count);
2085 }
2086 
2087 /* Fire up a handler to reset the MPI processor. */
2088 void ql_queue_fw_error(struct ql_adapter *qdev)
2089 {
2090  ql_link_off(qdev);
2091  queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
2092 }
2093 
2094 void ql_queue_asic_error(struct ql_adapter *qdev)
2095 {
2096  ql_link_off(qdev);
2097  ql_disable_interrupts(qdev);
2098  /* Clear adapter up bit to signal the recovery
2099  * process that it shouldn't kill the reset worker
2100  * thread
2101  */
2102  clear_bit(QL_ADAPTER_UP, &qdev->flags);
2103  /* Set the asic recovery bit to indicate to the reset process
2104  * that we are in fatal error recovery rather than a normal close
2105  */
2106  set_bit(QL_ASIC_RECOVERY, &qdev->flags);
2107  queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
2108 }
2109 
2110 static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
2111  struct ib_ae_iocb_rsp *ib_ae_rsp)
2112 {
2113  switch (ib_ae_rsp->event) {
2114  case MGMT_ERR_EVENT:
2115  netif_err(qdev, rx_err, qdev->ndev,
2116  "Management Processor Fatal Error.\n");
2117  ql_queue_fw_error(qdev);
2118  return;
2119 
2120  case CAM_LOOKUP_ERR_EVENT:
2121  netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
2122  netdev_err(qdev->ndev, "This event shouldn't occur.\n");
2123  ql_queue_asic_error(qdev);
2124  return;
2125 
2126  case SOFT_ECC_ERROR_EVENT:
2127  netdev_err(qdev->ndev, "Soft ECC error detected.\n");
2128  ql_queue_asic_error(qdev);
2129  break;
2130 
2131  case PCI_ERR_ANON_BUF_RD:
2132  netdev_err(qdev->ndev, "PCI error occurred when reading "
2133  "anonymous buffers from rx_ring %d.\n",
2134  ib_ae_rsp->q_id);
2135  ql_queue_asic_error(qdev);
2136  break;
2137 
2138  default:
2139  netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
2140  ib_ae_rsp->event);
2141  ql_queue_asic_error(qdev);
2142  break;
2143  }
2144 }
2145 
2146 static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2147 {
2148  struct ql_adapter *qdev = rx_ring->qdev;
2149  u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2150  struct ob_mac_iocb_rsp *net_rsp = NULL;
2151  int count = 0;
2152 
2153  struct tx_ring *tx_ring;
2154  /* While there are entries in the completion queue. */
2155  while (prod != rx_ring->cnsmr_idx) {
2156 
2157  netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2158  "cq_id = %d, prod = %d, cnsmr = %d.\n.",
2159  rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2160 
2161  net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
2162  rmb();
2163  switch (net_rsp->opcode) {
2164 
2165  case OPCODE_OB_MAC_TSO_IOCB:
2166  case OPCODE_OB_MAC_IOCB:
2167  ql_process_mac_tx_intr(qdev, net_rsp);
2168  break;
2169  default:
2170  netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2171  "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2172  net_rsp->opcode);
2173  }
2174  count++;
2175  ql_update_cq(rx_ring);
2176  prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2177  }
2178  if (!net_rsp)
2179  return 0;
2180  ql_write_cq_idx(rx_ring);
2181  tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
2182  if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
2183  if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2184  /*
2185  * The queue got stopped because the tx_ring was full.
2186  * Wake it up, because it's now at least 25% empty.
2187  */
2188  netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2189  }
2190 
2191  return count;
2192 }
2193 
2194 static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
2195 {
2196  struct ql_adapter *qdev = rx_ring->qdev;
2197  u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2198  struct ql_net_rsp_iocb *net_rsp;
2199  int count = 0;
2200 
2201  /* While there are entries in the completion queue. */
2202  while (prod != rx_ring->cnsmr_idx) {
2203 
2204  netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2205  "cq_id = %d, prod = %d, cnsmr = %d.\n.",
2206  rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2207 
2208  net_rsp = rx_ring->curr_entry;
2209  rmb();
2210  switch (net_rsp->opcode) {
2211  case OPCODE_IB_MAC_IOCB:
2212  ql_process_mac_rx_intr(qdev, rx_ring,
2213  (struct ib_mac_iocb_rsp *)
2214  net_rsp);
2215  break;
2216 
2217  case OPCODE_IB_AE_IOCB:
2218  ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
2219  net_rsp);
2220  break;
2221  default:
2222  netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2223  "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2224  net_rsp->opcode);
2225  break;
2226  }
2227  count++;
2228  ql_update_cq(rx_ring);
2229  prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2230  if (count == budget)
2231  break;
2232  }
2233  ql_update_buffer_queues(qdev, rx_ring);
2234  ql_write_cq_idx(rx_ring);
2235  return count;
2236 }
2237 
2238 static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
2239 {
2240  struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
2241  struct ql_adapter *qdev = rx_ring->qdev;
2242  struct rx_ring *trx_ring;
2243  int i, work_done = 0;
2244  struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
2245 
2246  netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2247  "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
2248 
2249  /* Service the TX rings first. They start
2250  * right after the RSS rings. */
2251  for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
2252  trx_ring = &qdev->rx_ring[i];
2253  /* If this TX completion ring belongs to this vector and
2254  * it's not empty then service it.
2255  */
2256  if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
2257  (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
2258  trx_ring->cnsmr_idx)) {
2259  netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2260  "%s: Servicing TX completion ring %d.\n",
2261  __func__, trx_ring->cq_id);
2262  ql_clean_outbound_rx_ring(trx_ring);
2263  }
2264  }
2265 
2266  /*
2267  * Now service the RSS ring if it's active.
2268  */
2269  if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
2270  rx_ring->cnsmr_idx) {
2271  netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2272  "%s: Servicing RX completion ring %d.\n",
2273  __func__, rx_ring->cq_id);
2274  work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
2275  }
2276 
2277  if (work_done < budget) {
2278  napi_complete(napi);
2279  ql_enable_completion_interrupt(qdev, rx_ring->irq);
2280  }
2281  return work_done;
2282 }
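
ql_napi_poll_msix() follows the usual NAPI contract: service at most `budget` inbound completions, and only complete NAPI and re-enable the interrupt when less than the full budget was consumed, so the ring is polled again otherwise. A minimal self-contained sketch of that pattern (a generic illustration, not the driver's code; poll_once is an invented name):

#include <stdio.h>

/* Generic NAPI-style poll skeleton: `pending` completions are queued,
 * at most `budget` are handled per poll; returns the work done. */
static int poll_once(int *pending, int budget)
{
	int work_done = 0;

	while (*pending > 0 && work_done < budget) {
		(*pending)--;		/* service one completion */
		work_done++;
	}
	if (work_done < budget)
		printf("complete: re-enable the interrupt\n");
	return work_done;
}

int main(void)
{
	int pending = 100;

	while (pending)
		printf("polled %d\n", poll_once(&pending, 64));
	return 0;
}
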
2283 
2284 static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
2285 {
2286  struct ql_adapter *qdev = netdev_priv(ndev);
2287 
2288  if (features & NETIF_F_HW_VLAN_RX) {
2289  ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
2290  NIC_RCV_CFG_VLAN_MATCH_AND_NON);
2291  } else {
2292  ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
2293  }
2294 }
2295 
2296 static netdev_features_t qlge_fix_features(struct net_device *ndev,
2297  netdev_features_t features)
2298 {
2299  /*
2300  * Since there is no support for separate rx/tx vlan accel
2301  * enable/disable, make sure the tx flag is always in the same state as rx.
2302  */
2303  if (features & NETIF_F_HW_VLAN_RX)
2304  features |= NETIF_F_HW_VLAN_TX;
2305  else
2306  features &= ~NETIF_F_HW_VLAN_TX;
2307 
2308  return features;
2309 }
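
qlge_fix_features() keeps the TX VLAN-acceleration flag in lockstep with the RX flag because the hardware cannot enable them independently. A tiny standalone sketch of that bit manipulation (the F_VLAN_* flag values below are arbitrary stand-ins, not the kernel's NETIF_F_* bits):

#include <stdio.h>

#define F_VLAN_RX (1u << 0)
#define F_VLAN_TX (1u << 1)

/* Mirror the RX VLAN-offload bit into the TX bit, as the
 * fix_features hook above does (flag values are illustrative). */
static unsigned int fix_features(unsigned int features)
{
	if (features & F_VLAN_RX)
		features |= F_VLAN_TX;
	else
		features &= ~F_VLAN_TX;
	return features;
}

int main(void)
{
	printf("0x%x\n", fix_features(F_VLAN_RX));	/* -> 0x3 */
	printf("0x%x\n", fix_features(F_VLAN_TX));	/* -> 0x0 */
	return 0;
}
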
2310 
2311 static int qlge_set_features(struct net_device *ndev,
2312  netdev_features_t features)
2313 {
2314  netdev_features_t changed = ndev->features ^ features;
2315 
2316  if (changed & NETIF_F_HW_VLAN_RX)
2317  qlge_vlan_mode(ndev, features);
2318 
2319  return 0;
2320 }
2321 
2322 static int __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
2323 {
2324  u32 enable_bit = MAC_ADDR_E;
2325  int err;
2326 
2327  err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
2328  MAC_ADDR_TYPE_VLAN, vid);
2329  if (err)
2330  netif_err(qdev, ifup, qdev->ndev,
2331  "Failed to init vlan address.\n");
2332  return err;
2333 }
2334 
2335 static int qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
2336 {
2337  struct ql_adapter *qdev = netdev_priv(ndev);
2338  int status;
2339  int err;
2340 
2341  status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2342  if (status)
2343  return status;
2344 
2345  err = __qlge_vlan_rx_add_vid(qdev, vid);
2346  set_bit(vid, qdev->active_vlans);
2347 
2348  ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2349 
2350  return err;
2351 }
2352 
2353 static int __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
2354 {
2355  u32 enable_bit = 0;
2356  int err;
2357 
2358  err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
2359  MAC_ADDR_TYPE_VLAN, vid);
2360  if (err)
2361  netif_err(qdev, ifup, qdev->ndev,
2362  "Failed to clear vlan address.\n");
2363  return err;
2364 }
2365 
2366 static int qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
2367 {
2368  struct ql_adapter *qdev = netdev_priv(ndev);
2369  int status;
2370  int err;
2371 
2372  status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2373  if (status)
2374  return status;
2375 
2376  err = __qlge_vlan_rx_kill_vid(qdev, vid);
2377  clear_bit(vid, qdev->active_vlans);
2378 
2379  ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2380 
2381  return err;
2382 }
2383 
2384 static void qlge_restore_vlan(struct ql_adapter *qdev)
2385 {
2386  int status;
2387  u16 vid;
2388 
2389  status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2390  if (status)
2391  return;
2392 
2393  for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
2394  __qlge_vlan_rx_add_vid(qdev, vid);
2395 
2396  ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2397 }
2398 
2399 /* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
2400 static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
2401 {
2402  struct rx_ring *rx_ring = dev_id;
2403  napi_schedule(&rx_ring->napi);
2404  return IRQ_HANDLED;
2405 }
2406 
2407 /* This handles a fatal error, MPI activity, and the default
2408  * rx_ring in an MSI-X multiple vector environment.
2409  * In an MSI/Legacy environment it also processes the rest
2410  * of the rx_rings.
2411  */
2412 static irqreturn_t qlge_isr(int irq, void *dev_id)
2413 {
2414  struct rx_ring *rx_ring = dev_id;
2415  struct ql_adapter *qdev = rx_ring->qdev;
2416  struct intr_context *intr_context = &qdev->intr_context[0];
2417  u32 var;
2418  int work_done = 0;
2419 
2420  spin_lock(&qdev->hw_lock);
2421  if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
2422  netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2423  "Shared Interrupt, Not ours!\n");
2424  spin_unlock(&qdev->hw_lock);
2425  return IRQ_NONE;
2426  }
2427  spin_unlock(&qdev->hw_lock);
2428 
2429  var = ql_disable_completion_interrupt(qdev, intr_context->intr);
2430 
2431  /*
2432  * Check for fatal error.
2433  */
2434  if (var & STS_FE) {
2435  ql_queue_asic_error(qdev);
2436  netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
2437  var = ql_read32(qdev, ERR_STS);
2438  netdev_err(qdev->ndev, "Resetting chip. "
2439  "Error Status Register = 0x%x\n", var);
2440  return IRQ_HANDLED;
2441  }
2442 
2443  /*
2444  * Check MPI processor activity.
2445  */
2446  if ((var & STS_PI) &&
2447  (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
2448  /*
2449  * We've got an async event or mailbox completion.
2450  * Handle it and clear the source of the interrupt.
2451  */
2452  netif_err(qdev, intr, qdev->ndev,
2453  "Got MPI processor interrupt.\n");
2454  ql_disable_completion_interrupt(qdev, intr_context->intr);
2455  ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2456  queue_delayed_work_on(smp_processor_id(),
2457  qdev->workqueue, &qdev->mpi_work, 0);
2458  work_done++;
2459  }
2460 
2461  /*
2462  * Get the bit-mask that shows the active queues for this
2463  * pass. Compare it to the queues that this irq services
2464  * and call napi if there's a match.
2465  */
2466  var = ql_read32(qdev, ISR1);
2467  if (var & intr_context->irq_mask) {
2468  netif_info(qdev, intr, qdev->ndev,
2469  "Waking handler for rx_ring[0].\n");
2470  ql_disable_completion_interrupt(qdev, intr_context->intr);
2471  napi_schedule(&rx_ring->napi);
2472  work_done++;
2473  }
2474  ql_enable_completion_interrupt(qdev, intr_context->intr);
2475  return work_done ? IRQ_HANDLED : IRQ_NONE;
2476 }
2477 
2478 static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2479 {
2480 
2481  if (skb_is_gso(skb)) {
2482  int err;
2483  if (skb_header_cloned(skb)) {
2484  err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2485  if (err)
2486  return err;
2487  }
2488 
2489  mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2490  mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2491  mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2492  mac_iocb_ptr->total_hdrs_len =
2493  cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2494  mac_iocb_ptr->net_trans_offset =
2495  cpu_to_le16(skb_network_offset(skb) |
2496  skb_transport_offset(skb)
2497  << OB_MAC_TRANSPORT_HDR_SHIFT);
2498  mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2499  mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
2500  if (likely(skb->protocol == htons(ETH_P_IP))) {
2501  struct iphdr *iph = ip_hdr(skb);
2502  iph->check = 0;
2503  mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2504  tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2505  iph->daddr, 0,
2506  IPPROTO_TCP,
2507  0);
2508  } else if (skb->protocol == htons(ETH_P_IPV6)) {
2509  mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2510  tcp_hdr(skb)->check =
2511  ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2512  &ipv6_hdr(skb)->daddr,
2513  0, IPPROTO_TCP, 0);
2514  }
2515  return 1;
2516  }
2517  return 0;
2518 }
2519 
2520 static void ql_hw_csum_setup(struct sk_buff *skb,
2521  struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2522 {
2523  int len;
2524  struct iphdr *iph = ip_hdr(skb);
2525  __sum16 *check;
2526  mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2527  mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2528  mac_iocb_ptr->net_trans_offset =
2529  cpu_to_le16(skb_network_offset(skb) |
2530  skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2531 
2532  mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2533  len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2534  if (likely(iph->protocol == IPPROTO_TCP)) {
2535  check = &(tcp_hdr(skb)->check);
2536  mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2537  mac_iocb_ptr->total_hdrs_len =
2538  cpu_to_le16(skb_transport_offset(skb) +
2539  (tcp_hdr(skb)->doff << 2));
2540  } else {
2541  check = &(udp_hdr(skb)->check);
2542  mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2543  mac_iocb_ptr->total_hdrs_len =
2544  cpu_to_le16(skb_transport_offset(skb) +
2545  sizeof(struct udphdr));
2546  }
2547  *check = ~csum_tcpudp_magic(iph->saddr,
2548  iph->daddr, len, iph->protocol, 0);
2549 }
2550 
2551 static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
2552 {
2553  struct tx_ring_desc *tx_ring_desc;
2554  struct ob_mac_iocb_req *mac_iocb_ptr;
2555  struct ql_adapter *qdev = netdev_priv(ndev);
2556  int tso;
2557  struct tx_ring *tx_ring;
2558  u32 tx_ring_idx = (u32) skb->queue_mapping;
2559 
2560  tx_ring = &qdev->tx_ring[tx_ring_idx];
2561 
2562  if (skb_padto(skb, ETH_ZLEN))
2563  return NETDEV_TX_OK;
2564 
2565  if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2566  netif_info(qdev, tx_queued, qdev->ndev,
2567  "%s: BUG! shutting down tx queue %d due to lack of resources.\n",
2568  __func__, tx_ring_idx);
2569  netif_stop_subqueue(ndev, tx_ring->wq_id);
2570  tx_ring->tx_errors++;
2571  return NETDEV_TX_BUSY;
2572  }
2573  tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2574  mac_iocb_ptr = tx_ring_desc->queue_entry;
2575  memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
2576 
2577  mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2578  mac_iocb_ptr->tid = tx_ring_desc->index;
2579  /* We use the upper 32-bits to store the tx queue for this IO.
2580  * When we get the completion we can use it to establish the context.
2581  */
2582  mac_iocb_ptr->txq_idx = tx_ring_idx;
2583  tx_ring_desc->skb = skb;
2584 
2585  mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2586 
2587  if (vlan_tx_tag_present(skb)) {
2588  netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2589  "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb));
2590  mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2591  mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
2592  }
2593  tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2594  if (tso < 0) {
2595  dev_kfree_skb_any(skb);
2596  return NETDEV_TX_OK;
2597  } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2598  ql_hw_csum_setup(skb,
2599  (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2600  }
2601  if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2602  NETDEV_TX_OK) {
2603  netif_err(qdev, tx_queued, qdev->ndev,
2604  "Could not map the segments.\n");
2605  tx_ring->tx_errors++;
2606  return NETDEV_TX_BUSY;
2607  }
2608  QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
2609  tx_ring->prod_idx++;
2610  if (tx_ring->prod_idx == tx_ring->wq_len)
2611  tx_ring->prod_idx = 0;
2612  wmb();
2613 
2614  ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
2615  netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2616  "tx queued, slot %d, len %d\n",
2617  tx_ring->prod_idx, skb->len);
2618 
2619  atomic_dec(&tx_ring->tx_count);
2620 
2621  if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2622  netif_stop_subqueue(ndev, tx_ring->wq_id);
2623  if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2624  /*
2625  * The queue got stopped because the tx_ring was full.
2626  * Wake it up, because it's now at least 25% empty.
2627  */
2628  netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2629  }
2630  return NETDEV_TX_OK;
2631 }
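
The transmit path above advances a producer index that wraps at the work-queue length and stops the subqueue when fewer than two free slots remain. A hedged standalone sketch of that bookkeeping (struct txq and the sizes used here are invented for the illustration):

#include <stdio.h>

/* Producer-index bookkeeping as used in the transmit path above:
 * advance, wrap at the work-queue length, and stop the queue when
 * fewer than two free slots remain (sizes are illustrative). */
struct txq {
	unsigned int prod_idx;
	unsigned int wq_len;
	int free_slots;
};

static void tx_one(struct txq *q)
{
	if (++q->prod_idx == q->wq_len)
		q->prod_idx = 0;
	if (--q->free_slots < 2)
		printf("stop queue at prod_idx %u\n", q->prod_idx);
}

int main(void)
{
	struct txq q = { .prod_idx = 126, .wq_len = 128, .free_slots = 3 };

	tx_one(&q);	/* prod_idx -> 127 */
	tx_one(&q);	/* wraps to 0, queue stops */
	printf("prod_idx = %u\n", q.prod_idx);
	return 0;
}
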
2632 
2633 
2634 static void ql_free_shadow_space(struct ql_adapter *qdev)
2635 {
2636  if (qdev->rx_ring_shadow_reg_area) {
2637  pci_free_consistent(qdev->pdev,
2638  PAGE_SIZE,
2639  qdev->rx_ring_shadow_reg_area,
2640  qdev->rx_ring_shadow_reg_dma);
2641  qdev->rx_ring_shadow_reg_area = NULL;
2642  }
2643  if (qdev->tx_ring_shadow_reg_area) {
2644  pci_free_consistent(qdev->pdev,
2645  PAGE_SIZE,
2646  qdev->tx_ring_shadow_reg_area,
2647  qdev->tx_ring_shadow_reg_dma);
2648  qdev->tx_ring_shadow_reg_area = NULL;
2649  }
2650 }
2651 
2652 static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2653 {
2654  qdev->rx_ring_shadow_reg_area =
2655  pci_alloc_consistent(qdev->pdev,
2656  PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
2657  if (qdev->rx_ring_shadow_reg_area == NULL) {
2658  netif_err(qdev, ifup, qdev->ndev,
2659  "Allocation of RX shadow space failed.\n");
2660  return -ENOMEM;
2661  }
2662  memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
2663  qdev->tx_ring_shadow_reg_area =
2664  pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
2665  &qdev->tx_ring_shadow_reg_dma);
2666  if (qdev->tx_ring_shadow_reg_area == NULL) {
2667  netif_err(qdev, ifup, qdev->ndev,
2668  "Allocation of TX shadow space failed.\n");
2669  goto err_wqp_sh_area;
2670  }
2671  memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
2672  return 0;
2673 
2674 err_wqp_sh_area:
2675  pci_free_consistent(qdev->pdev,
2676  PAGE_SIZE,
2677  qdev->rx_ring_shadow_reg_area,
2678  qdev->rx_ring_shadow_reg_dma);
2679  return -ENOMEM;
2680 }
2681 
2682 static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2683 {
2684  struct tx_ring_desc *tx_ring_desc;
2685  int i;
2686  struct ob_mac_iocb_req *mac_iocb_ptr;
2687 
2688  mac_iocb_ptr = tx_ring->wq_base;
2689  tx_ring_desc = tx_ring->q;
2690  for (i = 0; i < tx_ring->wq_len; i++) {
2691  tx_ring_desc->index = i;
2692  tx_ring_desc->skb = NULL;
2693  tx_ring_desc->queue_entry = mac_iocb_ptr;
2694  mac_iocb_ptr++;
2695  tx_ring_desc++;
2696  }
2697  atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2698 }
2699 
2700 static void ql_free_tx_resources(struct ql_adapter *qdev,
2701  struct tx_ring *tx_ring)
2702 {
2703  if (tx_ring->wq_base) {
2704  pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2705  tx_ring->wq_base, tx_ring->wq_base_dma);
2706  tx_ring->wq_base = NULL;
2707  }
2708  kfree(tx_ring->q);
2709  tx_ring->q = NULL;
2710 }
2711 
2712 static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2713  struct tx_ring *tx_ring)
2714 {
2715  tx_ring->wq_base =
2716  pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2717  &tx_ring->wq_base_dma);
2718 
2719  if ((tx_ring->wq_base == NULL) ||
2720  tx_ring->wq_base_dma & WQ_ADDR_ALIGN)
2721  goto pci_alloc_err;
2722 
2723  tx_ring->q =
2724  kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
2725  if (tx_ring->q == NULL)
2726  goto err;
2727 
2728  return 0;
2729 err:
2730  pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2731  tx_ring->wq_base, tx_ring->wq_base_dma);
2732  tx_ring->wq_base = NULL;
2733 pci_alloc_err:
2734  netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
2735  return -ENOMEM;
2736 }
2737 
2738 static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2739 {
2740  struct bq_desc *lbq_desc;
2741 
2742  uint32_t curr_idx, clean_idx;
2743 
2744  curr_idx = rx_ring->lbq_curr_idx;
2745  clean_idx = rx_ring->lbq_clean_idx;
2746  while (curr_idx != clean_idx) {
2747  lbq_desc = &rx_ring->lbq[curr_idx];
2748 
2749  if (lbq_desc->p.pg_chunk.last_flag) {
2750  pci_unmap_page(qdev->pdev,
2751  lbq_desc->p.pg_chunk.map,
2752  ql_lbq_block_size(qdev),
2753  PCI_DMA_FROMDEVICE);
2754  lbq_desc->p.pg_chunk.last_flag = 0;
2755  }
2756 
2757  put_page(lbq_desc->p.pg_chunk.page);
2758  lbq_desc->p.pg_chunk.page = NULL;
2759 
2760  if (++curr_idx == rx_ring->lbq_len)
2761  curr_idx = 0;
2762 
2763  }
2764 }
2765 
2766 static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2767 {
2768  int i;
2769  struct bq_desc *sbq_desc;
2770 
2771  for (i = 0; i < rx_ring->sbq_len; i++) {
2772  sbq_desc = &rx_ring->sbq[i];
2773  if (sbq_desc == NULL) {
2774  netif_err(qdev, ifup, qdev->ndev,
2775  "sbq_desc %d is NULL.\n", i);
2776  return;
2777  }
2778  if (sbq_desc->p.skb) {
2779  pci_unmap_single(qdev->pdev,
2780  dma_unmap_addr(sbq_desc, mapaddr),
2781  dma_unmap_len(sbq_desc, maplen),
2782  PCI_DMA_FROMDEVICE);
2783  dev_kfree_skb(sbq_desc->p.skb);
2784  sbq_desc->p.skb = NULL;
2785  }
2786  }
2787 }
2788 
2789 /* Free all large and small rx buffers associated
2790  * with the completion queues for this device.
2791  */
2792 static void ql_free_rx_buffers(struct ql_adapter *qdev)
2793 {
2794  int i;
2795  struct rx_ring *rx_ring;
2796 
2797  for (i = 0; i < qdev->rx_ring_count; i++) {
2798  rx_ring = &qdev->rx_ring[i];
2799  if (rx_ring->lbq)
2800  ql_free_lbq_buffers(qdev, rx_ring);
2801  if (rx_ring->sbq)
2802  ql_free_sbq_buffers(qdev, rx_ring);
2803  }
2804 }
2805 
2806 static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
2807 {
2808  struct rx_ring *rx_ring;
2809  int i;
2810 
2811  for (i = 0; i < qdev->rx_ring_count; i++) {
2812  rx_ring = &qdev->rx_ring[i];
2813  if (rx_ring->type != TX_Q)
2814  ql_update_buffer_queues(qdev, rx_ring);
2815  }
2816 }
2817 
2818 static void ql_init_lbq_ring(struct ql_adapter *qdev,
2819  struct rx_ring *rx_ring)
2820 {
2821  int i;
2822  struct bq_desc *lbq_desc;
2823  __le64 *bq = rx_ring->lbq_base;
2824 
2825  memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
2826  for (i = 0; i < rx_ring->lbq_len; i++) {
2827  lbq_desc = &rx_ring->lbq[i];
2828  memset(lbq_desc, 0, sizeof(*lbq_desc));
2829  lbq_desc->index = i;
2830  lbq_desc->addr = bq;
2831  bq++;
2832  }
2833 }
2834 
2835 static void ql_init_sbq_ring(struct ql_adapter *qdev,
2836  struct rx_ring *rx_ring)
2837 {
2838  int i;
2839  struct bq_desc *sbq_desc;
2840  __le64 *bq = rx_ring->sbq_base;
2841 
2842  memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
2843  for (i = 0; i < rx_ring->sbq_len; i++) {
2844  sbq_desc = &rx_ring->sbq[i];
2845  memset(sbq_desc, 0, sizeof(*sbq_desc));
2846  sbq_desc->index = i;
2847  sbq_desc->addr = bq;
2848  bq++;
2849  }
2850 }
2851 
2852 static void ql_free_rx_resources(struct ql_adapter *qdev,
2853  struct rx_ring *rx_ring)
2854 {
2855  /* Free the small buffer queue. */
2856  if (rx_ring->sbq_base) {
2857  pci_free_consistent(qdev->pdev,
2858  rx_ring->sbq_size,
2859  rx_ring->sbq_base, rx_ring->sbq_base_dma);
2860  rx_ring->sbq_base = NULL;
2861  }
2862 
2863  /* Free the small buffer queue control blocks. */
2864  kfree(rx_ring->sbq);
2865  rx_ring->sbq = NULL;
2866 
2867  /* Free the large buffer queue. */
2868  if (rx_ring->lbq_base) {
2869  pci_free_consistent(qdev->pdev,
2870  rx_ring->lbq_size,
2871  rx_ring->lbq_base, rx_ring->lbq_base_dma);
2872  rx_ring->lbq_base = NULL;
2873  }
2874 
2875  /* Free the large buffer queue control blocks. */
2876  kfree(rx_ring->lbq);
2877  rx_ring->lbq = NULL;
2878 
2879  /* Free the rx queue. */
2880  if (rx_ring->cq_base) {
2881  pci_free_consistent(qdev->pdev,
2882  rx_ring->cq_size,
2883  rx_ring->cq_base, rx_ring->cq_base_dma);
2884  rx_ring->cq_base = NULL;
2885  }
2886 }
2887 
2888 /* Allocate queues and buffers for this completion queue based
2889  * on the values in the parameter structure. */
2890 static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2891  struct rx_ring *rx_ring)
2892 {
2893 
2894  /*
2895  * Allocate the completion queue for this rx_ring.
2896  */
2897  rx_ring->cq_base =
2898  pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2899  &rx_ring->cq_base_dma);
2900 
2901  if (rx_ring->cq_base == NULL) {
2902  netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
2903  return -ENOMEM;
2904  }
2905 
2906  if (rx_ring->sbq_len) {
2907  /*
2908  * Allocate small buffer queue.
2909  */
2910  rx_ring->sbq_base =
2911  pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
2912  &rx_ring->sbq_base_dma);
2913 
2914  if (rx_ring->sbq_base == NULL) {
2915  netif_err(qdev, ifup, qdev->ndev,
2916  "Small buffer queue allocation failed.\n");
2917  goto err_mem;
2918  }
2919 
2920  /*
2921  * Allocate small buffer queue control blocks.
2922  */
2923  rx_ring->sbq =
2924  kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
2925  GFP_KERNEL);
2926  if (rx_ring->sbq == NULL) {
2927  netif_err(qdev, ifup, qdev->ndev,
2928  "Small buffer queue control block allocation failed.\n");
2929  goto err_mem;
2930  }
2931 
2932  ql_init_sbq_ring(qdev, rx_ring);
2933  }
2934 
2935  if (rx_ring->lbq_len) {
2936  /*
2937  * Allocate large buffer queue.
2938  */
2939  rx_ring->lbq_base =
2940  pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
2941  &rx_ring->lbq_base_dma);
2942 
2943  if (rx_ring->lbq_base == NULL) {
2944  netif_err(qdev, ifup, qdev->ndev,
2945  "Large buffer queue allocation failed.\n");
2946  goto err_mem;
2947  }
2948  /*
2949  * Allocate large buffer queue control blocks.
2950  */
2951  rx_ring->lbq =
2952  kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
2953  GFP_KERNEL);
2954  if (rx_ring->lbq == NULL) {
2955  netif_err(qdev, ifup, qdev->ndev,
2956  "Large buffer queue control block allocation failed.\n");
2957  goto err_mem;
2958  }
2959 
2960  ql_init_lbq_ring(qdev, rx_ring);
2961  }
2962 
2963  return 0;
2964 
2965 err_mem:
2966  ql_free_rx_resources(qdev, rx_ring);
2967  return -ENOMEM;
2968 }
2969 
2970 static void ql_tx_ring_clean(struct ql_adapter *qdev)
2971 {
2972  struct tx_ring *tx_ring;
2973  struct tx_ring_desc *tx_ring_desc;
2974  int i, j;
2975 
2976  /*
2977  * Loop through all queues and free
2978  * any resources.
2979  */
2980  for (j = 0; j < qdev->tx_ring_count; j++) {
2981  tx_ring = &qdev->tx_ring[j];
2982  for (i = 0; i < tx_ring->wq_len; i++) {
2983  tx_ring_desc = &tx_ring->q[i];
2984  if (tx_ring_desc && tx_ring_desc->skb) {
2985  netif_err(qdev, ifdown, qdev->ndev,
2986  "Freeing lost SKB %p, from queue %d, index %d.\n",
2987  tx_ring_desc->skb, j,
2988  tx_ring_desc->index);
2989  ql_unmap_send(qdev, tx_ring_desc,
2990  tx_ring_desc->map_cnt);
2991  dev_kfree_skb(tx_ring_desc->skb);
2992  tx_ring_desc->skb = NULL;
2993  }
2994  }
2995  }
2996 }
2997 
2998 static void ql_free_mem_resources(struct ql_adapter *qdev)
2999 {
3000  int i;
3001 
3002  for (i = 0; i < qdev->tx_ring_count; i++)
3003  ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
3004  for (i = 0; i < qdev->rx_ring_count; i++)
3005  ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
3006  ql_free_shadow_space(qdev);
3007 }
3008 
3009 static int ql_alloc_mem_resources(struct ql_adapter *qdev)
3010 {
3011  int i;
3012 
3013  /* Allocate space for our shadow registers and such. */
3014  if (ql_alloc_shadow_space(qdev))
3015  return -ENOMEM;
3016 
3017  for (i = 0; i < qdev->rx_ring_count; i++) {
3018  if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
3019  netif_err(qdev, ifup, qdev->ndev,
3020  "RX resource allocation failed.\n");
3021  goto err_mem;
3022  }
3023  }
3024  /* Allocate tx queue resources */
3025  for (i = 0; i < qdev->tx_ring_count; i++) {
3026  if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
3027  netif_err(qdev, ifup, qdev->ndev,
3028  "TX resource allocation failed.\n");
3029  goto err_mem;
3030  }
3031  }
3032  return 0;
3033 
3034 err_mem:
3035  ql_free_mem_resources(qdev);
3036  return -ENOMEM;
3037 }
3038 
3039 /* Set up the rx ring control block and pass it to the chip.
3040  * The control block is defined as
3041  * "Completion Queue Initialization Control Block", or cqicb.
3042  */
3043 static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
3044 {
3045  struct cqicb *cqicb = &rx_ring->cqicb;
3046  void *shadow_reg = qdev->rx_ring_shadow_reg_area +
3047  (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
3048  u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
3049  (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
3050  void __iomem *doorbell_area =
3051  qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
3052  int err = 0;
3053  u16 bq_len;
3054  u64 tmp;
3055  __le64 *base_indirect_ptr;
3056  int page_entries;
3057 
3058  /* Set up the shadow registers for this ring. */
3059  rx_ring->prod_idx_sh_reg = shadow_reg;
3060  rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
3061  *rx_ring->prod_idx_sh_reg = 0;
3062  shadow_reg += sizeof(u64);
3063  shadow_reg_dma += sizeof(u64);
3064  rx_ring->lbq_base_indirect = shadow_reg;
3065  rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
3066  shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3067  shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3068  rx_ring->sbq_base_indirect = shadow_reg;
3069  rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
3070 
3071  /* PCI doorbell mem area + 0x00 for consumer index register */
3072  rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
3073  rx_ring->cnsmr_idx = 0;
3074  rx_ring->curr_entry = rx_ring->cq_base;
3075 
3076  /* PCI doorbell mem area + 0x04 for valid register */
3077  rx_ring->valid_db_reg = doorbell_area + 0x04;
3078 
3079  /* PCI doorbell mem area + 0x18 for large buffer consumer */
3080  rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
3081 
3082  /* PCI doorbell mem area + 0x1c */
3083  rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
3084 
3085  memset((void *)cqicb, 0, sizeof(struct cqicb));
3086  cqicb->msix_vect = rx_ring->irq;
3087 
3088  bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
3089  cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
3090 
3091  cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
3092 
3093  cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
3094 
3095  /*
3096  * Set up the control block load flags.
3097  */
3098  cqicb->flags = FLAGS_LC | /* Load queue base address */
3099  FLAGS_LV | /* Load MSI-X vector */
3100  FLAGS_LI; /* Load irq delay values */
3101  if (rx_ring->lbq_len) {
3102  cqicb->flags |= FLAGS_LL; /* Load lbq values */
3103  tmp = (u64)rx_ring->lbq_base_dma;
3104  base_indirect_ptr = rx_ring->lbq_base_indirect;
3105  page_entries = 0;
3106  do {
3107  *base_indirect_ptr = cpu_to_le64(tmp);
3108  tmp += DB_PAGE_SIZE;
3109  base_indirect_ptr++;
3110  page_entries++;
3111  } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3112  cqicb->lbq_addr =
3113  cpu_to_le64(rx_ring->lbq_base_indirect_dma);
3114  bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
3115  (u16) rx_ring->lbq_buf_size;
3116  cqicb->lbq_buf_size = cpu_to_le16(bq_len);
3117  bq_len = (rx_ring->lbq_len == 65536) ? 0 :
3118  (u16) rx_ring->lbq_len;
3119  cqicb->lbq_len = cpu_to_le16(bq_len);
3120  rx_ring->lbq_prod_idx = 0;
3121  rx_ring->lbq_curr_idx = 0;
3122  rx_ring->lbq_clean_idx = 0;
3123  rx_ring->lbq_free_cnt = rx_ring->lbq_len;
3124  }
3125  if (rx_ring->sbq_len) {
3126  cqicb->flags |= FLAGS_LS; /* Load sbq values */
3127  tmp = (u64)rx_ring->sbq_base_dma;
3128  base_indirect_ptr = rx_ring->sbq_base_indirect;
3129  page_entries = 0;
3130  do {
3131  *base_indirect_ptr = cpu_to_le64(tmp);
3132  tmp += DB_PAGE_SIZE;
3133  base_indirect_ptr++;
3134  page_entries++;
3135  } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
3136  cqicb->sbq_addr =
3137  cpu_to_le64(rx_ring->sbq_base_indirect_dma);
3138  cqicb->sbq_buf_size =
3139  cpu_to_le16((u16)(rx_ring->sbq_buf_size));
3140  bq_len = (rx_ring->sbq_len == 65536) ? 0 :
3141  (u16) rx_ring->sbq_len;
3142  cqicb->sbq_len = cpu_to_le16(bq_len);
3143  rx_ring->sbq_prod_idx = 0;
3144  rx_ring->sbq_curr_idx = 0;
3145  rx_ring->sbq_clean_idx = 0;
3146  rx_ring->sbq_free_cnt = rx_ring->sbq_len;
3147  }
3148  switch (rx_ring->type) {
3149  case TX_Q:
3150  cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
3151  cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
3152  break;
3153  case RX_Q:
3154  /* Inbound completion handling rx_rings run in
3155  * separate NAPI contexts.
3156  */
3157  netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
3158  64);
3159  cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
3160  cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
3161  break;
3162  default:
3163  netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3164  "Invalid rx_ring->type = %d.\n", rx_ring->type);
3165  }
3166  err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
3167  CFG_LCQ, rx_ring->cq_id);
3168  if (err) {
3169  netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
3170  return err;
3171  }
3172  return err;
3173 }
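
ql_start_rx_ring() describes each buffer queue to the chip as a list of page addresses: the do/while loops above write one 64-bit entry per DB_PAGE_SIZE page spanned by the queue. A standalone sketch of building such an indirection list (the 4096-byte page size, the queue size and the variable names are assumptions for the example, not values taken from the register spec):

#include <stdio.h>

/* Build a page-indirection list for a buffer queue the way the
 * do/while loops above do: one 64-bit entry per page spanned by
 * the queue. The constants below are illustrative only. */
#define DB_PAGE_SIZE	4096ULL

int main(void)
{
	unsigned long long base = 0x100000ULL;	/* fake DMA address */
	unsigned long long q_bytes = 1024 * 8;	/* 1024 8-byte entries */
	unsigned long long entries = (q_bytes + DB_PAGE_SIZE - 1) / DB_PAGE_SIZE;
	unsigned long long indirect[8];
	unsigned long long tmp = base;
	unsigned int n = 0;

	do {
		indirect[n++] = tmp;
		tmp += DB_PAGE_SIZE;
	} while (n < entries);

	for (n = 0; n < entries; n++)
		printf("page %u at 0x%llx\n", n, indirect[n]);
	return 0;
}
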
3174 
3175 static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
3176 {
3177  struct wqicb *wqicb = (struct wqicb *)tx_ring;
3178  void __iomem *doorbell_area =
3179  qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
3180  void *shadow_reg = qdev->tx_ring_shadow_reg_area +
3181  (tx_ring->wq_id * sizeof(u64));
3182  u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
3183  (tx_ring->wq_id * sizeof(u64));
3184  int err = 0;
3185 
3186  /*
3187  * Assign doorbell registers for this tx_ring.
3188  */
3189  /* TX PCI doorbell mem area for tx producer index */
3190  tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
3191  tx_ring->prod_idx = 0;
3192  /* TX PCI doorbell mem area + 0x04 */
3193  tx_ring->valid_db_reg = doorbell_area + 0x04;
3194 
3195  /*
3196  * Assign shadow registers for this tx_ring.
3197  */
3198  tx_ring->cnsmr_idx_sh_reg = shadow_reg;
3199  tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
3200 
3201  wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
3202  wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
3203  Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
3204  wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
3205  wqicb->rid = 0;
3206  wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
3207 
3208  wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
3209 
3210  ql_init_tx_ring(qdev, tx_ring);
3211 
3212  err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
3213  (u16) tx_ring->wq_id);
3214  if (err) {
3215  netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
3216  return err;
3217  }
3218  return err;
3219 }
3220 
3221 static void ql_disable_msix(struct ql_adapter *qdev)
3222 {
3223  if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3224  pci_disable_msix(qdev->pdev);
3225  clear_bit(QL_MSIX_ENABLED, &qdev->flags);
3226  kfree(qdev->msi_x_entry);
3227  qdev->msi_x_entry = NULL;
3228  } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3229  pci_disable_msi(qdev->pdev);
3230  clear_bit(QL_MSI_ENABLED, &qdev->flags);
3231  }
3232 }
3233 
3234 /* We start by trying to get the number of vectors
3235  * stored in qdev->intr_count. If we don't get that
3236  * many then we reduce the count and try again.
3237  */
3238 static void ql_enable_msix(struct ql_adapter *qdev)
3239 {
3240  int i, err;
3241 
3242  /* Get the MSIX vectors. */
3243  if (qlge_irq_type == MSIX_IRQ) {
3244  /* Try to alloc space for the msix struct,
3245  * if it fails then go to MSI/legacy.
3246  */
3247  qdev->msi_x_entry = kcalloc(qdev->intr_count,
3248  sizeof(struct msix_entry),
3249  GFP_KERNEL);
3250  if (!qdev->msi_x_entry) {
3251  qlge_irq_type = MSI_IRQ;
3252  goto msi;
3253  }
3254 
3255  for (i = 0; i < qdev->intr_count; i++)
3256  qdev->msi_x_entry[i].entry = i;
3257 
3258  /* Loop to get our vectors. We start with
3259  * what we want and settle for what we get.
3260  */
3261  do {
3262  err = pci_enable_msix(qdev->pdev,
3263  qdev->msi_x_entry, qdev->intr_count);
3264  if (err > 0)
3265  qdev->intr_count = err;
3266  } while (err > 0);
3267 
3268  if (err < 0) {
3269  kfree(qdev->msi_x_entry);
3270  qdev->msi_x_entry = NULL;
3271  netif_warn(qdev, ifup, qdev->ndev,
3272  "MSI-X Enable failed, trying MSI.\n");
3273  qdev->intr_count = 1;
3274  qlge_irq_type = MSI_IRQ;
3275  } else if (err == 0) {
3276  set_bit(QL_MSIX_ENABLED, &qdev->flags);
3277  netif_info(qdev, ifup, qdev->ndev,
3278  "MSI-X Enabled, got %d vectors.\n",
3279  qdev->intr_count);
3280  return;
3281  }
3282  }
3283 msi:
3284  qdev->intr_count = 1;
3285  if (qlge_irq_type == MSI_IRQ) {
3286  if (!pci_enable_msi(qdev->pdev)) {
3287  set_bit(QL_MSI_ENABLED, &qdev->flags);
3288  netif_info(qdev, ifup, qdev->ndev,
3289  "Running with MSI interrupts.\n");
3290  return;
3291  }
3292  }
3293  qlge_irq_type = LEG_IRQ;
3294  netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3295  "Running with legacy interrupts.\n");
3296 }
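
ql_enable_msix() relies on the older pci_enable_msix() convention: a positive return value means only that many vectors are available, so the request is retried with the smaller count until it either succeeds or fails outright. A self-contained sketch of that retry-and-shrink loop (fake_enable_msix is invented purely to stand in for the PCI call):

#include <stdio.h>

/* Stand-in for the old pci_enable_msix() return convention:
 * >0 means "only this many available, retry", 0 means success,
 * <0 would mean failure. Invented for this illustration only. */
static int fake_enable_msix(int requested, int available)
{
	return (requested <= available) ? 0 : available;
}

int main(void)
{
	int intr_count = 8, available = 5, err;

	do {
		err = fake_enable_msix(intr_count, available);
		if (err > 0)
			intr_count = err;	/* settle for what we get */
	} while (err > 0);

	printf("got %d vectors (err=%d)\n", intr_count, err);
	return 0;
}
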
3297 
3298 /* Each vector services 1 RSS ring and 1 or more
3299  * TX completion rings. This function loops through
3300  * the TX completion rings and assigns the vector that
3301  * will service it. An example would be if there are
3302  * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
3303  * This would mean that vector 0 would service RSS ring 0
3304  * and TX completion rings 0,1,2 and 3. Vector 1 would
3305  * service RSS ring 1 and TX completion rings 4,5,6 and 7.
3306  */
3307 static void ql_set_tx_vect(struct ql_adapter *qdev)
3308 {
3309  int i, j, vect;
3310  u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3311 
3312  if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3313  /* Assign irq vectors to TX rx_rings.*/
3314  for (vect = 0, j = 0, i = qdev->rss_ring_count;
3315  i < qdev->rx_ring_count; i++) {
3316  if (j == tx_rings_per_vector) {
3317  vect++;
3318  j = 0;
3319  }
3320  qdev->rx_ring[i].irq = vect;
3321  j++;
3322  }
3323  } else {
3324  /* For single vector all rings have an irq
3325  * of zero.
3326  */
3327  for (i = 0; i < qdev->rx_ring_count; i++)
3328  qdev->rx_ring[i].irq = 0;
3329  }
3330 }
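
Working through the example in the comment above (2 vectors, 8 TX completion rings) with the same loop structure as ql_set_tx_vect() gives TX completion rings 0-3 on vector 0 and rings 4-7 on vector 1. A standalone sketch of that assignment (the ring counts are the comment's example values, not probed from hardware):

#include <stdio.h>

/* Reproduce the vector assignment described above for 2 vectors,
 * 2 RSS rings and 8 TX completion rings. */
int main(void)
{
	int intr_count = 2, rss_ring_count = 2, tx_ring_count = 8;
	int rx_ring_count = rss_ring_count + tx_ring_count;
	int tx_rings_per_vector = tx_ring_count / intr_count;
	int i, j, vect;

	for (vect = 0, j = 0, i = rss_ring_count; i < rx_ring_count; i++) {
		if (j == tx_rings_per_vector) {
			vect++;
			j = 0;
		}
		printf("TX completion ring %d -> vector %d\n",
		       i - rss_ring_count, vect);
		j++;
	}
	return 0;
}
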
3331 
3332 /* Set the interrupt mask for this vector. Each vector
3333  * will service 1 RSS ring and 1 or more TX completion
3334  * rings. This function sets up a bit mask per vector
3335  * that indicates which rings it services.
3336  */
3337 static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
3338 {
3339  int j, vect = ctx->intr;
3340  u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3341 
3342  if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3343  /* Add the RSS ring serviced by this vector
3344  * to the mask.
3345  */
3346  ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
3347  /* Add the TX ring(s) serviced by this vector
3348  * to the mask. */
3349  for (j = 0; j < tx_rings_per_vector; j++) {
3350  ctx->irq_mask |=
3351  (1 << qdev->rx_ring[qdev->rss_ring_count +
3352  (vect * tx_rings_per_vector) + j].cq_id);
3353  }
3354  } else {
3355  /* For single vector we just shift each queue's
3356  * ID into the mask.
3357  */
3358  for (j = 0; j < qdev->rx_ring_count; j++)
3359  ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
3360  }
3361 }
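
The per-vector mask built here is simply a bit per completion-queue ID: the vector's own RSS ring plus the block of TX completion rings it owns. A standalone sketch for the same 2-vector/8-TX-ring example (sequential cq_id numbering is an assumption of the illustration):

#include <stdio.h>

/* Build the per-vector cq_id bit masks for the example above:
 * RSS rings have cq_id 0-1, TX completion rings cq_id 2-9. */
int main(void)
{
	int intr_count = 2, rss_ring_count = 2, tx_ring_count = 8;
	int tx_rings_per_vector = tx_ring_count / intr_count;
	int vect, j;

	for (vect = 0; vect < intr_count; vect++) {
		unsigned int mask = 1u << vect;	/* the RSS ring's cq_id */

		for (j = 0; j < tx_rings_per_vector; j++)
			mask |= 1u << (rss_ring_count +
				       vect * tx_rings_per_vector + j);
		printf("vector %d mask = 0x%03x\n", vect, mask);
	}
	return 0;
}
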
3362 
3363 /*
3364  * Here we build the intr_context structures based on
3365  * our rx_ring count and intr vector count.
3366  * The intr_context structure is used to hook each vector
3367  * to possibly different handlers.
3368  */
3369 static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
3370 {
3371  int i = 0;
3372  struct intr_context *intr_context = &qdev->intr_context[0];
3373 
3374  if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3375  /* Each rx_ring has its
3376  * own intr_context since we have separate
3377  * vectors for each queue.
3378  */
3379  for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3380  qdev->rx_ring[i].irq = i;
3381  intr_context->intr = i;
3382  intr_context->qdev = qdev;
3383  /* Set up this vector's bit-mask that indicates
3384  * which queues it services.
3385  */
3386  ql_set_irq_mask(qdev, intr_context);
3387  /*
3388  * We set up each vector's enable/disable/read bits so
3389  * there are no bit/mask calculations in the critical path.
3390  */
3391  intr_context->intr_en_mask =
3392  INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3393  INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
3394  | i;
3395  intr_context->intr_dis_mask =
3396  INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3397  INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
3398  INTR_EN_IHD | i;
3399  intr_context->intr_read_mask =
3400  INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3401  INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
3402  i;
3403  if (i == 0) {
3404  /* The first vector/queue handles
3405  * broadcast/multicast, fatal errors,
3406  * and firmware events, in addition
3407  * to normal inbound NAPI processing.
3408  */
3409  intr_context->handler = qlge_isr;
3410  sprintf(intr_context->name, "%s-rx-%d",
3411  qdev->ndev->name, i);
3412  } else {
3413  /*
3414  * Inbound queues handle unicast frames only.
3415  */
3416  intr_context->handler = qlge_msix_rx_isr;
3417  sprintf(intr_context->name, "%s-rx-%d",
3418  qdev->ndev->name, i);
3419  }
3420  }
3421  } else {
3422  /*
3423  * All rx_rings use the same intr_context since
3424  * there is only one vector.
3425  */
3426  intr_context->intr = 0;
3427  intr_context->qdev = qdev;
3428  /*
3429  * We set up each vector's enable/disable/read bits so
3430  * there are no bit/mask calculations in the critical path.
3431  */
3432  intr_context->intr_en_mask =
3433  INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
3434  intr_context->intr_dis_mask =
3435  INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3436  INTR_EN_TYPE_DISABLE;
3437  intr_context->intr_read_mask =
3438  INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
3439  /*
3440  * Single interrupt means one handler for all rings.
3441  */
3442  intr_context->handler = qlge_isr;
3443  sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
3444  /* Set up this vector's bit-mask that indicates
3445  * which queues it services. In this case there is
3446  * a single vector so it will service all RSS and
3447  * TX completion rings.
3448  */
3449  ql_set_irq_mask(qdev, intr_context);
3450  }
3451  /* Tell the TX completion rings which MSIx vector
3452  * they will be using.
3453  */
3454  ql_set_tx_vect(qdev);
3455 }
3456 
3457 static void ql_free_irq(struct ql_adapter *qdev)
3458 {
3459  int i;
3460  struct intr_context *intr_context = &qdev->intr_context[0];
3461 
3462  for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3463  if (intr_context->hooked) {
3464  if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3465  free_irq(qdev->msi_x_entry[i].vector,
3466  &qdev->rx_ring[i]);
3467  } else {
3468  free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
3469  }
3470  }
3471  }
3472  ql_disable_msix(qdev);
3473 }
3474 
3475 static int ql_request_irq(struct ql_adapter *qdev)
3476 {
3477  int i;
3478  int status = 0;
3479  struct pci_dev *pdev = qdev->pdev;
3480  struct intr_context *intr_context = &qdev->intr_context[0];
3481 
3482  ql_resolve_queues_to_irqs(qdev);
3483 
3484  for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3485  atomic_set(&intr_context->irq_cnt, 0);
3486  if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3487  status = request_irq(qdev->msi_x_entry[i].vector,
3488  intr_context->handler,
3489  0,
3490  intr_context->name,
3491  &qdev->rx_ring[i]);
3492  if (status) {
3493  netif_err(qdev, ifup, qdev->ndev,
3494  "Failed request for MSIX interrupt %d.\n",
3495  i);
3496  goto err_irq;
3497  }
3498  } else {
3499  netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3500  "trying msi or legacy interrupts.\n");
3501  netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3502  "%s: irq = %d.\n", __func__, pdev->irq);
3503  netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3504  "%s: context->name = %s.\n", __func__,
3505  intr_context->name);
3506  netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3507  "%s: dev_id = 0x%p.\n", __func__,
3508  &qdev->rx_ring[0]);
3509  status =
3510  request_irq(pdev->irq, qlge_isr,
3511  test_bit(QL_MSI_ENABLED,
3512  &qdev->
3513  flags) ? 0 : IRQF_SHARED,
3514  intr_context->name, &qdev->rx_ring[0]);
3515  if (status)
3516  goto err_irq;
3517 
3518  netif_err(qdev, ifup, qdev->ndev,
3519  "Hooked intr %d, queue type %s, with name %s.\n",
3520  i,
3521  qdev->rx_ring[0].type == DEFAULT_Q ?
3522  "DEFAULT_Q" :
3523  qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
3524  qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
3525  intr_context->name);
3526  }
3527  intr_context->hooked = 1;
3528  }
3529  return status;
3530 err_irq:
3531  netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
3532  ql_free_irq(qdev);
3533  return status;
3534 }
3535 
3536 static int ql_start_rss(struct ql_adapter *qdev)
3537 {
3538  static const u8 init_hash_seed[] = {
3539  0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3540  0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
3541  0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
3542  0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
3543  0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
3544  };
3545  struct ricb *ricb = &qdev->ricb;
3546  int status = 0;
3547  int i;
3548  u8 *hash_id = (u8 *) ricb->hash_cq_id;
3549 
3550  memset((void *)ricb, 0, sizeof(*ricb));
3551 
3552  ricb->base_cq = RSS_L4K;
3553  ricb->flags =
3554  (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
3555  ricb->mask = cpu_to_le16((u16)(0x3ff));
3556 
3557  /*
3558  * Fill out the Indirection Table.
3559  */
3560  for (i = 0; i < 1024; i++)
3561  hash_id[i] = (i & (qdev->rss_ring_count - 1));
3562 
3563  memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3564  memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
3565 
3566  status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
3567  if (status) {
3568  netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
3569  return status;
3570  }
3571  return status;
3572 }
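
The indirection table above maps each of the 1024 hash buckets to a queue with hash_id[i] = i & (rss_ring_count - 1), which spreads buckets evenly only when the RSS ring count is a power of two. A standalone sketch of that fill (rss_ring_count = 4 is an example value):

#include <stdio.h>

/* Fill a 1024-entry RSS indirection table the same way as above:
 * entry i points at queue (i & (rss_ring_count - 1)). */
int main(void)
{
	unsigned char hash_id[1024];
	int rss_ring_count = 4;		/* example queue count */
	int i, counts[4] = { 0 };

	for (i = 0; i < 1024; i++) {
		hash_id[i] = i & (rss_ring_count - 1);
		counts[hash_id[i]]++;
	}
	for (i = 0; i < rss_ring_count; i++)
		printf("queue %d: %d buckets\n", i, counts[i]);
	return 0;
}
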
3573 
3574 static int ql_clear_routing_entries(struct ql_adapter *qdev)
3575 {
3576  int i, status = 0;
3577 
3578  status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3579  if (status)
3580  return status;
3581  /* Clear all the entries in the routing table. */
3582  for (i = 0; i < 16; i++) {
3583  status = ql_set_routing_reg(qdev, i, 0, 0);
3584  if (status) {
3585  netif_err(qdev, ifup, qdev->ndev,
3586  "Failed to init routing register for CAM packets.\n");
3587  break;
3588  }
3589  }
3590  ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3591  return status;
3592 }
3593 
3594 /* Initialize the frame-to-queue routing. */
3595 static int ql_route_initialize(struct ql_adapter *qdev)
3596 {
3597  int status = 0;
3598 
3599  /* Clear all the entries in the routing table. */
3600  status = ql_clear_routing_entries(qdev);
3601  if (status)
3602  return status;
3603 
3604  status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3605  if (status)
3606  return status;
3607 
3608  status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
3609  RT_IDX_IP_CSUM_ERR, 1);
3610  if (status) {
3611  netif_err(qdev, ifup, qdev->ndev,
3612  "Failed to init routing register "
3613  "for IP CSUM error packets.\n");
3614  goto exit;
3615  }
3616  status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
3617  RT_IDX_TU_CSUM_ERR, 1);
3618  if (status) {
3619  netif_err(qdev, ifup, qdev->ndev,
3620  "Failed to init routing register "
3621  "for TCP/UDP CSUM error packets.\n");
3622  goto exit;
3623  }
3624  status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3625  if (status) {
3626  netif_err(qdev, ifup, qdev->ndev,
3627  "Failed to init routing register for broadcast packets.\n");
3628  goto exit;
3629  }
3630  /* If we have more than one inbound queue, then turn on RSS in the
3631  * routing block.
3632  */
3633  if (qdev->rss_ring_count > 1) {
3634  status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3635  RT_IDX_RSS_MATCH, 1);
3636  if (status) {
3637  netif_err(qdev, ifup, qdev->ndev,
3638  "Failed to init routing register for MATCH RSS packets.\n");
3639  goto exit;
3640  }
3641  }
3642 
3643  status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3644  RT_IDX_CAM_HIT, 1);
3645  if (status)
3646  netif_err(qdev, ifup, qdev->ndev,
3647  "Failed to init routing register for CAM packets.\n");
3648 exit:
3649  ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3650  return status;
3651 }
3652 
3653 int ql_cam_route_initialize(struct ql_adapter *qdev)
3654 {
3655  int status, set;
3656 
3657  /* Check if the link is up and use that to
3658  * determine whether we are setting or clearing
3659  * the MAC address in the CAM.
3660  */
3661  set = ql_read32(qdev, STS);
3662  set &= qdev->port_link_up;
3663  status = ql_set_mac_addr(qdev, set);
3664  if (status) {
3665  netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
3666  return status;
3667  }
3668 
3669  status = ql_route_initialize(qdev);
3670  if (status)
3671  netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
3672 
3673  return status;
3674 }
3675 
3676 static int ql_adapter_initialize(struct ql_adapter *qdev)
3677 {
3678  u32 value, mask;
3679  int i;
3680  int status = 0;
3681 
3682  /*
3683  * Set up the System register to halt on errors.
3684  */
3685  value = SYS_EFE | SYS_FAE;
3686  mask = value << 16;
3687  ql_write32(qdev, SYS, mask | value);
3688 
3689  /* Set the default queue, and VLAN behavior. */
3690  value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
3691  mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
3692  ql_write32(qdev, NIC_RCV_CFG, (mask | value));
3693 
3694  /* Set the MPI interrupt to enabled. */
3695  ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3696 
3697  /* Enable the function, set pagesize, enable error checking. */
3698  value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
3699  FSC_EC | FSC_VM_PAGE_4K;
3700  value |= SPLT_SETTING;
3701 
3702  /* Set/clear header splitting. */
3703  mask = FSC_VM_PAGESIZE_MASK |
3704  FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3705  ql_write32(qdev, FSC, mask | value);
3706 
3707  ql_write32(qdev, SPLT_HDR, SPLT_LEN);
3708 
3709  /* Set RX packet routing to use port/pci function on which the
3710  * packet arrived, in addition to the usual frame routing.
3711  * This is helpful on bonding where both interfaces can have
3712  * the same MAC address.
3713  */
3714  ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
3715  /* Reroute all packets to our Interface.
3716  * They may have been routed to MPI firmware
3717  * due to WOL.
3718  */
3719  value = ql_read32(qdev, MGMT_RCV_CFG);
3720  value &= ~MGMT_RCV_CFG_RM;
3721  mask = 0xffff0000;
3722 
3723  /* Sticky reg needs clearing due to WOL. */
3724  ql_write32(qdev, MGMT_RCV_CFG, mask);
3725  ql_write32(qdev, MGMT_RCV_CFG, mask | value);
3726 
3727  /* WOL is enabled by default on Mezz cards */
3728  if (qdev->pdev->subsystem_device == 0x0068 ||
3729  qdev->pdev->subsystem_device == 0x0180)
3730  qdev->wol = WAKE_MAGIC;
3731 
3732  /* Start up the rx queues. */
3733  for (i = 0; i < qdev->rx_ring_count; i++) {
3734  status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
3735  if (status) {
3736  netif_err(qdev, ifup, qdev->ndev,
3737  "Failed to start rx ring[%d].\n", i);
3738  return status;
3739  }
3740  }
3741 
3742  /* If there is more than one inbound completion queue
3743  * then download a RICB to configure RSS.
3744  */
3745  if (qdev->rss_ring_count > 1) {
3746  status = ql_start_rss(qdev);
3747  if (status) {
3748  netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
3749  return status;
3750  }
3751  }
3752 
3753  /* Start up the tx queues. */
3754  for (i = 0; i < qdev->tx_ring_count; i++) {
3755  status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3756  if (status) {
3757  netif_err(qdev, ifup, qdev->ndev,
3758  "Failed to start tx ring[%d].\n", i);
3759  return status;
3760  }
3761  }
3762 
3763  /* Initialize the port and set the max framesize. */
3764  status = qdev->nic_ops->port_initialize(qdev);
3765  if (status)
3766  netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
3767 
3768  /* Set up the MAC address and frame routing filter. */
3769  status = ql_cam_route_initialize(qdev);
3770  if (status) {
3771  netif_err(qdev, ifup, qdev->ndev,
3772  "Failed to init CAM/Routing tables.\n");
3773  return status;
3774  }
3775 
3776  /* Start NAPI for the RSS queues. */
3777  for (i = 0; i < qdev->rss_ring_count; i++)
3778  napi_enable(&qdev->rx_ring[i].napi);
3779 
3780  return status;
3781 }
3782 
3783 /* Issue soft reset to chip. */
3784 static int ql_adapter_reset(struct ql_adapter *qdev)
3785 {
3786  u32 value;
3787  int status = 0;
3788  unsigned long end_jiffies;
3789 
3790  /* Clear all the entries in the routing table. */
3791  status = ql_clear_routing_entries(qdev);
3792  if (status) {
3793  netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
3794  return status;
3795  }
3796 
3797  end_jiffies = jiffies +
3798  max((unsigned long)1, usecs_to_jiffies(30));
3799 
3800  /* If the recovery bit is set, skip the mailbox command and just
3801  * clear the bit; otherwise we are in the normal reset process.
3802  */
3803  if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
3804  /* Stop management traffic. */
3805  ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
3806 
3807  /* Wait for the NIC and MGMNT FIFOs to empty. */
3808  ql_wait_fifo_empty(qdev);
3809  } else
3810  clear_bit(QL_ASIC_RECOVERY, &qdev->flags);
3811 
3812  ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
3813 
3814  do {
3815  value = ql_read32(qdev, RST_FO);
3816  if ((value & RST_FO_FR) == 0)
3817  break;
3818  cpu_relax();
3819  } while (time_before(jiffies, end_jiffies));
3820 
3821  if (value & RST_FO_FR) {
3822  netif_err(qdev, ifdown, qdev->ndev,
3823  "ETIMEDOUT!!! errored out of resetting the chip!\n");
3824  status = -ETIMEDOUT;
3825  }
3826 
3827  /* Resume management traffic. */
3828  ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
3829  return status;
3830 }
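
The reset above uses a bounded busy-wait: compute an end time in jiffies, poll RST_FO until the FR bit clears, and report -ETIMEDOUT if the deadline passes first. Below is a standalone sketch of the same shape, with clock() as a stand-in for jiffies/time_before(); the function name and timing source are illustrative only.

#include <stdbool.h>
#include <stdint.h>
#include <time.h>

/* Poll *reg until 'bit' clears or the deadline passes. The caller maps
 * a false return to -ETIMEDOUT, as ql_adapter_reset() does above. */
static bool poll_until_clear(volatile uint32_t *reg, uint32_t bit,
			     clock_t timeout)
{
	clock_t end = clock() + timeout;

	do {
		if ((*reg & bit) == 0)
			return true;	/* bit cleared in time */
	} while (clock() < end);

	return false;
}
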
3831 
3832 static void ql_display_dev_info(struct net_device *ndev)
3833 {
3834  struct ql_adapter *qdev = netdev_priv(ndev);
3835 
3836  netif_info(qdev, probe, qdev->ndev,
3837  "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
3838  "XG Roll = %d, XG Rev = %d.\n",
3839  qdev->func,
3840  qdev->port,
3841  qdev->chip_rev_id & 0x0000000f,
3842  qdev->chip_rev_id >> 4 & 0x0000000f,
3843  qdev->chip_rev_id >> 8 & 0x0000000f,
3844  qdev->chip_rev_id >> 12 & 0x0000000f);
3845  netif_info(qdev, probe, qdev->ndev,
3846  "MAC address %pM\n", ndev->dev_addr);
3847 }
3848 
3849 static int ql_wol(struct ql_adapter *qdev)
3850 {
3851  int status = 0;
3852  u32 wol = MB_WOL_DISABLE;
3853 
3854  /* The CAM is still intact after a reset, but if we
3855  * are doing WOL, then we may need to program the
3856  * routing regs. We would also need to issue the mailbox
3857  * commands to instruct the MPI what to do per the ethtool
3858  * settings.
3859  */
3860 
3861  if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3862  WAKE_MCAST | WAKE_BCAST)) {
3863  netif_err(qdev, ifdown, qdev->ndev,
3864  "Unsupported WOL parameter. qdev->wol = 0x%x.\n",
3865  qdev->wol);
3866  return -EINVAL;
3867  }
3868 
3869  if (qdev->wol & WAKE_MAGIC) {
3870  status = ql_mb_wol_set_magic(qdev, 1);
3871  if (status) {
3872  netif_err(qdev, ifdown, qdev->ndev,
3873  "Failed to set magic packet on %s.\n",
3874  qdev->ndev->name);
3875  return status;
3876  } else
3877  netif_info(qdev, drv, qdev->ndev,
3878  "Enabled magic packet successfully on %s.\n",
3879  qdev->ndev->name);
3880 
3881  wol |= MB_WOL_MAGIC_PKT;
3882  }
3883 
3884  if (qdev->wol) {
3885  wol |= MB_WOL_MODE_ON;
3886  status = ql_mb_wol_mode(qdev, wol);
3887  netif_err(qdev, drv, qdev->ndev,
3888  "WOL %s (wol code 0x%x) on %s\n",
3889  (status == 0) ? "Successfully set" : "Failed",
3890  wol, qdev->ndev->name);
3891  }
3892 
3893  return status;
3894 }
3895 
3896 static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
3897 {
3898 
3899  /* Don't kill the reset worker thread if we
3900  * are in the process of recovery.
3901  */
3902  if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3903  cancel_delayed_work_sync(&qdev->asic_reset_work);
3904  cancel_delayed_work_sync(&qdev->mpi_reset_work);
3905  cancel_delayed_work_sync(&qdev->mpi_work);
3906  cancel_delayed_work_sync(&qdev->mpi_idc_work);
3907  cancel_delayed_work_sync(&qdev->mpi_core_to_log);
3908  cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
3909 }
3910 
3911 static int ql_adapter_down(struct ql_adapter *qdev)
3912 {
3913  int i, status = 0;
3914 
3915  ql_link_off(qdev);
3916 
3917  ql_cancel_all_work_sync(qdev);
3918 
3919  for (i = 0; i < qdev->rss_ring_count; i++)
3920  napi_disable(&qdev->rx_ring[i].napi);
3921 
3922  clear_bit(QL_ADAPTER_UP, &qdev->flags);
3923 
3924  ql_disable_interrupts(qdev);
3925 
3926  ql_tx_ring_clean(qdev);
3927 
3928  /* Call netif_napi_del() from common point.
3929  */
3930  for (i = 0; i < qdev->rss_ring_count; i++)
3931  netif_napi_del(&qdev->rx_ring[i].napi);
3932 
3933  status = ql_adapter_reset(qdev);
3934  if (status)
3935  netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
3936  qdev->func);
3937  ql_free_rx_buffers(qdev);
3938 
3939  return status;
3940 }
3941 
3942 static int ql_adapter_up(struct ql_adapter *qdev)
3943 {
3944  int err = 0;
3945 
3946  err = ql_adapter_initialize(qdev);
3947  if (err) {
3948  netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
3949  goto err_init;
3950  }
3951  set_bit(QL_ADAPTER_UP, &qdev->flags);
3952  ql_alloc_rx_buffers(qdev);
3953  /* If the port is initialized and the
3954  * link is up then turn on the carrier.
3955  */
3956  if ((ql_read32(qdev, STS) & qdev->port_init) &&
3957  (ql_read32(qdev, STS) & qdev->port_link_up))
3958  ql_link_on(qdev);
3959  /* Restore rx mode. */
3960  clear_bit(QL_ALLMULTI, &qdev->flags);
3961  clear_bit(QL_PROMISCUOUS, &qdev->flags);
3962  qlge_set_multicast_list(qdev->ndev);
3963 
3964  /* Restore vlan setting. */
3965  qlge_restore_vlan(qdev);
3966 
3967  ql_enable_interrupts(qdev);
3968  ql_enable_all_completion_interrupts(qdev);
3969  netif_tx_start_all_queues(qdev->ndev);
3970 
3971  return 0;
3972 err_init:
3973  ql_adapter_reset(qdev);
3974  return err;
3975 }
3976 
3977 static void ql_release_adapter_resources(struct ql_adapter *qdev)
3978 {
3979  ql_free_mem_resources(qdev);
3980  ql_free_irq(qdev);
3981 }
3982 
3983 static int ql_get_adapter_resources(struct ql_adapter *qdev)
3984 {
3985  int status = 0;
3986 
3987  if (ql_alloc_mem_resources(qdev)) {
3988  netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
3989  return -ENOMEM;
3990  }
3991  status = ql_request_irq(qdev);
3992  return status;
3993 }
3994 
3995 static int qlge_close(struct net_device *ndev)
3996 {
3997  struct ql_adapter *qdev = netdev_priv(ndev);
3998 
3999  /* If we hit the pci_channel_io_perm_failure
4000  * condition, then we have already
4001  * brought the adapter down.
4002  */
4003  if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
4004  netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
4005  clear_bit(QL_EEH_FATAL, &qdev->flags);
4006  return 0;
4007  }
4008 
4009  /*
4010  * Wait for device to recover from a reset.
4011  * (Rarely happens, but possible.)
4012  */
4013  while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
4014  msleep(1);
4015  ql_adapter_down(qdev);
4016  ql_release_adapter_resources(qdev);
4017  return 0;
4018 }
4019 
4020 static int ql_configure_rings(struct ql_adapter *qdev)
4021 {
4022  int i;
4023  struct rx_ring *rx_ring;
4024  struct tx_ring *tx_ring;
4025  int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
4026  unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4027  LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4028 
4029  qdev->lbq_buf_order = get_order(lbq_buf_len);
4030 
4031  /* In a perfect world we have one RSS ring for each CPU
4032  * and each has its own vector. To do that we ask for
4033  * cpu_cnt vectors. ql_enable_msix() will adjust the
4034  * vector count to what we actually get. We then
4035  * allocate an RSS ring for each.
4036  * Essentially, we are doing min(cpu_count, msix_vector_count).
4037  */
4038  qdev->intr_count = cpu_cnt;
4039  ql_enable_msix(qdev);
4040  /* Adjust the RSS ring count to the actual vector count. */
4041  qdev->rss_ring_count = qdev->intr_count;
4042  qdev->tx_ring_count = cpu_cnt;
4043  qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
4044 
4045  for (i = 0; i < qdev->tx_ring_count; i++) {
4046  tx_ring = &qdev->tx_ring[i];
4047  memset((void *)tx_ring, 0, sizeof(*tx_ring));
4048  tx_ring->qdev = qdev;
4049  tx_ring->wq_id = i;
4050  tx_ring->wq_len = qdev->tx_ring_size;
4051  tx_ring->wq_size =
4052  tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
4053 
4054  /*
4055  * The completion queue IDs for the tx rings start
4056  * immediately after the rss rings.
4057  */
4058  tx_ring->cq_id = qdev->rss_ring_count + i;
4059  }
4060 
4061  for (i = 0; i < qdev->rx_ring_count; i++) {
4062  rx_ring = &qdev->rx_ring[i];
4063  memset((void *)rx_ring, 0, sizeof(*rx_ring));
4064  rx_ring->qdev = qdev;
4065  rx_ring->cq_id = i;
4066  rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */
4067  if (i < qdev->rss_ring_count) {
4068  /*
4069  * Inbound (RSS) queues.
4070  */
4071  rx_ring->cq_len = qdev->rx_ring_size;
4072  rx_ring->cq_size =
4073  rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4074  rx_ring->lbq_len = NUM_LARGE_BUFFERS;
4075  rx_ring->lbq_size =
4076  rx_ring->lbq_len * sizeof(__le64);
4077  rx_ring->lbq_buf_size = (u16)lbq_buf_len;
4078  rx_ring->sbq_len = NUM_SMALL_BUFFERS;
4079  rx_ring->sbq_size =
4080  rx_ring->sbq_len * sizeof(__le64);
4081  rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
4082  rx_ring->type = RX_Q;
4083  } else {
4084  /*
4085  * Outbound queue handles outbound completions only.
4086  */
4087  /* outbound cq is same size as tx_ring it services. */
4088  rx_ring->cq_len = qdev->tx_ring_size;
4089  rx_ring->cq_size =
4090  rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4091  rx_ring->lbq_len = 0;
4092  rx_ring->lbq_size = 0;
4093  rx_ring->lbq_buf_size = 0;
4094  rx_ring->sbq_len = 0;
4095  rx_ring->sbq_size = 0;
4096  rx_ring->sbq_buf_size = 0;
4097  rx_ring->type = TX_Q;
4098  }
4099  }
4100  return 0;
4101 }
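
To make the ring arithmetic above explicit: one RSS (inbound) ring is created per MSI-X vector actually granted, one TX ring per CPU, and every TX ring also needs its own outbound completion queue, so the total rx_ring_count is the sum of the two. A small illustrative sketch follows; size_rings() is an invented name and 'vectors' stands for whatever count ql_enable_msix() ends up with.

#include <stdio.h>

static void size_rings(int online_cpus, int vectors)
{
	int rss_rings = vectors;	      /* one inbound (RSS) ring per vector */
	int tx_rings = online_cpus;	      /* one TX ring per CPU */
	int rx_rings = tx_rings + rss_rings;  /* each TX ring gets its own
						 outbound completion queue */

	printf("rss=%d tx=%d total cq=%d\n", rss_rings, tx_rings, rx_rings);
}
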
4102 
4103 static int qlge_open(struct net_device *ndev)
4104 {
4105  int err = 0;
4106  struct ql_adapter *qdev = netdev_priv(ndev);
4107 
4108  err = ql_adapter_reset(qdev);
4109  if (err)
4110  return err;
4111 
4112  err = ql_configure_rings(qdev);
4113  if (err)
4114  return err;
4115 
4116  err = ql_get_adapter_resources(qdev);
4117  if (err)
4118  goto error_up;
4119 
4120  err = ql_adapter_up(qdev);
4121  if (err)
4122  goto error_up;
4123 
4124  return err;
4125 
4126 error_up:
4127  ql_release_adapter_resources(qdev);
4128  return err;
4129 }
4130 
4131 static int ql_change_rx_buffers(struct ql_adapter *qdev)
4132 {
4133  struct rx_ring *rx_ring;
4134  int i, status;
4135  u32 lbq_buf_len;
4136 
4137  /* Wait for an outstanding reset to complete. */
4138  if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4139  int i = 3;
4140  while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4141  netif_err(qdev, ifup, qdev->ndev,
4142  "Waiting for adapter UP...\n");
4143  ssleep(1);
4144  }
4145 
4146  if (!i) {
4147  netif_err(qdev, ifup, qdev->ndev,
4148  "Timed out waiting for adapter UP\n");
4149  return -ETIMEDOUT;
4150  }
4151  }
4152 
4153  status = ql_adapter_down(qdev);
4154  if (status)
4155  goto error;
4156 
4157  /* Get the new rx buffer size. */
4158  lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4159  LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4160  qdev->lbq_buf_order = get_order(lbq_buf_len);
4161 
4162  for (i = 0; i < qdev->rss_ring_count; i++) {
4163  rx_ring = &qdev->rx_ring[i];
4164  /* Set the new size. */
4165  rx_ring->lbq_buf_size = lbq_buf_len;
4166  }
4167 
4168  status = ql_adapter_up(qdev);
4169  if (status)
4170  goto error;
4171 
4172  return status;
4173 error:
4174  netif_alert(qdev, ifup, qdev->ndev,
4175  "Driver up/down cycle failed, closing device.\n");
4176  set_bit(QL_ADAPTER_UP, &qdev->flags);
4177  dev_close(qdev->ndev);
4178  return status;
4179 }
4180 
4181 static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
4182 {
4183  struct ql_adapter *qdev = netdev_priv(ndev);
4184  int status;
4185 
4186  if (ndev->mtu == 1500 && new_mtu == 9000) {
4187  netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
4188  } else if (ndev->mtu == 9000 && new_mtu == 1500) {
4189  netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
4190  } else
4191  return -EINVAL;
4192 
4193  queue_delayed_work(qdev->workqueue,
4194  &qdev->mpi_port_cfg_work, 3*HZ);
4195 
4196  ndev->mtu = new_mtu;
4197 
4198  if (!netif_running(qdev->ndev)) {
4199  return 0;
4200  }
4201 
4202  status = ql_change_rx_buffers(qdev);
4203  if (status) {
4204  netif_err(qdev, ifup, qdev->ndev,
4205  "Changing MTU failed.\n");
4206  }
4207 
4208  return status;
4209 }
4210 
4211 static struct net_device_stats *qlge_get_stats(struct net_device
4212  *ndev)
4213 {
4214  struct ql_adapter *qdev = netdev_priv(ndev);
4215  struct rx_ring *rx_ring = &qdev->rx_ring[0];
4216  struct tx_ring *tx_ring = &qdev->tx_ring[0];
4217  unsigned long pkts, mcast, dropped, errors, bytes;
4218  int i;
4219 
4220  /* Get RX stats. */
4221  pkts = mcast = dropped = errors = bytes = 0;
4222  for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
4223  pkts += rx_ring->rx_packets;
4224  bytes += rx_ring->rx_bytes;
4225  dropped += rx_ring->rx_dropped;
4226  errors += rx_ring->rx_errors;
4227  mcast += rx_ring->rx_multicast;
4228  }
4229  ndev->stats.rx_packets = pkts;
4230  ndev->stats.rx_bytes = bytes;
4231  ndev->stats.rx_dropped = dropped;
4232  ndev->stats.rx_errors = errors;
4233  ndev->stats.multicast = mcast;
4234 
4235  /* Get TX stats. */
4236  pkts = errors = bytes = 0;
4237  for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
4238  pkts += tx_ring->tx_packets;
4239  bytes += tx_ring->tx_bytes;
4240  errors += tx_ring->tx_errors;
4241  }
4242  ndev->stats.tx_packets = pkts;
4243  ndev->stats.tx_bytes = bytes;
4244  ndev->stats.tx_errors = errors;
4245  return &ndev->stats;
4246 }
4247 
4248 static void qlge_set_multicast_list(struct net_device *ndev)
4249 {
4250  struct ql_adapter *qdev = netdev_priv(ndev);
4251  struct netdev_hw_addr *ha;
4252  int i, status;
4253 
4254  status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
4255  if (status)
4256  return;
4257  /*
4258  * Set or clear promiscuous mode if a
4259  * transition is taking place.
4260  */
4261  if (ndev->flags & IFF_PROMISC) {
4262  if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4263  if (ql_set_routing_reg
4264  (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
4265  netif_err(qdev, hw, qdev->ndev,
4266  "Failed to set promiscuous mode.\n");
4267  } else {
4268  set_bit(QL_PROMISCUOUS, &qdev->flags);
4269  }
4270  }
4271  } else {
4272  if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4273  if (ql_set_routing_reg
4274  (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
4275  netif_err(qdev, hw, qdev->ndev,
4276  "Failed to clear promiscuous mode.\n");
4277  } else {
4278  clear_bit(QL_PROMISCUOUS, &qdev->flags);
4279  }
4280  }
4281  }
4282 
4283  /*
4284  * Set or clear all multicast mode if a
4285  * transition is taking place.
4286  */
4287  if ((ndev->flags & IFF_ALLMULTI) ||
4288  (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
4289  if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
4290  if (ql_set_routing_reg
4291  (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
4292  netif_err(qdev, hw, qdev->ndev,
4293  "Failed to set all-multi mode.\n");
4294  } else {
4295  set_bit(QL_ALLMULTI, &qdev->flags);
4296  }
4297  }
4298  } else {
4299  if (test_bit(QL_ALLMULTI, &qdev->flags)) {
4300  if (ql_set_routing_reg
4301  (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
4302  netif_err(qdev, hw, qdev->ndev,
4303  "Failed to clear all-multi mode.\n");
4304  } else {
4305  clear_bit(QL_ALLMULTI, &qdev->flags);
4306  }
4307  }
4308  }
4309 
4310  if (!netdev_mc_empty(ndev)) {
4311  status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4312  if (status)
4313  goto exit;
4314  i = 0;
4315  netdev_for_each_mc_addr(ha, ndev) {
4316  if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
4317  MAC_ADDR_TYPE_MULTI_MAC, i)) {
4318  netif_err(qdev, hw, qdev->ndev,
4319  "Failed to load multicast address.\n");
4320  ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4321  goto exit;
4322  }
4323  i++;
4324  }
4325  ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4326  if (ql_set_routing_reg
4327  (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
4328  netif_err(qdev, hw, qdev->ndev,
4329  "Failed to set multicast match mode.\n");
4330  } else {
4331  set_bit(QL_ALLMULTI, &qdev->flags);
4332  }
4333  }
4334 exit:
4335  ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
4336 }
4337 
4338 static int qlge_set_mac_address(struct net_device *ndev, void *p)
4339 {
4340  struct ql_adapter *qdev = netdev_priv(ndev);
4341  struct sockaddr *addr = p;
4342  int status;
4343 
4344  if (!is_valid_ether_addr(addr->sa_data))
4345  return -EADDRNOTAVAIL;
4346  memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
4347  /* Update local copy of current mac address. */
4348  memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4349 
4350  status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4351  if (status)
4352  return status;
4353  status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
4354  MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
4355  if (status)
4356  netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
4357  ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4358  return status;
4359 }
4360 
4361 static void qlge_tx_timeout(struct net_device *ndev)
4362 {
4363  struct ql_adapter *qdev = netdev_priv(ndev);
4364  ql_queue_asic_error(qdev);
4365 }
4366 
4367 static void ql_asic_reset_work(struct work_struct *work)
4368 {
4369  struct ql_adapter *qdev =
4370  container_of(work, struct ql_adapter, asic_reset_work.work);
4371  int status;
4372  rtnl_lock();
4373  status = ql_adapter_down(qdev);
4374  if (status)
4375  goto error;
4376 
4377  status = ql_adapter_up(qdev);
4378  if (status)
4379  goto error;
4380 
4381  /* Restore rx mode. */
4382  clear_bit(QL_ALLMULTI, &qdev->flags);
4383  clear_bit(QL_PROMISCUOUS, &qdev->flags);
4384  qlge_set_multicast_list(qdev->ndev);
4385 
4386  rtnl_unlock();
4387  return;
4388 error:
4389  netif_alert(qdev, ifup, qdev->ndev,
4390  "Driver up/down cycle failed, closing device\n");
4391 
4392  set_bit(QL_ADAPTER_UP, &qdev->flags);
4393  dev_close(qdev->ndev);
4394  rtnl_unlock();
4395 }
4396 
4397 static const struct nic_operations qla8012_nic_ops = {
4398  .get_flash = ql_get_8012_flash_params,
4399  .port_initialize = ql_8012_port_initialize,
4400 };
4401 
4402 static const struct nic_operations qla8000_nic_ops = {
4403  .get_flash = ql_get_8000_flash_params,
4404  .port_initialize = ql_8000_port_initialize,
4405 };
4406 
4407 /* Find the pcie function number for the other NIC
4408  * on this chip. Since both NIC functions share a
4409  * common firmware we have the lowest enabled function
4410  * do any common work. Examples would be resetting
4411  * after a fatal firmware error, or doing a firmware
4412  * coredump.
4413  */
4414 static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
4415 {
4416  int status = 0;
4417  u32 temp;
4418  u32 nic_func1, nic_func2;
4419 
4420  status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
4421  &temp);
4422  if (status)
4423  return status;
4424 
4425  nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
4426  MPI_TEST_NIC_FUNC_MASK);
4427  nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
4428  MPI_TEST_NIC_FUNC_MASK);
4429 
4430  if (qdev->func == nic_func1)
4431  qdev->alt_func = nic_func2;
4432  else if (qdev->func == nic_func2)
4433  qdev->alt_func = nic_func1;
4434  else
4435  status = -EIO;
4436 
4437  return status;
4438 }
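
The pairing logic above can be summarized as: the MPI_TEST_FUNC_PORT_CFG register carries both NIC function numbers, and whichever of the two is not our own function is the alternate. Below is a hedged sketch of just that selection; pick_alt_func() and its shift/mask parameters are invented stand-ins for the MPI_TEST_* constants used by the driver.

#include <stdint.h>

static int pick_alt_func(uint32_t cfg, uint32_t my_func,
			 unsigned int shift1, unsigned int shift2,
			 uint32_t mask, uint32_t *alt_func)
{
	uint32_t f1 = (cfg >> shift1) & mask;
	uint32_t f2 = (cfg >> shift2) & mask;

	if (my_func == f1)
		*alt_func = f2;
	else if (my_func == f2)
		*alt_func = f1;
	else
		return -1;	/* neither matches: the driver returns -EIO */
	return 0;
}
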
4439 
4440 static int ql_get_board_info(struct ql_adapter *qdev)
4441 {
4442  int status;
4443  qdev->func =
4444  (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
4445  if (qdev->func > 3)
4446  return -EIO;
4447 
4448  status = ql_get_alt_pcie_func(qdev);
4449  if (status)
4450  return status;
4451 
4452  qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
4453  if (qdev->port) {
4454  qdev->xg_sem_mask = SEM_XGMAC1_MASK;
4455  qdev->port_link_up = STS_PL1;
4456  qdev->port_init = STS_PI1;
4457  qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
4458  qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
4459  } else {
4460  qdev->xg_sem_mask = SEM_XGMAC0_MASK;
4461  qdev->port_link_up = STS_PL0;
4462  qdev->port_init = STS_PI0;
4463  qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
4464  qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
4465  }
4466  qdev->chip_rev_id = ql_read32(qdev, REV_ID);
4467  qdev->device_id = qdev->pdev->device;
4468  if (qdev->device_id == QLGE_DEVICE_ID_8012)
4469  qdev->nic_ops = &qla8012_nic_ops;
4470  else if (qdev->device_id == QLGE_DEVICE_ID_8000)
4471  qdev->nic_ops = &qla8000_nic_ops;
4472  return status;
4473 }
4474 
4475 static void ql_release_all(struct pci_dev *pdev)
4476 {
4477  struct net_device *ndev = pci_get_drvdata(pdev);
4478  struct ql_adapter *qdev = netdev_priv(ndev);
4479 
4480  if (qdev->workqueue) {
4481  destroy_workqueue(qdev->workqueue);
4482  qdev->workqueue = NULL;
4483  }
4484 
4485  if (qdev->reg_base)
4486  iounmap(qdev->reg_base);
4487  if (qdev->doorbell_area)
4488  iounmap(qdev->doorbell_area);
4489  vfree(qdev->mpi_coredump);
4490  pci_release_regions(pdev);
4491  pci_set_drvdata(pdev, NULL);
4492 }
4493 
4494 static int __devinit ql_init_device(struct pci_dev *pdev,
4495  struct net_device *ndev, int cards_found)
4496 {
4497  struct ql_adapter *qdev = netdev_priv(ndev);
4498  int err = 0;
4499 
4500  memset((void *)qdev, 0, sizeof(*qdev));
4501  err = pci_enable_device(pdev);
4502  if (err) {
4503  dev_err(&pdev->dev, "PCI device enable failed.\n");
4504  return err;
4505  }
4506 
4507  qdev->ndev = ndev;
4508  qdev->pdev = pdev;
4509  pci_set_drvdata(pdev, ndev);
4510 
4511  /* Set PCIe read request size */
4512  err = pcie_set_readrq(pdev, 4096);
4513  if (err) {
4514  dev_err(&pdev->dev, "Set readrq failed.\n");
4515  goto err_out1;
4516  }
4517 
4518  err = pci_request_regions(pdev, DRV_NAME);
4519  if (err) {
4520  dev_err(&pdev->dev, "PCI region request failed.\n");
4521  return err;
4522  }
4523 
4524  pci_set_master(pdev);
4525  if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
4526  set_bit(QL_DMA64, &qdev->flags);
4527  err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
4528  } else {
4529  err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
4530  if (!err)
4531  err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
4532  }
4533 
4534  if (err) {
4535  dev_err(&pdev->dev, "No usable DMA configuration.\n");
4536  goto err_out2;
4537  }
4538 
4539  /* Set PCIe reset type for EEH to fundamental. */
4540  pdev->needs_freset = 1;
4541  pci_save_state(pdev);
4542  qdev->reg_base =
4543  ioremap_nocache(pci_resource_start(pdev, 1),
4544  pci_resource_len(pdev, 1));
4545  if (!qdev->reg_base) {
4546  dev_err(&pdev->dev, "Register mapping failed.\n");
4547  err = -ENOMEM;
4548  goto err_out2;
4549  }
4550 
4551  qdev->doorbell_area_size = pci_resource_len(pdev, 3);
4552  qdev->doorbell_area =
4553  ioremap_nocache(pci_resource_start(pdev, 3),
4554  pci_resource_len(pdev, 3));
4555  if (!qdev->doorbell_area) {
4556  dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
4557  err = -ENOMEM;
4558  goto err_out2;
4559  }
4560 
4561  err = ql_get_board_info(qdev);
4562  if (err) {
4563  dev_err(&pdev->dev, "Register access failed.\n");
4564  err = -EIO;
4565  goto err_out2;
4566  }
4567  qdev->msg_enable = netif_msg_init(debug, default_msg);
4568  spin_lock_init(&qdev->hw_lock);
4569  spin_lock_init(&qdev->stats_lock);
4570 
4571  if (qlge_mpi_coredump) {
4572  qdev->mpi_coredump =
4573  vmalloc(sizeof(struct ql_mpi_coredump));
4574  if (qdev->mpi_coredump == NULL) {
4575  dev_err(&pdev->dev, "Coredump alloc failed.\n");
4576  err = -ENOMEM;
4577  goto err_out2;
4578  }
4579  if (qlge_force_coredump)
4580  set_bit(QL_FRC_COREDUMP, &qdev->flags);
4581  }
4582  /* make sure the EEPROM is good */
4583  err = qdev->nic_ops->get_flash(qdev);
4584  if (err) {
4585  dev_err(&pdev->dev, "Invalid FLASH.\n");
4586  goto err_out2;
4587  }
4588 
4589  memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
4590  /* Keep local copy of current mac address. */
4591  memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4592 
4593  /* Set up the default ring sizes. */
4594  qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
4595  qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
4596 
4597  /* Set up the coalescing parameters. */
4598  qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
4599  qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
4600  qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4601  qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4602 
4603  /*
4604  * Set up the operating parameters.
4605  */
4606  qdev->workqueue = create_singlethread_workqueue(ndev->name);
4607  INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
4608  INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
4609  INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
4610  INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
4611  INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
4612  INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
4613  init_completion(&qdev->ide_completion);
4614  mutex_init(&qdev->mpi_mutex);
4615 
4616  if (!cards_found) {
4617  dev_info(&pdev->dev, "%s\n", DRV_STRING);
4618  dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
4619  DRV_NAME, DRV_VERSION);
4620  }
4621  return 0;
4622 err_out2:
4623  ql_release_all(pdev);
4624 err_out1:
4625  pci_disable_device(pdev);
4626  return err;
4627 }
4628 
4629 static const struct net_device_ops qlge_netdev_ops = {
4630  .ndo_open = qlge_open,
4631  .ndo_stop = qlge_close,
4632  .ndo_start_xmit = qlge_send,
4633  .ndo_change_mtu = qlge_change_mtu,
4634  .ndo_get_stats = qlge_get_stats,
4635  .ndo_set_rx_mode = qlge_set_multicast_list,
4636  .ndo_set_mac_address = qlge_set_mac_address,
4637  .ndo_validate_addr = eth_validate_addr,
4638  .ndo_tx_timeout = qlge_tx_timeout,
4639  .ndo_fix_features = qlge_fix_features,
4640  .ndo_set_features = qlge_set_features,
4641  .ndo_vlan_rx_add_vid = qlge_vlan_rx_add_vid,
4642  .ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid,
4643 };
4644 
4645 static void ql_timer(unsigned long data)
4646 {
4647  struct ql_adapter *qdev = (struct ql_adapter *)data;
4648  u32 var = 0;
4649 
4650  var = ql_read32(qdev, STS);
4651  if (pci_channel_offline(qdev->pdev)) {
4652  netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
4653  return;
4654  }
4655 
4656  mod_timer(&qdev->timer, jiffies + (5*HZ));
4657 }
4658 
4659 static int __devinit qlge_probe(struct pci_dev *pdev,
4660  const struct pci_device_id *pci_entry)
4661 {
4662  struct net_device *ndev = NULL;
4663  struct ql_adapter *qdev = NULL;
4664  static int cards_found = 0;
4665  int err = 0;
4666 
4667  ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
4668  min(MAX_CPUS, netif_get_num_default_rss_queues()));
4669  if (!ndev)
4670  return -ENOMEM;
4671 
4672  err = ql_init_device(pdev, ndev, cards_found);
4673  if (err < 0) {
4674  free_netdev(ndev);
4675  return err;
4676  }
4677 
4678  qdev = netdev_priv(ndev);
4679  SET_NETDEV_DEV(ndev, &pdev->dev);
4680  ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
4681  NETIF_F_TSO | NETIF_F_TSO_ECN |
4682  NETIF_F_HW_VLAN_TX | NETIF_F_RXCSUM;
4683  ndev->features = ndev->hw_features |
4684  NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
4685  ndev->vlan_features = ndev->hw_features;
4686 
4687  if (test_bit(QL_DMA64, &qdev->flags))
4688  ndev->features |= NETIF_F_HIGHDMA;
4689 
4690  /*
4691  * Set up net_device structure.
4692  */
4693  ndev->tx_queue_len = qdev->tx_ring_size;
4694  ndev->irq = pdev->irq;
4695 
4696  ndev->netdev_ops = &qlge_netdev_ops;
4697  SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
4698  ndev->watchdog_timeo = 10 * HZ;
4699 
4700  err = register_netdev(ndev);
4701  if (err) {
4702  dev_err(&pdev->dev, "net device registration failed.\n");
4703  ql_release_all(pdev);
4704  pci_disable_device(pdev);
4705  return err;
4706  }
4707  /* Start up the timer to trigger EEH if
4708  * the bus goes dead
4709  */
4710  init_timer_deferrable(&qdev->timer);
4711  qdev->timer.data = (unsigned long)qdev;
4712  qdev->timer.function = ql_timer;
4713  qdev->timer.expires = jiffies + (5*HZ);
4714  add_timer(&qdev->timer);
4715  ql_link_off(qdev);
4716  ql_display_dev_info(ndev);
4717  atomic_set(&qdev->lb_count, 0);
4718  cards_found++;
4719  return 0;
4720 }
4721 
4722 netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
4723 {
4724  return qlge_send(skb, ndev);
4725 }
4726 
4727 int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
4728 {
4729  return ql_clean_inbound_rx_ring(rx_ring, budget);
4730 }
4731 
4732 static void __devexit qlge_remove(struct pci_dev *pdev)
4733 {
4734  struct net_device *ndev = pci_get_drvdata(pdev);
4735  struct ql_adapter *qdev = netdev_priv(ndev);
4736  del_timer_sync(&qdev->timer);
4737  ql_cancel_all_work_sync(qdev);
4738  unregister_netdev(ndev);
4739  ql_release_all(pdev);
4740  pci_disable_device(pdev);
4741  free_netdev(ndev);
4742 }
4743 
4744 /* Clean up resources without touching hardware. */
4745 static void ql_eeh_close(struct net_device *ndev)
4746 {
4747  int i;
4748  struct ql_adapter *qdev = netdev_priv(ndev);
4749 
4750  if (netif_carrier_ok(ndev)) {
4751  netif_carrier_off(ndev);
4752  netif_stop_queue(ndev);
4753  }
4754 
4755  /* Disabling the timer */
4756  del_timer_sync(&qdev->timer);
4757  ql_cancel_all_work_sync(qdev);
4758 
4759  for (i = 0; i < qdev->rss_ring_count; i++)
4760  netif_napi_del(&qdev->rx_ring[i].napi);
4761 
4762  clear_bit(QL_ADAPTER_UP, &qdev->flags);
4763  ql_tx_ring_clean(qdev);
4764  ql_free_rx_buffers(qdev);
4765  ql_release_adapter_resources(qdev);
4766 }
4767 
4768 /*
4769  * This callback is called by the PCI subsystem whenever
4770  * a PCI bus error is detected.
4771  */
4772 static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
4773  enum pci_channel_state state)
4774 {
4775  struct net_device *ndev = pci_get_drvdata(pdev);
4776  struct ql_adapter *qdev = netdev_priv(ndev);
4777 
4778  switch (state) {
4779  case pci_channel_io_normal:
4780  return PCI_ERS_RESULT_CAN_RECOVER;
4781  case pci_channel_io_frozen:
4782  netif_device_detach(ndev);
4783  if (netif_running(ndev))
4784  ql_eeh_close(ndev);
4785  pci_disable_device(pdev);
4786  return PCI_ERS_RESULT_NEED_RESET;
4787  case pci_channel_io_perm_failure:
4788  dev_err(&pdev->dev,
4789  "%s: pci_channel_io_perm_failure.\n", __func__);
4790  ql_eeh_close(ndev);
4791  set_bit(QL_EEH_FATAL, &qdev->flags);
4792  return PCI_ERS_RESULT_DISCONNECT;
4793  }
4794 
4795  /* Request a slot reset. */
4796  return PCI_ERS_RESULT_NEED_RESET;
4797 }
4798 
4799 /*
4800  * This callback is called after the PCI bus has been reset.
4801  * Basically, this tries to restart the card from scratch.
4802  * This is a shortened version of the device probe/discovery code,
4803  * it resembles the first half of the qlge_probe() routine.
4804  */
4805 static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
4806 {
4807  struct net_device *ndev = pci_get_drvdata(pdev);
4808  struct ql_adapter *qdev = netdev_priv(ndev);
4809 
4810  pdev->error_state = pci_channel_io_normal;
4811 
4812  pci_restore_state(pdev);
4813  if (pci_enable_device(pdev)) {
4814  netif_err(qdev, ifup, qdev->ndev,
4815  "Cannot re-enable PCI device after reset.\n");
4816  return PCI_ERS_RESULT_DISCONNECT;
4817  }
4818  pci_set_master(pdev);
4819 
4820  if (ql_adapter_reset(qdev)) {
4821  netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
4822  set_bit(QL_EEH_FATAL, &qdev->flags);
4823  return PCI_ERS_RESULT_DISCONNECT;
4824  }
4825 
4826  return PCI_ERS_RESULT_RECOVERED;
4827 }
4828 
4829 static void qlge_io_resume(struct pci_dev *pdev)
4830 {
4831  struct net_device *ndev = pci_get_drvdata(pdev);
4832  struct ql_adapter *qdev = netdev_priv(ndev);
4833  int err = 0;
4834 
4835  if (netif_running(ndev)) {
4836  err = qlge_open(ndev);
4837  if (err) {
4838  netif_err(qdev, ifup, qdev->ndev,
4839  "Device initialization failed after reset.\n");
4840  return;
4841  }
4842  } else {
4843  netif_err(qdev, ifup, qdev->ndev,
4844  "Device was not running prior to EEH.\n");
4845  }
4846  mod_timer(&qdev->timer, jiffies + (5*HZ));
4847  netif_device_attach(ndev);
4848 }
4849 
4850 static const struct pci_error_handlers qlge_err_handler = {
4851  .error_detected = qlge_io_error_detected,
4852  .slot_reset = qlge_io_slot_reset,
4853  .resume = qlge_io_resume,
4854 };
4855 
4856 static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
4857 {
4858  struct net_device *ndev = pci_get_drvdata(pdev);
4859  struct ql_adapter *qdev = netdev_priv(ndev);
4860  int err;
4861 
4862  netif_device_detach(ndev);
4863  del_timer_sync(&qdev->timer);
4864 
4865  if (netif_running(ndev)) {
4866  err = ql_adapter_down(qdev);
4867  if (err)
4868  return err;
4869  }
4870 
4871  ql_wol(qdev);
4872  err = pci_save_state(pdev);
4873  if (err)
4874  return err;
4875 
4876  pci_disable_device(pdev);
4877 
4878  pci_set_power_state(pdev, pci_choose_state(pdev, state));
4879 
4880  return 0;
4881 }
4882 
4883 #ifdef CONFIG_PM
4884 static int qlge_resume(struct pci_dev *pdev)
4885 {
4886  struct net_device *ndev = pci_get_drvdata(pdev);
4887  struct ql_adapter *qdev = netdev_priv(ndev);
4888  int err;
4889 
4890  pci_set_power_state(pdev, PCI_D0);
4891  pci_restore_state(pdev);
4892  err = pci_enable_device(pdev);
4893  if (err) {
4894  netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n");
4895  return err;
4896  }
4897  pci_set_master(pdev);
4898 
4899  pci_enable_wake(pdev, PCI_D3hot, 0);
4900  pci_enable_wake(pdev, PCI_D3cold, 0);
4901 
4902  if (netif_running(ndev)) {
4903  err = ql_adapter_up(qdev);
4904  if (err)
4905  return err;
4906  }
4907 
4908  mod_timer(&qdev->timer, jiffies + (5*HZ));
4909  netif_device_attach(ndev);
4910 
4911  return 0;
4912 }
4913 #endif /* CONFIG_PM */
4914 
4915 static void qlge_shutdown(struct pci_dev *pdev)
4916 {
4917  qlge_suspend(pdev, PMSG_SUSPEND);
4918 }
4919 
4920 static struct pci_driver qlge_driver = {
4921  .name = DRV_NAME,
4922  .id_table = qlge_pci_tbl,
4923  .probe = qlge_probe,
4924  .remove = __devexit_p(qlge_remove),
4925 #ifdef CONFIG_PM
4926  .suspend = qlge_suspend,
4927  .resume = qlge_resume,
4928 #endif
4929  .shutdown = qlge_shutdown,
4930  .err_handler = &qlge_err_handler
4931 };
4932 
4933 static int __init qlge_init_module(void)
4934 {
4935  return pci_register_driver(&qlge_driver);
4936 }
4937 
4938 static void __exit qlge_exit(void)
4939 {
4940  pci_unregister_driver(&qlge_driver);
4941 }
4942 
4943 module_init(qlge_init_module);
4944 module_exit(qlge_exit);