Linux Kernel  3.7.1
bnad.c
1 /*
2  * Linux network driver for Brocade Converged Network Adapter.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms of the GNU General Public License (GPL) Version 2 as
6  * published by the Free Software Foundation
7  *
8  * This program is distributed in the hope that it will be useful, but
9  * WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11  * General Public License for more details.
12  */
13 /*
14  * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15  * All rights reserved
16  * www.brocade.com
17  */
18 #include <linux/bitops.h>
19 #include <linux/netdevice.h>
20 #include <linux/skbuff.h>
21 #include <linux/etherdevice.h>
22 #include <linux/in.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_vlan.h>
25 #include <linux/if_ether.h>
26 #include <linux/ip.h>
27 #include <linux/prefetch.h>
28 #include <linux/module.h>
29 
30 #include "bnad.h"
31 #include "bna.h"
32 #include "cna.h"
33 
34 static DEFINE_MUTEX(bnad_fwimg_mutex);
35 
36 /*
37  * Module params
38  */
39 static uint bnad_msix_disable;
40 module_param(bnad_msix_disable, uint, 0444);
41 MODULE_PARM_DESC(bnad_msix_disable, "Disable MSIX mode");
42 
43 static uint bnad_ioc_auto_recover = 1;
44 module_param(bnad_ioc_auto_recover, uint, 0444);
45 MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");
46 
47 static uint bna_debugfs_enable = 1;
48 module_param(bna_debugfs_enable, uint, S_IRUGO | S_IWUSR);
49 MODULE_PARM_DESC(bna_debugfs_enable, "Enables debugfs feature, default=1,"
50  " Range[false:0|true:1]");
51 
52 /*
53  * Global variables
54  */
55 u32 bnad_rxqs_per_cq = 2;
56 static u32 bna_id;
57 static struct mutex bnad_list_mutex;
58 static LIST_HEAD(bnad_list);
59 static const u8 bnad_bcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
60 
61 /*
62  * Local MACROS
63  */
64 #define BNAD_TX_UNMAPQ_DEPTH (bnad->txq_depth * 2)
65 
66 #define BNAD_RX_UNMAPQ_DEPTH (bnad->rxq_depth)
67 
68 #define BNAD_GET_MBOX_IRQ(_bnad) \
69  (((_bnad)->cfg_flags & BNAD_CF_MSIX) ? \
70  ((_bnad)->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector) : \
71  ((_bnad)->pcidev->irq))
72 
73 #define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _depth) \
74 do { \
75  (_res_info)->res_type = BNA_RES_T_MEM; \
76  (_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA; \
77  (_res_info)->res_u.mem_info.num = (_num); \
78  (_res_info)->res_u.mem_info.len = \
79  sizeof(struct bnad_unmap_q) + \
80  (sizeof(struct bnad_skb_unmap) * ((_depth) - 1)); \
81 } while (0)
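/*
 * Usage sketch (illustrative only, not part of the upstream file): the macro
 * above is invoked later in this file for the Tx and Rx unmap queues, e.g.
 *
 *	BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_TX_RES_MEM_T_UNMAPQ],
 *				 bnad->num_txq_per_tx, BNAD_TX_UNMAPQ_DEPTH);
 *
 * Each of the (_num) kernel-VA blocks it requests is sized for one
 * struct bnad_unmap_q plus (_depth - 1) additional struct bnad_skb_unmap
 * slots, i.e. enough bookkeeping to track one skb per queue entry
 * (assuming the usual [1]-terminated trailing-array layout of
 * struct bnad_unmap_q in bnad.h).
 */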
82 
83 static void
84 bnad_add_to_list(struct bnad *bnad)
85 {
86  mutex_lock(&bnad_list_mutex);
87  list_add_tail(&bnad->list_entry, &bnad_list);
88  bnad->id = bna_id++;
89  mutex_unlock(&bnad_list_mutex);
90 }
91 
92 static void
93 bnad_remove_from_list(struct bnad *bnad)
94 {
95  mutex_lock(&bnad_list_mutex);
96  list_del(&bnad->list_entry);
97  mutex_unlock(&bnad_list_mutex);
98 }
99 
100 /*
101  * Reinitialize completions in CQ, once Rx is taken down
102  */
103 static void
104 bnad_cq_cleanup(struct bnad *bnad, struct bna_ccb *ccb)
105 {
106  struct bna_cq_entry *cmpl, *next_cmpl;
107  unsigned int wi_range, wis = 0, ccb_prod = 0;
108  int i;
109 
110  BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt, cmpl,
111  wi_range);
112 
113  for (i = 0; i < ccb->q_depth; i++) {
114  wis++;
115  if (likely(--wi_range))
116  next_cmpl = cmpl + 1;
117  else {
118  BNA_QE_INDX_ADD(ccb_prod, wis, ccb->q_depth);
119  wis = 0;
120  BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt,
121  next_cmpl, wi_range);
122  }
123  cmpl->valid = 0;
124  cmpl = next_cmpl;
125  }
126 }
127 
128 static u32
129 bnad_pci_unmap_skb(struct device *pdev, struct bnad_skb_unmap *array,
130  u32 index, u32 depth, struct sk_buff *skb, u32 frag)
131 {
132  int j;
133  array[index].skb = NULL;
134 
135  dma_unmap_single(pdev, dma_unmap_addr(&array[index], dma_addr),
136  skb_headlen(skb), DMA_TO_DEVICE);
137  dma_unmap_addr_set(&array[index], dma_addr, 0);
138  BNA_QE_INDX_ADD(index, 1, depth);
139 
140  for (j = 0; j < frag; j++) {
141  dma_unmap_page(pdev, dma_unmap_addr(&array[index], dma_addr),
142  skb_frag_size(&skb_shinfo(skb)->frags[j]),
143  DMA_TO_DEVICE);
144  dma_unmap_addr_set(&array[index], dma_addr, 0);
145  BNA_QE_INDX_ADD(index, 1, depth);
146  }
147 
148  return index;
149 }
150 
151 /*
152  * Frees all pending Tx Bufs
153  * At this point no activity is expected on the Q,
154  * so DMA unmap & freeing is fine.
155  */
156 static void
157 bnad_txq_cleanup(struct bnad *bnad,
158  struct bna_tcb *tcb)
159 {
160  u32 unmap_cons;
161  struct bnad_unmap_q *unmap_q = tcb->unmap_q;
162  struct bnad_skb_unmap *unmap_array;
163  struct sk_buff *skb = NULL;
164  int q;
165 
166  unmap_array = unmap_q->unmap_array;
167 
168  for (q = 0; q < unmap_q->q_depth; q++) {
169  skb = unmap_array[q].skb;
170  if (!skb)
171  continue;
172 
173  unmap_cons = q;
174  unmap_cons = bnad_pci_unmap_skb(&bnad->pcidev->dev, unmap_array,
175  unmap_cons, unmap_q->q_depth, skb,
176  skb_shinfo(skb)->nr_frags);
177 
178  dev_kfree_skb_any(skb);
179  }
180 }
181 
182 /* Data Path Handlers */
183 
184 /*
185  * bnad_txcmpl_process : Frees the Tx bufs on Tx completion
186  * Can be called in a) Interrupt context
187  * b) Sending context
188  */
189 static u32
190 bnad_txcmpl_process(struct bnad *bnad,
191  struct bna_tcb *tcb)
192 {
193  u32 unmap_cons, sent_packets = 0, sent_bytes = 0;
194  u16 wis, updated_hw_cons;
195  struct bnad_unmap_q *unmap_q = tcb->unmap_q;
196  struct bnad_skb_unmap *unmap_array;
197  struct sk_buff *skb;
198 
199  /* Just return if TX is stopped */
200  if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
201  return 0;
202 
203  updated_hw_cons = *(tcb->hw_consumer_index);
204 
205  wis = BNA_Q_INDEX_CHANGE(tcb->consumer_index,
206  updated_hw_cons, tcb->q_depth);
207 
208  BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth)));
209 
210  unmap_array = unmap_q->unmap_array;
211  unmap_cons = unmap_q->consumer_index;
212 
213  prefetch(&unmap_array[unmap_cons + 1]);
214  while (wis) {
215  skb = unmap_array[unmap_cons].skb;
216 
217  sent_packets++;
218  sent_bytes += skb->len;
219  wis -= BNA_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags);
220 
221  unmap_cons = bnad_pci_unmap_skb(&bnad->pcidev->dev, unmap_array,
222  unmap_cons, unmap_q->q_depth, skb,
223  skb_shinfo(skb)->nr_frags);
224 
225  dev_kfree_skb_any(skb);
226  }
227 
228  /* Update consumer pointers. */
229  tcb->consumer_index = updated_hw_cons;
230  unmap_q->consumer_index = unmap_cons;
231 
232  tcb->txq->tx_packets += sent_packets;
233  tcb->txq->tx_bytes += sent_bytes;
234 
235  return sent_packets;
236 }
237 
238 static u32
239 bnad_tx_complete(struct bnad *bnad, struct bna_tcb *tcb)
240 {
241  struct net_device *netdev = bnad->netdev;
242  u32 sent = 0;
243 
244  if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
245  return 0;
246 
247  sent = bnad_txcmpl_process(bnad, tcb);
248  if (sent) {
249  if (netif_queue_stopped(netdev) &&
250  netif_carrier_ok(netdev) &&
251  BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
252  BNAD_NETIF_WAKE_THRESHOLD) {
253  if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
254  netif_wake_queue(netdev);
255  BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
256  }
257  }
258  }
259 
260  if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
261  bna_ib_ack(tcb->i_dbell, sent);
262 
263  smp_mb__before_clear_bit();
264  clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
265 
266  return sent;
267 }
268 
269 /* MSIX Tx Completion Handler */
270 static irqreturn_t
271 bnad_msix_tx(int irq, void *data)
272 {
273  struct bna_tcb *tcb = (struct bna_tcb *)data;
274  struct bnad *bnad = tcb->bnad;
275 
276  bnad_tx_complete(bnad, tcb);
277 
278  return IRQ_HANDLED;
279 }
280 
281 static void
282 bnad_rcb_cleanup(struct bnad *bnad, struct bna_rcb *rcb)
283 {
284  struct bnad_unmap_q *unmap_q = rcb->unmap_q;
285 
286  rcb->producer_index = 0;
287  rcb->consumer_index = 0;
288 
289  unmap_q->producer_index = 0;
290  unmap_q->consumer_index = 0;
291 }
292 
293 static void
294 bnad_rxq_cleanup(struct bnad *bnad, struct bna_rcb *rcb)
295 {
296  struct bnad_unmap_q *unmap_q;
297  struct bnad_skb_unmap *unmap_array;
298  struct sk_buff *skb;
299  int unmap_cons;
300 
301  unmap_q = rcb->unmap_q;
302  unmap_array = unmap_q->unmap_array;
303  for (unmap_cons = 0; unmap_cons < unmap_q->q_depth; unmap_cons++) {
304  skb = unmap_array[unmap_cons].skb;
305  if (!skb)
306  continue;
307  unmap_array[unmap_cons].skb = NULL;
308  dma_unmap_single(&bnad->pcidev->dev,
309  dma_unmap_addr(&unmap_array[unmap_cons],
310  dma_addr),
311  rcb->rxq->buffer_size,
312  DMA_FROM_DEVICE);
313  dev_kfree_skb(skb);
314  }
315  bnad_rcb_cleanup(bnad, rcb);
316 }
317 
318 static void
319 bnad_rxq_post(struct bnad *bnad, struct bna_rcb *rcb)
320 {
321  u16 to_alloc, alloced, unmap_prod, wi_range;
322  struct bnad_unmap_q *unmap_q = rcb->unmap_q;
323  struct bnad_skb_unmap *unmap_array;
324  struct bna_rxq_entry *rxent;
325  struct sk_buff *skb;
326  dma_addr_t dma_addr;
327 
328  alloced = 0;
329  to_alloc =
330  BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth);
331 
332  unmap_array = unmap_q->unmap_array;
333  unmap_prod = unmap_q->producer_index;
334 
335  BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent, wi_range);
336 
337  while (to_alloc--) {
338  if (!wi_range)
339  BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent,
340  wi_range);
341  skb = netdev_alloc_skb_ip_align(bnad->netdev,
342  rcb->rxq->buffer_size);
343  if (unlikely(!skb)) {
344  BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
345  rcb->rxq->rxbuf_alloc_failed++;
346  goto finishing;
347  }
348  unmap_array[unmap_prod].skb = skb;
349  dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
350  rcb->rxq->buffer_size,
351  DMA_FROM_DEVICE);
352  dma_unmap_addr_set(&unmap_array[unmap_prod], dma_addr,
353  dma_addr);
354  BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
355  BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
356 
357  rxent++;
358  wi_range--;
359  alloced++;
360  }
361 
362 finishing:
363  if (likely(alloced)) {
364  unmap_q->producer_index = unmap_prod;
365  rcb->producer_index = unmap_prod;
366  smp_mb();
367  if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags)))
368  bna_rxq_prod_indx_doorbell(rcb);
369  }
370 }
371 
372 static inline void
373 bnad_refill_rxq(struct bnad *bnad, struct bna_rcb *rcb)
374 {
375  struct bnad_unmap_q *unmap_q = rcb->unmap_q;
376 
377  if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
378  if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
379  >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
380  bnad_rxq_post(bnad, rcb);
381  smp_mb__before_clear_bit();
382  clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
383  }
384 }
385 
386 static u32
387 bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
388 {
389  struct bna_cq_entry *cmpl, *next_cmpl;
390  struct bna_rcb *rcb = NULL;
391  unsigned int wi_range, packets = 0, wis = 0;
392  struct bnad_unmap_q *unmap_q;
393  struct bnad_skb_unmap *unmap_array;
394  struct sk_buff *skb;
395  u32 flags, unmap_cons;
396  struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
397  struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
398 
399  if (!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags))
400  return 0;
401 
402  prefetch(bnad->netdev);
403  BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt, cmpl,
404  wi_range);
405  BUG_ON(!(wi_range <= ccb->q_depth));
406  while (cmpl->valid && packets < budget) {
407  packets++;
408  BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));
409 
410  if (bna_is_small_rxq(cmpl->rxq_id))
411  rcb = ccb->rcb[1];
412  else
413  rcb = ccb->rcb[0];
414 
415  unmap_q = rcb->unmap_q;
416  unmap_array = unmap_q->unmap_array;
417  unmap_cons = unmap_q->consumer_index;
418 
419  skb = unmap_array[unmap_cons].skb;
420  BUG_ON(!(skb));
421  unmap_array[unmap_cons].skb = NULL;
422  dma_unmap_single(&bnad->pcidev->dev,
423  dma_unmap_addr(&unmap_array[unmap_cons],
424  dma_addr),
425  rcb->rxq->buffer_size,
426  DMA_FROM_DEVICE);
427  BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);
428 
429  /* Should be more efficient ? Performance ? */
430  BNA_QE_INDX_ADD(rcb->consumer_index, 1, rcb->q_depth);
431 
432  wis++;
433  if (likely(--wi_range))
434  next_cmpl = cmpl + 1;
435  else {
436  BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);
437  wis = 0;
438  BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt,
439  next_cmpl, wi_range);
440  BUG_ON(!(wi_range <= ccb->q_depth));
441  }
442  prefetch(next_cmpl);
443 
444  flags = ntohl(cmpl->flags);
445  if (unlikely
446  (flags &
447  (BNA_CQ_EF_MAC_ERROR | BNA_CQ_EF_FCS_ERROR |
448  BNA_CQ_EF_TOO_LONG))) {
449  dev_kfree_skb_any(skb);
450  rcb->rxq->rx_packets_with_error++;
451  goto next;
452  }
453 
454  skb_put(skb, ntohs(cmpl->length));
455  if (likely
456  ((bnad->netdev->features & NETIF_F_RXCSUM) &&
457  (((flags & BNA_CQ_EF_IPV4) &&
458  (flags & BNA_CQ_EF_L3_CKSUM_OK)) ||
459  (flags & BNA_CQ_EF_IPV6)) &&
460  (flags & (BNA_CQ_EF_TCP | BNA_CQ_EF_UDP)) &&
461  (flags & BNA_CQ_EF_L4_CKSUM_OK)))
462  skb->ip_summed = CHECKSUM_UNNECESSARY;
463  else
464  skb_checksum_none_assert(skb);
465 
466  rcb->rxq->rx_packets++;
467  rcb->rxq->rx_bytes += skb->len;
468  skb->protocol = eth_type_trans(skb, bnad->netdev);
469 
470  if (flags & BNA_CQ_EF_VLAN)
471  __vlan_hwaccel_put_tag(skb, ntohs(cmpl->vlan_tag));
472 
473  if (skb->ip_summed == CHECKSUM_UNNECESSARY)
474  napi_gro_receive(&rx_ctrl->napi, skb);
475  else
476  netif_receive_skb(skb);
477 
478 next:
479  cmpl->valid = 0;
480  cmpl = next_cmpl;
481  }
482 
483  BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);
484 
485  if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
486  bna_ib_ack_disable_irq(ccb->i_dbell, packets);
487 
488  bnad_refill_rxq(bnad, ccb->rcb[0]);
489  if (ccb->rcb[1])
490  bnad_refill_rxq(bnad, ccb->rcb[1]);
491 
492  clear_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags);
493 
494  return packets;
495 }
496 
497 static void
498 bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
499 {
500  struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
501  struct napi_struct *napi = &rx_ctrl->napi;
502 
503  if (likely(napi_schedule_prep(napi))) {
504  __napi_schedule(napi);
505  rx_ctrl->rx_schedule++;
506  }
507 }
508 
509 /* MSIX Rx Path Handler */
510 static irqreturn_t
511 bnad_msix_rx(int irq, void *data)
512 {
513  struct bna_ccb *ccb = (struct bna_ccb *)data;
514 
515  if (ccb) {
516  ((struct bnad_rx_ctrl *)(ccb->ctrl))->rx_intr_ctr++;
517  bnad_netif_rx_schedule_poll(ccb->bnad, ccb);
518  }
519 
520  return IRQ_HANDLED;
521 }
522 
523 /* Interrupt handlers */
524 
525 /* Mbox Interrupt Handlers */
526 static irqreturn_t
527 bnad_msix_mbox_handler(int irq, void *data)
528 {
529  u32 intr_status;
530  unsigned long flags;
531  struct bnad *bnad = (struct bnad *)data;
532 
533  spin_lock_irqsave(&bnad->bna_lock, flags);
534  if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
535  spin_unlock_irqrestore(&bnad->bna_lock, flags);
536  return IRQ_HANDLED;
537  }
538 
539  bna_intr_status_get(&bnad->bna, intr_status);
540 
541  if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
542  bna_mbox_handler(&bnad->bna, intr_status);
543 
544  spin_unlock_irqrestore(&bnad->bna_lock, flags);
545 
546  return IRQ_HANDLED;
547 }
548 
549 static irqreturn_t
550 bnad_isr(int irq, void *data)
551 {
552  int i, j;
553  u32 intr_status;
554  unsigned long flags;
555  struct bnad *bnad = (struct bnad *)data;
556  struct bnad_rx_info *rx_info;
557  struct bnad_rx_ctrl *rx_ctrl;
558  struct bna_tcb *tcb = NULL;
559 
560  spin_lock_irqsave(&bnad->bna_lock, flags);
561  if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
562  spin_unlock_irqrestore(&bnad->bna_lock, flags);
563  return IRQ_NONE;
564  }
565 
566  bna_intr_status_get(&bnad->bna, intr_status);
567 
568  if (unlikely(!intr_status)) {
569  spin_unlock_irqrestore(&bnad->bna_lock, flags);
570  return IRQ_NONE;
571  }
572 
573  if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
574  bna_mbox_handler(&bnad->bna, intr_status);
575 
576  spin_unlock_irqrestore(&bnad->bna_lock, flags);
577 
578  if (!BNA_IS_INTX_DATA_INTR(intr_status))
579  return IRQ_HANDLED;
580 
581  /* Process data interrupts */
582  /* Tx processing */
583  for (i = 0; i < bnad->num_tx; i++) {
584  for (j = 0; j < bnad->num_txq_per_tx; j++) {
585  tcb = bnad->tx_info[i].tcb[j];
586  if (tcb && test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
587  bnad_tx_complete(bnad, bnad->tx_info[i].tcb[j]);
588  }
589  }
590  /* Rx processing */
591  for (i = 0; i < bnad->num_rx; i++) {
592  rx_info = &bnad->rx_info[i];
593  if (!rx_info->rx)
594  continue;
595  for (j = 0; j < bnad->num_rxp_per_rx; j++) {
596  rx_ctrl = &rx_info->rx_ctrl[j];
597  if (rx_ctrl->ccb)
598  bnad_netif_rx_schedule_poll(bnad,
599  rx_ctrl->ccb);
600  }
601  }
602  return IRQ_HANDLED;
603 }
604 
605 /*
606  * Called in interrupt / callback context
607  * with bna_lock held, so cfg_flags access is OK
608  */
609 static void
610 bnad_enable_mbox_irq(struct bnad *bnad)
611 {
612  clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
613 
614  BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
615 }
616 
617 /*
618  * Called with bnad->bna_lock held b'cos of
619  * bnad->cfg_flags access.
620  */
621 static void
622 bnad_disable_mbox_irq(struct bnad *bnad)
623 {
624  set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
625 
626  BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
627 }
628 
629 static void
630 bnad_set_netdev_perm_addr(struct bnad *bnad)
631 {
632  struct net_device *netdev = bnad->netdev;
633 
634  memcpy(netdev->perm_addr, &bnad->perm_addr, netdev->addr_len);
635  if (is_zero_ether_addr(netdev->dev_addr))
636  memcpy(netdev->dev_addr, &bnad->perm_addr, netdev->addr_len);
637 }
638 
639 /* Control Path Handlers */
640 
641 /* Callbacks */
642 void
643 bnad_cb_mbox_intr_enable(struct bnad *bnad)
644 {
645  bnad_enable_mbox_irq(bnad);
646 }
647 
648 void
649 bnad_cb_mbox_intr_disable(struct bnad *bnad)
650 {
651  bnad_disable_mbox_irq(bnad);
652 }
653 
654 void
655 bnad_cb_ioceth_ready(struct bnad *bnad)
656 {
657  bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
658  complete(&bnad->bnad_completions.ioc_comp);
659 }
660 
661 void
662 bnad_cb_ioceth_failed(struct bnad *bnad)
663 {
664  bnad->bnad_completions.ioc_comp_status = BNA_CB_FAIL;
665  complete(&bnad->bnad_completions.ioc_comp);
666 }
667 
668 void
669 bnad_cb_ioceth_disabled(struct bnad *bnad)
670 {
671  bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
672  complete(&bnad->bnad_completions.ioc_comp);
673 }
674 
675 static void
676 bnad_cb_enet_disabled(void *arg)
677 {
678  struct bnad *bnad = (struct bnad *)arg;
679 
680  netif_carrier_off(bnad->netdev);
681  complete(&bnad->bnad_completions.enet_comp);
682 }
683 
684 void
685 bnad_cb_ethport_link_status(struct bnad *bnad,
686  enum bna_link_status link_status)
687 {
688  bool link_up = false;
689 
690  link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP);
691 
692  if (link_status == BNA_CEE_UP) {
693  if (!test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
694  BNAD_UPDATE_CTR(bnad, cee_toggle);
695  set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
696  } else {
697  if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
698  BNAD_UPDATE_CTR(bnad, cee_toggle);
699  clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
700  }
701 
702  if (link_up) {
703  if (!netif_carrier_ok(bnad->netdev)) {
704  uint tx_id, tcb_id;
705  printk(KERN_WARNING "bna: %s link up\n",
706  bnad->netdev->name);
707  netif_carrier_on(bnad->netdev);
708  BNAD_UPDATE_CTR(bnad, link_toggle);
709  for (tx_id = 0; tx_id < bnad->num_tx; tx_id++) {
710  for (tcb_id = 0; tcb_id < bnad->num_txq_per_tx;
711  tcb_id++) {
712  struct bna_tcb *tcb =
713  bnad->tx_info[tx_id].tcb[tcb_id];
714  u32 txq_id;
715  if (!tcb)
716  continue;
717 
718  txq_id = tcb->id;
719 
720  if (test_bit(BNAD_TXQ_TX_STARTED,
721  &tcb->flags)) {
722  /*
723  * Force an immediate
724  * Transmit Schedule */
725  printk(KERN_INFO "bna: %s %d "
726  "TXQ_STARTED\n",
727  bnad->netdev->name,
728  txq_id);
729  netif_wake_subqueue(
730  bnad->netdev,
731  txq_id);
732  BNAD_UPDATE_CTR(bnad,
733  netif_queue_wakeup);
734  } else {
735  netif_stop_subqueue(
736  bnad->netdev,
737  txq_id);
738  BNAD_UPDATE_CTR(bnad,
739  netif_queue_stop);
740  }
741  }
742  }
743  }
744  } else {
745  if (netif_carrier_ok(bnad->netdev)) {
746  printk(KERN_WARNING "bna: %s link down\n",
747  bnad->netdev->name);
748  netif_carrier_off(bnad->netdev);
749  BNAD_UPDATE_CTR(bnad, link_toggle);
750  }
751  }
752 }
753 
754 static void
755 bnad_cb_tx_disabled(void *arg, struct bna_tx *tx)
756 {
757  struct bnad *bnad = (struct bnad *)arg;
758 
759  complete(&bnad->bnad_completions.tx_comp);
760 }
761 
762 static void
763 bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb)
764 {
765  struct bnad_tx_info *tx_info =
766  (struct bnad_tx_info *)tcb->txq->tx->priv;
767  struct bnad_unmap_q *unmap_q = tcb->unmap_q;
768 
769  tx_info->tcb[tcb->id] = tcb;
770  unmap_q->producer_index = 0;
771  unmap_q->consumer_index = 0;
772  unmap_q->q_depth = BNAD_TX_UNMAPQ_DEPTH;
773 }
774 
775 static void
776 bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
777 {
778  struct bnad_tx_info *tx_info =
779  (struct bnad_tx_info *)tcb->txq->tx->priv;
780 
781  tx_info->tcb[tcb->id] = NULL;
782  tcb->priv = NULL;
783 }
784 
785 static void
786 bnad_cb_rcb_setup(struct bnad *bnad, struct bna_rcb *rcb)
787 {
788  struct bnad_unmap_q *unmap_q = rcb->unmap_q;
789 
790  unmap_q->producer_index = 0;
791  unmap_q->consumer_index = 0;
792  unmap_q->q_depth = BNAD_RX_UNMAPQ_DEPTH;
793 }
794 
795 static void
796 bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
797 {
798  struct bnad_rx_info *rx_info =
799  (struct bnad_rx_info *)ccb->cq->rx->priv;
800 
801  rx_info->rx_ctrl[ccb->id].ccb = ccb;
802  ccb->ctrl = &rx_info->rx_ctrl[ccb->id];
803 }
804 
805 static void
806 bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb)
807 {
808  struct bnad_rx_info *rx_info =
809  (struct bnad_rx_info *)ccb->cq->rx->priv;
810 
811  rx_info->rx_ctrl[ccb->id].ccb = NULL;
812 }
813 
814 static void
815 bnad_cb_tx_stall(struct bnad *bnad, struct bna_tx *tx)
816 {
817  struct bnad_tx_info *tx_info =
818  (struct bnad_tx_info *)tx->priv;
819  struct bna_tcb *tcb;
820  u32 txq_id;
821  int i;
822 
823  for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
824  tcb = tx_info->tcb[i];
825  if (!tcb)
826  continue;
827  txq_id = tcb->id;
828  clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
829  netif_stop_subqueue(bnad->netdev, txq_id);
830  printk(KERN_INFO "bna: %s %d TXQ_STOPPED\n",
831  bnad->netdev->name, txq_id);
832  }
833 }
834 
835 static void
836 bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
837 {
838  struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
839  struct bna_tcb *tcb;
840  u32 txq_id;
841  int i;
842 
843  for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
844  tcb = tx_info->tcb[i];
845  if (!tcb)
846  continue;
847  txq_id = tcb->id;
848 
849  BUG_ON(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags));
850  set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
851  BUG_ON(*(tcb->hw_consumer_index) != 0);
852 
853  if (netif_carrier_ok(bnad->netdev)) {
854  printk(KERN_INFO "bna: %s %d TXQ_STARTED\n",
855  bnad->netdev->name, txq_id);
856  netif_wake_subqueue(bnad->netdev, txq_id);
857  BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
858  }
859  }
860 
861  /*
862  * Workaround for first ioceth enable failure & we
863  * get a 0 MAC address. We try to get the MAC address
864  * again here.
865  */
866  if (is_zero_ether_addr(&bnad->perm_addr.mac[0])) {
867  bna_enet_perm_mac_get(&bnad->bna.enet, &bnad->perm_addr);
868  bnad_set_netdev_perm_addr(bnad);
869  }
870 }
871 
872 /*
873  * Free all TxQs buffers and then notify TX_E_CLEANUP_DONE to Tx fsm.
874  */
875 static void
876 bnad_tx_cleanup(struct delayed_work *work)
877 {
878  struct bnad_tx_info *tx_info =
879  container_of(work, struct bnad_tx_info, tx_cleanup_work);
880  struct bnad *bnad = NULL;
881  struct bnad_unmap_q *unmap_q;
882  struct bna_tcb *tcb;
883  unsigned long flags;
884  uint32_t i, pending = 0;
885 
886  for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
887  tcb = tx_info->tcb[i];
888  if (!tcb)
889  continue;
890 
891  bnad = tcb->bnad;
892 
893  if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
894  pending++;
895  continue;
896  }
897 
898  bnad_txq_cleanup(bnad, tcb);
899 
900  unmap_q = tcb->unmap_q;
901  unmap_q->producer_index = 0;
902  unmap_q->consumer_index = 0;
903 
904  smp_mb__before_clear_bit();
905  clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
906  }
907 
908  if (pending) {
909  queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work,
910  msecs_to_jiffies(1));
911  return;
912  }
913 
914  spin_lock_irqsave(&bnad->bna_lock, flags);
915  bna_tx_cleanup_complete(tx_info->tx);
916  spin_unlock_irqrestore(&bnad->bna_lock, flags);
917 }
918 
919 
920 static void
921 bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx)
922 {
923  struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
924  struct bna_tcb *tcb;
925  int i;
926 
927  for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
928  tcb = tx_info->tcb[i];
929  if (!tcb)
930  continue;
931  }
932 
933  queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work, 0);
934 }
935 
936 static void
937 bnad_cb_rx_stall(struct bnad *bnad, struct bna_rx *rx)
938 {
939  struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
940  struct bna_ccb *ccb;
941  struct bnad_rx_ctrl *rx_ctrl;
942  int i;
943 
944  for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
945  rx_ctrl = &rx_info->rx_ctrl[i];
946  ccb = rx_ctrl->ccb;
947  if (!ccb)
948  continue;
949 
950  clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[0]->flags);
951 
952  if (ccb->rcb[1])
953  clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[1]->flags);
954  }
955 }
956 
957 /*
958  * Free all RxQs buffers and then notify RX_E_CLEANUP_DONE to Rx fsm.
959  */
960 static void
961 bnad_rx_cleanup(void *work)
962 {
963  struct bnad_rx_info *rx_info =
964  container_of(work, struct bnad_rx_info, rx_cleanup_work);
965  struct bnad_rx_ctrl *rx_ctrl;
966  struct bnad *bnad = NULL;
967  unsigned long flags;
968  uint32_t i;
969 
970  for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
971  rx_ctrl = &rx_info->rx_ctrl[i];
972 
973  if (!rx_ctrl->ccb)
974  continue;
975 
976  bnad = rx_ctrl->ccb->bnad;
977 
978  /*
979  * Wait till the poll handler has exited
980  * and nothing can be scheduled anymore
981  */
982  napi_disable(&rx_ctrl->napi);
983 
984  bnad_cq_cleanup(bnad, rx_ctrl->ccb);
985  bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[0]);
986  if (rx_ctrl->ccb->rcb[1])
987  bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[1]);
988  }
989 
990  spin_lock_irqsave(&bnad->bna_lock, flags);
991  bna_rx_cleanup_complete(rx_info->rx);
992  spin_unlock_irqrestore(&bnad->bna_lock, flags);
993 }
994 
995 static void
996 bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx)
997 {
998  struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
999  struct bna_ccb *ccb;
1000  struct bnad_rx_ctrl *rx_ctrl;
1001  int i;
1002 
1003  for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1004  rx_ctrl = &rx_info->rx_ctrl[i];
1005  ccb = rx_ctrl->ccb;
1006  if (!ccb)
1007  continue;
1008 
1009  clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);
1010 
1011  if (ccb->rcb[1])
1012  clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);
1013  }
1014 
1015  queue_work(bnad->work_q, &rx_info->rx_cleanup_work);
1016 }
1017 
1018 static void
1019 bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx)
1020 {
1021  struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
1022  struct bna_ccb *ccb;
1023  struct bna_rcb *rcb;
1024  struct bnad_rx_ctrl *rx_ctrl;
1025  struct bnad_unmap_q *unmap_q;
1026  int i;
1027  int j;
1028 
1029  for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1030  rx_ctrl = &rx_info->rx_ctrl[i];
1031  ccb = rx_ctrl->ccb;
1032  if (!ccb)
1033  continue;
1034 
1035  napi_enable(&rx_ctrl->napi);
1036 
1037  for (j = 0; j < BNAD_MAX_RXQ_PER_RXP; j++) {
1038  rcb = ccb->rcb[j];
1039  if (!rcb)
1040  continue;
1041 
1042  set_bit(BNAD_RXQ_STARTED, &rcb->flags);
1043  set_bit(BNAD_RXQ_POST_OK, &rcb->flags);
1044  unmap_q = rcb->unmap_q;
1045 
1046  /* Now allocate & post buffers for this RCB */
1047  /* !!Allocation in callback context */
1048  if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
1049  if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
1050  >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
1051  bnad_rxq_post(bnad, rcb);
1052  smp_mb__before_clear_bit();
1053  clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
1054  }
1055  }
1056  }
1057 }
1058 
1059 static void
1060 bnad_cb_rx_disabled(void *arg, struct bna_rx *rx)
1061 {
1062  struct bnad *bnad = (struct bnad *)arg;
1063 
1064  complete(&bnad->bnad_completions.rx_comp);
1065 }
1066 
1067 static void
1068 bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx)
1069 {
1070  bnad->bnad_completions.mcast_comp_status = BNA_CB_SUCCESS;
1071  complete(&bnad->bnad_completions.mcast_comp);
1072 }
1073 
1074 void
1075 bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
1076  struct bna_stats *stats)
1077 {
1078  if (status == BNA_CB_SUCCESS)
1079  BNAD_UPDATE_CTR(bnad, hw_stats_updates);
1080 
1081  if (!netif_running(bnad->netdev) ||
1082  !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1083  return;
1084 
1085  mod_timer(&bnad->stats_timer,
1086  jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
1087 }
1088 
1089 static void
1090 bnad_cb_enet_mtu_set(struct bnad *bnad)
1091 {
1092  bnad->bnad_completions.mtu_comp_status = BNA_CB_SUCCESS;
1093  complete(&bnad->bnad_completions.mtu_comp);
1094 }
1095 
1096 void
1097 bnad_cb_completion(void *arg, enum bfa_status status)
1098 {
1099  struct bnad_iocmd_comp *iocmd_comp =
1100  (struct bnad_iocmd_comp *)arg;
1101 
1102  iocmd_comp->comp_status = (u32) status;
1103  complete(&iocmd_comp->comp);
1104 }
1105 
1106 /* Resource allocation, free functions */
1107 
1108 static void
1109 bnad_mem_free(struct bnad *bnad,
1110  struct bna_mem_info *mem_info)
1111 {
1112  int i;
1113  dma_addr_t dma_pa;
1114 
1115  if (mem_info->mdl == NULL)
1116  return;
1117 
1118  for (i = 0; i < mem_info->num; i++) {
1119  if (mem_info->mdl[i].kva != NULL) {
1120  if (mem_info->mem_type == BNA_MEM_T_DMA) {
1121  BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
1122  dma_pa);
1123  dma_free_coherent(&bnad->pcidev->dev,
1124  mem_info->mdl[i].len,
1125  mem_info->mdl[i].kva, dma_pa);
1126  } else
1127  kfree(mem_info->mdl[i].kva);
1128  }
1129  }
1130  kfree(mem_info->mdl);
1131  mem_info->mdl = NULL;
1132 }
1133 
1134 static int
1135 bnad_mem_alloc(struct bnad *bnad,
1136  struct bna_mem_info *mem_info)
1137 {
1138  int i;
1139  dma_addr_t dma_pa;
1140 
1141  if ((mem_info->num == 0) || (mem_info->len == 0)) {
1142  mem_info->mdl = NULL;
1143  return 0;
1144  }
1145 
1146  mem_info->mdl = kcalloc(mem_info->num, sizeof(struct bna_mem_descr),
1147  GFP_KERNEL);
1148  if (mem_info->mdl == NULL)
1149  return -ENOMEM;
1150 
1151  if (mem_info->mem_type == BNA_MEM_T_DMA) {
1152  for (i = 0; i < mem_info->num; i++) {
1153  mem_info->mdl[i].len = mem_info->len;
1154  mem_info->mdl[i].kva =
1155  dma_alloc_coherent(&bnad->pcidev->dev,
1156  mem_info->len, &dma_pa,
1157  GFP_KERNEL);
1158 
1159  if (mem_info->mdl[i].kva == NULL)
1160  goto err_return;
1161 
1162  BNA_SET_DMA_ADDR(dma_pa,
1163  &(mem_info->mdl[i].dma));
1164  }
1165  } else {
1166  for (i = 0; i < mem_info->num; i++) {
1167  mem_info->mdl[i].len = mem_info->len;
1168  mem_info->mdl[i].kva = kzalloc(mem_info->len,
1169  GFP_KERNEL);
1170  if (mem_info->mdl[i].kva == NULL)
1171  goto err_return;
1172  }
1173  }
1174 
1175  return 0;
1176 
1177 err_return:
1178  bnad_mem_free(bnad, mem_info);
1179  return -ENOMEM;
1180 }
1181 
1182 /* Free IRQ for Mailbox */
1183 static void
1184 bnad_mbox_irq_free(struct bnad *bnad)
1185 {
1186  int irq;
1187  unsigned long flags;
1188 
1189  spin_lock_irqsave(&bnad->bna_lock, flags);
1190  bnad_disable_mbox_irq(bnad);
1191  spin_unlock_irqrestore(&bnad->bna_lock, flags);
1192 
1193  irq = BNAD_GET_MBOX_IRQ(bnad);
1194  free_irq(irq, bnad);
1195 }
1196 
1197 /*
1198  * Allocates IRQ for Mailbox, but keep it disabled
1199  * This will be enabled once we get the mbox enable callback
1200  * from bna
1201  */
1202 static int
1203 bnad_mbox_irq_alloc(struct bnad *bnad)
1204 {
1205  int err = 0;
1206  unsigned long irq_flags, flags;
1207  u32 irq;
1208  irq_handler_t irq_handler;
1209 
1210  spin_lock_irqsave(&bnad->bna_lock, flags);
1211  if (bnad->cfg_flags & BNAD_CF_MSIX) {
1212  irq_handler = (irq_handler_t)bnad_msix_mbox_handler;
1213  irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
1214  irq_flags = 0;
1215  } else {
1216  irq_handler = (irq_handler_t)bnad_isr;
1217  irq = bnad->pcidev->irq;
1218  irq_flags = IRQF_SHARED;
1219  }
1220 
1221  spin_unlock_irqrestore(&bnad->bna_lock, flags);
1222  sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);
1223 
1224  /*
1225  * Set the Mbox IRQ disable flag, so that the IRQ handler
1226  * called from request_irq() for SHARED IRQs do not execute
1227  */
1228  set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
1229 
1230  BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
1231 
1232  err = request_irq(irq, irq_handler, irq_flags,
1233  bnad->mbox_irq_name, bnad);
1234 
1235  return err;
1236 }
1237 
1238 static void
1239 bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info)
1240 {
1241  kfree(intr_info->idl);
1242  intr_info->idl = NULL;
1243 }
1244 
1245 /* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */
1246 static int
1247 bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
1248  u32 txrx_id, struct bna_intr_info *intr_info)
1249 {
1250  int i, vector_start = 0;
1251  u32 cfg_flags;
1252  unsigned long flags;
1253 
1254  spin_lock_irqsave(&bnad->bna_lock, flags);
1255  cfg_flags = bnad->cfg_flags;
1256  spin_unlock_irqrestore(&bnad->bna_lock, flags);
1257 
1258  if (cfg_flags & BNAD_CF_MSIX) {
1259  intr_info->intr_type = BNA_INTR_T_MSIX;
1260  intr_info->idl = kcalloc(intr_info->num,
1261  sizeof(struct bna_intr_descr),
1262  GFP_KERNEL);
1263  if (!intr_info->idl)
1264  return -ENOMEM;
1265 
1266  switch (src) {
1267  case BNAD_INTR_TX:
1268  vector_start = BNAD_MAILBOX_MSIX_VECTORS + txrx_id;
1269  break;
1270 
1271  case BNAD_INTR_RX:
1272  vector_start = BNAD_MAILBOX_MSIX_VECTORS +
1273  (bnad->num_tx * bnad->num_txq_per_tx) +
1274  txrx_id;
1275  break;
1276 
1277  default:
1278  BUG();
1279  }
1280 
1281  for (i = 0; i < intr_info->num; i++)
1282  intr_info->idl[i].vector = vector_start + i;
1283  } else {
1284  intr_info->intr_type = BNA_INTR_T_INTX;
1285  intr_info->num = 1;
1286  intr_info->idl = kcalloc(intr_info->num,
1287  sizeof(struct bna_intr_descr),
1288  GFP_KERNEL);
1289  if (!intr_info->idl)
1290  return -ENOMEM;
1291 
1292  switch (src) {
1293  case BNAD_INTR_TX:
1294  intr_info->idl[0].vector = BNAD_INTX_TX_IB_BITMASK;
1295  break;
1296 
1297  case BNAD_INTR_RX:
1298  intr_info->idl[0].vector = BNAD_INTX_RX_IB_BITMASK;
1299  break;
1300  }
1301  }
1302  return 0;
1303 }
1304 
1305 /* NOTE: Should be called for MSIX only
1306  * Unregisters Tx MSIX vector(s) from the kernel
1307  */
1308 static void
1309 bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,
1310  int num_txqs)
1311 {
1312  int i;
1313  int vector_num;
1314 
1315  for (i = 0; i < num_txqs; i++) {
1316  if (tx_info->tcb[i] == NULL)
1317  continue;
1318 
1319  vector_num = tx_info->tcb[i]->intr_vector;
1320  free_irq(bnad->msix_table[vector_num].vector, tx_info->tcb[i]);
1321  }
1322 }
1323 
1324 /* NOTE: Should be called for MSIX only
1325  * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
1326  */
1327 static int
1328 bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
1329  u32 tx_id, int num_txqs)
1330 {
1331  int i;
1332  int err;
1333  int vector_num;
1334 
1335  for (i = 0; i < num_txqs; i++) {
1336  vector_num = tx_info->tcb[i]->intr_vector;
1337  sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name,
1338  tx_id + tx_info->tcb[i]->id);
1339  err = request_irq(bnad->msix_table[vector_num].vector,
1340  (irq_handler_t)bnad_msix_tx, 0,
1341  tx_info->tcb[i]->name,
1342  tx_info->tcb[i]);
1343  if (err)
1344  goto err_return;
1345  }
1346 
1347  return 0;
1348 
1349 err_return:
1350  if (i > 0)
1351  bnad_tx_msix_unregister(bnad, tx_info, (i - 1));
1352  return -1;
1353 }
1354 
1355 /* NOTE: Should be called for MSIX only
1356  * Unregisters Rx MSIX vector(s) from the kernel
1357  */
1358 static void
1359 bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,
1360  int num_rxps)
1361 {
1362  int i;
1363  int vector_num;
1364 
1365  for (i = 0; i < num_rxps; i++) {
1366  if (rx_info->rx_ctrl[i].ccb == NULL)
1367  continue;
1368 
1369  vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1370  free_irq(bnad->msix_table[vector_num].vector,
1371  rx_info->rx_ctrl[i].ccb);
1372  }
1373 }
1374 
1375 /* NOTE: Should be called for MSIX only
1376  * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
1377  */
1378 static int
1379 bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
1380  u32 rx_id, int num_rxps)
1381 {
1382  int i;
1383  int err;
1384  int vector_num;
1385 
1386  for (i = 0; i < num_rxps; i++) {
1387  vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1388  sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d",
1389  bnad->netdev->name,
1390  rx_id + rx_info->rx_ctrl[i].ccb->id);
1391  err = request_irq(bnad->msix_table[vector_num].vector,
1392  (irq_handler_t)bnad_msix_rx, 0,
1393  rx_info->rx_ctrl[i].ccb->name,
1394  rx_info->rx_ctrl[i].ccb);
1395  if (err)
1396  goto err_return;
1397  }
1398 
1399  return 0;
1400 
1401 err_return:
1402  if (i > 0)
1403  bnad_rx_msix_unregister(bnad, rx_info, (i - 1));
1404  return -1;
1405 }
1406 
1407 /* Free Tx object Resources */
1408 static void
1409 bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1410 {
1411  int i;
1412 
1413  for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1414  if (res_info[i].res_type == BNA_RES_T_MEM)
1415  bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1416  else if (res_info[i].res_type == BNA_RES_T_INTR)
1417  bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1418  }
1419 }
1420 
1421 /* Allocates memory and interrupt resources for Tx object */
1422 static int
1423 bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1424  u32 tx_id)
1425 {
1426  int i, err = 0;
1427 
1428  for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1429  if (res_info[i].res_type == BNA_RES_T_MEM)
1430  err = bnad_mem_alloc(bnad,
1431  &res_info[i].res_u.mem_info);
1432  else if (res_info[i].res_type == BNA_RES_T_INTR)
1433  err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_TX, tx_id,
1434  &res_info[i].res_u.intr_info);
1435  if (err)
1436  goto err_return;
1437  }
1438  return 0;
1439 
1440 err_return:
1441  bnad_tx_res_free(bnad, res_info);
1442  return err;
1443 }
1444 
1445 /* Free Rx object Resources */
1446 static void
1447 bnad_rx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1448 {
1449  int i;
1450 
1451  for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1452  if (res_info[i].res_type == BNA_RES_T_MEM)
1453  bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1454  else if (res_info[i].res_type == BNA_RES_T_INTR)
1455  bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1456  }
1457 }
1458 
1459 /* Allocates memory and interrupt resources for Rx object */
1460 static int
1461 bnad_rx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1462  uint rx_id)
1463 {
1464  int i, err = 0;
1465 
1466  /* All memory needs to be allocated before setup_ccbs */
1467  for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1468  if (res_info[i].res_type == BNA_RES_T_MEM)
1469  err = bnad_mem_alloc(bnad,
1470  &res_info[i].res_u.mem_info);
1471  else if (res_info[i].res_type == BNA_RES_T_INTR)
1472  err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_RX, rx_id,
1473  &res_info[i].res_u.intr_info);
1474  if (err)
1475  goto err_return;
1476  }
1477  return 0;
1478 
1479 err_return:
1480  bnad_rx_res_free(bnad, res_info);
1481  return err;
1482 }
1483 
1484 /* Timer callbacks */
1485 /* a) IOC timer */
1486 static void
1487 bnad_ioc_timeout(unsigned long data)
1488 {
1489  struct bnad *bnad = (struct bnad *)data;
1490  unsigned long flags;
1491 
1492  spin_lock_irqsave(&bnad->bna_lock, flags);
1493  bfa_nw_ioc_timeout((void *) &bnad->bna.ioceth.ioc);
1494  spin_unlock_irqrestore(&bnad->bna_lock, flags);
1495 }
1496 
1497 static void
1498 bnad_ioc_hb_check(unsigned long data)
1499 {
1500  struct bnad *bnad = (struct bnad *)data;
1501  unsigned long flags;
1502 
1503  spin_lock_irqsave(&bnad->bna_lock, flags);
1504  bfa_nw_ioc_hb_check((void *) &bnad->bna.ioceth.ioc);
1505  spin_unlock_irqrestore(&bnad->bna_lock, flags);
1506 }
1507 
1508 static void
1509 bnad_iocpf_timeout(unsigned long data)
1510 {
1511  struct bnad *bnad = (struct bnad *)data;
1512  unsigned long flags;
1513 
1514  spin_lock_irqsave(&bnad->bna_lock, flags);
1515  bfa_nw_iocpf_timeout((void *) &bnad->bna.ioceth.ioc);
1516  spin_unlock_irqrestore(&bnad->bna_lock, flags);
1517 }
1518 
1519 static void
1520 bnad_iocpf_sem_timeout(unsigned long data)
1521 {
1522  struct bnad *bnad = (struct bnad *)data;
1523  unsigned long flags;
1524 
1525  spin_lock_irqsave(&bnad->bna_lock, flags);
1526  bfa_nw_iocpf_sem_timeout((void *) &bnad->bna.ioceth.ioc);
1527  spin_unlock_irqrestore(&bnad->bna_lock, flags);
1528 }
1529 
1530 /*
1531  * All timer routines use bnad->bna_lock to protect against
1532  * the following race, which may occur in case of no locking:
1533  * Time CPU m CPU n
1534  * 0 1 = test_bit
1535  * 1 clear_bit
1536  * 2 del_timer_sync
1537  * 3 mod_timer
1538  */
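/*
 * The stop side of this pattern (see bnad_stats_timer_stop() below) is,
 * roughly:
 *
 *	spin_lock_irqsave(&bnad->bna_lock, flags);
 *	if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
 *		to_del = 1;
 *	spin_unlock_irqrestore(&bnad->bna_lock, flags);
 *	if (to_del)
 *		del_timer_sync(&bnad->stats_timer);
 *
 * Because the timer callbacks also test the RUNNING bit and call mod_timer()
 * under bna_lock, a timer that is being torn down cannot be re-armed.
 */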
1539 
1540 /* b) Dynamic Interrupt Moderation Timer */
1541 static void
1542 bnad_dim_timeout(unsigned long data)
1543 {
1544  struct bnad *bnad = (struct bnad *)data;
1545  struct bnad_rx_info *rx_info;
1546  struct bnad_rx_ctrl *rx_ctrl;
1547  int i, j;
1548  unsigned long flags;
1549 
1550  if (!netif_carrier_ok(bnad->netdev))
1551  return;
1552 
1553  spin_lock_irqsave(&bnad->bna_lock, flags);
1554  for (i = 0; i < bnad->num_rx; i++) {
1555  rx_info = &bnad->rx_info[i];
1556  if (!rx_info->rx)
1557  continue;
1558  for (j = 0; j < bnad->num_rxp_per_rx; j++) {
1559  rx_ctrl = &rx_info->rx_ctrl[j];
1560  if (!rx_ctrl->ccb)
1561  continue;
1562  bna_rx_dim_update(rx_ctrl->ccb);
1563  }
1564  }
1565 
1566  /* Check for BNAD_CF_DIM_ENABLED, does not eliminate a race */
1567  if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags))
1568  mod_timer(&bnad->dim_timer,
1569  jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1570  spin_unlock_irqrestore(&bnad->bna_lock, flags);
1571 }
1572 
1573 /* c) Statistics Timer */
1574 static void
1575 bnad_stats_timeout(unsigned long data)
1576 {
1577  struct bnad *bnad = (struct bnad *)data;
1578  unsigned long flags;
1579 
1580  if (!netif_running(bnad->netdev) ||
1581  !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1582  return;
1583 
1584  spin_lock_irqsave(&bnad->bna_lock, flags);
1585  bna_hw_stats_get(&bnad->bna);
1586  spin_unlock_irqrestore(&bnad->bna_lock, flags);
1587 }
1588 
1589 /*
1590  * Set up timer for DIM
1591  * Called with bnad->bna_lock held
1592  */
1593 void
1594 bnad_dim_timer_start(struct bnad *bnad)
1595 {
1596  if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
1597  !test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
1598  setup_timer(&bnad->dim_timer, bnad_dim_timeout,
1599  (unsigned long)bnad);
1600  set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
1601  mod_timer(&bnad->dim_timer,
1602  jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1603  }
1604 }
1605 
1606 /*
1607  * Set up timer for statistics
1608  * Called with mutex_lock(&bnad->conf_mutex) held
1609  */
1610 static void
1611 bnad_stats_timer_start(struct bnad *bnad)
1612 {
1613  unsigned long flags;
1614 
1615  spin_lock_irqsave(&bnad->bna_lock, flags);
1616  if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) {
1617  setup_timer(&bnad->stats_timer, bnad_stats_timeout,
1618  (unsigned long)bnad);
1619  mod_timer(&bnad->stats_timer,
1620  jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
1621  }
1622  spin_unlock_irqrestore(&bnad->bna_lock, flags);
1623 }
1624 
1625 /*
1626  * Stops the stats timer
1627  * Called with mutex_lock(&bnad->conf_mutex) held
1628  */
1629 static void
1630 bnad_stats_timer_stop(struct bnad *bnad)
1631 {
1632  int to_del = 0;
1633  unsigned long flags;
1634 
1635  spin_lock_irqsave(&bnad->bna_lock, flags);
1636  if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1637  to_del = 1;
1638  spin_unlock_irqrestore(&bnad->bna_lock, flags);
1639  if (to_del)
1640  del_timer_sync(&bnad->stats_timer);
1641 }
1642 
1643 /* Utilities */
1644 
1645 static void
1646 bnad_netdev_mc_list_get(struct net_device *netdev, u8 *mc_list)
1647 {
1648  int i = 1; /* Index 0 has broadcast address */
1649  struct netdev_hw_addr *mc_addr;
1650 
1651  netdev_for_each_mc_addr(mc_addr, netdev) {
1652  memcpy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0],
1653  ETH_ALEN);
1654  i++;
1655  }
1656 }
1657 
1658 static int
1659 bnad_napi_poll_rx(struct napi_struct *napi, int budget)
1660 {
1661  struct bnad_rx_ctrl *rx_ctrl =
1662  container_of(napi, struct bnad_rx_ctrl, napi);
1663  struct bnad *bnad = rx_ctrl->bnad;
1664  int rcvd = 0;
1665 
1666  rx_ctrl->rx_poll_ctr++;
1667 
1668  if (!netif_carrier_ok(bnad->netdev))
1669  goto poll_exit;
1670 
1671  rcvd = bnad_cq_process(bnad, rx_ctrl->ccb, budget);
1672  if (rcvd >= budget)
1673  return rcvd;
1674 
1675 poll_exit:
1676  napi_complete(napi);
1677 
1678  rx_ctrl->rx_complete++;
1679 
1680  if (rx_ctrl->ccb)
1681  bnad_enable_rx_irq_unsafe(rx_ctrl->ccb);
1682 
1683  return rcvd;
1684 }
1685 
1686 #define BNAD_NAPI_POLL_QUOTA 64
1687 static void
1688 bnad_napi_add(struct bnad *bnad, u32 rx_id)
1689 {
1690  struct bnad_rx_ctrl *rx_ctrl;
1691  int i;
1692 
1693  /* Initialize & enable NAPI */
1694  for (i = 0; i < bnad->num_rxp_per_rx; i++) {
1695  rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
1696  netif_napi_add(bnad->netdev, &rx_ctrl->napi,
1697  bnad_napi_poll_rx, BNAD_NAPI_POLL_QUOTA);
1698  }
1699 }
1700 
1701 static void
1702 bnad_napi_delete(struct bnad *bnad, u32 rx_id)
1703 {
1704  int i;
1705 
1706  /* First disable and then clean up */
1707  for (i = 0; i < bnad->num_rxp_per_rx; i++)
1708  netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
1709 }
1710 
1711 /* Should be held with conf_lock held */
1712 void
1713 bnad_destroy_tx(struct bnad *bnad, u32 tx_id)
1714 {
1715  struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1716  struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1717  unsigned long flags;
1718 
1719  if (!tx_info->tx)
1720  return;
1721 
1722  init_completion(&bnad->bnad_completions.tx_comp);
1723  spin_lock_irqsave(&bnad->bna_lock, flags);
1724  bna_tx_disable(tx_info->tx, BNA_HARD_CLEANUP, bnad_cb_tx_disabled);
1725  spin_unlock_irqrestore(&bnad->bna_lock, flags);
1726  wait_for_completion(&bnad->bnad_completions.tx_comp);
1727 
1728  if (tx_info->tcb[0]->intr_type == BNA_INTR_T_MSIX)
1729  bnad_tx_msix_unregister(bnad, tx_info,
1730  bnad->num_txq_per_tx);
1731 
1732  spin_lock_irqsave(&bnad->bna_lock, flags);
1733  bna_tx_destroy(tx_info->tx);
1734  spin_unlock_irqrestore(&bnad->bna_lock, flags);
1735 
1736  tx_info->tx = NULL;
1737  tx_info->tx_id = 0;
1738 
1739  bnad_tx_res_free(bnad, res_info);
1740 }
1741 
1742 /* Should be held with conf_lock held */
1743 int
1744 bnad_setup_tx(struct bnad *bnad, u32 tx_id)
1745 {
1746  int err;
1747  struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1748  struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1749  struct bna_intr_info *intr_info =
1750  &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
1751  struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
1752  static const struct bna_tx_event_cbfn tx_cbfn = {
1753  .tcb_setup_cbfn = bnad_cb_tcb_setup,
1754  .tcb_destroy_cbfn = bnad_cb_tcb_destroy,
1755  .tx_stall_cbfn = bnad_cb_tx_stall,
1756  .tx_resume_cbfn = bnad_cb_tx_resume,
1757  .tx_cleanup_cbfn = bnad_cb_tx_cleanup,
1758  };
1759 
1760  struct bna_tx *tx;
1761  unsigned long flags;
1762 
1763  tx_info->tx_id = tx_id;
1764 
1765  /* Initialize the Tx object configuration */
1766  tx_config->num_txq = bnad->num_txq_per_tx;
1767  tx_config->txq_depth = bnad->txq_depth;
1768  tx_config->tx_type = BNA_TX_T_REGULAR;
1769  tx_config->coalescing_timeo = bnad->tx_coalescing_timeo;
1770 
1771  /* Get BNA's resource requirement for one tx object */
1772  spin_lock_irqsave(&bnad->bna_lock, flags);
1773  bna_tx_res_req(bnad->num_txq_per_tx,
1774  bnad->txq_depth, res_info);
1775  spin_unlock_irqrestore(&bnad->bna_lock, flags);
1776 
1777  /* Fill Unmap Q memory requirements */
1778  BNAD_FILL_UNMAPQ_MEM_REQ(
1779  &res_info[BNA_TX_RES_MEM_T_UNMAPQ],
1780  bnad->num_txq_per_tx,
1781  BNAD_TX_UNMAPQ_DEPTH);
1782 
1783  /* Allocate resources */
1784  err = bnad_tx_res_alloc(bnad, res_info, tx_id);
1785  if (err)
1786  return err;
1787 
1788  /* Ask BNA to create one Tx object, supplying required resources */
1789  spin_lock_irqsave(&bnad->bna_lock, flags);
1790  tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info,
1791  tx_info);
1792  spin_unlock_irqrestore(&bnad->bna_lock, flags);
1793  if (!tx)
1794  goto err_return;
1795  tx_info->tx = tx;
1796 
1798  (work_func_t)bnad_tx_cleanup);
1799 
1800  /* Register ISR for the Tx object */
1801  if (intr_info->intr_type == BNA_INTR_T_MSIX) {
1802  err = bnad_tx_msix_register(bnad, tx_info,
1803  tx_id, bnad->num_txq_per_tx);
1804  if (err)
1805  goto err_return;
1806  }
1807 
1808  spin_lock_irqsave(&bnad->bna_lock, flags);
1809  bna_tx_enable(tx);
1810  spin_unlock_irqrestore(&bnad->bna_lock, flags);
1811 
1812  return 0;
1813 
1814 err_return:
1815  bnad_tx_res_free(bnad, res_info);
1816  return err;
1817 }
1818 
1819 /* Setup the rx config for bna_rx_create */
1820 /* bnad decides the configuration */
1821 static void
1822 bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
1823 {
1824  rx_config->rx_type = BNA_RX_T_REGULAR;
1825  rx_config->num_paths = bnad->num_rxp_per_rx;
1826  rx_config->coalescing_timeo = bnad->rx_coalescing_timeo;
1827 
1828  if (bnad->num_rxp_per_rx > 1) {
1829  rx_config->rss_status = BNA_STATUS_T_ENABLED;
1830  rx_config->rss_config.hash_type =
1831  (BFI_ENET_RSS_IPV6 |
1832  BFI_ENET_RSS_IPV6_TCP |
1833  BFI_ENET_RSS_IPV4 |
1834  BFI_ENET_RSS_IPV4_TCP);
1835  rx_config->rss_config.hash_mask =
1836  bnad->num_rxp_per_rx - 1;
1837  get_random_bytes(rx_config->rss_config.toeplitz_hash_key,
1838  sizeof(rx_config->rss_config.toeplitz_hash_key));
1839  } else {
1840  rx_config->rss_status = BNA_STATUS_T_DISABLED;
1841  memset(&rx_config->rss_config, 0,
1842  sizeof(rx_config->rss_config));
1843  }
1844  rx_config->rxp_type = BNA_RXP_SLR;
1845  rx_config->q_depth = bnad->rxq_depth;
1846 
1847  rx_config->small_buff_size = BFI_SMALL_RXBUF_SIZE;
1848 
1849  rx_config->vlan_strip_status = BNA_STATUS_T_ENABLED;
1850 }
1851 
1852 static void
1853 bnad_rx_ctrl_init(struct bnad *bnad, u32 rx_id)
1854 {
1855  struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
1856  int i;
1857 
1858  for (i = 0; i < bnad->num_rxp_per_rx; i++)
1859  rx_info->rx_ctrl[i].bnad = bnad;
1860 }
1861 
1862 /* Called with mutex_lock(&bnad->conf_mutex) held */
1863 void
1864 bnad_destroy_rx(struct bnad *bnad, u32 rx_id)
1865 {
1866  struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
1867  struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
1868  struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
1869  unsigned long flags;
1870  int to_del = 0;
1871 
1872  if (!rx_info->rx)
1873  return;
1874 
1875  if (0 == rx_id) {
1876  spin_lock_irqsave(&bnad->bna_lock, flags);
1877  if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
1878  test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
1879  clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
1880  to_del = 1;
1881  }
1882  spin_unlock_irqrestore(&bnad->bna_lock, flags);
1883  if (to_del)
1884  del_timer_sync(&bnad->dim_timer);
1885  }
1886 
1887  init_completion(&bnad->bnad_completions.rx_comp);
1888  spin_lock_irqsave(&bnad->bna_lock, flags);
1889  bna_rx_disable(rx_info->rx, BNA_HARD_CLEANUP, bnad_cb_rx_disabled);
1890  spin_unlock_irqrestore(&bnad->bna_lock, flags);
1891  wait_for_completion(&bnad->bnad_completions.rx_comp);
1892 
1893  if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX)
1894  bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths);
1895 
1896  bnad_napi_delete(bnad, rx_id);
1897 
1898  spin_lock_irqsave(&bnad->bna_lock, flags);
1899  bna_rx_destroy(rx_info->rx);
1900 
1901  rx_info->rx = NULL;
1902  rx_info->rx_id = 0;
1903  spin_unlock_irqrestore(&bnad->bna_lock, flags);
1904 
1905  bnad_rx_res_free(bnad, res_info);
1906 }
1907 
1908 /* Called with mutex_lock(&bnad->conf_mutex) held */
1909 int
1910 bnad_setup_rx(struct bnad *bnad, u32 rx_id)
1911 {
1912  int err;
1913  struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
1914  struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
1915  struct bna_intr_info *intr_info =
1916  &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
1917  struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
1918  static const struct bna_rx_event_cbfn rx_cbfn = {
1919  .rcb_setup_cbfn = bnad_cb_rcb_setup,
1920  .rcb_destroy_cbfn = NULL,
1921  .ccb_setup_cbfn = bnad_cb_ccb_setup,
1922  .ccb_destroy_cbfn = bnad_cb_ccb_destroy,
1923  .rx_stall_cbfn = bnad_cb_rx_stall,
1924  .rx_cleanup_cbfn = bnad_cb_rx_cleanup,
1925  .rx_post_cbfn = bnad_cb_rx_post,
1926  };
1927  struct bna_rx *rx;
1928  unsigned long flags;
1929 
1930  rx_info->rx_id = rx_id;
1931 
1932  /* Initialize the Rx object configuration */
1933  bnad_init_rx_config(bnad, rx_config);
1934 
1935  /* Get BNA's resource requirement for one Rx object */
1936  spin_lock_irqsave(&bnad->bna_lock, flags);
1937  bna_rx_res_req(rx_config, res_info);
1938  spin_unlock_irqrestore(&bnad->bna_lock, flags);
1939 
1940  /* Fill Unmap Q memory requirements */
1941  BNAD_FILL_UNMAPQ_MEM_REQ(
1942  &res_info[BNA_RX_RES_MEM_T_UNMAPQ],
1943  rx_config->num_paths +
1944  ((rx_config->rxp_type == BNA_RXP_SINGLE) ? 0 :
1945  rx_config->num_paths), BNAD_RX_UNMAPQ_DEPTH);
1946 
1947  /* Allocate resource */
1948  err = bnad_rx_res_alloc(bnad, res_info, rx_id);
1949  if (err)
1950  return err;
1951 
1952  bnad_rx_ctrl_init(bnad, rx_id);
1953 
1954  /* Ask BNA to create one Rx object, supplying required resources */
1955  spin_lock_irqsave(&bnad->bna_lock, flags);
1956  rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info,
1957  rx_info);
1958  if (!rx) {
1959  err = -ENOMEM;
1960  spin_unlock_irqrestore(&bnad->bna_lock, flags);
1961  goto err_return;
1962  }
1963  rx_info->rx = rx;
1964  spin_unlock_irqrestore(&bnad->bna_lock, flags);
1965 
1966  INIT_WORK(&rx_info->rx_cleanup_work,
1967  (work_func_t)(bnad_rx_cleanup));
1968 
1969  /*
1970  * Init NAPI, so that state is set to NAPI_STATE_SCHED,
1971  * so that IRQ handler cannot schedule NAPI at this point.
1972  */
1973  bnad_napi_add(bnad, rx_id);
1974 
1975  /* Register ISR for the Rx object */
1976  if (intr_info->intr_type == BNA_INTR_T_MSIX) {
1977  err = bnad_rx_msix_register(bnad, rx_info, rx_id,
1978  rx_config->num_paths);
1979  if (err)
1980  goto err_return;
1981  }
1982 
1983  spin_lock_irqsave(&bnad->bna_lock, flags);
1984  if (0 == rx_id) {
1985  /* Set up Dynamic Interrupt Moderation Vector */
1986  if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED)
1987  bna_rx_dim_reconfig(&bnad->bna, bna_napi_dim_vector);
1988 
1989  /* Enable VLAN filtering only on the default Rx */
1990  bna_rx_vlanfilter_enable(rx);
1991 
1992  /* Start the DIM timer */
1993  bnad_dim_timer_start(bnad);
1994  }
1995 
1996  bna_rx_enable(rx);
1997  spin_unlock_irqrestore(&bnad->bna_lock, flags);
1998 
1999  return 0;
2000 
2001 err_return:
2002  bnad_destroy_rx(bnad, rx_id);
2003  return err;
2004 }
2005 
2006 /* Called with conf_lock & bnad->bna_lock held */
2007 void
2008 bnad_tx_coalescing_timeo_set(struct bnad *bnad)
2009 {
2010  struct bnad_tx_info *tx_info;
2011 
2012  tx_info = &bnad->tx_info[0];
2013  if (!tx_info->tx)
2014  return;
2015 
2016  bna_tx_coalescing_timeo_set(tx_info->tx, bnad->tx_coalescing_timeo);
2017 }
2018 
2019 /* Called with conf_lock & bnad->bna_lock held */
2020 void
2021 bnad_rx_coalescing_timeo_set(struct bnad *bnad)
2022 {
2023  struct bnad_rx_info *rx_info;
2024  int i;
2025 
2026  for (i = 0; i < bnad->num_rx; i++) {
2027  rx_info = &bnad->rx_info[i];
2028  if (!rx_info->rx)
2029  continue;
2030  bna_rx_coalescing_timeo_set(rx_info->rx,
2031  bnad->rx_coalescing_timeo);
2032  }
2033 }
2034 
2035 /*
2036  * Called with bnad->bna_lock held
2037  */
2038 int
2039 bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr)
2040 {
2041  int ret;
2042 
2043  if (!is_valid_ether_addr(mac_addr))
2044  return -EADDRNOTAVAIL;
2045 
2046  /* If datapath is down, pretend everything went through */
2047  if (!bnad->rx_info[0].rx)
2048  return 0;
2049 
2050  ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr, NULL);
2051  if (ret != BNA_CB_SUCCESS)
2052  return -EADDRNOTAVAIL;
2053 
2054  return 0;
2055 }
2056 
2057 /* Should be called with conf_lock held */
2058 int
2059 bnad_enable_default_bcast(struct bnad *bnad)
2060 {
2061  struct bnad_rx_info *rx_info = &bnad->rx_info[0];
2062  int ret;
2063  unsigned long flags;
2064 
2065  init_completion(&bnad->bnad_completions.mcast_comp);
2066 
2067  spin_lock_irqsave(&bnad->bna_lock, flags);
2068  ret = bna_rx_mcast_add(rx_info->rx, (u8 *)bnad_bcast_addr,
2069  bnad_cb_rx_mcast_add);
2070  spin_unlock_irqrestore(&bnad->bna_lock, flags);
2071 
2072  if (ret == BNA_CB_SUCCESS)
2073  wait_for_completion(&bnad->bnad_completions.mcast_comp);
2074  else
2075  return -ENODEV;
2076 
2077  if (bnad->bnad_completions.mcast_comp_status != BNA_CB_SUCCESS)
2078  return -ENODEV;
2079 
2080  return 0;
2081 }
2082 
2083 /* Called with mutex_lock(&bnad->conf_mutex) held */
2084 void
2085 bnad_restore_vlans(struct bnad *bnad, u32 rx_id)
2086 {
2087  u16 vid;
2088  unsigned long flags;
2089 
2090  for_each_set_bit(vid, bnad->active_vlans, VLAN_N_VID) {
2091  spin_lock_irqsave(&bnad->bna_lock, flags);
2092  bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vid);
2093  spin_unlock_irqrestore(&bnad->bna_lock, flags);
2094  }
2095 }
2096 
2097 /* Statistics utilities */
2098 void
2099 bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
2100 {
2101  int i, j;
2102 
2103  for (i = 0; i < bnad->num_rx; i++) {
2104  for (j = 0; j < bnad->num_rxp_per_rx; j++) {
2105  if (bnad->rx_info[i].rx_ctrl[j].ccb) {
2106  stats->rx_packets += bnad->rx_info[i].
2107  rx_ctrl[j].ccb->rcb[0]->rxq->rx_packets;
2108  stats->rx_bytes += bnad->rx_info[i].
2109  rx_ctrl[j].ccb->rcb[0]->rxq->rx_bytes;
2110  if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
2111  bnad->rx_info[i].rx_ctrl[j].ccb->
2112  rcb[1]->rxq) {
2113  stats->rx_packets +=
2114  bnad->rx_info[i].rx_ctrl[j].
2115  ccb->rcb[1]->rxq->rx_packets;
2116  stats->rx_bytes +=
2117  bnad->rx_info[i].rx_ctrl[j].
2118  ccb->rcb[1]->rxq->rx_bytes;
2119  }
2120  }
2121  }
2122  }
2123  for (i = 0; i < bnad->num_tx; i++) {
2124  for (j = 0; j < bnad->num_txq_per_tx; j++) {
2125  if (bnad->tx_info[i].tcb[j]) {
2126  stats->tx_packets +=
2127  bnad->tx_info[i].tcb[j]->txq->tx_packets;
2128  stats->tx_bytes +=
2129  bnad->tx_info[i].tcb[j]->txq->tx_bytes;
2130  }
2131  }
2132  }
2133 }
2134 
2135 /*
2136  * Must be called with the bna_lock held.
2137  */
2138 void
2139 bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
2140 {
2141  struct bfi_enet_stats_mac *mac_stats;
2142  u32 bmap;
2143  int i;
2144 
2145  mac_stats = &bnad->stats.bna_stats->hw_stats.mac_stats;
2146  stats->rx_errors =
2147  mac_stats->rx_fcs_error + mac_stats->rx_alignment_error +
2148  mac_stats->rx_frame_length_error + mac_stats->rx_code_error +
2149  mac_stats->rx_undersize;
2150  stats->tx_errors = mac_stats->tx_fcs_error +
2151  mac_stats->tx_undersize;
2152  stats->rx_dropped = mac_stats->rx_drop;
2153  stats->tx_dropped = mac_stats->tx_drop;
2154  stats->multicast = mac_stats->rx_multicast;
2155  stats->collisions = mac_stats->tx_total_collision;
2156 
2157  stats->rx_length_errors = mac_stats->rx_frame_length_error;
2158 
2159  /* receive ring buffer overflow ?? */
2160 
2161  stats->rx_crc_errors = mac_stats->rx_fcs_error;
2162  stats->rx_frame_errors = mac_stats->rx_alignment_error;
2163  /* recv'r fifo overrun */
2164  bmap = bna_rx_rid_mask(&bnad->bna);
2165  for (i = 0; bmap; i++) {
2166  if (bmap & 1) {
2167  stats->rx_fifo_errors +=
2168  bnad->stats.bna_stats->
2169  hw_stats.rxf_stats[i].frame_drops;
2170  break;
2171  }
2172  bmap >>= 1;
2173  }
2174 }
2175 
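/*
 * Pick the IRQ currently servicing the IOC mailbox (the MSI-X vector when
 * MSI-X is enabled, otherwise the legacy PCI line) and wait for any
 * in-flight handler on it to finish before teardown continues.
 */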
2176 static void
2177 bnad_mbox_irq_sync(struct bnad *bnad)
2178 {
2179  u32 irq;
2180  unsigned long flags;
2181 
2182  spin_lock_irqsave(&bnad->bna_lock, flags);
2183  if (bnad->cfg_flags & BNAD_CF_MSIX)
2184  irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
2185  else
2186  irq = bnad->pcidev->irq;
2187  spin_unlock_irqrestore(&bnad->bna_lock, flags);
2188 
2189  synchronize_irq(irq);
2190 }
2191 
2192 /* Utility used by bnad_start_xmit, for doing TSO */
2193 static int
2194 bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
2195 {
2196  int err;
2197 
2198  if (skb_header_cloned(skb)) {
2199  err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2200  if (err) {
2201  BNAD_UPDATE_CTR(bnad, tso_err);
2202  return err;
2203  }
2204  }
2205 
2206  /*
2207  * For TSO, the TCP checksum field is seeded with pseudo-header sum
2208  * excluding the length field.
2209  */
2210  if (skb->protocol == htons(ETH_P_IP)) {
2211  struct iphdr *iph = ip_hdr(skb);
2212 
2213  /* Do we really need these? */
2214  iph->tot_len = 0;
2215  iph->check = 0;
2216 
2217  tcp_hdr(skb)->check =
2218  ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
2219  IPPROTO_TCP, 0);
2220  BNAD_UPDATE_CTR(bnad, tso4);
2221  } else {
2222  struct ipv6hdr *ipv6h = ipv6_hdr(skb);
2223 
2224  ipv6h->payload_len = 0;
2225  tcp_hdr(skb)->check =
2226  ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0,
2227  IPPROTO_TCP, 0);
2228  BNAD_UPDATE_CTR(bnad, tso6);
2229  }
2230 
2231  return 0;
2232 }
2233 
2234 /*
2235  * Initialize Q numbers depending on Rx Paths
2236  * Called with bnad->bna_lock held, because of cfg_flags
2237  * access.
2238  */
2239 static void
2240 bnad_q_num_init(struct bnad *bnad)
2241 {
2242  int rxps;
2243 
2244  rxps = min((uint)num_online_cpus(),
2245  (uint)(BNAD_MAX_RX * BNAD_MAX_RXP_PER_RX));
2246 
2247  if (!(bnad->cfg_flags & BNAD_CF_MSIX))
2248  rxps = 1; /* INTx */
2249 
2250  bnad->num_rx = 1;
2251  bnad->num_tx = 1;
2252  bnad->num_rxp_per_rx = rxps;
2253  bnad->num_txq_per_tx = BNAD_TXQ_NUM;
2254 }
2255 
2256 /*
2257  * Adjust the queue counts for the given number of MSI-X vectors.
2258  * Preference is given to RSS over Tx priority queues, so only a
2259  * single Tx queue is used in that case.
2260  * Called with bnad->bna_lock held because of cfg_flags access.
2261  */
2262 static void
2263 bnad_q_num_adjust(struct bnad *bnad, int msix_vectors, int temp)
2264 {
2265  bnad->num_txq_per_tx = 1;
2266  if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx) +
2267  BNAD_MAILBOX_MSIX_VECTORS) &&
2268  (bnad->cfg_flags & BNAD_CF_MSIX)) {
2269  bnad->num_rxp_per_rx = msix_vectors -
2270  (bnad->num_tx * bnad->num_txq_per_tx) -
2271  BNAD_MAILBOX_MSIX_VECTORS;
2272  } else
2273  bnad->num_rxp_per_rx = 1;
2274 }
2275 
2276 /* Enable / disable ioceth */
2277 static int
2278 bnad_ioceth_disable(struct bnad *bnad)
2279 {
2280  unsigned long flags;
2281  int err = 0;
2282 
2283  spin_lock_irqsave(&bnad->bna_lock, flags);
2284  init_completion(&bnad->bnad_completions.ioc_comp);
2285  bna_ioceth_disable(&bnad->bna.ioceth, BNA_HARD_CLEANUP);
2286  spin_unlock_irqrestore(&bnad->bna_lock, flags);
2287 
2288  wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2289  msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
2290 
2291  err = bnad->bnad_completions.ioc_comp_status;
2292  return err;
2293 }
2294 
2295 static int
2296 bnad_ioceth_enable(struct bnad *bnad)
2297 {
2298  int err = 0;
2299  unsigned long flags;
2300 
2301  spin_lock_irqsave(&bnad->bna_lock, flags);
2302  init_completion(&bnad->bnad_completions.ioc_comp);
2303  bnad->bnad_completions.ioc_comp_status = BNA_CB_WAITING;
2304  bna_ioceth_enable(&bnad->bna.ioceth);
2305  spin_unlock_irqrestore(&bnad->bna_lock, flags);
2306 
2307  wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2308  msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
2309 
2310  err = bnad->bnad_completions.ioc_comp_status;
2311 
2312  return err;
2313 }
2314 
2315 /* Free BNA resources */
2316 static void
2317 bnad_res_free(struct bnad *bnad, struct bna_res_info *res_info,
2318  u32 res_val_max)
2319 {
2320  int i;
2321 
2322  for (i = 0; i < res_val_max; i++)
2323  bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
2324 }
2325 
2326 /* Allocates memory and interrupt resources for BNA */
2327 static int
2328 bnad_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
2329  u32 res_val_max)
2330 {
2331  int i, err;
2332 
2333  for (i = 0; i < res_val_max; i++) {
2334  err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info);
2335  if (err)
2336  goto err_return;
2337  }
2338  return 0;
2339 
2340 err_return:
2341  bnad_res_free(bnad, res_info, res_val_max);
2342  return err;
2343 }
2344 
2345 /* Interrupt enable / disable */
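/*
 * MSI-X strategy: request one vector per TxQ and per RxP plus the mailbox
 * vector. If pci_enable_msix() grants fewer vectors than requested, shrink
 * the queue configuration to fit the grant and retry once; any further
 * failure drops the driver back to INTx and re-derives the queue counts.
 */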
2346 static void
2347 bnad_enable_msix(struct bnad *bnad)
2348 {
2349  int i, ret;
2350  unsigned long flags;
2351 
2352  spin_lock_irqsave(&bnad->bna_lock, flags);
2353  if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
2354  spin_unlock_irqrestore(&bnad->bna_lock, flags);
2355  return;
2356  }
2357  spin_unlock_irqrestore(&bnad->bna_lock, flags);
2358 
2359  if (bnad->msix_table)
2360  return;
2361 
2362  bnad->msix_table =
2363  kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL);
2364 
2365  if (!bnad->msix_table)
2366  goto intx_mode;
2367 
2368  for (i = 0; i < bnad->msix_num; i++)
2369  bnad->msix_table[i].entry = i;
2370 
2371  ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, bnad->msix_num);
2372  if (ret > 0) {
2373  /* Not enough MSI-X vectors. */
2374  pr_warn("BNA: %d MSI-X vectors allocated < %d requested\n",
2375  ret, bnad->msix_num);
2376 
2377  spin_lock_irqsave(&bnad->bna_lock, flags);
2378  /* ret = #of vectors that we got */
2379  bnad_q_num_adjust(bnad, (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2,
2380  (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2);
2381  spin_unlock_irqrestore(&bnad->bna_lock, flags);
2382 
2383  bnad->msix_num = BNAD_NUM_TXQ + BNAD_NUM_RXP +
2384  BNAD_MAILBOX_MSIX_VECTORS;
2385 
2386  if (bnad->msix_num > ret)
2387  goto intx_mode;
2388 
2389  /* Try once more with adjusted numbers */
2390  /* If this fails, fall back to INTx */
2391  ret = pci_enable_msix(bnad->pcidev, bnad->msix_table,
2392  bnad->msix_num);
2393  if (ret)
2394  goto intx_mode;
2395 
2396  } else if (ret < 0)
2397  goto intx_mode;
2398 
2399  pci_intx(bnad->pcidev, 0);
2400 
2401  return;
2402 
2403 intx_mode:
2404  pr_warn("BNA: MSI-X enable failed - operating in INTx mode\n");
2405 
2406  kfree(bnad->msix_table);
2407  bnad->msix_table = NULL;
2408  bnad->msix_num = 0;
2409  spin_lock_irqsave(&bnad->bna_lock, flags);
2410  bnad->cfg_flags &= ~BNAD_CF_MSIX;
2411  bnad_q_num_init(bnad);
2412  spin_unlock_irqrestore(&bnad->bna_lock, flags);
2413 }
2414 
2415 static void
2416 bnad_disable_msix(struct bnad *bnad)
2417 {
2418  u32 cfg_flags;
2419  unsigned long flags;
2420 
2421  spin_lock_irqsave(&bnad->bna_lock, flags);
2422  cfg_flags = bnad->cfg_flags;
2423  if (bnad->cfg_flags & BNAD_CF_MSIX)
2424  bnad->cfg_flags &= ~BNAD_CF_MSIX;
2425  spin_unlock_irqrestore(&bnad->bna_lock, flags);
2426 
2427  if (cfg_flags & BNAD_CF_MSIX) {
2428  pci_disable_msix(bnad->pcidev);
2429  kfree(bnad->msix_table);
2430  bnad->msix_table = NULL;
2431  }
2432 }
2433 
2434 /* Netdev entry points */
2435 static int
2436 bnad_open(struct net_device *netdev)
2437 {
2438  int err;
2439  struct bnad *bnad = netdev_priv(netdev);
2440  struct bna_pause_config pause_config;
2441  int mtu;
2442  unsigned long flags;
2443 
2444  mutex_lock(&bnad->conf_mutex);
2445 
2446  /* Tx */
2447  err = bnad_setup_tx(bnad, 0);
2448  if (err)
2449  goto err_return;
2450 
2451  /* Rx */
2452  err = bnad_setup_rx(bnad, 0);
2453  if (err)
2454  goto cleanup_tx;
2455 
2456  /* Port */
2457  pause_config.tx_pause = 0;
2458  pause_config.rx_pause = 0;
2459 
2460  mtu = ETH_HLEN + VLAN_HLEN + bnad->netdev->mtu + ETH_FCS_LEN;
2461 
2462  spin_lock_irqsave(&bnad->bna_lock, flags);
2463  bna_enet_mtu_set(&bnad->bna.enet, mtu, NULL);
2464  bna_enet_pause_config(&bnad->bna.enet, &pause_config, NULL);
2465  bna_enet_enable(&bnad->bna.enet);
2466  spin_unlock_irqrestore(&bnad->bna_lock, flags);
2467 
2468  /* Enable broadcast */
2469  bnad_enable_default_bcast(bnad);
2470 
2471  /* Restore VLANs, if any */
2472  bnad_restore_vlans(bnad, 0);
2473 
2474  /* Set the UCAST address */
2475  spin_lock_irqsave(&bnad->bna_lock, flags);
2476  bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
2477  spin_unlock_irqrestore(&bnad->bna_lock, flags);
2478 
2479  /* Start the stats timer */
2480  bnad_stats_timer_start(bnad);
2481 
2482  mutex_unlock(&bnad->conf_mutex);
2483 
2484  return 0;
2485 
2486 cleanup_tx:
2487  bnad_destroy_tx(bnad, 0);
2488 
2489 err_return:
2490  mutex_unlock(&bnad->conf_mutex);
2491  return err;
2492 }
2493 
2494 static int
2495 bnad_stop(struct net_device *netdev)
2496 {
2497  struct bnad *bnad = netdev_priv(netdev);
2498  unsigned long flags;
2499 
2500  mutex_lock(&bnad->conf_mutex);
2501 
2502  /* Stop the stats timer */
2503  bnad_stats_timer_stop(bnad);
2504 
2505  init_completion(&bnad->bnad_completions.enet_comp);
2506 
2507  spin_lock_irqsave(&bnad->bna_lock, flags);
2508  bna_enet_disable(&bnad->bna.enet, BNA_HARD_CLEANUP,
2509  bnad_cb_enet_disabled);
2510  spin_unlock_irqrestore(&bnad->bna_lock, flags);
2511 
2512  wait_for_completion(&bnad->bnad_completions.enet_comp);
2513 
2514  bnad_destroy_tx(bnad, 0);
2515  bnad_destroy_rx(bnad, 0);
2516 
2517  /* Synchronize mailbox IRQ */
2518  bnad_mbox_irq_sync(bnad);
2519 
2520  mutex_unlock(&bnad->conf_mutex);
2521 
2522  return 0;
2523 }
2524 
2525 /* TX */
2526 /*
2527  * bnad_start_xmit : Netdev entry point for Transmit
2528  * Called under lock held by net_device
2529  */
2530 static netdev_tx_t
2531 bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2532 {
2533  struct bnad *bnad = netdev_priv(netdev);
2534  u32 txq_id = 0;
2535  struct bna_tcb *tcb = bnad->tx_info[0].tcb[txq_id];
2536 
2537  u16 txq_prod, vlan_tag = 0;
2538  u32 unmap_prod, wis, wis_used, wi_range;
2539  u32 vectors, vect_id, i, acked;
2540  int err;
2541  unsigned int len;
2542  u32 gso_size;
2543 
2544  struct bnad_unmap_q *unmap_q = tcb->unmap_q;
2545  dma_addr_t dma_addr;
2546  struct bna_txq_entry *txqent;
2547  u16 flags;
2548 
2549  if (unlikely(skb->len <= ETH_HLEN)) {
2550  dev_kfree_skb(skb);
2551  BNAD_UPDATE_CTR(bnad, tx_skb_too_short);
2552  return NETDEV_TX_OK;
2553  }
2554  if (unlikely(skb_headlen(skb) > BFI_TX_MAX_DATA_PER_VECTOR)) {
2555  dev_kfree_skb(skb);
2556  BNAD_UPDATE_CTR(bnad, tx_skb_headlen_too_long);
2557  return NETDEV_TX_OK;
2558  }
2559  if (unlikely(skb_headlen(skb) == 0)) {
2560  dev_kfree_skb(skb);
2561  BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
2562  return NETDEV_TX_OK;
2563  }
2564 
2565  /*
2566  * Takes care of the Tx that is scheduled between clearing the flag
2567  * and the netif_tx_stop_all_queues() call.
2568  */
2569  if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
2570  dev_kfree_skb(skb);
2571  BNAD_UPDATE_CTR(bnad, tx_skb_stopping);
2572  return NETDEV_TX_OK;
2573  }
2574 
2575  vectors = 1 + skb_shinfo(skb)->nr_frags;
2576  if (unlikely(vectors > BFI_TX_MAX_VECTORS_PER_PKT)) {
2577  dev_kfree_skb(skb);
2578  BNAD_UPDATE_CTR(bnad, tx_skb_max_vectors);
2579  return NETDEV_TX_OK;
2580  }
2581  wis = BNA_TXQ_WI_NEEDED(vectors); /* 4 vectors per work item */
2582  acked = 0;
2583  if (unlikely(wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
2584  vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
2585  if ((u16) (*tcb->hw_consumer_index) !=
2586  tcb->consumer_index &&
2587  !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
2588  acked = bnad_txcmpl_process(bnad, tcb);
2589  if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
2590  bna_ib_ack(tcb->i_dbell, acked);
2591  smp_mb__before_clear_bit();
2592  clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
2593  } else {
2594  netif_stop_queue(netdev);
2595  BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2596  }
2597 
2598  smp_mb();
2599  /*
2600  * Check again to deal with race condition between
2601  * netif_stop_queue here, and netif_wake_queue in
2602  * interrupt handler which is not inside netif tx lock.
2603  */
2604  if (likely
2605  (wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
2606  vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
2607  BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2608  return NETDEV_TX_BUSY;
2609  } else {
2610  netif_wake_queue(netdev);
2611  BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
2612  }
2613  }
2614 
2615  unmap_prod = unmap_q->producer_index;
2616  flags = 0;
2617 
2618  txq_prod = tcb->producer_index;
2619  BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt, txqent, wi_range);
2620  txqent->hdr.wi.reserved = 0;
2621  txqent->hdr.wi.num_vectors = vectors;
2622 
2623  if (vlan_tx_tag_present(skb)) {
2624  vlan_tag = (u16) vlan_tx_tag_get(skb);
2625  flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2626  }
2627  if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) {
2628  vlan_tag =
2629  (tcb->priority & 0x7) << 13 | (vlan_tag & 0x1fff);
2630  flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2631  }
2632 
2633  txqent->hdr.wi.vlan_tag = htons(vlan_tag);
2634 
2635  if (skb_is_gso(skb)) {
2636  gso_size = skb_shinfo(skb)->gso_size;
2637 
2638  if (unlikely(gso_size > netdev->mtu)) {
2639  dev_kfree_skb(skb);
2640  BNAD_UPDATE_CTR(bnad, tx_skb_mss_too_long);
2641  return NETDEV_TX_OK;
2642  }
2643  if (unlikely((gso_size + skb_transport_offset(skb) +
2644  tcp_hdrlen(skb)) >= skb->len)) {
2645  txqent->hdr.wi.opcode =
2646  __constant_htons(BNA_TXQ_WI_SEND);
2647  txqent->hdr.wi.lso_mss = 0;
2648  BNAD_UPDATE_CTR(bnad, tx_skb_tso_too_short);
2649  } else {
2650  txqent->hdr.wi.opcode =
2651  __constant_htons(BNA_TXQ_WI_SEND_LSO);
2652  txqent->hdr.wi.lso_mss = htons(gso_size);
2653  }
2654 
2655  err = bnad_tso_prepare(bnad, skb);
2656  if (unlikely(err)) {
2657  dev_kfree_skb(skb);
2658  BNAD_UPDATE_CTR(bnad, tx_skb_tso_prepare);
2659  return NETDEV_TX_OK;
2660  }
2661  flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
2662  txqent->hdr.wi.l4_hdr_size_n_offset =
2663  htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2664  (tcp_hdrlen(skb) >> 2,
2665  skb_transport_offset(skb)));
2666  } else {
2667  txqent->hdr.wi.opcode = __constant_htons(BNA_TXQ_WI_SEND);
2668  txqent->hdr.wi.lso_mss = 0;
2669 
2670  if (unlikely(skb->len > (netdev->mtu + ETH_HLEN))) {
2671  dev_kfree_skb(skb);
2672  BNAD_UPDATE_CTR(bnad, tx_skb_non_tso_too_long);
2673  return NETDEV_TX_OK;
2674  }
2675 
2676  if (skb->ip_summed == CHECKSUM_PARTIAL) {
2677  u8 proto = 0;
2678 
2679  if (skb->protocol == __constant_htons(ETH_P_IP))
2680  proto = ip_hdr(skb)->protocol;
2681  else if (skb->protocol ==
2682  __constant_htons(ETH_P_IPV6)) {
2683  /* nexthdr may not be TCP immediately. */
2684  proto = ipv6_hdr(skb)->nexthdr;
2685  }
2686  if (proto == IPPROTO_TCP) {
2687  flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
2688  txqent->hdr.wi.l4_hdr_size_n_offset =
2689  htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2690  (0, skb_transport_offset(skb)));
2691 
2692  BNAD_UPDATE_CTR(bnad, tcpcsum_offload);
2693 
2694  if (unlikely(skb_headlen(skb) <
2695  skb_transport_offset(skb) + tcp_hdrlen(skb))) {
2696  dev_kfree_skb(skb);
2697  BNAD_UPDATE_CTR(bnad, tx_skb_tcp_hdr);
2698  return NETDEV_TX_OK;
2699  }
2700 
2701  } else if (proto == IPPROTO_UDP) {
2702  flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
2703  txqent->hdr.wi.l4_hdr_size_n_offset =
2704  htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2705  (0, skb_transport_offset(skb)));
2706 
2707  BNAD_UPDATE_CTR(bnad, udpcsum_offload);
2708  if (unlikely(skb_headlen(skb) <
2709  skb_transport_offset(skb) +
2710  sizeof(struct udphdr))) {
2711  dev_kfree_skb(skb);
2712  BNAD_UPDATE_CTR(bnad, tx_skb_udp_hdr);
2713  return NETDEV_TX_OK;
2714  }
2715  } else {
2716  dev_kfree_skb(skb);
2717  BNAD_UPDATE_CTR(bnad, tx_skb_csum_err);
2718  return NETDEV_TX_OK;
2719  }
2720  } else {
2721  txqent->hdr.wi.l4_hdr_size_n_offset = 0;
2722  }
2723  }
2724 
2725  txqent->hdr.wi.flags = htons(flags);
2726 
2727  txqent->hdr.wi.frame_length = htonl(skb->len);
2728 
2729  unmap_q->unmap_array[unmap_prod].skb = skb;
2730  len = skb_headlen(skb);
2731  txqent->vector[0].length = htons(len);
2732  dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
2733  skb_headlen(skb), DMA_TO_DEVICE);
2734  dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
2735  dma_addr);
2736 
2737  BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[0].host_addr);
2738  BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
2739 
2740  vect_id = 0;
2741  wis_used = 1;
2742 
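 /*
  * Map each page fragment into the next Tx vector; after every
  * BFI_TX_MAX_VECTORS_PER_WI vectors a fresh queue entry is taken
  * and marked as an extension work item.
  */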
2743  for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2744  const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
2745  u16 size = skb_frag_size(frag);
2746 
2747  if (unlikely(size == 0)) {
2748  unmap_prod = unmap_q->producer_index;
2749 
2750  unmap_prod = bnad_pci_unmap_skb(&bnad->pcidev->dev,
2751  unmap_q->unmap_array,
2752  unmap_prod, unmap_q->q_depth, skb,
2753  i);
2754  dev_kfree_skb(skb);
2755  BNAD_UPDATE_CTR(bnad, tx_skb_frag_zero);
2756  return NETDEV_TX_OK;
2757  }
2758 
2759  len += size;
2760 
2761  if (++vect_id == BFI_TX_MAX_VECTORS_PER_WI) {
2762  vect_id = 0;
2763  if (--wi_range)
2764  txqent++;
2765  else {
2766  BNA_QE_INDX_ADD(txq_prod, wis_used,
2767  tcb->q_depth);
2768  wis_used = 0;
2769  BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt,
2770  txqent, wi_range);
2771  }
2772  wis_used++;
2773  txqent->hdr.wi_ext.opcode =
2774  __constant_htons(BNA_TXQ_WI_EXTENSION);
2775  }
2776 
2777  BUG_ON(!(size <= BFI_TX_MAX_DATA_PER_VECTOR));
2778  txqent->vector[vect_id].length = htons(size);
2779  dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag,
2780  0, size, DMA_TO_DEVICE);
2781  dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
2782  dma_addr);
2783  BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
2784  BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
2785  }
2786 
2787  if (unlikely(len != skb->len)) {
2788  unmap_prod = unmap_q->producer_index;
2789 
2790  unmap_prod = bnad_pci_unmap_skb(&bnad->pcidev->dev,
2791  unmap_q->unmap_array, unmap_prod,
2792  unmap_q->q_depth, skb,
2793  skb_shinfo(skb)->nr_frags);
2794  dev_kfree_skb(skb);
2795  BNAD_UPDATE_CTR(bnad, tx_skb_len_mismatch);
2796  return NETDEV_TX_OK;
2797  }
2798 
2799  unmap_q->producer_index = unmap_prod;
2800  BNA_QE_INDX_ADD(txq_prod, wis_used, tcb->q_depth);
2801  tcb->producer_index = txq_prod;
2802 
2803  smp_mb();
2804 
2805  if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
2806  return NETDEV_TX_OK;
2807 
2808  bna_txq_prod_indx_doorbell(tcb);
2809  smp_mb();
2810 
2811  return NETDEV_TX_OK;
2812 }
2813 
2814 /*
2815  * Used spin_lock to synchronize reading of stats structures, which
2816  * is written by BNA under the same lock.
2817  */
2818 static struct rtnl_link_stats64 *
2819 bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
2820 {
2821  struct bnad *bnad = netdev_priv(netdev);
2822  unsigned long flags;
2823 
2824  spin_lock_irqsave(&bnad->bna_lock, flags);
2825 
2826  bnad_netdev_qstats_fill(bnad, stats);
2827  bnad_netdev_hwstats_fill(bnad, stats);
2828 
2829  spin_unlock_irqrestore(&bnad->bna_lock, flags);
2830 
2831  return stats;
2832 }
2833 
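/*
 * Compute the delta between the netdev flags (IFF_PROMISC / IFF_ALLMULTI)
 * and the cached cfg_flags, push only the changed bits to the Rx path via
 * bna_rx_mode_set(), then reprogram the multicast list with the broadcast
 * address kept in slot 0.
 */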
2834 void
2835 bnad_set_rx_mode(struct net_device *netdev)
2836 {
2837  struct bnad *bnad = netdev_priv(netdev);
2838  u32 new_mask, valid_mask;
2839  unsigned long flags;
2840 
2841  spin_lock_irqsave(&bnad->bna_lock, flags);
2842 
2843  new_mask = valid_mask = 0;
2844 
2845  if (netdev->flags & IFF_PROMISC) {
2846  if (!(bnad->cfg_flags & BNAD_CF_PROMISC)) {
2847  new_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2848  valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2849  bnad->cfg_flags |= BNAD_CF_PROMISC;
2850  }
2851  } else {
2852  if (bnad->cfg_flags & BNAD_CF_PROMISC) {
2853  new_mask = ~BNAD_RXMODE_PROMISC_DEFAULT;
2854  valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2855  bnad->cfg_flags &= ~BNAD_CF_PROMISC;
2856  }
2857  }
2858 
2859  if (netdev->flags & IFF_ALLMULTI) {
2860  if (!(bnad->cfg_flags & BNAD_CF_ALLMULTI)) {
2861  new_mask |= BNA_RXMODE_ALLMULTI;
2862  valid_mask |= BNA_RXMODE_ALLMULTI;
2863  bnad->cfg_flags |= BNAD_CF_ALLMULTI;
2864  }
2865  } else {
2866  if (bnad->cfg_flags & BNAD_CF_ALLMULTI) {
2867  new_mask &= ~BNA_RXMODE_ALLMULTI;
2868  valid_mask |= BNA_RXMODE_ALLMULTI;
2869  bnad->cfg_flags &= ~BNAD_CF_ALLMULTI;
2870  }
2871  }
2872 
2873  if (bnad->rx_info[0].rx == NULL)
2874  goto unlock;
2875 
2876  bna_rx_mode_set(bnad->rx_info[0].rx, new_mask, valid_mask, NULL);
2877 
2878  if (!netdev_mc_empty(netdev)) {
2879  u8 *mcaddr_list;
2880  int mc_count = netdev_mc_count(netdev);
2881 
2882  /* Index 0 holds the broadcast address */
2883  mcaddr_list =
2884  kzalloc((mc_count + 1) * ETH_ALEN,
2885  GFP_ATOMIC);
2886  if (!mcaddr_list)
2887  goto unlock;
2888 
2889  memcpy(&mcaddr_list[0], &bnad_bcast_addr[0], ETH_ALEN);
2890 
2891  /* Copy rest of the MC addresses */
2892  bnad_netdev_mc_list_get(netdev, mcaddr_list);
2893 
2894  bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1,
2895  mcaddr_list, NULL);
2896 
2897  /* Should we enable BNAD_CF_ALLMULTI for err != 0 ? */
2898  kfree(mcaddr_list);
2899  }
2900 unlock:
2901  spin_unlock_irqrestore(&bnad->bna_lock, flags);
2902 }
2903 
2904 /*
2905  * bna_lock is used to sync writes to netdev->addr
2906  * conf_lock cannot be used since this call may be made
2907  * in a non-blocking context.
2908  */
2909 static int
2910 bnad_set_mac_address(struct net_device *netdev, void *mac_addr)
2911 {
2912  int err;
2913  struct bnad *bnad = netdev_priv(netdev);
2914  struct sockaddr *sa = (struct sockaddr *)mac_addr;
2915  unsigned long flags;
2916 
2917  spin_lock_irqsave(&bnad->bna_lock, flags);
2918 
2919  err = bnad_mac_addr_set_locked(bnad, sa->sa_data);
2920 
2921  if (!err)
2922  memcpy(netdev->dev_addr, sa->sa_data, netdev->addr_len);
2923 
2924  spin_unlock_irqrestore(&bnad->bna_lock, flags);
2925 
2926  return err;
2927 }
2928 
2929 static int
2930 bnad_mtu_set(struct bnad *bnad, int mtu)
2931 {
2932  unsigned long flags;
2933 
2934  init_completion(&bnad->bnad_completions.mtu_comp);
2935 
2936  spin_lock_irqsave(&bnad->bna_lock, flags);
2937  bna_enet_mtu_set(&bnad->bna.enet, mtu, bnad_cb_enet_mtu_set);
2938  spin_unlock_irqrestore(&bnad->bna_lock, flags);
2939 
2940  wait_for_completion(&bnad->bnad_completions.mtu_comp);
2941 
2942  return bnad->bnad_completions.mtu_comp_status;
2943 }
2944 
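/*
 * The MTU handed to the hardware is the full frame size: Ethernet header,
 * one VLAN tag and FCS on top of the requested L3 MTU, mirroring the
 * calculation done in bnad_open().
 */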
2945 static int
2946 bnad_change_mtu(struct net_device *netdev, int new_mtu)
2947 {
2948  int err, mtu = netdev->mtu;
2949  struct bnad *bnad = netdev_priv(netdev);
2950 
2951  if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)
2952  return -EINVAL;
2953 
2954  mutex_lock(&bnad->conf_mutex);
2955 
2956  netdev->mtu = new_mtu;
2957 
2958  mtu = ETH_HLEN + VLAN_HLEN + new_mtu + ETH_FCS_LEN;
2959  err = bnad_mtu_set(bnad, mtu);
2960  if (err)
2961  err = -EBUSY;
2962 
2963  mutex_unlock(&bnad->conf_mutex);
2964  return err;
2965 }
2966 
2967 static int
2968 bnad_vlan_rx_add_vid(struct net_device *netdev,
2969  unsigned short vid)
2970 {
2971  struct bnad *bnad = netdev_priv(netdev);
2972  unsigned long flags;
2973 
2974  if (!bnad->rx_info[0].rx)
2975  return 0;
2976 
2977  mutex_lock(&bnad->conf_mutex);
2978 
2979  spin_lock_irqsave(&bnad->bna_lock, flags);
2980  bna_rx_vlan_add(bnad->rx_info[0].rx, vid);
2981  set_bit(vid, bnad->active_vlans);
2982  spin_unlock_irqrestore(&bnad->bna_lock, flags);
2983 
2984  mutex_unlock(&bnad->conf_mutex);
2985 
2986  return 0;
2987 }
2988 
2989 static int
2990 bnad_vlan_rx_kill_vid(struct net_device *netdev,
2991  unsigned short vid)
2992 {
2993  struct bnad *bnad = netdev_priv(netdev);
2994  unsigned long flags;
2995 
2996  if (!bnad->rx_info[0].rx)
2997  return 0;
2998 
2999  mutex_lock(&bnad->conf_mutex);
3000 
3001  spin_lock_irqsave(&bnad->bna_lock, flags);
3002  clear_bit(vid, bnad->active_vlans);
3003  bna_rx_vlan_del(bnad->rx_info[0].rx, vid);
3004  spin_unlock_irqrestore(&bnad->bna_lock, flags);
3005 
3006  mutex_unlock(&bnad->conf_mutex);
3007 
3008  return 0;
3009 }
3010 
3011 #ifdef CONFIG_NET_POLL_CONTROLLER
3012 static void
3013 bnad_netpoll(struct net_device *netdev)
3014 {
3015  struct bnad *bnad = netdev_priv(netdev);
3016  struct bnad_rx_info *rx_info;
3017  struct bnad_rx_ctrl *rx_ctrl;
3018  u32 curr_mask;
3019  int i, j;
3020 
3021  if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
3022  bna_intx_disable(&bnad->bna, curr_mask);
3023  bnad_isr(bnad->pcidev->irq, netdev);
3024  bna_intx_enable(&bnad->bna, curr_mask);
3025  } else {
3026  /*
3027  * Tx processing may happen in sending context, so no need
3028  * to explicitly process completions here
3029  */
3030 
3031  /* Rx processing */
3032  for (i = 0; i < bnad->num_rx; i++) {
3033  rx_info = &bnad->rx_info[i];
3034  if (!rx_info->rx)
3035  continue;
3036  for (j = 0; j < bnad->num_rxp_per_rx; j++) {
3037  rx_ctrl = &rx_info->rx_ctrl[j];
3038  if (rx_ctrl->ccb)
3039  bnad_netif_rx_schedule_poll(bnad,
3040  rx_ctrl->ccb);
3041  }
3042  }
3043  }
3044 }
3045 #endif
3046 
3047 static const struct net_device_ops bnad_netdev_ops = {
3048  .ndo_open = bnad_open,
3049  .ndo_stop = bnad_stop,
3050  .ndo_start_xmit = bnad_start_xmit,
3051  .ndo_get_stats64 = bnad_get_stats64,
3052  .ndo_set_rx_mode = bnad_set_rx_mode,
3053  .ndo_validate_addr = eth_validate_addr,
3054  .ndo_set_mac_address = bnad_set_mac_address,
3055  .ndo_change_mtu = bnad_change_mtu,
3056  .ndo_vlan_rx_add_vid = bnad_vlan_rx_add_vid,
3057  .ndo_vlan_rx_kill_vid = bnad_vlan_rx_kill_vid,
3058 #ifdef CONFIG_NET_POLL_CONTROLLER
3059  .ndo_poll_controller = bnad_netpoll
3060 #endif
3061 };
3062 
3063 static void
3064 bnad_netdev_init(struct bnad *bnad, bool using_dac)
3065 {
3066  struct net_device *netdev = bnad->netdev;
3067 
3068  netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
3069  NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3070  NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_TX;
3071 
3072  netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA |
3073  NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3074  NETIF_F_TSO | NETIF_F_TSO6;
3075 
3076  netdev->features |= netdev->hw_features |
3077  NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
3078 
3079  if (using_dac)
3080  netdev->features |= NETIF_F_HIGHDMA;
3081 
3082  netdev->mem_start = bnad->mmio_start;
3083  netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1;
3084 
3085  netdev->netdev_ops = &bnad_netdev_ops;
3086  bnad_set_ethtool_ops(netdev);
3087 }
3088 
3089 /*
3090  * 1. Initialize the bnad structure
3091  * 2. Setup netdev pointer in pci_dev
3092  * 3. Initialize no. of TxQ & CQs & MSIX vectors
3093  * 4. Initialize work queue.
3094  */
3095 static int
3096 bnad_init(struct bnad *bnad,
3097  struct pci_dev *pdev, struct net_device *netdev)
3098 {
3099  unsigned long flags;
3100 
3101  SET_NETDEV_DEV(netdev, &pdev->dev);
3102  pci_set_drvdata(pdev, netdev);
3103 
3104  bnad->netdev = netdev;
3105  bnad->pcidev = pdev;
3106  bnad->mmio_start = pci_resource_start(pdev, 0);
3107  bnad->mmio_len = pci_resource_len(pdev, 0);
3108  bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len);
3109  if (!bnad->bar0) {
3110  dev_err(&pdev->dev, "ioremap for bar0 failed\n");
3111  pci_set_drvdata(pdev, NULL);
3112  return -ENOMEM;
3113  }
3114  pr_info("bar0 mapped to %p, len %llu\n", bnad->bar0,
3115  (unsigned long long) bnad->mmio_len);
3116 
3117  spin_lock_irqsave(&bnad->bna_lock, flags);
3118  if (!bnad_msix_disable)
3119  bnad->cfg_flags = BNAD_CF_MSIX;
3120 
3121  bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;
3122 
3123  bnad_q_num_init(bnad);
3124  spin_unlock_irqrestore(&bnad->bna_lock, flags);
3125 
3126  bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) +
3127  (bnad->num_rx * bnad->num_rxp_per_rx) +
3128  BNAD_MAILBOX_MSIX_VECTORS;
3129 
3130  bnad->txq_depth = BNAD_TXQ_DEPTH;
3131  bnad->rxq_depth = BNAD_RXQ_DEPTH;
3132 
3133  bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO;
3134  bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO;
3135 
3136  sprintf(bnad->wq_name, "%s_wq_%d", BNAD_NAME, bnad->id);
3137  bnad->work_q = create_singlethread_workqueue(bnad->wq_name);
3138 
3139  if (!bnad->work_q)
3140  return -ENOMEM;
3141 
3142  return 0;
3143 }
3144 
3145 /*
3146  * Must be called after bnad_pci_uninit()
3147  * so that iounmap() and pci_set_drvdata(NULL)
3148  * happens only after PCI uninitialization.
3149  */
3150 static void
3151 bnad_uninit(struct bnad *bnad)
3152 {
3153  if (bnad->work_q) {
3154  flush_workqueue(bnad->work_q);
3155  destroy_workqueue(bnad->work_q);
3156  bnad->work_q = NULL;
3157  }
3158 
3159  if (bnad->bar0)
3160  iounmap(bnad->bar0);
3161  pci_set_drvdata(bnad->pcidev, NULL);
3162 }
3163 
3164 /*
3165  * Initialize locks
3166  a) Per-ioceth mutex used for serializing configuration
3167  changes from OS interface
3168  b) spin lock used to protect bna state machine
3169  */
3170 static void
3171 bnad_lock_init(struct bnad *bnad)
3172 {
3173  spin_lock_init(&bnad->bna_lock);
3174  mutex_init(&bnad->conf_mutex);
3175  mutex_init(&bnad_list_mutex);
3176 }
3177 
3178 static void
3179 bnad_lock_uninit(struct bnad *bnad)
3180 {
3181  mutex_destroy(&bnad->conf_mutex);
3182  mutex_destroy(&bnad_list_mutex);
3183 }
3184 
3185 /* PCI Initialization */
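/*
 * Prefer a 64-bit DMA mask (reported back through *using_dac so that
 * NETIF_F_HIGHDMA can be set on the netdev); fall back to 32-bit streaming
 * and coherent masks if 64-bit DMA is not available.
 */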
3186 static int
3187 bnad_pci_init(struct bnad *bnad,
3188  struct pci_dev *pdev, bool *using_dac)
3189 {
3190  int err;
3191 
3192  err = pci_enable_device(pdev);
3193  if (err)
3194  return err;
3195  err = pci_request_regions(pdev, BNAD_NAME);
3196  if (err)
3197  goto disable_device;
3198  if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
3199  !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
3200  *using_dac = true;
3201  } else {
3202  err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3203  if (err) {
3204  err = dma_set_coherent_mask(&pdev->dev,
3205  DMA_BIT_MASK(32));
3206  if (err)
3207  goto release_regions;
3208  }
3209  *using_dac = false;
3210  }
3211  pci_set_master(pdev);
3212  return 0;
3213 
3214 release_regions:
3215  pci_release_regions(pdev);
3216 disable_device:
3217  pci_disable_device(pdev);
3218 
3219  return err;
3220 }
3221 
3222 static void
3223 bnad_pci_uninit(struct pci_dev *pdev)
3224 {
3225  pci_release_regions(pdev);
3226  pci_disable_device(pdev);
3227 }
3228 
3229 static int __devinit
3230 bnad_pci_probe(struct pci_dev *pdev,
3231  const struct pci_device_id *pcidev_id)
3232 {
3233  bool using_dac;
3234  int err;
3235  struct bnad *bnad;
3236  struct bna *bna;
3237  struct net_device *netdev;
3238  struct bfa_pcidev pcidev_info;
3239  unsigned long flags;
3240 
3241  pr_info("bnad_pci_probe : (0x%p, 0x%p) PCI Func : (%d)\n",
3242  pdev, pcidev_id, PCI_FUNC(pdev->devfn));
3243 
3244  mutex_lock(&bnad_fwimg_mutex);
3245  if (!cna_get_firmware_buf(pdev)) {
3246  mutex_unlock(&bnad_fwimg_mutex);
3247  pr_warn("Failed to load Firmware Image!\n");
3248  return -ENODEV;
3249  }
3250  mutex_unlock(&bnad_fwimg_mutex);
3251 
3252  /*
3253  * Allocates sizeof(struct net_device) + sizeof(struct bnad);
3254  * bnad = netdev_priv(netdev)
3255  */
3256  netdev = alloc_etherdev(sizeof(struct bnad));
3257  if (!netdev) {
3258  err = -ENOMEM;
3259  return err;
3260  }
3261  bnad = netdev_priv(netdev);
3262  bnad_lock_init(bnad);
3263  bnad_add_to_list(bnad);
3264 
3265  mutex_lock(&bnad->conf_mutex);
3266  /*
3267  * PCI initialization
3268  * Output : using_dac = 1 for 64 bit DMA
3269  * = 0 for 32 bit DMA
3270  */
3271  using_dac = false;
3272  err = bnad_pci_init(bnad, pdev, &using_dac);
3273  if (err)
3274  goto unlock_mutex;
3275 
3276  /*
3277  * Initialize bnad structure
3278  * Setup relation between pci_dev & netdev
3279  */
3280  err = bnad_init(bnad, pdev, netdev);
3281  if (err)
3282  goto pci_uninit;
3283 
3284  /* Initialize netdev structure, set up ethtool ops */
3285  bnad_netdev_init(bnad, using_dac);
3286 
3287  /* Set link to down state */
3288  netif_carrier_off(netdev);
3289 
3290  /* Setup the debugfs node for this bfad */
3291  if (bna_debugfs_enable)
3292  bnad_debugfs_init(bnad);
3293 
3294  /* Get resource requirement from bna */
3295  spin_lock_irqsave(&bnad->bna_lock, flags);
3296  bna_res_req(&bnad->res_info[0]);
3297  spin_unlock_irqrestore(&bnad->bna_lock, flags);
3298 
3299  /* Allocate resources from bna */
3300  err = bnad_res_alloc(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3301  if (err)
3302  goto drv_uninit;
3303 
3304  bna = &bnad->bna;
3305 
3306  /* Setup pcidev_info for bna_init() */
3307  pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn);
3308  pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn);
3309  pcidev_info.device_id = bnad->pcidev->device;
3310  pcidev_info.pci_bar_kva = bnad->bar0;
3311 
3312  spin_lock_irqsave(&bnad->bna_lock, flags);
3313  bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]);
3314  spin_unlock_irqrestore(&bnad->bna_lock, flags);
3315 
3316  bnad->stats.bna_stats = &bna->stats;
3317 
3318  bnad_enable_msix(bnad);
3319  err = bnad_mbox_irq_alloc(bnad);
3320  if (err)
3321  goto res_free;
3322 
3323 
3324  /* Set up timers */
3325  setup_timer(&bnad->bna.ioceth.ioc.ioc_timer, bnad_ioc_timeout,
3326  ((unsigned long)bnad));
3327  setup_timer(&bnad->bna.ioceth.ioc.hb_timer, bnad_ioc_hb_check,
3328  ((unsigned long)bnad));
3329  setup_timer(&bnad->bna.ioceth.ioc.iocpf_timer, bnad_iocpf_timeout,
3330  ((unsigned long)bnad));
3331  setup_timer(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout,
3332  ((unsigned long)bnad));
3333 
3334  /* Now start the timer before calling IOC */
3335  mod_timer(&bnad->bna.ioceth.ioc.iocpf_timer,
3336  jiffies + msecs_to_jiffies(BNA_IOC_TIMER_FREQ));
3337 
3338  /*
3339  * Start the chip
3340  * If the call back comes with error, we bail out.
3341  * This is a catastrophic error.
3342  */
3343  err = bnad_ioceth_enable(bnad);
3344  if (err) {
3345  pr_err("BNA: Initialization failed err=%d\n",
3346  err);
3347  goto probe_success;
3348  }
3349 
3350  spin_lock_irqsave(&bnad->bna_lock, flags);
3351  if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
3352  bna_num_rxp_set(bna, BNAD_NUM_RXP + 1)) {
3353  bnad_q_num_adjust(bnad, bna_attr(bna)->num_txq - 1,
3354  bna_attr(bna)->num_rxp - 1);
3355  if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
3356  bna_num_rxp_set(bna, BNAD_NUM_RXP + 1))
3357  err = -EIO;
3358  }
3359  spin_unlock_irqrestore(&bnad->bna_lock, flags);
3360  if (err)
3361  goto disable_ioceth;
3362 
3363  spin_lock_irqsave(&bnad->bna_lock, flags);
3364  bna_mod_res_req(&bnad->bna, &bnad->mod_res_info[0]);
3365  spin_unlock_irqrestore(&bnad->bna_lock, flags);
3366 
3367  err = bnad_res_alloc(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3368  if (err) {
3369  err = -EIO;
3370  goto disable_ioceth;
3371  }
3372 
3373  spin_lock_irqsave(&bnad->bna_lock, flags);
3374  bna_mod_init(&bnad->bna, &bnad->mod_res_info[0]);
3375  spin_unlock_irqrestore(&bnad->bna_lock, flags);
3376 
3377  /* Get the burnt-in mac */
3378  spin_lock_irqsave(&bnad->bna_lock, flags);
3379  bna_enet_perm_mac_get(&bna->enet, &bnad->perm_addr);
3380  bnad_set_netdev_perm_addr(bnad);
3381  spin_unlock_irqrestore(&bnad->bna_lock, flags);
3382 
3383  mutex_unlock(&bnad->conf_mutex);
3384 
3385  /* Finally, register with net_device layer */
3386  err = register_netdev(netdev);
3387  if (err) {
3388  pr_err("BNA : Registering with netdev failed\n");
3389  goto probe_uninit;
3390  }
3391  set_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags);
3392 
3393  return 0;
3394 
3395 probe_success:
3396  mutex_unlock(&bnad->conf_mutex);
3397  return 0;
3398 
3399 probe_uninit:
3400  mutex_lock(&bnad->conf_mutex);
3401  bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3402 disable_ioceth:
3403  bnad_ioceth_disable(bnad);
3404  del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
3405  del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
3406  del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
3407  spin_lock_irqsave(&bnad->bna_lock, flags);
3408  bna_uninit(bna);
3409  spin_unlock_irqrestore(&bnad->bna_lock, flags);
3410  bnad_mbox_irq_free(bnad);
3411  bnad_disable_msix(bnad);
3412 res_free:
3413  bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3414 drv_uninit:
3415  /* Remove the debugfs node for this bnad */
3416  kfree(bnad->regdata);
3417  bnad_debugfs_uninit(bnad);
3418  bnad_uninit(bnad);
3419 pci_uninit:
3420  bnad_pci_uninit(pdev);
3421 unlock_mutex:
3422  mutex_unlock(&bnad->conf_mutex);
3423  bnad_remove_from_list(bnad);
3424  bnad_lock_uninit(bnad);
3425  free_netdev(netdev);
3426  return err;
3427 }
3428 
3429 static void __devexit
3430 bnad_pci_remove(struct pci_dev *pdev)
3431 {
3432  struct net_device *netdev = pci_get_drvdata(pdev);
3433  struct bnad *bnad;
3434  struct bna *bna;
3435  unsigned long flags;
3436 
3437  if (!netdev)
3438  return;
3439 
3440  pr_info("%s bnad_pci_remove\n", netdev->name);
3441  bnad = netdev_priv(netdev);
3442  bna = &bnad->bna;
3443 
3444  if (test_and_clear_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags))
3445  unregister_netdev(netdev);
3446 
3447  mutex_lock(&bnad->conf_mutex);
3448  bnad_ioceth_disable(bnad);
3449  del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
3450  del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
3451  del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
3452  spin_lock_irqsave(&bnad->bna_lock, flags);
3453  bna_uninit(bna);
3454  spin_unlock_irqrestore(&bnad->bna_lock, flags);
3455 
3456  bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3457  bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3458  bnad_mbox_irq_free(bnad);
3459  bnad_disable_msix(bnad);
3460  bnad_pci_uninit(pdev);
3461  mutex_unlock(&bnad->conf_mutex);
3462  bnad_remove_from_list(bnad);
3463  bnad_lock_uninit(bnad);
3464  /* Remove the debugfs node for this bnad */
3465  kfree(bnad->regdata);
3466  bnad_debugfs_uninit(bnad);
3467  bnad_uninit(bnad);
3468  free_netdev(netdev);
3469 }
3470 
3471 static DEFINE_PCI_DEVICE_TABLE(bnad_pci_id_table) = {
3472  {
3473  PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3474  PCI_DEVICE_ID_BROCADE_CT),
3475  .class = PCI_CLASS_NETWORK_ETHERNET << 8,
3476  .class_mask = 0xffff00
3477  },
3478  {
3479  PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3480  BFA_PCI_DEVICE_ID_CT2),
3481  .class = PCI_CLASS_NETWORK_ETHERNET << 8,
3482  .class_mask = 0xffff00
3483  },
3484  {0, },
3485 };
3486 
3487 MODULE_DEVICE_TABLE(pci, bnad_pci_id_table);
3488 
3489 static struct pci_driver bnad_pci_driver = {
3490  .name = BNAD_NAME,
3491  .id_table = bnad_pci_id_table,
3492  .probe = bnad_pci_probe,
3493  .remove = __devexit_p(bnad_pci_remove),
3494 };
3495 
3496 static int __init
3497 bnad_module_init(void)
3498 {
3499  int err;
3500 
3501  pr_info("Brocade 10G Ethernet driver - version: %s\n",
3502  BNAD_VERSION);
3503 
3504  bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);
3505 
3506  err = pci_register_driver(&bnad_pci_driver);
3507  if (err < 0) {
3508  pr_err("bna : PCI registration failed in module init "
3509  "(%d)\n", err);
3510  return err;
3511  }
3512 
3513  return 0;
3514 }
3515 
3516 static void __exit
3517 bnad_module_exit(void)
3518 {
3519  pci_unregister_driver(&bnad_pci_driver);
3520  release_firmware(bfi_fw);
3521 }
3522 
3523 module_init(bnad_module_init);
3524 module_exit(bnad_module_exit);
3525 
3526 MODULE_AUTHOR("Brocade");
3527 MODULE_LICENSE("GPL");
3528 MODULE_DESCRIPTION("Brocade 10G PCIe Ethernet driver");