Linux Kernel 3.7.1
bna_tx_rx.c
1 /*
2  * Linux network driver for Brocade Converged Network Adapter.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms of the GNU General Public License (GPL) Version 2 as
6  * published by the Free Software Foundation
7  *
8  * This program is distributed in the hope that it will be useful, but
9  * WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11  * General Public License for more details.
12  */
13 /*
14  * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
15  * All rights reserved
16  * www.brocade.com
17  */
18 #include "bna.h"
19 #include "bfi.h"
20 
21 /* IB */
22 static void
23 bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo)
24 {
25  ib->coalescing_timeo = coalescing_timeo;
26  ib->door_bell.doorbell_ack = BNA_DOORBELL_IB_INT_ACK(
27  (u32)ib->coalescing_timeo, 0);
28 }
29 
30 /* RXF */
31 
32 #define bna_rxf_vlan_cfg_soft_reset(rxf) \
33 do { \
34  (rxf)->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL; \
35  (rxf)->vlan_strip_pending = true; \
36 } while (0)
37 
38 #define bna_rxf_rss_cfg_soft_reset(rxf) \
39 do { \
40  if ((rxf)->rss_status == BNA_STATUS_T_ENABLED) \
41  (rxf)->rss_pending = (BNA_RSS_F_RIT_PENDING | \
42  BNA_RSS_F_CFG_PENDING | \
43  BNA_RSS_F_STATUS_PENDING); \
44 } while (0)
45 
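/* Note: a "soft reset" here does not touch hardware; it only re-marks state as
 * pending (all VLAN blocks plus VLAN stripping, and, when RSS is enabled, the
 * RIT/CFG/STATUS steps) so that the next configuration-apply pass replays it
 * to firmware.
 */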
46 static int bna_rxf_cfg_apply(struct bna_rxf *rxf);
47 static void bna_rxf_cfg_reset(struct bna_rxf *rxf);
48 static int bna_rxf_fltr_clear(struct bna_rxf *rxf);
49 static int bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf);
50 static int bna_rxf_promisc_cfg_apply(struct bna_rxf *rxf);
51 static int bna_rxf_allmulti_cfg_apply(struct bna_rxf *rxf);
52 static int bna_rxf_vlan_strip_cfg_apply(struct bna_rxf *rxf);
53 static int bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf,
54  enum bna_cleanup_type cleanup);
55 static int bna_rxf_promisc_cfg_reset(struct bna_rxf *rxf,
56  enum bna_cleanup_type cleanup);
57 static int bna_rxf_allmulti_cfg_reset(struct bna_rxf *rxf,
58  enum bna_cleanup_type cleanup);
59 
60 bfa_fsm_state_decl(bna_rxf, stopped, struct bna_rxf,
61  enum bna_rxf_event);
62 bfa_fsm_state_decl(bna_rxf, paused, struct bna_rxf,
63  enum bna_rxf_event);
64 bfa_fsm_state_decl(bna_rxf, cfg_wait, struct bna_rxf,
65  enum bna_rxf_event);
66 bfa_fsm_state_decl(bna_rxf, started, struct bna_rxf,
67  enum bna_rxf_event);
68 bfa_fsm_state_decl(bna_rxf, fltr_clr_wait, struct bna_rxf,
69  enum bna_rxf_event);
70 bfa_fsm_state_decl(bna_rxf, last_resp_wait, struct bna_rxf,
71  enum bna_rxf_event);
72 
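/* RXF state machine: stopped, paused, cfg_wait, started, fltr_clr_wait and
 * last_resp_wait, driven by RXF_E_* events delivered via bfa_fsm_send_event().
 */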
73 static void
74 bna_rxf_sm_stopped_entry(struct bna_rxf *rxf)
75 {
76  call_rxf_stop_cbfn(rxf);
77 }
78 
79 static void
80 bna_rxf_sm_stopped(struct bna_rxf *rxf, enum bna_rxf_event event)
81 {
82  switch (event) {
83  case RXF_E_START:
84  if (rxf->flags & BNA_RXF_F_PAUSED) {
85  bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
86  call_rxf_start_cbfn(rxf);
87  } else
88  bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
89  break;
90 
91  case RXF_E_STOP:
92  call_rxf_stop_cbfn(rxf);
93  break;
94 
95  case RXF_E_FAIL:
96  /* No-op */
97  break;
98 
99  case RXF_E_CONFIG:
100  call_rxf_cam_fltr_cbfn(rxf);
101  break;
102 
103  case RXF_E_PAUSE:
104  rxf->flags |= BNA_RXF_F_PAUSED;
105  call_rxf_pause_cbfn(rxf);
106  break;
107 
108  case RXF_E_RESUME:
109  rxf->flags &= ~BNA_RXF_F_PAUSED;
110  call_rxf_resume_cbfn(rxf);
111  break;
112 
113  default:
114  bfa_sm_fault(event);
115  }
116 }
117 
118 static void
119 bna_rxf_sm_paused_entry(struct bna_rxf *rxf)
120 {
121  call_rxf_pause_cbfn(rxf);
122 }
123 
124 static void
125 bna_rxf_sm_paused(struct bna_rxf *rxf, enum bna_rxf_event event)
126 {
127  switch (event) {
128  case RXF_E_STOP:
129  case RXF_E_FAIL:
130  bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
131  break;
132 
133  case RXF_E_CONFIG:
134  call_rxf_cam_fltr_cbfn(rxf);
135  break;
136 
137  case RXF_E_RESUME:
138  rxf->flags &= ~BNA_RXF_F_PAUSED;
139  bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
140  break;
141 
142  default:
143  bfa_sm_fault(event);
144  }
145 }
146 
147 static void
148 bna_rxf_sm_cfg_wait_entry(struct bna_rxf *rxf)
149 {
150  if (!bna_rxf_cfg_apply(rxf)) {
151  /* No more pending config updates */
152  bfa_fsm_set_state(rxf, bna_rxf_sm_started);
153  }
154 }
155 
156 static void
157 bna_rxf_sm_cfg_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
158 {
159  switch (event) {
160  case RXF_E_STOP:
161  bfa_fsm_set_state(rxf, bna_rxf_sm_last_resp_wait);
162  break;
163 
164  case RXF_E_FAIL:
165  bna_rxf_cfg_reset(rxf);
166  call_rxf_start_cbfn(rxf);
167  call_rxf_cam_fltr_cbfn(rxf);
168  call_rxf_resume_cbfn(rxf);
169  bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
170  break;
171 
172  case RXF_E_CONFIG:
173  /* No-op */
174  break;
175 
176  case RXF_E_PAUSE:
177  rxf->flags |= BNA_RXF_F_PAUSED;
178  call_rxf_start_cbfn(rxf);
179  bfa_fsm_set_state(rxf, bna_rxf_sm_fltr_clr_wait);
180  break;
181 
182  case RXF_E_FW_RESP:
183  if (!bna_rxf_cfg_apply(rxf)) {
184  /* No more pending config updates */
185  bfa_fsm_set_state(rxf, bna_rxf_sm_started);
186  }
187  break;
188 
189  default:
190  bfa_sm_fault(event);
191  }
192 }
193 
194 static void
195 bna_rxf_sm_started_entry(struct bna_rxf *rxf)
196 {
197  call_rxf_start_cbfn(rxf);
198  call_rxf_cam_fltr_cbfn(rxf);
199  call_rxf_resume_cbfn(rxf);
200 }
201 
202 static void
203 bna_rxf_sm_started(struct bna_rxf *rxf, enum bna_rxf_event event)
204 {
205  switch (event) {
206  case RXF_E_STOP:
207  case RXF_E_FAIL:
208  bna_rxf_cfg_reset(rxf);
209  bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
210  break;
211 
212  case RXF_E_CONFIG:
213  bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
214  break;
215 
216  case RXF_E_PAUSE:
217  rxf->flags |= BNA_RXF_F_PAUSED;
218  if (!bna_rxf_fltr_clear(rxf))
219  bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
220  else
221  bfa_fsm_set_state(rxf, bna_rxf_sm_fltr_clr_wait);
222  break;
223 
224  default:
225  bfa_sm_fault(event);
226  }
227 }
228 
229 static void
230 bna_rxf_sm_fltr_clr_wait_entry(struct bna_rxf *rxf)
231 {
232 }
233 
234 static void
235 bna_rxf_sm_fltr_clr_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
236 {
237  switch (event) {
238  case RXF_E_FAIL:
239  bna_rxf_cfg_reset(rxf);
240  call_rxf_pause_cbfn(rxf);
241  bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
242  break;
243 
244  case RXF_E_FW_RESP:
245  if (!bna_rxf_fltr_clear(rxf)) {
246  /* No more pending CAM entries to clear */
247  bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
248  }
249  break;
250 
251  default:
252  bfa_sm_fault(event);
253  }
254 }
255 
256 static void
257 bna_rxf_sm_last_resp_wait_entry(struct bna_rxf *rxf)
258 {
259 }
260 
261 static void
262 bna_rxf_sm_last_resp_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
263 {
264  switch (event) {
265  case RXF_E_FAIL:
266  case RXF_E_FW_RESP:
267  bna_rxf_cfg_reset(rxf);
268  bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
269  break;
270 
271  default:
272  bfa_sm_fault(event);
273  }
274 }
275 
276 static void
277 bna_bfi_ucast_req(struct bna_rxf *rxf, struct bna_mac *mac,
278  enum bfi_enet_h2i_msgs req_type)
279 {
280  struct bfi_enet_ucast_req *req = &rxf->bfi_enet_cmd.ucast_req;
281 
282  bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, req_type, 0, rxf->rx->rid);
283  req->mh.num_entries = htons(
284  bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_ucast_req)));
285  memcpy(&req->mac_addr, &mac->addr, sizeof(mac_t));
286  bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
287  sizeof(struct bfi_enet_ucast_req), &req->mh);
288  bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
289 }
290 
291 static void
292 bna_bfi_mcast_add_req(struct bna_rxf *rxf, struct bna_mac *mac)
293 {
294  struct bfi_enet_mcast_add_req *req =
295  &rxf->bfi_enet_cmd.mcast_add_req;
296 
297  bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, BFI_ENET_H2I_MAC_MCAST_ADD_REQ,
298  0, rxf->rx->rid);
299  req->mh.num_entries = htons(
300  bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_mcast_add_req)));
301  memcpy(&req->mac_addr, &mac->addr, sizeof(mac_t));
302  bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
303  sizeof(struct bfi_enet_mcast_add_req), &req->mh);
304  bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
305 }
306 
307 static void
308 bna_bfi_mcast_del_req(struct bna_rxf *rxf, u16 handle)
309 {
310  struct bfi_enet_mcast_del_req *req =
311  &rxf->bfi_enet_cmd.mcast_del_req;
312 
313  bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, BFI_ENET_H2I_MAC_MCAST_DEL_REQ,
314  0, rxf->rx->rid);
315  req->mh.num_entries = htons(
316  bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_mcast_del_req)));
317  req->handle = htons(handle);
318  bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
319  sizeof(struct bfi_enet_mcast_del_req), &req->mh);
320  bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
321 }
322 
323 static void
324 bna_bfi_mcast_filter_req(struct bna_rxf *rxf, enum bna_status status)
325 {
326  struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;
327 
328  bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
329  BFI_ENET_H2I_MAC_MCAST_FILTER_REQ, 0, rxf->rx->rid);
330  req->mh.num_entries = htons(
331  bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
332  req->enable = status;
333  bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
334  sizeof(struct bfi_enet_enable_req), &req->mh);
335  bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
336 }
337 
338 static void
339 bna_bfi_rx_promisc_req(struct bna_rxf *rxf, enum bna_status status)
340 {
341  struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;
342 
343  bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
344  BFI_ENET_H2I_RX_PROMISCUOUS_REQ, 0, rxf->rx->rid);
345  req->mh.num_entries = htons(
346  bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
347  req->enable = status;
348  bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
349  sizeof(struct bfi_enet_enable_req), &req->mh);
350  bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
351 }
352 
353 static void
354 bna_bfi_rx_vlan_filter_set(struct bna_rxf *rxf, u8 block_idx)
355 {
356  struct bfi_enet_rx_vlan_req *req = &rxf->bfi_enet_cmd.vlan_req;
357  int i;
358  int j;
359 
360  bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
361  BFI_ENET_H2I_RX_VLAN_SET_REQ, 0, rxf->rx->rid);
362  req->mh.num_entries = htons(
363  bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_vlan_req)));
364  req->block_idx = block_idx;
365  for (i = 0; i < (BFI_ENET_VLAN_BLOCK_SIZE / 32); i++) {
366  j = (block_idx * (BFI_ENET_VLAN_BLOCK_SIZE / 32)) + i;
367  if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED)
368  req->bit_mask[i] =
369  htonl(rxf->vlan_filter_table[j]);
370  else
371  req->bit_mask[i] = 0xFFFFFFFF;
372  }
373  bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
374  sizeof(struct bfi_enet_rx_vlan_req), &req->mh);
375  bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
376 }
377 
378 static void
379 bna_bfi_vlan_strip_enable(struct bna_rxf *rxf)
380 {
381  struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;
382 
383  bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
384  BFI_ENET_H2I_RX_VLAN_STRIP_ENABLE_REQ, 0, rxf->rx->rid);
385  req->mh.num_entries = htons(
386  bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
387  req->enable = rxf->vlan_strip_status;
388  bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
389  sizeof(struct bfi_enet_enable_req), &req->mh);
390  bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
391 }
392 
393 static void
394 bna_bfi_rit_cfg(struct bna_rxf *rxf)
395 {
396  struct bfi_enet_rit_req *req = &rxf->bfi_enet_cmd.rit_req;
397 
398  bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
399  BFI_ENET_H2I_RIT_CFG_REQ, 0, rxf->rx->rid);
400  req->mh.num_entries = htons(
401  bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rit_req)));
402  req->size = htons(rxf->rit_size);
403  memcpy(&req->table[0], rxf->rit, rxf->rit_size);
404  bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
405  sizeof(struct bfi_enet_rit_req), &req->mh);
406  bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
407 }
408 
409 static void
410 bna_bfi_rss_cfg(struct bna_rxf *rxf)
411 {
412  struct bfi_enet_rss_cfg_req *req = &rxf->bfi_enet_cmd.rss_req;
413  int i;
414 
415  bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
416  BFI_ENET_H2I_RSS_CFG_REQ, 0, rxf->rx->rid);
417  req->mh.num_entries = htons(
418  bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rss_cfg_req)));
419  req->cfg.type = rxf->rss_cfg.hash_type;
420  req->cfg.mask = rxf->rss_cfg.hash_mask;
421  for (i = 0; i < BFI_ENET_RSS_KEY_LEN; i++)
422  req->cfg.key[i] =
423  htonl(rxf->rss_cfg.toeplitz_hash_key[i]);
424  bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
425  sizeof(struct bfi_enet_rss_cfg_req), &req->mh);
426  bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
427 }
428 
429 static void
430 bna_bfi_rss_enable(struct bna_rxf *rxf)
431 {
432  struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;
433 
434  bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
435  BFI_ENET_H2I_RSS_ENABLE_REQ, 0, rxf->rx->rid);
436  req->mh.num_entries = htons(
437  bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
438  req->enable = rxf->rss_status;
439  bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
440  sizeof(struct bfi_enet_enable_req), &req->mh);
441  bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
442 }
443 
444 /* This function gets the multicast MAC that has already been added to CAM */
445 static struct bna_mac *
446 bna_rxf_mcmac_get(struct bna_rxf *rxf, u8 *mac_addr)
447 {
448  struct bna_mac *mac;
449  struct list_head *qe;
450 
451  list_for_each(qe, &rxf->mcast_active_q) {
452  mac = (struct bna_mac *)qe;
453  if (BNA_MAC_IS_EQUAL(&mac->addr, mac_addr))
454  return mac;
455  }
456 
457  list_for_each(qe, &rxf->mcast_pending_del_q) {
458  mac = (struct bna_mac *)qe;
459  if (BNA_MAC_IS_EQUAL(&mac->addr, mac_addr))
460  return mac;
461  }
462 
463  return NULL;
464 }
465 
466 static struct bna_mcam_handle *
467 bna_rxf_mchandle_get(struct bna_rxf *rxf, int handle)
468 {
469  struct bna_mcam_handle *mchandle;
470  struct list_head *qe;
471 
472  list_for_each(qe, &rxf->mcast_handle_q) {
473  mchandle = (struct bna_mcam_handle *)qe;
474  if (mchandle->handle == handle)
475  return mchandle;
476  }
477 
478  return NULL;
479 }
480 
481 static void
482 bna_rxf_mchandle_attach(struct bna_rxf *rxf, u8 *mac_addr, int handle)
483 {
484  struct bna_mac *mcmac;
485  struct bna_mcam_handle *mchandle;
486 
487  mcmac = bna_rxf_mcmac_get(rxf, mac_addr);
488  mchandle = bna_rxf_mchandle_get(rxf, handle);
489  if (mchandle == NULL) {
490  mchandle = bna_mcam_mod_handle_get(&rxf->rx->bna->mcam_mod);
491  mchandle->handle = handle;
492  mchandle->refcnt = 0;
493  list_add_tail(&mchandle->qe, &rxf->mcast_handle_q);
494  }
495  mchandle->refcnt++;
496  mcmac->handle = mchandle;
497 }
498 
499 static int
500 bna_rxf_mcast_del(struct bna_rxf *rxf, struct bna_mac *mac,
501  enum bna_cleanup_type cleanup)
502 {
503  struct bna_mcam_handle *mchandle;
504  int ret = 0;
505 
506  mchandle = mac->handle;
507  if (mchandle == NULL)
508  return ret;
509 
510  mchandle->refcnt--;
511  if (mchandle->refcnt == 0) {
512  if (cleanup == BNA_HARD_CLEANUP) {
513  bna_bfi_mcast_del_req(rxf, mchandle->handle);
514  ret = 1;
515  }
516  list_del(&mchandle->qe);
517  bfa_q_qe_init(&mchandle->qe);
518  bna_mcam_mod_handle_put(&rxf->rx->bna->mcam_mod, mchandle);
519  }
520  mac->handle = NULL;
521 
522  return ret;
523 }
524 
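/* Multicast CAM handles are reference counted above: several bna_mac entries
 * may share one firmware handle, and the delete request is only issued (on a
 * hard cleanup) when the last reference is dropped.
 */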
525 static int
526 bna_rxf_mcast_cfg_apply(struct bna_rxf *rxf)
527 {
528  struct bna_mac *mac = NULL;
529  struct list_head *qe;
530  int ret;
531 
532  /* Delete multicast entries previously added */
533  while (!list_empty(&rxf->mcast_pending_del_q)) {
534  bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
535  bfa_q_qe_init(qe);
536  mac = (struct bna_mac *)qe;
537  ret = bna_rxf_mcast_del(rxf, mac, BNA_HARD_CLEANUP);
538  bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
539  if (ret)
540  return ret;
541  }
542 
543  /* Add multicast entries */
544  if (!list_empty(&rxf->mcast_pending_add_q)) {
545  bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
546  bfa_q_qe_init(qe);
547  mac = (struct bna_mac *)qe;
548  list_add_tail(&mac->qe, &rxf->mcast_active_q);
549  bna_bfi_mcast_add_req(rxf, mac);
550  return 1;
551  }
552 
553  return 0;
554 }
555 
556 static int
557 bna_rxf_vlan_cfg_apply(struct bna_rxf *rxf)
558 {
559  u8 vlan_pending_bitmask;
560  int block_idx = 0;
561 
562  if (rxf->vlan_pending_bitmask) {
563  vlan_pending_bitmask = rxf->vlan_pending_bitmask;
564  while (!(vlan_pending_bitmask & 0x1)) {
565  block_idx++;
566  vlan_pending_bitmask >>= 1;
567  }
568  rxf->vlan_pending_bitmask &= ~(1 << block_idx);
569  bna_bfi_rx_vlan_filter_set(rxf, block_idx);
570  return 1;
571  }
572 
573  return 0;
574 }
575 
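/* Each bit in vlan_pending_bitmask marks one block of BFI_ENET_VLAN_BLOCK_SIZE
 * VLAN IDs whose filter words still need to be pushed to firmware; the apply
 * routine above services the lowest-numbered pending block per pass.
 */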
576 static int
577 bna_rxf_mcast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
578 {
579  struct list_head *qe;
580  struct bna_mac *mac;
581  int ret;
582 
583  /* Throw away delete pending mcast entries */
584  while (!list_empty(&rxf->mcast_pending_del_q)) {
585  bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
586  bfa_q_qe_init(qe);
587  mac = (struct bna_mac *)qe;
588  ret = bna_rxf_mcast_del(rxf, mac, cleanup);
589  bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
590  if (ret)
591  return ret;
592  }
593 
594  /* Move active mcast entries to pending_add_q */
595  while (!list_empty(&rxf->mcast_active_q)) {
596  bfa_q_deq(&rxf->mcast_active_q, &qe);
597  bfa_q_qe_init(qe);
598  list_add_tail(qe, &rxf->mcast_pending_add_q);
599  mac = (struct bna_mac *)qe;
600  if (bna_rxf_mcast_del(rxf, mac, cleanup))
601  return 1;
602  }
603 
604  return 0;
605 }
606 
607 static int
608 bna_rxf_rss_cfg_apply(struct bna_rxf *rxf)
609 {
610  if (rxf->rss_pending) {
611  if (rxf->rss_pending & BNA_RSS_F_RIT_PENDING) {
612  rxf->rss_pending &= ~BNA_RSS_F_RIT_PENDING;
613  bna_bfi_rit_cfg(rxf);
614  return 1;
615  }
616 
617  if (rxf->rss_pending & BNA_RSS_F_CFG_PENDING) {
618  rxf->rss_pending &= ~BNA_RSS_F_CFG_PENDING;
619  bna_bfi_rss_cfg(rxf);
620  return 1;
621  }
622 
623  if (rxf->rss_pending & BNA_RSS_F_STATUS_PENDING) {
624  rxf->rss_pending &= ~BNA_RSS_F_STATUS_PENDING;
625  bna_bfi_rss_enable(rxf);
626  return 1;
627  }
628  }
629 
630  return 0;
631 }
632 
633 static int
634 bna_rxf_cfg_apply(struct bna_rxf *rxf)
635 {
636  if (bna_rxf_ucast_cfg_apply(rxf))
637  return 1;
638 
639  if (bna_rxf_mcast_cfg_apply(rxf))
640  return 1;
641 
642  if (bna_rxf_promisc_cfg_apply(rxf))
643  return 1;
644 
645  if (bna_rxf_allmulti_cfg_apply(rxf))
646  return 1;
647 
648  if (bna_rxf_vlan_cfg_apply(rxf))
649  return 1;
650 
651  if (bna_rxf_vlan_strip_cfg_apply(rxf))
652  return 1;
653 
654  if (bna_rxf_rss_cfg_apply(rxf))
655  return 1;
656 
657  return 0;
658 }
659 
660 /* Only software reset */
661 static int
662 bna_rxf_fltr_clear(struct bna_rxf *rxf)
663 {
664  if (bna_rxf_ucast_cfg_reset(rxf, BNA_HARD_CLEANUP))
665  return 1;
666 
667  if (bna_rxf_mcast_cfg_reset(rxf, BNA_HARD_CLEANUP))
668  return 1;
669 
670  if (bna_rxf_promisc_cfg_reset(rxf, BNA_HARD_CLEANUP))
671  return 1;
672 
673  if (bna_rxf_allmulti_cfg_reset(rxf, BNA_HARD_CLEANUP))
674  return 1;
675 
676  return 0;
677 }
678 
679 static void
680 bna_rxf_cfg_reset(struct bna_rxf *rxf)
681 {
682  bna_rxf_ucast_cfg_reset(rxf, BNA_SOFT_CLEANUP);
683  bna_rxf_mcast_cfg_reset(rxf, BNA_SOFT_CLEANUP);
684  bna_rxf_promisc_cfg_reset(rxf, BNA_SOFT_CLEANUP);
685  bna_rxf_allmulti_cfg_reset(rxf, BNA_SOFT_CLEANUP);
686  bna_rxf_vlan_cfg_soft_reset(rxf);
687  bna_rxf_rss_cfg_soft_reset(rxf);
688 }
689 
690 static void
691 bna_rit_init(struct bna_rxf *rxf, int rit_size)
692 {
693  struct bna_rx *rx = rxf->rx;
694  struct bna_rxp *rxp;
695  struct list_head *qe;
696  int offset = 0;
697 
698  rxf->rit_size = rit_size;
699  list_for_each(qe, &rx->rxp_q) {
700  rxp = (struct bna_rxp *)qe;
701  rxf->rit[offset] = rxp->cq.ccb->id;
702  offset++;
703  }
704 
705 }
706 
707 void
708 bna_bfi_rxf_cfg_rsp(struct bna_rxf *rxf, struct bfi_msgq_mhdr *msghdr)
709 {
710  bfa_fsm_send_event(rxf, RXF_E_FW_RESP);
711 }
712 
713 void
714 bna_bfi_rxf_mcast_add_rsp(struct bna_rxf *rxf,
715  struct bfi_msgq_mhdr *msghdr)
716 {
717  struct bfi_enet_mcast_add_req *req =
718  &rxf->bfi_enet_cmd.mcast_add_req;
719  struct bfi_enet_mcast_add_rsp *rsp =
720  (struct bfi_enet_mcast_add_rsp *)msghdr;
721 
722  bna_rxf_mchandle_attach(rxf, (u8 *)&req->mac_addr,
723  ntohs(rsp->handle));
724  bfa_fsm_send_event(rxf, RXF_E_FW_RESP);
725 }
726 
727 static void
728 bna_rxf_init(struct bna_rxf *rxf,
729  struct bna_rx *rx,
730  struct bna_rx_config *q_config,
731  struct bna_res_info *res_info)
732 {
733  rxf->rx = rx;
734 
735  INIT_LIST_HEAD(&rxf->ucast_pending_add_q);
736  INIT_LIST_HEAD(&rxf->ucast_pending_del_q);
737  rxf->ucast_pending_set = 0;
738  rxf->ucast_active_set = 0;
739  INIT_LIST_HEAD(&rxf->ucast_active_q);
740  rxf->ucast_pending_mac = NULL;
741 
742  INIT_LIST_HEAD(&rxf->mcast_pending_add_q);
743  INIT_LIST_HEAD(&rxf->mcast_pending_del_q);
744  INIT_LIST_HEAD(&rxf->mcast_active_q);
745  INIT_LIST_HEAD(&rxf->mcast_handle_q);
746 
747  if (q_config->paused)
748  rxf->flags |= BNA_RXF_F_PAUSED;
749 
750  rxf->rit = (u8 *)
751  res_info[BNA_RX_RES_MEM_T_RIT].res_u.mem_info.mdl[0].kva;
752  bna_rit_init(rxf, q_config->num_paths);
753 
754  rxf->rss_status = q_config->rss_status;
755  if (rxf->rss_status == BNA_STATUS_T_ENABLED) {
756  rxf->rss_cfg = q_config->rss_config;
757  rxf->rss_pending = (BNA_RSS_F_RIT_PENDING |
758  BNA_RSS_F_CFG_PENDING |
759  BNA_RSS_F_STATUS_PENDING);
760  }
761 
763  memset(rxf->vlan_filter_table, 0,
764  (sizeof(u32) * (BFI_ENET_VLAN_ID_MAX / 32)));
765  rxf->vlan_filter_table[0] |= 1; /* for pure priority tagged frames */
766  rxf->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL;
767 
768  rxf->vlan_strip_status = q_config->vlan_strip_status;
769 
770  bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
771 }
772 
773 static void
774 bna_rxf_uninit(struct bna_rxf *rxf)
775 {
776  struct bna_mac *mac;
777 
778  rxf->ucast_pending_set = 0;
779  rxf->ucast_active_set = 0;
780 
781  while (!list_empty(&rxf->ucast_pending_add_q)) {
782  bfa_q_deq(&rxf->ucast_pending_add_q, &mac);
783  bfa_q_qe_init(&mac->qe);
784  bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
785  }
786 
787  if (rxf->ucast_pending_mac) {
788  bfa_q_qe_init(&rxf->ucast_pending_mac->qe);
789  bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod,
790  rxf->ucast_pending_mac);
791  rxf->ucast_pending_mac = NULL;
792  }
793 
794  while (!list_empty(&rxf->mcast_pending_add_q)) {
795  bfa_q_deq(&rxf->mcast_pending_add_q, &mac);
796  bfa_q_qe_init(&mac->qe);
797  bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
798  }
799 
800  rxf->rxmode_pending = 0;
801  rxf->rxmode_pending_bitmask = 0;
802  if (rxf->rx->bna->promisc_rid == rxf->rx->rid)
803  rxf->rx->bna->promisc_rid = BFI_INVALID_RID;
804  if (rxf->rx->bna->default_mode_rid == rxf->rx->rid)
805  rxf->rx->bna->default_mode_rid = BFI_INVALID_RID;
806 
807  rxf->rss_pending = 0;
808  rxf->vlan_strip_pending = false;
809 
810  rxf->flags = 0;
811 
812  rxf->rx = NULL;
813 }
814 
815 static void
816 bna_rx_cb_rxf_started(struct bna_rx *rx)
817 {
818  bfa_fsm_send_event(rx, RX_E_RXF_STARTED);
819 }
820 
821 static void
822 bna_rxf_start(struct bna_rxf *rxf)
823 {
824  rxf->start_cbfn = bna_rx_cb_rxf_started;
825  rxf->start_cbarg = rxf->rx;
826  bfa_fsm_send_event(rxf, RXF_E_START);
827 }
828 
829 static void
830 bna_rx_cb_rxf_stopped(struct bna_rx *rx)
831 {
832  bfa_fsm_send_event(rx, RX_E_RXF_STOPPED);
833 }
834 
835 static void
836 bna_rxf_stop(struct bna_rxf *rxf)
837 {
838  rxf->stop_cbfn = bna_rx_cb_rxf_stopped;
839  rxf->stop_cbarg = rxf->rx;
840  bfa_fsm_send_event(rxf, RXF_E_STOP);
841 }
842 
843 static void
844 bna_rxf_fail(struct bna_rxf *rxf)
845 {
846  bfa_fsm_send_event(rxf, RXF_E_FAIL);
847 }
848 
849 enum bna_cb_status
850 bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac,
851  void (*cbfn)(struct bnad *, struct bna_rx *))
852 {
853  struct bna_rxf *rxf = &rx->rxf;
854 
855  if (rxf->ucast_pending_mac == NULL) {
856  rxf->ucast_pending_mac =
857  bna_ucam_mod_mac_get(&rxf->rx->bna->ucam_mod);
858  if (rxf->ucast_pending_mac == NULL)
859  return BNA_CB_UCAST_CAM_FULL;
860  bfa_q_qe_init(&rxf->ucast_pending_mac->qe);
861  }
862 
863  memcpy(rxf->ucast_pending_mac->addr, ucmac, ETH_ALEN);
864  rxf->ucast_pending_set = 1;
865  rxf->cam_fltr_cbfn = cbfn;
866  rxf->cam_fltr_cbarg = rx->bna->bnad;
867 
868  bfa_fsm_send_event(rxf, RXF_E_CONFIG);
869 
870  return BNA_CB_SUCCESS;
871 }
872 
873 enum bna_cb_status
874 bna_rx_mcast_add(struct bna_rx *rx, u8 *addr,
875  void (*cbfn)(struct bnad *, struct bna_rx *))
876 {
877  struct bna_rxf *rxf = &rx->rxf;
878  struct bna_mac *mac;
879 
880  /* Check if already added or pending addition */
881  if (bna_mac_find(&rxf->mcast_active_q, addr) ||
882  bna_mac_find(&rxf->mcast_pending_add_q, addr)) {
883  if (cbfn)
884  cbfn(rx->bna->bnad, rx);
885  return BNA_CB_SUCCESS;
886  }
887 
888  mac = bna_mcam_mod_mac_get(&rxf->rx->bna->mcam_mod);
889  if (mac == NULL)
890  return BNA_CB_MCAST_LIST_FULL;
891  bfa_q_qe_init(&mac->qe);
892  memcpy(mac->addr, addr, ETH_ALEN);
893  list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
894 
895  rxf->cam_fltr_cbfn = cbfn;
896  rxf->cam_fltr_cbarg = rx->bna->bnad;
897 
898  bfa_fsm_send_event(rxf, RXF_E_CONFIG);
899 
900  return BNA_CB_SUCCESS;
901 }
902 
903 enum bna_cb_status
904 bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist,
905  void (*cbfn)(struct bnad *, struct bna_rx *))
906 {
907  struct bna_rxf *rxf = &rx->rxf;
908  struct list_head list_head;
909  struct list_head *qe;
910  u8 *mcaddr;
911  struct bna_mac *mac;
912  int i;
913 
914  /* Allocate nodes */
915  INIT_LIST_HEAD(&list_head);
916  for (i = 0, mcaddr = mclist; i < count; i++) {
917  mac = bna_mcam_mod_mac_get(&rxf->rx->bna->mcam_mod);
918  if (mac == NULL)
919  goto err_return;
920  bfa_q_qe_init(&mac->qe);
921  memcpy(mac->addr, mcaddr, ETH_ALEN);
922  list_add_tail(&mac->qe, &list_head);
923 
924  mcaddr += ETH_ALEN;
925  }
926 
927  /* Purge the pending_add_q */
928  while (!list_empty(&rxf->mcast_pending_add_q)) {
929  bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
930  bfa_q_qe_init(qe);
931  mac = (struct bna_mac *)qe;
932  bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
933  }
934 
935  /* Schedule active_q entries for deletion */
936  while (!list_empty(&rxf->mcast_active_q)) {
937  bfa_q_deq(&rxf->mcast_active_q, &qe);
938  mac = (struct bna_mac *)qe;
939  bfa_q_qe_init(&mac->qe);
940  list_add_tail(&mac->qe, &rxf->mcast_pending_del_q);
941  }
942 
943  /* Add the new entries */
944  while (!list_empty(&list_head)) {
945  bfa_q_deq(&list_head, &qe);
946  mac = (struct bna_mac *)qe;
947  bfa_q_qe_init(&mac->qe);
948  list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
949  }
950 
951  rxf->cam_fltr_cbfn = cbfn;
952  rxf->cam_fltr_cbarg = rx->bna->bnad;
953  bfa_fsm_send_event(rxf, RXF_E_CONFIG);
954 
955  return BNA_CB_SUCCESS;
956 
957 err_return:
958  while (!list_empty(&list_head)) {
959  bfa_q_deq(&list_head, &qe);
960  mac = (struct bna_mac *)qe;
961  bfa_q_qe_init(&mac->qe);
962  bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
963  }
964 
965  return BNA_CB_MCAST_LIST_FULL;
966 }
967 
968 void
969 bna_rx_vlan_add(struct bna_rx *rx, int vlan_id)
970 {
971  struct bna_rxf *rxf = &rx->rxf;
972  int index = (vlan_id >> BFI_VLAN_WORD_SHIFT);
973  int bit = (1 << (vlan_id & BFI_VLAN_WORD_MASK));
974  int group_id = (vlan_id >> BFI_VLAN_BLOCK_SHIFT);
975 
976  rxf->vlan_filter_table[index] |= bit;
977  if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
978  rxf->vlan_pending_bitmask |= (1 << group_id);
979  bfa_fsm_send_event(rxf, RXF_E_CONFIG);
980  }
981 }
982 
983 void
984 bna_rx_vlan_del(struct bna_rx *rx, int vlan_id)
985 {
986  struct bna_rxf *rxf = &rx->rxf;
987  int index = (vlan_id >> BFI_VLAN_WORD_SHIFT);
988  int bit = (1 << (vlan_id & BFI_VLAN_WORD_MASK));
989  int group_id = (vlan_id >> BFI_VLAN_BLOCK_SHIFT);
990 
991  rxf->vlan_filter_table[index] &= ~bit;
992  if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
993  rxf->vlan_pending_bitmask |= (1 << group_id);
994  bfa_fsm_send_event(rxf, RXF_E_CONFIG);
995  }
996 }
997 
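A minimal usage sketch (the helper name below is hypothetical; the real callers
live in the driver's bnad layer, which also serializes access to the bna_rx):

/* Illustrative only: toggles one VLAN id on an Rx object. */
static void example_update_vlan(struct bna_rx *rx, int vid, bool add)
{
	if (add)
		bna_rx_vlan_add(rx, vid);	/* set filter bit, mark block pending */
	else
		bna_rx_vlan_del(rx, vid);	/* clear filter bit, mark block pending */
}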
998 static int
999 bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf)
1000 {
1001  struct bna_mac *mac = NULL;
1002  struct list_head *qe;
1003 
1004  /* Delete MAC addresses previously added */
1005  if (!list_empty(&rxf->ucast_pending_del_q)) {
1006  bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
1007  bfa_q_qe_init(qe);
1008  mac = (struct bna_mac *)qe;
1009  bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
1010  bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
1011  return 1;
1012  }
1013 
1014  /* Set default unicast MAC */
1015  if (rxf->ucast_pending_set) {
1016  rxf->ucast_pending_set = 0;
1017  memcpy(rxf->ucast_active_mac.addr,
1018  rxf->ucast_pending_mac->addr, ETH_ALEN);
1019  rxf->ucast_active_set = 1;
1020  bna_bfi_ucast_req(rxf, &rxf->ucast_active_mac,
1021  BFI_ENET_H2I_MAC_UCAST_SET_REQ);
1022  return 1;
1023  }
1024 
1025  /* Add additional MAC entries */
1026  if (!list_empty(&rxf->ucast_pending_add_q)) {
1027  bfa_q_deq(&rxf->ucast_pending_add_q, &qe);
1028  bfa_q_qe_init(qe);
1029  mac = (struct bna_mac *)qe;
1030  list_add_tail(&mac->qe, &rxf->ucast_active_q);
1031  bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_ADD_REQ);
1032  return 1;
1033  }
1034 
1035  return 0;
1036 }
1037 
1038 static int
1039 bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
1040 {
1041  struct list_head *qe;
1042  struct bna_mac *mac;
1043 
1044  /* Throw away delete pending ucast entries */
1045  while (!list_empty(&rxf->ucast_pending_del_q)) {
1046  bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
1047  bfa_q_qe_init(qe);
1048  mac = (struct bna_mac *)qe;
1049  if (cleanup == BNA_SOFT_CLEANUP)
1050  bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
1051  else {
1052  bna_bfi_ucast_req(rxf, mac,
1053  BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
1054  bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
1055  return 1;
1056  }
1057  }
1058 
1059  /* Move active ucast entries to pending_add_q */
1060  while (!list_empty(&rxf->ucast_active_q)) {
1061  bfa_q_deq(&rxf->ucast_active_q, &qe);
1062  bfa_q_qe_init(qe);
1063  list_add_tail(qe, &rxf->ucast_pending_add_q);
1064  if (cleanup == BNA_HARD_CLEANUP) {
1065  mac = (struct bna_mac *)qe;
1066  bna_bfi_ucast_req(rxf, mac,
1067  BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
1068  return 1;
1069  }
1070  }
1071 
1072  if (rxf->ucast_active_set) {
1073  rxf->ucast_pending_set = 1;
1074  rxf->ucast_active_set = 0;
1075  if (cleanup == BNA_HARD_CLEANUP) {
1076  bna_bfi_ucast_req(rxf, &rxf->ucast_active_mac,
1077  BFI_ENET_H2I_MAC_UCAST_CLR_REQ);
1078  return 1;
1079  }
1080  }
1081 
1082  return 0;
1083 }
1084 
1085 static int
1086 bna_rxf_promisc_cfg_apply(struct bna_rxf *rxf)
1087 {
1088  struct bna *bna = rxf->rx->bna;
1089 
1090  /* Enable/disable promiscuous mode */
1091  if (is_promisc_enable(rxf->rxmode_pending,
1092  rxf->rxmode_pending_bitmask)) {
1093  /* move promisc configuration from pending -> active */
1094  promisc_inactive(rxf->rxmode_pending,
1095  rxf->rxmode_pending_bitmask);
1096  rxf->rxmode_active |= BNA_RXMODE_PROMISC;
1097  bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_ENABLED);
1098  return 1;
1099  } else if (is_promisc_disable(rxf->rxmode_pending,
1100  rxf->rxmode_pending_bitmask)) {
1101  /* move promisc configuration from pending -> active */
1102  promisc_inactive(rxf->rxmode_pending,
1103  rxf->rxmode_pending_bitmask);
1104  rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
1105  bna->promisc_rid = BFI_INVALID_RID;
1106  bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
1107  return 1;
1108  }
1109 
1110  return 0;
1111 }
1112 
1113 static int
1114 bna_rxf_promisc_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
1115 {
1116  struct bna *bna = rxf->rx->bna;
1117 
1118  /* Clear pending promisc mode disable */
1120  rxf->rxmode_pending_bitmask)) {
1122  rxf->rxmode_pending_bitmask);
1125  if (cleanup == BNA_HARD_CLEANUP) {
1126  bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
1127  return 1;
1128  }
1129  }
1130 
1131  /* Move promisc mode config from active -> pending */
1132  if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
1134  rxf->rxmode_pending_bitmask);
1136  if (cleanup == BNA_HARD_CLEANUP) {
1137  bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
1138  return 1;
1139  }
1140  }
1141 
1142  return 0;
1143 }
1144 
1145 static int
1146 bna_rxf_allmulti_cfg_apply(struct bna_rxf *rxf)
1147 {
1148  /* Enable/disable allmulti mode */
1150  rxf->rxmode_pending_bitmask)) {
1151  /* move allmulti configuration from pending -> active */
1153  rxf->rxmode_pending_bitmask);
1155  bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_DISABLED);
1156  return 1;
1157  } else if (is_allmulti_disable(rxf->rxmode_pending,
1158  rxf->rxmode_pending_bitmask)) {
1159  /* move allmulti configuration from pending -> active */
1161  rxf->rxmode_pending_bitmask);
1163  bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
1164  return 1;
1165  }
1166 
1167  return 0;
1168 }
1169 
1170 static int
1171 bna_rxf_allmulti_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
1172 {
1173  /* Clear pending allmulti mode disable */
1175  rxf->rxmode_pending_bitmask)) {
1177  rxf->rxmode_pending_bitmask);
1179  if (cleanup == BNA_HARD_CLEANUP) {
1180  bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
1181  return 1;
1182  }
1183  }
1184 
1185  /* Move allmulti mode config from active -> pending */
1186  if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
1188  rxf->rxmode_pending_bitmask);
1190  if (cleanup == BNA_HARD_CLEANUP) {
1191  bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
1192  return 1;
1193  }
1194  }
1195 
1196  return 0;
1197 }
1198 
1199 static int
1200 bna_rxf_promisc_enable(struct bna_rxf *rxf)
1201 {
1202  struct bna *bna = rxf->rx->bna;
1203  int ret = 0;
1204 
1206  rxf->rxmode_pending_bitmask) ||
1207  (rxf->rxmode_active & BNA_RXMODE_PROMISC)) {
1208  /* Do nothing if pending enable or already enabled */
1209  } else if (is_promisc_disable(rxf->rxmode_pending,
1210  rxf->rxmode_pending_bitmask)) {
1211  /* Turn off pending disable command */
1213  rxf->rxmode_pending_bitmask);
1214  } else {
1215  /* Schedule enable */
1217  rxf->rxmode_pending_bitmask);
1218  bna->promisc_rid = rxf->rx->rid;
1219  ret = 1;
1220  }
1221 
1222  return ret;
1223 }
1224 
1225 static int
1226 bna_rxf_promisc_disable(struct bna_rxf *rxf)
1227 {
1228  struct bna *bna = rxf->rx->bna;
1229  int ret = 0;
1230 
1232  rxf->rxmode_pending_bitmask) ||
1233  (!(rxf->rxmode_active & BNA_RXMODE_PROMISC))) {
1234  /* Do nothing if pending disable or already disabled */
1235  } else if (is_promisc_enable(rxf->rxmode_pending,
1236  rxf->rxmode_pending_bitmask)) {
1237  /* Turn off pending enable command */
1239  rxf->rxmode_pending_bitmask);
1241  } else if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
1242  /* Schedule disable */
1244  rxf->rxmode_pending_bitmask);
1245  ret = 1;
1246  }
1247 
1248  return ret;
1249 }
1250 
1251 static int
1252 bna_rxf_allmulti_enable(struct bna_rxf *rxf)
1253 {
1254  int ret = 0;
1255 
1257  rxf->rxmode_pending_bitmask) ||
1259  /* Do nothing if pending enable or already enabled */
1260  } else if (is_allmulti_disable(rxf->rxmode_pending,
1261  rxf->rxmode_pending_bitmask)) {
1262  /* Turn off pending disable command */
1264  rxf->rxmode_pending_bitmask);
1265  } else {
1266  /* Schedule enable */
1268  rxf->rxmode_pending_bitmask);
1269  ret = 1;
1270  }
1271 
1272  return ret;
1273 }
1274 
1275 static int
1276 bna_rxf_allmulti_disable(struct bna_rxf *rxf)
1277 {
1278  int ret = 0;
1279 
1281  rxf->rxmode_pending_bitmask) ||
1282  (!(rxf->rxmode_active & BNA_RXMODE_ALLMULTI))) {
1283  /* Do nothing if pending disable or already disabled */
1284  } else if (is_allmulti_enable(rxf->rxmode_pending,
1285  rxf->rxmode_pending_bitmask)) {
1286  /* Turn off pending enable command */
1288  rxf->rxmode_pending_bitmask);
1289  } else if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
1290  /* Schedule disable */
1292  rxf->rxmode_pending_bitmask);
1293  ret = 1;
1294  }
1295 
1296  return ret;
1297 }
1298 
1299 static int
1300 bna_rxf_vlan_strip_cfg_apply(struct bna_rxf *rxf)
1301 {
1302  if (rxf->vlan_strip_pending) {
1303  rxf->vlan_strip_pending = false;
1304  bna_bfi_vlan_strip_enable(rxf);
1305  return 1;
1306  }
1307 
1308  return 0;
1309 }
1310 
1311 /* RX */
1312 
1313 #define BNA_GET_RXQS(qcfg) (((qcfg)->rxp_type == BNA_RXP_SINGLE) ? \
1314  (qcfg)->num_paths : ((qcfg)->num_paths * 2))
1315 
1316 #define SIZE_TO_PAGES(size) (((size) >> PAGE_SHIFT) + ((((size) &\
1317  (PAGE_SIZE - 1)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT))
1318 
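/* For reference (illustrative values only): with PAGE_SIZE = 4096,
 * SIZE_TO_PAGES(4096) == 1 and SIZE_TO_PAGES(4097) == 2, i.e. the macro is a
 * round-up division of the queue size by PAGE_SIZE.
 */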
1319 #define call_rx_stop_cbfn(rx) \
1320 do { \
1321  if ((rx)->stop_cbfn) { \
1322  void (*cbfn)(void *, struct bna_rx *); \
1323  void *cbarg; \
1324  cbfn = (rx)->stop_cbfn; \
1325  cbarg = (rx)->stop_cbarg; \
1326  (rx)->stop_cbfn = NULL; \
1327  (rx)->stop_cbarg = NULL; \
1328  cbfn(cbarg, rx); \
1329  } \
1330 } while (0)
1331 
1332 #define call_rx_stall_cbfn(rx) \
1333 do { \
1334  if ((rx)->rx_stall_cbfn) \
1335  (rx)->rx_stall_cbfn((rx)->bna->bnad, (rx)); \
1336 } while (0)
1337 
1338 #define bfi_enet_datapath_q_init(bfi_q, bna_qpt) \
1339 do { \
1340  struct bna_dma_addr cur_q_addr = \
1341  *((struct bna_dma_addr *)((bna_qpt)->kv_qpt_ptr)); \
1342  (bfi_q)->pg_tbl.a32.addr_lo = (bna_qpt)->hw_qpt_ptr.lsb; \
1343  (bfi_q)->pg_tbl.a32.addr_hi = (bna_qpt)->hw_qpt_ptr.msb; \
1344  (bfi_q)->first_entry.a32.addr_lo = cur_q_addr.lsb; \
1345  (bfi_q)->first_entry.a32.addr_hi = cur_q_addr.msb; \
1346  (bfi_q)->pages = htons((u16)(bna_qpt)->page_count); \
1347  (bfi_q)->page_sz = htons((u16)(bna_qpt)->page_size);\
1348 } while (0)
1349 
1350 static void bna_bfi_rx_enet_start(struct bna_rx *rx);
1351 static void bna_rx_enet_stop(struct bna_rx *rx);
1352 static void bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx);
1353 
1354 bfa_fsm_state_decl(bna_rx, stopped,
1355  struct bna_rx, enum bna_rx_event);
1356 bfa_fsm_state_decl(bna_rx, start_wait,
1357  struct bna_rx, enum bna_rx_event);
1358 bfa_fsm_state_decl(bna_rx, rxf_start_wait,
1359  struct bna_rx, enum bna_rx_event);
1360 bfa_fsm_state_decl(bna_rx, started,
1361  struct bna_rx, enum bna_rx_event);
1362 bfa_fsm_state_decl(bna_rx, rxf_stop_wait,
1363  struct bna_rx, enum bna_rx_event);
1364 bfa_fsm_state_decl(bna_rx, stop_wait,
1365  struct bna_rx, enum bna_rx_event);
1366 bfa_fsm_state_decl(bna_rx, cleanup_wait,
1367  struct bna_rx, enum bna_rx_event);
1368 bfa_fsm_state_decl(bna_rx, failed,
1369  struct bna_rx, enum bna_rx_event);
1370 bfa_fsm_state_decl(bna_rx, quiesce_wait,
1371  struct bna_rx, enum bna_rx_event);
1372 
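/* RX state machine: stopped, start_wait, rxf_start_wait, started,
 * rxf_stop_wait, stop_wait, cleanup_wait, failed and quiesce_wait, driven by
 * RX_E_* events.
 */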
1373 static void bna_rx_sm_stopped_entry(struct bna_rx *rx)
1374 {
1375  call_rx_stop_cbfn(rx);
1376 }
1377 
1378 static void bna_rx_sm_stopped(struct bna_rx *rx,
1379  enum bna_rx_event event)
1380 {
1381  switch (event) {
1382  case RX_E_START:
1383  bfa_fsm_set_state(rx, bna_rx_sm_start_wait);
1384  break;
1385 
1386  case RX_E_STOP:
1387  call_rx_stop_cbfn(rx);
1388  break;
1389 
1390  case RX_E_FAIL:
1391  /* no-op */
1392  break;
1393 
1394  default:
1395  bfa_sm_fault(event);
1396  break;
1397  }
1398 }
1399 
1400 static void bna_rx_sm_start_wait_entry(struct bna_rx *rx)
1401 {
1402  bna_bfi_rx_enet_start(rx);
1403 }
1404 
1405 void
1406 bna_rx_sm_stop_wait_entry(struct bna_rx *rx)
1407 {
1408 }
1409 
1410 static void
1411 bna_rx_sm_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
1412 {
1413  switch (event) {
1414  case RX_E_FAIL:
1415  case RX_E_STOPPED:
1416  bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
1417  rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
1418  break;
1419 
1420  case RX_E_STARTED:
1421  bna_rx_enet_stop(rx);
1422  break;
1423 
1424  default:
1425  bfa_sm_fault(event);
1426  break;
1427  }
1428 }
1429 
1430 static void bna_rx_sm_start_wait(struct bna_rx *rx,
1431  enum bna_rx_event event)
1432 {
1433  switch (event) {
1434  case RX_E_STOP:
1435  bfa_fsm_set_state(rx, bna_rx_sm_stop_wait);
1436  break;
1437 
1438  case RX_E_FAIL:
1439  bfa_fsm_set_state(rx, bna_rx_sm_stopped);
1440  break;
1441 
1442  case RX_E_STARTED:
1443  bfa_fsm_set_state(rx, bna_rx_sm_rxf_start_wait);
1444  break;
1445 
1446  default:
1447  bfa_sm_fault(event);
1448  break;
1449  }
1450 }
1451 
1452 static void bna_rx_sm_rxf_start_wait_entry(struct bna_rx *rx)
1453 {
1454  rx->rx_post_cbfn(rx->bna->bnad, rx);
1455  bna_rxf_start(&rx->rxf);
1456 }
1457 
1458 void
1459 bna_rx_sm_rxf_stop_wait_entry(struct bna_rx *rx)
1460 {
1461 }
1462 
1463 static void
1464 bna_rx_sm_rxf_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
1465 {
1466  switch (event) {
1467  case RX_E_FAIL:
1468  bfa_fsm_set_state(rx, bna_rx_sm_failed);
1469  bna_rxf_fail(&rx->rxf);
1470  call_rx_stall_cbfn(rx);
1471  rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
1472  break;
1473 
1474  case RX_E_RXF_STARTED:
1475  bna_rxf_stop(&rx->rxf);
1476  break;
1477 
1478  case RX_E_RXF_STOPPED:
1479  bfa_fsm_set_state(rx, bna_rx_sm_stop_wait);
1480  call_rx_stall_cbfn(rx);
1481  bna_rx_enet_stop(rx);
1482  break;
1483 
1484  default:
1485  bfa_sm_fault(event);
1486  break;
1487  }
1488 
1489 }
1490 
1491 void
1492 bna_rx_sm_started_entry(struct bna_rx *rx)
1493 {
1494  struct bna_rxp *rxp;
1495  struct list_head *qe_rxp;
1496  int is_regular = (rx->type == BNA_RX_T_REGULAR);
1497 
1498  /* Start IB */
1499  list_for_each(qe_rxp, &rx->rxp_q) {
1500  rxp = (struct bna_rxp *)qe_rxp;
1501  bna_ib_start(rx->bna, &rxp->cq.ib, is_regular);
1502  }
1503 
1504  bna_ethport_cb_rx_started(&rx->bna->ethport);
1505 }
1506 
1507 static void
1508 bna_rx_sm_started(struct bna_rx *rx, enum bna_rx_event event)
1509 {
1510  switch (event) {
1511  case RX_E_STOP:
1512  bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
1513  bna_ethport_cb_rx_stopped(&rx->bna->ethport);
1514  bna_rxf_stop(&rx->rxf);
1515  break;
1516 
1517  case RX_E_FAIL:
1518  bfa_fsm_set_state(rx, bna_rx_sm_failed);
1519  bna_ethport_cb_rx_stopped(&rx->bna->ethport);
1520  bna_rxf_fail(&rx->rxf);
1521  call_rx_stall_cbfn(rx);
1522  rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
1523  break;
1524 
1525  default:
1526  bfa_sm_fault(event);
1527  break;
1528  }
1529 }
1530 
1531 static void bna_rx_sm_rxf_start_wait(struct bna_rx *rx,
1532  enum bna_rx_event event)
1533 {
1534  switch (event) {
1535  case RX_E_STOP:
1536  bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
1537  break;
1538 
1539  case RX_E_FAIL:
1540  bfa_fsm_set_state(rx, bna_rx_sm_failed);
1541  bna_rxf_fail(&rx->rxf);
1542  call_rx_stall_cbfn(rx);
1543  rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
1544  break;
1545 
1546  case RX_E_RXF_STARTED:
1547  bfa_fsm_set_state(rx, bna_rx_sm_started);
1548  break;
1549 
1550  default:
1551  bfa_sm_fault(event);
1552  break;
1553  }
1554 }
1555 
1556 void
1557 bna_rx_sm_cleanup_wait_entry(struct bna_rx *rx)
1558 {
1559 }
1560 
1561 void
1562 bna_rx_sm_cleanup_wait(struct bna_rx *rx, enum bna_rx_event event)
1563 {
1564  switch (event) {
1565  case RX_E_FAIL:
1566  case RX_E_RXF_STOPPED:
1567  /* No-op */
1568  break;
1569 
1570  case RX_E_CLEANUP_DONE:
1571  bfa_fsm_set_state(rx, bna_rx_sm_stopped);
1572  break;
1573 
1574  default:
1575  bfa_sm_fault(event);
1576  break;
1577  }
1578 }
1579 
1580 static void
1581 bna_rx_sm_failed_entry(struct bna_rx *rx)
1582 {
1583 }
1584 
1585 static void
1586 bna_rx_sm_failed(struct bna_rx *rx, enum bna_rx_event event)
1587 {
1588  switch (event) {
1589  case RX_E_START:
1590  bfa_fsm_set_state(rx, bna_rx_sm_quiesce_wait);
1591  break;
1592 
1593  case RX_E_STOP:
1594  bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
1595  break;
1596 
1597  case RX_E_FAIL:
1598  case RX_E_RXF_STARTED:
1599  case RX_E_RXF_STOPPED:
1600  /* No-op */
1601  break;
1602 
1603  case RX_E_CLEANUP_DONE:
1604  bfa_fsm_set_state(rx, bna_rx_sm_stopped);
1605  break;
1606 
1607  default:
1608  bfa_sm_fault(event);
1609  break;
1610 } }
1611 
1612 static void
1613 bna_rx_sm_quiesce_wait_entry(struct bna_rx *rx)
1614 {
1615 }
1616 
1617 static void
1618 bna_rx_sm_quiesce_wait(struct bna_rx *rx, enum bna_rx_event event)
1619 {
1620  switch (event) {
1621  case RX_E_STOP:
1622  bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
1623  break;
1624 
1625  case RX_E_FAIL:
1626  bfa_fsm_set_state(rx, bna_rx_sm_failed);
1627  break;
1628 
1629  case RX_E_CLEANUP_DONE:
1630  bfa_fsm_set_state(rx, bna_rx_sm_start_wait);
1631  break;
1632 
1633  default:
1634  bfa_sm_fault(event);
1635  break;
1636  }
1637 }
1638 
1639 static void
1640 bna_bfi_rx_enet_start(struct bna_rx *rx)
1641 {
1642  struct bfi_enet_rx_cfg_req *cfg_req = &rx->bfi_enet_cmd.cfg_req;
1643  struct bna_rxp *rxp = NULL;
1644  struct bna_rxq *q0 = NULL, *q1 = NULL;
1645  struct list_head *rxp_qe;
1646  int i;
1647 
1648  bfi_msgq_mhdr_set(cfg_req->mh, BFI_MC_ENET,
1649  BFI_ENET_H2I_RX_CFG_SET_REQ, 0, rx->rid);
1650  cfg_req->mh.num_entries = htons(
1651  bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_cfg_req)));
1652 
1653  cfg_req->num_queue_sets = rx->num_paths;
1654  for (i = 0, rxp_qe = bfa_q_first(&rx->rxp_q);
1655  i < rx->num_paths;
1656  i++, rxp_qe = bfa_q_next(rxp_qe)) {
1657  rxp = (struct bna_rxp *)rxp_qe;
1658 
1659  GET_RXQS(rxp, q0, q1);
1660  switch (rxp->type) {
1661  case BNA_RXP_SLR:
1662  case BNA_RXP_HDS:
1663  /* Small RxQ */
1664  bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].qs.q,
1665  &q1->qpt);
1666  cfg_req->q_cfg[i].qs.rx_buffer_size =
1667  htons((u16)q1->buffer_size);
1668  /* Fall through */
1669 
1670  case BNA_RXP_SINGLE:
1671  /* Large/Single RxQ */
1672  bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].ql.q,
1673  &q0->qpt);
1674  q0->buffer_size =
1675  bna_enet_mtu_get(&rx->bna->enet);
1676  cfg_req->q_cfg[i].ql.rx_buffer_size =
1677  htons((u16)q0->buffer_size);
1678  break;
1679 
1680  default:
1681  BUG_ON(1);
1682  }
1683 
1684  bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].cq.q,
1685  &rxp->cq.qpt);
1686 
1687  cfg_req->q_cfg[i].ib.index_addr.a32.addr_lo =
1688  rxp->cq.ib.ib_seg_host_addr.lsb;
1689  cfg_req->q_cfg[i].ib.index_addr.a32.addr_hi =
1690  rxp->cq.ib.ib_seg_host_addr.msb;
1691  cfg_req->q_cfg[i].ib.intr.msix_index =
1692  htons((u16)rxp->cq.ib.intr_vector);
1693  }
1694 
1695  cfg_req->ib_cfg.int_pkt_dma = BNA_STATUS_T_DISABLED;
1696  cfg_req->ib_cfg.int_enabled = BNA_STATUS_T_ENABLED;
1697  cfg_req->ib_cfg.int_pkt_enabled = BNA_STATUS_T_DISABLED;
1698  cfg_req->ib_cfg.continuous_coalescing = BNA_STATUS_T_DISABLED;
1699  cfg_req->ib_cfg.msix = (rxp->cq.ib.intr_type == BNA_INTR_T_MSIX)
1700  ? BNA_STATUS_T_ENABLED :
1701  BNA_STATUS_T_DISABLED;
1702  cfg_req->ib_cfg.coalescing_timeout =
1703  htonl((u32)rxp->cq.ib.coalescing_timeo);
1704  cfg_req->ib_cfg.inter_pkt_timeout =
1705  htonl((u32)rxp->cq.ib.interpkt_timeo);
1706  cfg_req->ib_cfg.inter_pkt_count = (u8)rxp->cq.ib.interpkt_count;
1707 
1708  switch (rxp->type) {
1709  case BNA_RXP_SLR:
1710  cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_LARGE_SMALL;
1711  break;
1712 
1713  case BNA_RXP_HDS:
1714  cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_HDS;
1715  cfg_req->rx_cfg.hds.type = rx->hds_cfg.hdr_type;
1716  cfg_req->rx_cfg.hds.force_offset = rx->hds_cfg.forced_offset;
1717  cfg_req->rx_cfg.hds.max_header_size = rx->hds_cfg.forced_offset;
1718  break;
1719 
1720  case BNA_RXP_SINGLE:
1721  cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_SINGLE;
1722  break;
1723 
1724  default:
1725  BUG_ON(1);
1726  }
1727  cfg_req->rx_cfg.strip_vlan = rx->rxf.vlan_strip_status;
1728 
1729  bfa_msgq_cmd_set(&rx->msgq_cmd, NULL, NULL,
1730  sizeof(struct bfi_enet_rx_cfg_req), &cfg_req->mh);
1731  bfa_msgq_cmd_post(&rx->bna->msgq, &rx->msgq_cmd);
1732 }
1733 
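/* Each queue set sent above carries one large RxQ (ql), an optional small or
 * header RxQ (qs) for SLR/HDS paths, the completion queue, and the IB index
 * segment plus MSI-X vector used for interrupt coalescing.
 */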
1734 static void
1735 bna_bfi_rx_enet_stop(struct bna_rx *rx)
1736 {
1737  struct bfi_enet_req *req = &rx->bfi_enet_cmd.req;
1738 
1739  bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, BFI_ENET_H2I_RX_CFG_CLR_REQ,
1740  0, rx->rid);
1741  req->mh.num_entries = htons(
1742  bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_req)));
1743  bfa_msgq_cmd_set(&rx->msgq_cmd, NULL, NULL, sizeof(struct bfi_enet_req),
1744  &req->mh);
1745  bfa_msgq_cmd_post(&rx->bna->msgq, &rx->msgq_cmd);
1746 }
1747 
1748 static void
1749 bna_rx_enet_stop(struct bna_rx *rx)
1750 {
1751  struct bna_rxp *rxp;
1752  struct list_head *qe_rxp;
1753 
1754  /* Stop IB */
1755  list_for_each(qe_rxp, &rx->rxp_q) {
1756  rxp = (struct bna_rxp *)qe_rxp;
1757  bna_ib_stop(rx->bna, &rxp->cq.ib);
1758  }
1759 
1760  bna_bfi_rx_enet_stop(rx);
1761 }
1762 
1763 static int
1764 bna_rx_res_check(struct bna_rx_mod *rx_mod, struct bna_rx_config *rx_cfg)
1765 {
1766  if ((rx_mod->rx_free_count == 0) ||
1767  (rx_mod->rxp_free_count == 0) ||
1768  (rx_mod->rxq_free_count == 0))
1769  return 0;
1770 
1771  if (rx_cfg->rxp_type == BNA_RXP_SINGLE) {
1772  if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
1773  (rx_mod->rxq_free_count < rx_cfg->num_paths))
1774  return 0;
1775  } else {
1776  if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
1777  (rx_mod->rxq_free_count < (2 * rx_cfg->num_paths)))
1778  return 0;
1779  }
1780 
1781  return 1;
1782 }
1783 
1784 static struct bna_rxq *
1785 bna_rxq_get(struct bna_rx_mod *rx_mod)
1786 {
1787  struct bna_rxq *rxq = NULL;
1788  struct list_head *qe = NULL;
1789 
1790  bfa_q_deq(&rx_mod->rxq_free_q, &qe);
1791  rx_mod->rxq_free_count--;
1792  rxq = (struct bna_rxq *)qe;
1793  bfa_q_qe_init(&rxq->qe);
1794 
1795  return rxq;
1796 }
1797 
1798 static void
1799 bna_rxq_put(struct bna_rx_mod *rx_mod, struct bna_rxq *rxq)
1800 {
1801  bfa_q_qe_init(&rxq->qe);
1802  list_add_tail(&rxq->qe, &rx_mod->rxq_free_q);
1803  rx_mod->rxq_free_count++;
1804 }
1805 
1806 static struct bna_rxp *
1807 bna_rxp_get(struct bna_rx_mod *rx_mod)
1808 {
1809  struct list_head *qe = NULL;
1810  struct bna_rxp *rxp = NULL;
1811 
1812  bfa_q_deq(&rx_mod->rxp_free_q, &qe);
1813  rx_mod->rxp_free_count--;
1814  rxp = (struct bna_rxp *)qe;
1815  bfa_q_qe_init(&rxp->qe);
1816 
1817  return rxp;
1818 }
1819 
1820 static void
1821 bna_rxp_put(struct bna_rx_mod *rx_mod, struct bna_rxp *rxp)
1822 {
1823  bfa_q_qe_init(&rxp->qe);
1824  list_add_tail(&rxp->qe, &rx_mod->rxp_free_q);
1825  rx_mod->rxp_free_count++;
1826 }
1827 
1828 static struct bna_rx *
1829 bna_rx_get(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
1830 {
1831  struct list_head *qe = NULL;
1832  struct bna_rx *rx = NULL;
1833 
1834  if (type == BNA_RX_T_REGULAR) {
1835  bfa_q_deq(&rx_mod->rx_free_q, &qe);
1836  } else
1837  bfa_q_deq_tail(&rx_mod->rx_free_q, &qe);
1838 
1839  rx_mod->rx_free_count--;
1840  rx = (struct bna_rx *)qe;
1841  bfa_q_qe_init(&rx->qe);
1842  list_add_tail(&rx->qe, &rx_mod->rx_active_q);
1843  rx->type = type;
1844 
1845  return rx;
1846 }
1847 
1848 static void
1849 bna_rx_put(struct bna_rx_mod *rx_mod, struct bna_rx *rx)
1850 {
1851  struct list_head *prev_qe = NULL;
1852  struct list_head *qe;
1853 
1854  bfa_q_qe_init(&rx->qe);
1855 
1856  list_for_each(qe, &rx_mod->rx_free_q) {
1857  if (((struct bna_rx *)qe)->rid < rx->rid)
1858  prev_qe = qe;
1859  else
1860  break;
1861  }
1862 
1863  if (prev_qe == NULL) {
1864  /* This is the first entry */
1865  bfa_q_enq_head(&rx_mod->rx_free_q, &rx->qe);
1866  } else if (bfa_q_next(prev_qe) == &rx_mod->rx_free_q) {
1867  /* This is the last entry */
1868  list_add_tail(&rx->qe, &rx_mod->rx_free_q);
1869  } else {
1870  /* Somewhere in the middle */
1871  bfa_q_next(&rx->qe) = bfa_q_next(prev_qe);
1872  bfa_q_prev(&rx->qe) = prev_qe;
1873  bfa_q_next(prev_qe) = &rx->qe;
1874  bfa_q_prev(bfa_q_next(&rx->qe)) = &rx->qe;
1875  }
1876 
1877  rx_mod->rx_free_count++;
1878 }
1879 
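/* bna_rx_put() keeps rx_free_q sorted by rid, so bna_rx_get() can hand out the
 * lowest rid from the head for regular Rx and the highest from the tail for
 * loopback Rx.
 */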
1880 static void
1881 bna_rxp_add_rxqs(struct bna_rxp *rxp, struct bna_rxq *q0,
1882  struct bna_rxq *q1)
1883 {
1884  switch (rxp->type) {
1885  case BNA_RXP_SINGLE:
1886  rxp->rxq.single.only = q0;
1887  rxp->rxq.single.reserved = NULL;
1888  break;
1889  case BNA_RXP_SLR:
1890  rxp->rxq.slr.large = q0;
1891  rxp->rxq.slr.small = q1;
1892  break;
1893  case BNA_RXP_HDS:
1894  rxp->rxq.hds.data = q0;
1895  rxp->rxq.hds.hdr = q1;
1896  break;
1897  default:
1898  break;
1899  }
1900 }
1901 
1902 static void
1903 bna_rxq_qpt_setup(struct bna_rxq *rxq,
1904  struct bna_rxp *rxp,
1905  u32 page_count,
1906  u32 page_size,
1907  struct bna_mem_descr *qpt_mem,
1908  struct bna_mem_descr *swqpt_mem,
1909  struct bna_mem_descr *page_mem)
1910 {
1911  int i;
1912 
1913  rxq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
1914  rxq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
1915  rxq->qpt.kv_qpt_ptr = qpt_mem->kva;
1916  rxq->qpt.page_count = page_count;
1917  rxq->qpt.page_size = page_size;
1918 
1919  rxq->rcb->sw_qpt = (void **) swqpt_mem->kva;
1920 
1921  for (i = 0; i < rxq->qpt.page_count; i++) {
1922  rxq->rcb->sw_qpt[i] = page_mem[i].kva;
1923  ((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].lsb =
1924  page_mem[i].dma.lsb;
1925  ((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].msb =
1926  page_mem[i].dma.msb;
1927  }
1928 }
1929 
1930 static void
1931 bna_rxp_cqpt_setup(struct bna_rxp *rxp,
1932  u32 page_count,
1933  u32 page_size,
1934  struct bna_mem_descr *qpt_mem,
1935  struct bna_mem_descr *swqpt_mem,
1936  struct bna_mem_descr *page_mem)
1937 {
1938  int i;
1939 
1940  rxp->cq.qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
1941  rxp->cq.qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
1942  rxp->cq.qpt.kv_qpt_ptr = qpt_mem->kva;
1943  rxp->cq.qpt.page_count = page_count;
1944  rxp->cq.qpt.page_size = page_size;
1945 
1946  rxp->cq.ccb->sw_qpt = (void **) swqpt_mem->kva;
1947 
1948  for (i = 0; i < rxp->cq.qpt.page_count; i++) {
1949  rxp->cq.ccb->sw_qpt[i] = page_mem[i].kva;
1950 
1951  ((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].lsb =
1952  page_mem[i].dma.lsb;
1953  ((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].msb =
1954  page_mem[i].dma.msb;
1955  }
1956 }
1957 
1958 static void
1959 bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx)
1960 {
1961  struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;
1962 
1963  bfa_wc_down(&rx_mod->rx_stop_wc);
1964 }
1965 
1966 static void
1967 bna_rx_mod_cb_rx_stopped_all(void *arg)
1968 {
1969  struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;
1970 
1971  if (rx_mod->stop_cbfn)
1972  rx_mod->stop_cbfn(&rx_mod->bna->enet);
1973  rx_mod->stop_cbfn = NULL;
1974 }
1975 
1976 static void
1977 bna_rx_start(struct bna_rx *rx)
1978 {
1979  rx->rx_flags |= BNA_RX_F_ENET_STARTED;
1980  if (rx->rx_flags & BNA_RX_F_ENABLED)
1981  bfa_fsm_send_event(rx, RX_E_START);
1982 }
1983 
1984 static void
1985 bna_rx_stop(struct bna_rx *rx)
1986 {
1987  rx->rx_flags &= ~BNA_RX_F_ENET_STARTED;
1988  if (rx->fsm == (bfa_fsm_t) bna_rx_sm_stopped)
1989  bna_rx_mod_cb_rx_stopped(&rx->bna->rx_mod, rx);
1990  else {
1991  rx->stop_cbfn = bna_rx_mod_cb_rx_stopped;
1992  rx->stop_cbarg = &rx->bna->rx_mod;
1993  bfa_fsm_send_event(rx, RX_E_STOP);
1994  }
1995 }
1996 
1997 static void
1998 bna_rx_fail(struct bna_rx *rx)
1999 {
2000  /* Indicate Enet is not enabled, and failed */
2001  rx->rx_flags &= ~BNA_RX_F_ENET_STARTED;
2002  bfa_fsm_send_event(rx, RX_E_FAIL);
2003 }
2004 
2005 void
2006 bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
2007 {
2008  struct bna_rx *rx;
2009  struct list_head *qe;
2010 
2011  rx_mod->flags |= BNA_RX_MOD_F_ENET_STARTED;
2012  if (type == BNA_RX_T_LOOPBACK)
2013  rx_mod->flags |= BNA_RX_MOD_F_ENET_LOOPBACK;
2014 
2015  list_for_each(qe, &rx_mod->rx_active_q) {
2016  rx = (struct bna_rx *)qe;
2017  if (rx->type == type)
2018  bna_rx_start(rx);
2019  }
2020 }
2021 
2022 void
2023 bna_rx_mod_stop(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
2024 {
2025  struct bna_rx *rx;
2026  struct list_head *qe;
2027 
2028  rx_mod->flags &= ~BNA_RX_MOD_F_ENET_STARTED;
2029  rx_mod->flags &= ~BNA_RX_MOD_F_ENET_LOOPBACK;
2030 
2031  rx_mod->stop_cbfn = bna_enet_cb_rx_stopped;
2032 
2033  bfa_wc_init(&rx_mod->rx_stop_wc, bna_rx_mod_cb_rx_stopped_all, rx_mod);
2034 
2035  list_for_each(qe, &rx_mod->rx_active_q) {
2036  rx = (struct bna_rx *)qe;
2037  if (rx->type == type) {
2038  bfa_wc_up(&rx_mod->rx_stop_wc);
2039  bna_rx_stop(rx);
2040  }
2041  }
2042 
2043  bfa_wc_wait(&rx_mod->rx_stop_wc);
2044 }
2045 
2046 void
2047 bna_rx_mod_fail(struct bna_rx_mod *rx_mod)
2048 {
2049  struct bna_rx *rx;
2050  struct list_head *qe;
2051 
2052  rx_mod->flags &= ~BNA_RX_MOD_F_ENET_STARTED;
2053  rx_mod->flags &= ~BNA_RX_MOD_F_ENET_LOOPBACK;
2054 
2055  list_for_each(qe, &rx_mod->rx_active_q) {
2056  rx = (struct bna_rx *)qe;
2057  bna_rx_fail(rx);
2058  }
2059 }
2060 
2061 void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
2062  struct bna_res_info *res_info)
2063 {
2064  int index;
2065  struct bna_rx *rx_ptr;
2066  struct bna_rxp *rxp_ptr;
2067  struct bna_rxq *rxq_ptr;
2068 
2069  rx_mod->bna = bna;
2070  rx_mod->flags = 0;
2071 
2072  rx_mod->rx = (struct bna_rx *)
2073  res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.mdl[0].kva;
2074  rx_mod->rxp = (struct bna_rxp *)
2075  res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mdl[0].kva;
2076  rx_mod->rxq = (struct bna_rxq *)
2077  res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mdl[0].kva;
2078 
2079  /* Initialize the queues */
2080  INIT_LIST_HEAD(&rx_mod->rx_free_q);
2081  rx_mod->rx_free_count = 0;
2082  INIT_LIST_HEAD(&rx_mod->rxq_free_q);
2083  rx_mod->rxq_free_count = 0;
2084  INIT_LIST_HEAD(&rx_mod->rxp_free_q);
2085  rx_mod->rxp_free_count = 0;
2086  INIT_LIST_HEAD(&rx_mod->rx_active_q);
2087 
2088  /* Build RX queues */
2089  for (index = 0; index < bna->ioceth.attr.num_rxp; index++) {
2090  rx_ptr = &rx_mod->rx[index];
2091 
2092  bfa_q_qe_init(&rx_ptr->qe);
2093  INIT_LIST_HEAD(&rx_ptr->rxp_q);
2094  rx_ptr->bna = NULL;
2095  rx_ptr->rid = index;
2096  rx_ptr->stop_cbfn = NULL;
2097  rx_ptr->stop_cbarg = NULL;
2098 
2099  list_add_tail(&rx_ptr->qe, &rx_mod->rx_free_q);
2100  rx_mod->rx_free_count++;
2101  }
2102 
2103  /* build RX-path queue */
2104  for (index = 0; index < bna->ioceth.attr.num_rxp; index++) {
2105  rxp_ptr = &rx_mod->rxp[index];
2106  bfa_q_qe_init(&rxp_ptr->qe);
2107  list_add_tail(&rxp_ptr->qe, &rx_mod->rxp_free_q);
2108  rx_mod->rxp_free_count++;
2109  }
2110 
2111  /* build RXQ queue */
2112  for (index = 0; index < (bna->ioceth.attr.num_rxp * 2); index++) {
2113  rxq_ptr = &rx_mod->rxq[index];
2114  bfa_q_qe_init(&rxq_ptr->qe);
2115  list_add_tail(&rxq_ptr->qe, &rx_mod->rxq_free_q);
2116  rx_mod->rxq_free_count++;
2117  }
2118 }
2119 
2120 void
2121 bna_rx_mod_uninit(struct bna_rx_mod *rx_mod)
2122 {
2123  struct list_head *qe;
2124  int i;
2125 
2126  i = 0;
2127  list_for_each(qe, &rx_mod->rx_free_q)
2128  i++;
2129 
2130  i = 0;
2131  list_for_each(qe, &rx_mod->rxp_free_q)
2132  i++;
2133 
2134  i = 0;
2135  list_for_each(qe, &rx_mod->rxq_free_q)
2136  i++;
2137 
2138  rx_mod->bna = NULL;
2139 }
2140 
2141 void
2142 bna_bfi_rx_enet_start_rsp(struct bna_rx *rx, struct bfi_msgq_mhdr *msghdr)
2143 {
2144  struct bfi_enet_rx_cfg_rsp *cfg_rsp = &rx->bfi_enet_cmd.cfg_rsp;
2145  struct bna_rxp *rxp = NULL;
2146  struct bna_rxq *q0 = NULL, *q1 = NULL;
2147  struct list_head *rxp_qe;
2148  int i;
2149 
2150  bfa_msgq_rsp_copy(&rx->bna->msgq, (u8 *)cfg_rsp,
2151  sizeof(struct bfi_enet_rx_cfg_rsp));
2152 
2153  rx->hw_id = cfg_rsp->hw_id;
2154 
2155  for (i = 0, rxp_qe = bfa_q_first(&rx->rxp_q);
2156  i < rx->num_paths;
2157  i++, rxp_qe = bfa_q_next(rxp_qe)) {
2158  rxp = (struct bna_rxp *)rxp_qe;
2159  GET_RXQS(rxp, q0, q1);
2160 
2161  /* Setup doorbells */
2162  rxp->cq.ccb->i_dbell->doorbell_addr =
2163  rx->bna->pcidev.pci_bar_kva
2164  + ntohl(cfg_rsp->q_handles[i].i_dbell);
2165  rxp->hw_id = cfg_rsp->q_handles[i].hw_cqid;
2166  q0->rcb->q_dbell =
2167  rx->bna->pcidev.pci_bar_kva
2168  + ntohl(cfg_rsp->q_handles[i].ql_dbell);
2169  q0->hw_id = cfg_rsp->q_handles[i].hw_lqid;
2170  if (q1) {
2171  q1->rcb->q_dbell =
2172  rx->bna->pcidev.pci_bar_kva
2173  + ntohl(cfg_rsp->q_handles[i].qs_dbell);
2174  q1->hw_id = cfg_rsp->q_handles[i].hw_sqid;
2175  }
2176 
2177  /* Initialize producer/consumer indexes */
2178  (*rxp->cq.ccb->hw_producer_index) = 0;
2179  rxp->cq.ccb->producer_index = 0;
2180  q0->rcb->producer_index = q0->rcb->consumer_index = 0;
2181  if (q1)
2182  q1->rcb->producer_index = q1->rcb->consumer_index = 0;
2183  }
2184 
2185  bfa_fsm_send_event(rx, RX_E_STARTED);
2186 }
2187 
2188 void
2190 {
2191  bfa_fsm_send_event(rx, RX_E_STOPPED);
2192 }
2193 
2194 void
2195 bna_rx_res_req(struct bna_rx_config *q_cfg, struct bna_res_info *res_info)
2196 {
2197  u32 cq_size, hq_size, dq_size;
2198  u32 cpage_count, hpage_count, dpage_count;
2199  struct bna_mem_info *mem_info;
2200  u32 cq_depth;
2201  u32 hq_depth;
2202  u32 dq_depth;
2203 
2204  dq_depth = q_cfg->q_depth;
2205  hq_depth = ((q_cfg->rxp_type == BNA_RXP_SINGLE) ? 0 : q_cfg->q_depth);
2206  cq_depth = dq_depth + hq_depth;
2207 
2208  BNA_TO_POWER_OF_2_HIGH(cq_depth);
2209  cq_size = cq_depth * BFI_CQ_WI_SIZE;
2210  cq_size = ALIGN(cq_size, PAGE_SIZE);
2211  cpage_count = SIZE_TO_PAGES(cq_size);
2212 
2213  BNA_TO_POWER_OF_2_HIGH(dq_depth);
2214  dq_size = dq_depth * BFI_RXQ_WI_SIZE;
2215  dq_size = ALIGN(dq_size, PAGE_SIZE);
2216  dpage_count = SIZE_TO_PAGES(dq_size);
2217 
2218  if (BNA_RXP_SINGLE != q_cfg->rxp_type) {
2219  BNA_TO_POWER_OF_2_HIGH(hq_depth);
2220  hq_size = hq_depth * BFI_RXQ_WI_SIZE;
2221  hq_size = ALIGN(hq_size, PAGE_SIZE);
2222  hpage_count = SIZE_TO_PAGES(hq_size);
2223  } else
2224  hpage_count = 0;
2225 
2227  mem_info = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info;
2228  mem_info->mem_type = BNA_MEM_T_KVA;
2229  mem_info->len = sizeof(struct bna_ccb);
2230  mem_info->num = q_cfg->num_paths;
2231 
2233  mem_info = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info;
2234  mem_info->mem_type = BNA_MEM_T_KVA;
2235  mem_info->len = sizeof(struct bna_rcb);
2236  mem_info->num = BNA_GET_RXQS(q_cfg);
2237 
2239  mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info;
2240  mem_info->mem_type = BNA_MEM_T_DMA;
2241  mem_info->len = cpage_count * sizeof(struct bna_dma_addr);
2242  mem_info->num = q_cfg->num_paths;
2243 
2245  mem_info = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info;
2246  mem_info->mem_type = BNA_MEM_T_KVA;
2247  mem_info->len = cpage_count * sizeof(void *);
2248  mem_info->num = q_cfg->num_paths;
2249 
2251  mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info;
2252  mem_info->mem_type = BNA_MEM_T_DMA;
2253  mem_info->len = PAGE_SIZE;
2254  mem_info->num = cpage_count * q_cfg->num_paths;
2255 
2257  mem_info = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info;
2258  mem_info->mem_type = BNA_MEM_T_DMA;
2259  mem_info->len = dpage_count * sizeof(struct bna_dma_addr);
2260  mem_info->num = q_cfg->num_paths;
2261 
2263  mem_info = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info;
2264  mem_info->mem_type = BNA_MEM_T_KVA;
2265  mem_info->len = dpage_count * sizeof(void *);
2266  mem_info->num = q_cfg->num_paths;
2267 
2269  mem_info = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info;
2270  mem_info->mem_type = BNA_MEM_T_DMA;
2271  mem_info->len = PAGE_SIZE;
2272  mem_info->num = dpage_count * q_cfg->num_paths;
2273 
2275  mem_info = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info;
2276  mem_info->mem_type = BNA_MEM_T_DMA;
2277  mem_info->len = hpage_count * sizeof(struct bna_dma_addr);
2278  mem_info->num = (hpage_count ? q_cfg->num_paths : 0);
2279 
2281  mem_info = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info;
2282  mem_info->mem_type = BNA_MEM_T_KVA;
2283  mem_info->len = hpage_count * sizeof(void *);
2284  mem_info->num = (hpage_count ? q_cfg->num_paths : 0);
2285 
2287  mem_info = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info;
2288  mem_info->mem_type = BNA_MEM_T_DMA;
2289  mem_info->len = (hpage_count ? PAGE_SIZE : 0);
2290  mem_info->num = (hpage_count ? (hpage_count * q_cfg->num_paths) : 0);
2291 
2293  mem_info = &res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info;
2294  mem_info->mem_type = BNA_MEM_T_DMA;
2295  mem_info->len = BFI_IBIDX_SIZE;
2296  mem_info->num = q_cfg->num_paths;
2297 
2299  mem_info = &res_info[BNA_RX_RES_MEM_T_RIT].res_u.mem_info;
2300  mem_info->mem_type = BNA_MEM_T_KVA;
2301  mem_info->len = BFI_ENET_RSS_RIT_MAX;
2302  mem_info->num = 1;
2303 
2305  res_info[BNA_RX_RES_T_INTR].res_u.intr_info.intr_type = BNA_INTR_T_MSIX;
2306  res_info[BNA_RX_RES_T_INTR].res_u.intr_info.num = q_cfg->num_paths;
2307 }
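
/*
 * Standalone sketch of the sizing arithmetic used above: round the requested
 * depth up to a power of two, multiply by the per-entry work-item size, align
 * to the page size, and convert to a page count. The work-item size and page
 * size here are assumed values for illustration; the driver uses
 * BFI_CQ_WI_SIZE/BFI_RXQ_WI_SIZE and the kernel's PAGE_SIZE.
 */
#include <stdint.h>

#define EXAMPLE_PAGE_SIZE	4096u
#define EXAMPLE_WI_SIZE		16u	/* assumed bytes per work item */

static uint32_t example_roundup_pow2(uint32_t v)
{
	uint32_t p = 1;

	while (p < v)
		p <<= 1;
	return p;
}

static uint32_t example_depth_to_pages(uint32_t depth)
{
	uint32_t bytes = example_roundup_pow2(depth) * EXAMPLE_WI_SIZE;

	/* Align up to a page boundary, then count pages. */
	bytes = (bytes + EXAMPLE_PAGE_SIZE - 1) & ~(EXAMPLE_PAGE_SIZE - 1);
	return bytes / EXAMPLE_PAGE_SIZE;
}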
2308 
2309 struct bna_rx *
2310 bna_rx_create(struct bna *bna, struct bnad *bnad,
2311  struct bna_rx_config *rx_cfg,
2312  const struct bna_rx_event_cbfn *rx_cbfn,
2313  struct bna_res_info *res_info,
2314  void *priv)
2315 {
2316  struct bna_rx_mod *rx_mod = &bna->rx_mod;
2317  struct bna_rx *rx;
2318  struct bna_rxp *rxp;
2319  struct bna_rxq *q0;
2320  struct bna_rxq *q1;
2321  struct bna_intr_info *intr_info;
2322  u32 page_count;
2323  struct bna_mem_descr *ccb_mem;
2324  struct bna_mem_descr *rcb_mem;
2325  struct bna_mem_descr *unmapq_mem;
2326  struct bna_mem_descr *cqpt_mem;
2327  struct bna_mem_descr *cswqpt_mem;
2328  struct bna_mem_descr *cpage_mem;
2329  struct bna_mem_descr *hqpt_mem;
2330  struct bna_mem_descr *dqpt_mem;
2331  struct bna_mem_descr *hsqpt_mem;
2332  struct bna_mem_descr *dsqpt_mem;
2333  struct bna_mem_descr *hpage_mem;
2334  struct bna_mem_descr *dpage_mem;
2335  int i, cpage_idx = 0, dpage_idx = 0, hpage_idx = 0;
2336  int dpage_count, hpage_count, rcb_idx;
2337 
2338  if (!bna_rx_res_check(rx_mod, rx_cfg))
2339  return NULL;
2340 
2341  intr_info = &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
2342  ccb_mem = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info.mdl[0];
2343  rcb_mem = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info.mdl[0];
2344  unmapq_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[0];
2345  cqpt_mem = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info.mdl[0];
2346  cswqpt_mem = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info.mdl[0];
2347  cpage_mem = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.mdl[0];
2348  hqpt_mem = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info.mdl[0];
2349  dqpt_mem = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info.mdl[0];
2350  hsqpt_mem = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info.mdl[0];
2351  dsqpt_mem = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info.mdl[0];
2352  hpage_mem = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.mdl[0];
2353  dpage_mem = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.mdl[0];
2354 
2355  page_count = res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.num /
2356  rx_cfg->num_paths;
2357 
2358  dpage_count = res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.num /
2359  rx_cfg->num_paths;
2360 
2361  hpage_count = res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.num /
2362  rx_cfg->num_paths;
2363 
2364  rx = bna_rx_get(rx_mod, rx_cfg->rx_type);
2365  rx->bna = bna;
2366  rx->rx_flags = 0;
2367  INIT_LIST_HEAD(&rx->rxp_q);
2368  rx->stop_cbfn = NULL;
2369  rx->stop_cbarg = NULL;
2370  rx->priv = priv;
2371 
2372  rx->rcb_setup_cbfn = rx_cbfn->rcb_setup_cbfn;
2373  rx->rcb_destroy_cbfn = rx_cbfn->rcb_destroy_cbfn;
2374  rx->ccb_setup_cbfn = rx_cbfn->ccb_setup_cbfn;
2375  rx->ccb_destroy_cbfn = rx_cbfn->ccb_destroy_cbfn;
2376  rx->rx_stall_cbfn = rx_cbfn->rx_stall_cbfn;
2377  /* Following callbacks are mandatory */
2378  rx->rx_cleanup_cbfn = rx_cbfn->rx_cleanup_cbfn;
2379  rx->rx_post_cbfn = rx_cbfn->rx_post_cbfn;
2380 
2381  if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_ENET_STARTED) {
2382  switch (rx->type) {
2383  case BNA_RX_T_REGULAR:
2384  if (!(rx->bna->rx_mod.flags &
2385  BNA_RX_MOD_F_ENET_LOOPBACK))
2386  rx->rx_flags |= BNA_RX_F_ENET_STARTED;
2387  break;
2388  case BNA_RX_T_LOOPBACK:
2389  if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_ENET_LOOPBACK)
2390  rx->rx_flags |= BNA_RX_F_ENET_STARTED;
2391  break;
2392  }
2393  }
2394 
2395  rx->num_paths = rx_cfg->num_paths;
2396  for (i = 0, rcb_idx = 0; i < rx->num_paths; i++) {
2397  rxp = bna_rxp_get(rx_mod);
2398  list_add_tail(&rxp->qe, &rx->rxp_q);
2399  rxp->type = rx_cfg->rxp_type;
2400  rxp->rx = rx;
2401  rxp->cq.rx = rx;
2402 
2403  q0 = bna_rxq_get(rx_mod);
2404  if (BNA_RXP_SINGLE == rx_cfg->rxp_type)
2405  q1 = NULL;
2406  else
2407  q1 = bna_rxq_get(rx_mod);
2408 
2409  if (1 == intr_info->num)
2410  rxp->vector = intr_info->idl[0].vector;
2411  else
2412  rxp->vector = intr_info->idl[i].vector;
2413 
2414  /* Setup IB */
2415 
2416  rxp->cq.ib.ib_seg_host_addr.lsb =
2417  res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb;
2418  rxp->cq.ib.ib_seg_host_addr.msb =
2419  res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb;
2420  rxp->cq.ib.ib_seg_host_addr_kva =
2421  res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva;
2422  rxp->cq.ib.intr_type = intr_info->intr_type;
2423  if (intr_info->intr_type == BNA_INTR_T_MSIX)
2424  rxp->cq.ib.intr_vector = rxp->vector;
2425  else
2426  rxp->cq.ib.intr_vector = (1 << rxp->vector);
2427  rxp->cq.ib.coalescing_timeo = rx_cfg->coalescing_timeo;
2428  rxp->cq.ib.interpkt_count = BFI_RX_INTERPKT_COUNT;
2429  rxp->cq.ib.interpkt_timeo = BFI_RX_INTERPKT_TIMEO;
2430 
2431  bna_rxp_add_rxqs(rxp, q0, q1);
2432 
2433  /* Setup large Q */
2434 
2435  q0->rx = rx;
2436  q0->rxp = rxp;
2437 
2438  q0->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
2439  q0->rcb->unmap_q = (void *)unmapq_mem[rcb_idx].kva;
2440  rcb_idx++;
2441  q0->rcb->q_depth = rx_cfg->q_depth;
2442  q0->rcb->rxq = q0;
2443  q0->rcb->bnad = bna->bnad;
2444  q0->rcb->id = 0;
2445  q0->rx_packets = q0->rx_bytes = 0;
2447 
2448  bna_rxq_qpt_setup(q0, rxp, dpage_count, PAGE_SIZE,
2449  &dqpt_mem[i], &dsqpt_mem[i], &dpage_mem[dpage_idx]);
2450  q0->rcb->page_idx = dpage_idx;
2451  q0->rcb->page_count = dpage_count;
2452  dpage_idx += dpage_count;
2453 
2454  if (rx->rcb_setup_cbfn)
2455  rx->rcb_setup_cbfn(bnad, q0->rcb);
2456 
2457  /* Setup small Q */
2458 
2459  if (q1) {
2460  q1->rx = rx;
2461  q1->rxp = rxp;
2462 
2463  q1->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
2464  q1->rcb->unmap_q = (void *)unmapq_mem[rcb_idx].kva;
2465  rcb_idx++;
2466  q1->rcb->q_depth = rx_cfg->q_depth;
2467  q1->rcb->rxq = q1;
2468  q1->rcb->bnad = bna->bnad;
2469  q1->rcb->id = 1;
2470  q1->buffer_size = (rx_cfg->rxp_type == BNA_RXP_HDS) ?
2471  rx_cfg->hds_config.forced_offset
2472  : rx_cfg->small_buff_size;
2473  q1->rx_packets = q1->rx_bytes = 0;
2475 
2476  bna_rxq_qpt_setup(q1, rxp, hpage_count, PAGE_SIZE,
2477  &hqpt_mem[i], &hsqpt_mem[i],
2478  &hpage_mem[hpage_idx]);
2479  q1->rcb->page_idx = hpage_idx;
2480  q1->rcb->page_count = hpage_count;
2481  hpage_idx += hpage_count;
2482 
2483  if (rx->rcb_setup_cbfn)
2484  rx->rcb_setup_cbfn(bnad, q1->rcb);
2485  }
2486 
2487  /* Setup CQ */
2488 
2489  rxp->cq.ccb = (struct bna_ccb *) ccb_mem[i].kva;
2490  rxp->cq.ccb->q_depth = rx_cfg->q_depth +
2491  ((rx_cfg->rxp_type == BNA_RXP_SINGLE) ?
2492  0 : rx_cfg->q_depth);
2493  rxp->cq.ccb->cq = &rxp->cq;
2494  rxp->cq.ccb->rcb[0] = q0->rcb;
2495  q0->rcb->ccb = rxp->cq.ccb;
2496  if (q1) {
2497  rxp->cq.ccb->rcb[1] = q1->rcb;
2498  q1->rcb->ccb = rxp->cq.ccb;
2499  }
2500  rxp->cq.ccb->hw_producer_index =
2501  (u32 *)rxp->cq.ib.ib_seg_host_addr_kva;
2502  rxp->cq.ccb->i_dbell = &rxp->cq.ib.door_bell;
2503  rxp->cq.ccb->intr_type = rxp->cq.ib.intr_type;
2504  rxp->cq.ccb->intr_vector = rxp->cq.ib.intr_vector;
2505  rxp->cq.ccb->rx_coalescing_timeo =
2506  rxp->cq.ib.coalescing_timeo;
2507  rxp->cq.ccb->pkt_rate.small_pkt_cnt = 0;
2508  rxp->cq.ccb->pkt_rate.large_pkt_cnt = 0;
2509  rxp->cq.ccb->bnad = bna->bnad;
2510  rxp->cq.ccb->id = i;
2511 
2512  bna_rxp_cqpt_setup(rxp, page_count, PAGE_SIZE,
2513  &cqpt_mem[i], &cswqpt_mem[i], &cpage_mem[cpage_idx]);
2514  rxp->cq.ccb->page_idx = cpage_idx;
2515  rxp->cq.ccb->page_count = page_count;
2516  cpage_idx += page_count;
2517 
2518  if (rx->ccb_setup_cbfn)
2519  rx->ccb_setup_cbfn(bnad, rxp->cq.ccb);
2520  }
2521 
2522  rx->hds_cfg = rx_cfg->hds_config;
2523 
2524  bna_rxf_init(&rx->rxf, rx, rx_cfg, res_info);
2525 
2526  bfa_fsm_set_state(rx, bna_rx_sm_stopped);
2527 
2528  rx_mod->rid_mask |= (1 << rx->rid);
2529 
2530  return rx;
2531 }
2532 
2533 void
2534 bna_rx_destroy(struct bna_rx *rx)
2535 {
2536  struct bna_rx_mod *rx_mod = &rx->bna->rx_mod;
2537  struct bna_rxq *q0 = NULL;
2538  struct bna_rxq *q1 = NULL;
2539  struct bna_rxp *rxp;
2540  struct list_head *qe;
2541 
2542  bna_rxf_uninit(&rx->rxf);
2543 
2544  while (!list_empty(&rx->rxp_q)) {
2545  bfa_q_deq(&rx->rxp_q, &rxp);
2546  GET_RXQS(rxp, q0, q1);
2547  if (rx->rcb_destroy_cbfn)
2548  rx->rcb_destroy_cbfn(rx->bna->bnad, q0->rcb);
2549  q0->rcb = NULL;
2550  q0->rxp = NULL;
2551  q0->rx = NULL;
2552  bna_rxq_put(rx_mod, q0);
2553 
2554  if (q1) {
2555  if (rx->rcb_destroy_cbfn)
2556  rx->rcb_destroy_cbfn(rx->bna->bnad, q1->rcb);
2557  q1->rcb = NULL;
2558  q1->rxp = NULL;
2559  q1->rx = NULL;
2560  bna_rxq_put(rx_mod, q1);
2561  }
2562  rxp->rxq.slr.large = NULL;
2563  rxp->rxq.slr.small = NULL;
2564 
2565  if (rx->ccb_destroy_cbfn)
2566  rx->ccb_destroy_cbfn(rx->bna->bnad, rxp->cq.ccb);
2567  rxp->cq.ccb = NULL;
2568  rxp->rx = NULL;
2569  bna_rxp_put(rx_mod, rxp);
2570  }
2571 
2572  list_for_each(qe, &rx_mod->rx_active_q) {
2573  if (qe == &rx->qe) {
2574  list_del(&rx->qe);
2575  bfa_q_qe_init(&rx->qe);
2576  break;
2577  }
2578  }
2579 
2580  rx_mod->rid_mask &= ~(1 << rx->rid);
2581 
2582  rx->bna = NULL;
2583  rx->priv = NULL;
2584  bna_rx_put(rx_mod, rx);
2585 }
2586 
2587 void
2588 bna_rx_enable(struct bna_rx *rx)
2589 {
2590  if (rx->fsm != (bfa_sm_t)bna_rx_sm_stopped)
2591  return;
2592 
2593  rx->rx_flags |= BNA_RX_F_ENABLED;
2594  if (rx->rx_flags & BNA_RX_F_ENET_STARTED)
2595  bna_rx_start(rx);
2596 }
2597 
2598 void
2599 bna_rx_disable(struct bna_rx *rx, enum bna_cleanup_type type,
2600  void (*cbfn)(void *, struct bna_rx *))
2601 {
2602  if (type == BNA_SOFT_CLEANUP) {
2603  /* h/w should not be accessed. Treat it as if we are stopped */
2604  (*cbfn)(rx->bna->bnad, rx);
2605  } else {
2606  rx->stop_cbfn = cbfn;
2607  rx->stop_cbarg = rx->bna->bnad;
2608 
2609  rx->rx_flags &= ~BNA_RX_F_ENABLED;
2610 
2611  bfa_fsm_send_event(rx, RX_E_STOP);
2612  }
2613 }
2614 
2615 void
2616 bna_rx_cleanup_complete(struct bna_rx *rx)
2617 {
2618  bfa_fsm_send_event(rx, RX_E_CLEANUP_DONE);
2619 }
2620 
2621 enum bna_cb_status
2622 bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode,
2623  enum bna_rxmode bitmask,
2624  void (*cbfn)(struct bnad *, struct bna_rx *))
2625 {
2626  struct bna_rxf *rxf = &rx->rxf;
2627  int need_hw_config = 0;
2628 
2629  /* Error checks */
2630 
2631  if (is_promisc_enable(new_mode, bitmask)) {
2632  /* If promisc mode is already enabled elsewhere in the system */
2633  if ((rx->bna->promisc_rid != BFI_INVALID_RID) &&
2634  (rx->bna->promisc_rid != rxf->rx->rid))
2635  goto err_return;
2636 
2637  /* If default mode is already enabled in the system */
2638  if (rx->bna->default_mode_rid != BFI_INVALID_RID)
2639  goto err_return;
2640 
2641  /* Trying to enable promiscuous and default mode together */
2642  if (is_default_enable(new_mode, bitmask))
2643  goto err_return;
2644  }
2645 
2646  if (is_default_enable(new_mode, bitmask)) {
2647  /* If default mode is already enabled elsewhere in the system */
2648  if ((rx->bna->default_mode_rid != BFI_INVALID_RID) &&
2649  (rx->bna->default_mode_rid != rxf->rx->rid)) {
2650  goto err_return;
2651  }
2652 
2653  /* If promiscuous mode is already enabled in the system */
2654  if (rx->bna->promisc_rid != BFI_INVALID_RID)
2655  goto err_return;
2656  }
2657 
2658  /* Process the commands */
2659 
2660  if (is_promisc_enable(new_mode, bitmask)) {
2661  if (bna_rxf_promisc_enable(rxf))
2662  need_hw_config = 1;
2663  } else if (is_promisc_disable(new_mode, bitmask)) {
2664  if (bna_rxf_promisc_disable(rxf))
2665  need_hw_config = 1;
2666  }
2667 
2668  if (is_allmulti_enable(new_mode, bitmask)) {
2669  if (bna_rxf_allmulti_enable(rxf))
2670  need_hw_config = 1;
2671  } else if (is_allmulti_disable(new_mode, bitmask)) {
2672  if (bna_rxf_allmulti_disable(rxf))
2673  need_hw_config = 1;
2674  }
2675 
2676  /* Trigger h/w if needed */
2677 
2678  if (need_hw_config) {
2679  rxf->cam_fltr_cbfn = cbfn;
2680  rxf->cam_fltr_cbarg = rx->bna->bnad;
2681  bfa_fsm_send_event(rxf, RXF_E_CONFIG);
2682  } else if (cbfn)
2683  (*cbfn)(rx->bna->bnad, rx);
2684 
2685  return BNA_CB_SUCCESS;
2686 
2687 err_return:
2688  return BNA_CB_FAIL;
2689 }
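
/*
 * Standalone sketch of the "new mode + change bitmask" idiom that
 * bna_rx_mode_set() appears to rely on via is_promisc_enable() and friends:
 * the bitmask selects which mode bits the caller wants to change, and
 * new_mode supplies their new values. This is an assumed reading of those
 * helpers, shown with illustrative flag names.
 */
#include <stdbool.h>
#include <stdint.h>

enum { EXAMPLE_MODE_PROMISC = 1u << 0, EXAMPLE_MODE_ALLMULTI = 1u << 1 };

static bool example_mode_enable(uint32_t new_mode, uint32_t bitmask,
				uint32_t bit)
{
	/* Bit is selected for change and its new value is "on". */
	return (bitmask & bit) && (new_mode & bit);
}

static bool example_mode_disable(uint32_t new_mode, uint32_t bitmask,
				 uint32_t bit)
{
	/* Bit is selected for change and its new value is "off". */
	return (bitmask & bit) && !(new_mode & bit);
}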
2690 
2691 void
2692 bna_rx_vlanfilter_enable(struct bna_rx *rx)
2693 {
2694  struct bna_rxf *rxf = &rx->rxf;
2695 
2696  if (rxf->vlan_filter_status == BNA_STATUS_T_DISABLED) {
2697  rxf->vlan_filter_status = BNA_STATUS_T_ENABLED;
2698  rxf->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL;
2699  bfa_fsm_send_event(rxf, RXF_E_CONFIG);
2700  }
2701 }
2702 
2703 void
2704 bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo)
2705 {
2706  struct bna_rxp *rxp;
2707  struct list_head *qe;
2708 
2709  list_for_each(qe, &rx->rxp_q) {
2710  rxp = (struct bna_rxp *)qe;
2711  rxp->cq.ccb->rx_coalescing_timeo = coalescing_timeo;
2712  bna_ib_coalescing_timeo_set(&rxp->cq.ib, coalescing_timeo);
2713  }
2714 }
2715 
2716 void
2717 bna_rx_dim_reconfig(struct bna *bna, const u32 vector[][BNA_BIAS_T_MAX])
2718 {
2719  int i, j;
2720 
2721  for (i = 0; i < BNA_LOAD_T_MAX; i++)
2722  for (j = 0; j < BNA_BIAS_T_MAX; j++)
2723  bna->rx_mod.dim_vector[i][j] = vector[i][j];
2724 }
2725 
2726 void
2727 bna_rx_dim_update(struct bna_ccb *ccb)
2728 {
2729  struct bna *bna = ccb->cq->rx->bna;
2730  u32 load, bias;
2731  u32 pkt_rt, small_rt, large_rt;
2732  u8 coalescing_timeo;
2733 
2734  if ((ccb->pkt_rate.small_pkt_cnt == 0) &&
2735  (ccb->pkt_rate.large_pkt_cnt == 0))
2736  return;
2737 
2738  /* Arrive at preconfigured coalescing timeo value based on pkt rate */
2739 
2740  small_rt = ccb->pkt_rate.small_pkt_cnt;
2741  large_rt = ccb->pkt_rate.large_pkt_cnt;
2742 
2743  pkt_rt = small_rt + large_rt;
2744 
2745  if (pkt_rt < BNA_PKT_RATE_10K)
2746  load = BNA_LOAD_T_LOW_4;
2747  else if (pkt_rt < BNA_PKT_RATE_20K)
2748  load = BNA_LOAD_T_LOW_3;
2749  else if (pkt_rt < BNA_PKT_RATE_30K)
2750  load = BNA_LOAD_T_LOW_2;
2751  else if (pkt_rt < BNA_PKT_RATE_40K)
2752  load = BNA_LOAD_T_LOW_1;
2753  else if (pkt_rt < BNA_PKT_RATE_50K)
2754  load = BNA_LOAD_T_HIGH_1;
2755  else if (pkt_rt < BNA_PKT_RATE_60K)
2756  load = BNA_LOAD_T_HIGH_2;
2757  else if (pkt_rt < BNA_PKT_RATE_80K)
2758  load = BNA_LOAD_T_HIGH_3;
2759  else
2760  load = BNA_LOAD_T_HIGH_4;
2761 
2762  if (small_rt > (large_rt << 1))
2763  bias = 0;
2764  else
2765  bias = 1;
2766 
2767  ccb->pkt_rate.small_pkt_cnt = 0;
2768  ccb->pkt_rate.large_pkt_cnt = 0;
2769 
2770  coalescing_timeo = bna->rx_mod.dim_vector[load][bias];
2771  ccb->rx_coalescing_timeo = coalescing_timeo;
2772 
2773  /* Set it to IB */
2774  bna_ib_coalescing_timeo_set(&ccb->cq->ib, coalescing_timeo);
2775 }
2776 
2777 const u32 bna_napi_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = {
2778  {12, 12},
2779  {6, 10},
2780  {5, 10},
2781  {4, 8},
2782  {3, 6},
2783  {3, 6},
2784  {2, 4},
2785  {1, 2},
2786 };
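
/*
 * Standalone sketch of the dynamic interrupt moderation (DIM) lookup used in
 * the update routine above: classify the packet rate into a load level and a
 * small-vs-large-packet bias, then pick a coalescing timeout from a
 * [load][bias] table. The table ordering and rate thresholds below are this
 * sketch's own (lowest load first); only the overall technique mirrors the
 * driver.
 */
#include <stdint.h>

#define EXAMPLE_LOAD_LEVELS	8
#define EXAMPLE_BIAS_LEVELS	2

static const uint8_t example_dim_vector[EXAMPLE_LOAD_LEVELS][EXAMPLE_BIAS_LEVELS] = {
	{1, 2}, {2, 4}, {3, 6}, {3, 6}, {4, 8}, {5, 10}, {6, 10}, {12, 12},
};

static uint8_t example_dim_timeo(uint32_t small_cnt, uint32_t large_cnt)
{
	/* Assumed per-interval packet-count thresholds, one per boundary. */
	static const uint32_t thresh[EXAMPLE_LOAD_LEVELS - 1] = {
		10000, 20000, 30000, 40000, 50000, 60000, 80000
	};
	uint32_t rate = small_cnt + large_cnt;
	int load = EXAMPLE_LOAD_LEVELS - 1;	/* highest load by default */
	int bias = (small_cnt > (large_cnt << 1)) ? 0 : 1;
	int i;

	for (i = 0; i < EXAMPLE_LOAD_LEVELS - 1; i++) {
		if (rate < thresh[i]) {
			load = i;
			break;
		}
	}
	return example_dim_vector[load][bias];
}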
2787 
2788 /* TX */
2789 
2790 #define call_tx_stop_cbfn(tx) \
2791 do { \
2792  if ((tx)->stop_cbfn) { \
2793  void (*cbfn)(void *, struct bna_tx *); \
2794  void *cbarg; \
2795  cbfn = (tx)->stop_cbfn; \
2796  cbarg = (tx)->stop_cbarg; \
2797  (tx)->stop_cbfn = NULL; \
2798  (tx)->stop_cbarg = NULL; \
2799  cbfn(cbarg, (tx)); \
2800  } \
2801 } while (0)
2802 
2803 #define call_tx_prio_change_cbfn(tx) \
2804 do { \
2805  if ((tx)->prio_change_cbfn) { \
2806  void (*cbfn)(struct bnad *, struct bna_tx *); \
2807  cbfn = (tx)->prio_change_cbfn; \
2808  (tx)->prio_change_cbfn = NULL; \
2809  cbfn((tx)->bna->bnad, (tx)); \
2810  } \
2811 } while (0)
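
/*
 * The two macros above both use a take-and-clear pattern: copy the callback
 * pointer (and argument) into locals, NULL the stored pointer, and only then
 * invoke it, so a callback that re-arms or re-enters the object cannot fire
 * the same completion twice. A standalone function-style sketch of the same
 * idea, with illustrative names:
 */
#include <stddef.h>

struct example_obj {
	void (*stop_cbfn)(void *arg, struct example_obj *obj);
	void *stop_cbarg;
};

static void example_call_stop_cbfn(struct example_obj *obj)
{
	if (obj->stop_cbfn) {
		void (*cbfn)(void *, struct example_obj *) = obj->stop_cbfn;
		void *cbarg = obj->stop_cbarg;

		obj->stop_cbfn = NULL;	/* clear before calling */
		obj->stop_cbarg = NULL;
		cbfn(cbarg, obj);
	}
}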
2812 
2813 static void bna_tx_mod_cb_tx_stopped(void *tx_mod, struct bna_tx *tx);
2814 static void bna_bfi_tx_enet_start(struct bna_tx *tx);
2815 static void bna_tx_enet_stop(struct bna_tx *tx);
2816 
2817 enum bna_tx_event {
2818  TX_E_START = 1,
2819  TX_E_STOP = 2,
2820  TX_E_FAIL = 3,
2821  TX_E_STARTED = 4,
2822  TX_E_STOPPED = 5,
2823  TX_E_PRIO_CHANGE = 6,
2824  TX_E_CLEANUP_DONE = 7,
2825  TX_E_BW_UPDATE = 8,
2826 };
2827 
2828 bfa_fsm_state_decl(bna_tx, stopped, struct bna_tx, enum bna_tx_event);
2829 bfa_fsm_state_decl(bna_tx, start_wait, struct bna_tx, enum bna_tx_event);
2830 bfa_fsm_state_decl(bna_tx, started, struct bna_tx, enum bna_tx_event);
2831 bfa_fsm_state_decl(bna_tx, stop_wait, struct bna_tx, enum bna_tx_event);
2832 bfa_fsm_state_decl(bna_tx, cleanup_wait, struct bna_tx,
2833  enum bna_tx_event);
2834 bfa_fsm_state_decl(bna_tx, prio_stop_wait, struct bna_tx,
2835  enum bna_tx_event);
2836 bfa_fsm_state_decl(bna_tx, prio_cleanup_wait, struct bna_tx,
2837  enum bna_tx_event);
2838 bfa_fsm_state_decl(bna_tx, failed, struct bna_tx, enum bna_tx_event);
2839 bfa_fsm_state_decl(bna_tx, quiesce_wait, struct bna_tx,
2840  enum bna_tx_event);
2841 
2842 static void
2843 bna_tx_sm_stopped_entry(struct bna_tx *tx)
2844 {
2845  call_tx_stop_cbfn(tx);
2846 }
2847 
2848 static void
2849 bna_tx_sm_stopped(struct bna_tx *tx, enum bna_tx_event event)
2850 {
2851  switch (event) {
2852  case TX_E_START:
2853  bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
2854  break;
2855 
2856  case TX_E_STOP:
2857  call_tx_stop_cbfn(tx);
2858  break;
2859 
2860  case TX_E_FAIL:
2861  /* No-op */
2862  break;
2863 
2864  case TX_E_PRIO_CHANGE:
2865  call_tx_prio_change_cbfn(tx);
2866  break;
2867 
2868  case TX_E_BW_UPDATE:
2869  /* No-op */
2870  break;
2871 
2872  default:
2873  bfa_sm_fault(event);
2874  }
2875 }
2876 
2877 static void
2878 bna_tx_sm_start_wait_entry(struct bna_tx *tx)
2879 {
2880  bna_bfi_tx_enet_start(tx);
2881 }
2882 
2883 static void
2884 bna_tx_sm_start_wait(struct bna_tx *tx, enum bna_tx_event event)
2885 {
2886  switch (event) {
2887  case TX_E_STOP:
2888  tx->flags &= ~(BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED);
2889  bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
2890  break;
2891 
2892  case TX_E_FAIL:
2893  tx->flags &= ~(BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED);
2894  bfa_fsm_set_state(tx, bna_tx_sm_stopped);
2895  break;
2896 
2897  case TX_E_STARTED:
2898  if (tx->flags & (BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED)) {
2899  tx->flags &= ~(BNA_TX_F_PRIO_CHANGED |
2900  BNA_TX_F_BW_UPDATED);
2901  bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
2902  } else
2903  bfa_fsm_set_state(tx, bna_tx_sm_started);
2904  break;
2905 
2906  case TX_E_PRIO_CHANGE:
2907  tx->flags |= BNA_TX_F_PRIO_CHANGED;
2908  break;
2909 
2910  case TX_E_BW_UPDATE:
2911  tx->flags |= BNA_TX_F_BW_UPDATED;
2912  break;
2913 
2914  default:
2915  bfa_sm_fault(event);
2916  }
2917 }
2918 
2919 static void
2920 bna_tx_sm_started_entry(struct bna_tx *tx)
2921 {
2922  struct bna_txq *txq;
2923  struct list_head *qe;
2924  int is_regular = (tx->type == BNA_TX_T_REGULAR);
2925 
2926  list_for_each(qe, &tx->txq_q) {
2927  txq = (struct bna_txq *)qe;
2928  txq->tcb->priority = txq->priority;
2929  /* Start IB */
2930  bna_ib_start(tx->bna, &txq->ib, is_regular);
2931  }
2932  tx->tx_resume_cbfn(tx->bna->bnad, tx);
2933 }
2934 
2935 static void
2936 bna_tx_sm_started(struct bna_tx *tx, enum bna_tx_event event)
2937 {
2938  switch (event) {
2939  case TX_E_STOP:
2940  bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
2941  tx->tx_stall_cbfn(tx->bna->bnad, tx);
2942  bna_tx_enet_stop(tx);
2943  break;
2944 
2945  case TX_E_FAIL:
2946  bfa_fsm_set_state(tx, bna_tx_sm_failed);
2947  tx->tx_stall_cbfn(tx->bna->bnad, tx);
2948  tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
2949  break;
2950 
2951  case TX_E_PRIO_CHANGE:
2952  case TX_E_BW_UPDATE:
2953  bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
2954  break;
2955 
2956  default:
2957  bfa_sm_fault(event);
2958  }
2959 }
2960 
2961 static void
2962 bna_tx_sm_stop_wait_entry(struct bna_tx *tx)
2963 {
2964 }
2965 
2966 static void
2967 bna_tx_sm_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
2968 {
2969  switch (event) {
2970  case TX_E_FAIL:
2971  case TX_E_STOPPED:
2972  bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
2973  tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
2974  break;
2975 
2976  case TX_E_STARTED:
2981  bna_tx_enet_stop(tx);
2982  break;
2983 
2984  case TX_E_PRIO_CHANGE:
2985  case TX_E_BW_UPDATE:
2986  /* No-op */
2987  break;
2988 
2989  default:
2990  bfa_sm_fault(event);
2991  }
2992 }
2993 
2994 static void
2995 bna_tx_sm_cleanup_wait_entry(struct bna_tx *tx)
2996 {
2997 }
2998 
2999 static void
3000 bna_tx_sm_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event)
3001 {
3002  switch (event) {
3003  case TX_E_FAIL:
3004  case TX_E_PRIO_CHANGE:
3005  case TX_E_BW_UPDATE:
3006  /* No-op */
3007  break;
3008 
3009  case TX_E_CLEANUP_DONE:
3010  bfa_fsm_set_state(tx, bna_tx_sm_stopped);
3011  break;
3012 
3013  default:
3014  bfa_sm_fault(event);
3015  }
3016 }
3017 
3018 static void
3019 bna_tx_sm_prio_stop_wait_entry(struct bna_tx *tx)
3020 {
3021  tx->tx_stall_cbfn(tx->bna->bnad, tx);
3022  bna_tx_enet_stop(tx);
3023 }
3024 
3025 static void
3026 bna_tx_sm_prio_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
3027 {
3028  switch (event) {
3029  case TX_E_STOP:
3030  bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
3031  break;
3032 
3033  case TX_E_FAIL:
3034  bfa_fsm_set_state(tx, bna_tx_sm_failed);
3035  call_tx_prio_change_cbfn(tx);
3036  tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
3037  break;
3038 
3039  case TX_E_STOPPED:
3040  bfa_fsm_set_state(tx, bna_tx_sm_prio_cleanup_wait);
3041  break;
3042 
3043  case TX_E_PRIO_CHANGE:
3044  case TX_E_BW_UPDATE:
3045  /* No-op */
3046  break;
3047 
3048  default:
3049  bfa_sm_fault(event);
3050  }
3051 }
3052 
3053 static void
3054 bna_tx_sm_prio_cleanup_wait_entry(struct bna_tx *tx)
3055 {
3056  call_tx_prio_change_cbfn(tx);
3057  tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
3058 }
3059 
3060 static void
3061 bna_tx_sm_prio_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event)
3062 {
3063  switch (event) {
3064  case TX_E_STOP:
3065  bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
3066  break;
3067 
3068  case TX_E_FAIL:
3069  bfa_fsm_set_state(tx, bna_tx_sm_failed);
3070  break;
3071 
3072  case TX_E_PRIO_CHANGE:
3073  case TX_E_BW_UPDATE:
3074  /* No-op */
3075  break;
3076 
3077  case TX_E_CLEANUP_DONE:
3078  bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
3079  break;
3080 
3081  default:
3082  bfa_sm_fault(event);
3083  }
3084 }
3085 
3086 static void
3087 bna_tx_sm_failed_entry(struct bna_tx *tx)
3088 {
3089 }
3090 
3091 static void
3092 bna_tx_sm_failed(struct bna_tx *tx, enum bna_tx_event event)
3093 {
3094  switch (event) {
3095  case TX_E_START:
3096  bfa_fsm_set_state(tx, bna_tx_sm_quiesce_wait);
3097  break;
3098 
3099  case TX_E_STOP:
3100  bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
3101  break;
3102 
3103  case TX_E_FAIL:
3104  /* No-op */
3105  break;
3106 
3107  case TX_E_CLEANUP_DONE:
3108  bfa_fsm_set_state(tx, bna_tx_sm_stopped);
3109  break;
3110 
3111  default:
3112  bfa_sm_fault(event);
3113  }
3114 }
3115 
3116 static void
3117 bna_tx_sm_quiesce_wait_entry(struct bna_tx *tx)
3118 {
3119 }
3120 
3121 static void
3122 bna_tx_sm_quiesce_wait(struct bna_tx *tx, enum bna_tx_event event)
3123 {
3124  switch (event) {
3125  case TX_E_STOP:
3126  bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
3127  break;
3128 
3129  case TX_E_FAIL:
3130  bfa_fsm_set_state(tx, bna_tx_sm_failed);
3131  break;
3132 
3133  case TX_E_CLEANUP_DONE:
3134  bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
3135  break;
3136 
3137  case TX_E_BW_UPDATE:
3138  /* No-op */
3139  break;
3140 
3141  default:
3142  bfa_sm_fault(event);
3143  }
3144 }
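
/*
 * The Tx state machine above follows the state-as-function-pointer idiom the
 * bfa_fsm macros appear to implement: bfa_fsm_set_state() records the new
 * handler (and runs its *_entry hook, as the *_entry functions above suggest)
 * and bfa_fsm_send_event() calls the current handler with the event. A
 * minimal standalone sketch of that idiom, with illustrative names and
 * without the entry-hook machinery:
 */
struct example_fsm;
typedef void (*example_state_fn)(struct example_fsm *fsm, int event);

struct example_fsm {
	example_state_fn state;		/* current state == handler */
};

enum { EXAMPLE_E_START = 1, EXAMPLE_E_STOP = 2 };

static void example_sm_stopped(struct example_fsm *fsm, int event);
static void example_sm_started(struct example_fsm *fsm, int event);

static void example_set_state(struct example_fsm *fsm, example_state_fn state)
{
	fsm->state = state;	/* a real *_entry action would run here */
}

static void example_send_event(struct example_fsm *fsm, int event)
{
	fsm->state(fsm, event);
}

static void example_sm_stopped(struct example_fsm *fsm, int event)
{
	if (event == EXAMPLE_E_START)
		example_set_state(fsm, example_sm_started);
}

static void example_sm_started(struct example_fsm *fsm, int event)
{
	if (event == EXAMPLE_E_STOP)
		example_set_state(fsm, example_sm_stopped);
}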
3145 
3146 static void
3147 bna_bfi_tx_enet_start(struct bna_tx *tx)
3148 {
3149  struct bfi_enet_tx_cfg_req *cfg_req = &tx->bfi_enet_cmd.cfg_req;
3150  struct bna_txq *txq = NULL;
3151  struct list_head *qe;
3152  int i;
3153 
3154  bfi_msgq_mhdr_set(cfg_req->mh, BFI_MC_ENET,
3155  BFI_ENET_H2I_TX_CFG_SET_REQ, 0, tx->rid);
3156  cfg_req->mh.num_entries = htons(
3157  bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_tx_cfg_req)));
3158 
3159  cfg_req->num_queues = tx->num_txq;
3160  for (i = 0, qe = bfa_q_first(&tx->txq_q);
3161  i < tx->num_txq;
3162  i++, qe = bfa_q_next(qe)) {
3163  txq = (struct bna_txq *)qe;
3164 
3165  bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].q.q, &txq->qpt);
3166  cfg_req->q_cfg[i].q.priority = txq->priority;
3167 
3168  cfg_req->q_cfg[i].ib.index_addr.a32.addr_lo =
3169  txq->ib.ib_seg_host_addr.lsb;
3170  cfg_req->q_cfg[i].ib.index_addr.a32.addr_hi =
3171  txq->ib.ib_seg_host_addr.msb;
3172  cfg_req->q_cfg[i].ib.intr.msix_index =
3173  htons((u16)txq->ib.intr_vector);
3174  }
3175 
3176  cfg_req->ib_cfg.int_pkt_dma = BNA_STATUS_T_ENABLED;
3177  cfg_req->ib_cfg.int_enabled = BNA_STATUS_T_ENABLED;
3178  cfg_req->ib_cfg.int_pkt_enabled = BNA_STATUS_T_DISABLED;
3179  cfg_req->ib_cfg.continuous_coalescing = BNA_STATUS_T_ENABLED;
3180  cfg_req->ib_cfg.msix = (txq->ib.intr_type == BNA_INTR_T_MSIX)
3181  ? BNA_STATUS_T_ENABLED : BNA_STATUS_T_DISABLED;
3182  cfg_req->ib_cfg.coalescing_timeout =
3183  htonl((u32)txq->ib.coalescing_timeo);
3184  cfg_req->ib_cfg.inter_pkt_timeout =
3185  htonl((u32)txq->ib.interpkt_timeo);
3186  cfg_req->ib_cfg.inter_pkt_count = (u8)txq->ib.interpkt_count;
3187 
3188  cfg_req->tx_cfg.vlan_mode = BFI_ENET_TX_VLAN_WI;
3189  cfg_req->tx_cfg.vlan_id = htons((u16)tx->txf_vlan_id);
3190  cfg_req->tx_cfg.admit_tagged_frame = BNA_STATUS_T_DISABLED;
3191  cfg_req->tx_cfg.apply_vlan_filter = BNA_STATUS_T_DISABLED;
3192 
3193  bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL,
3194  sizeof(struct bfi_enet_tx_cfg_req), &cfg_req->mh);
3195  bfa_msgq_cmd_post(&tx->bna->msgq, &tx->msgq_cmd);
3196 }
3197 
3198 static void
3199 bna_bfi_tx_enet_stop(struct bna_tx *tx)
3200 {
3201  struct bfi_enet_req *req = &tx->bfi_enet_cmd.req;
3202 
3203  bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
3204  BFI_ENET_H2I_TX_CFG_CLR_REQ, 0, tx->rid);
3205  req->mh.num_entries = htons(
3206  bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_req)));
3207  bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL, sizeof(struct bfi_enet_req),
3208  &req->mh);
3209  bfa_msgq_cmd_post(&tx->bna->msgq, &tx->msgq_cmd);
3210 }
3211 
3212 static void
3213 bna_tx_enet_stop(struct bna_tx *tx)
3214 {
3215  struct bna_txq *txq;
3216  struct list_head *qe;
3217 
3218  /* Stop IB */
3219  list_for_each(qe, &tx->txq_q) {
3220  txq = (struct bna_txq *)qe;
3221  bna_ib_stop(tx->bna, &txq->ib);
3222  }
3223 
3224  bna_bfi_tx_enet_stop(tx);
3225 }
3226 
3227 static void
3228 bna_txq_qpt_setup(struct bna_txq *txq, int page_count, int page_size,
3229  struct bna_mem_descr *qpt_mem,
3230  struct bna_mem_descr *swqpt_mem,
3231  struct bna_mem_descr *page_mem)
3232 {
3233  int i;
3234 
3235  txq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
3236  txq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
3237  txq->qpt.kv_qpt_ptr = qpt_mem->kva;
3238  txq->qpt.page_count = page_count;
3239  txq->qpt.page_size = page_size;
3240 
3241  txq->tcb->sw_qpt = (void **) swqpt_mem->kva;
3242 
3243  for (i = 0; i < page_count; i++) {
3244  txq->tcb->sw_qpt[i] = page_mem[i].kva;
3245 
3246  ((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].lsb =
3247  page_mem[i].dma.lsb;
3248  ((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].msb =
3249  page_mem[i].dma.msb;
3250  }
3251 }
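
/*
 * Standalone sketch of the two parallel tables bna_txq_qpt_setup() fills: a
 * hardware queue page table holding DMA addresses for the device to walk, and
 * a shadow table of kernel virtual addresses for the driver's own indexing.
 * Types and names below are illustrative.
 */
#include <stdint.h>

struct example_dma_addr {
	uint32_t msb;
	uint32_t lsb;
};

static void example_qpt_fill(struct example_dma_addr *hw_qpt, void **sw_qpt,
			     const struct example_dma_addr *page_dma,
			     void *const *page_kva, int page_count)
{
	int i;

	for (i = 0; i < page_count; i++) {
		sw_qpt[i] = page_kva[i];	/* CPU-side lookup */
		hw_qpt[i] = page_dma[i];	/* device-side lookup */
	}
}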
3252 
3253 static struct bna_tx *
3254 bna_tx_get(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
3255 {
3256  struct list_head *qe = NULL;
3257  struct bna_tx *tx = NULL;
3258 
3259  if (list_empty(&tx_mod->tx_free_q))
3260  return NULL;
3261  if (type == BNA_TX_T_REGULAR) {
3262  bfa_q_deq(&tx_mod->tx_free_q, &qe);
3263  } else {
3264  bfa_q_deq_tail(&tx_mod->tx_free_q, &qe);
3265  }
3266  tx = (struct bna_tx *)qe;
3267  bfa_q_qe_init(&tx->qe);
3268  tx->type = type;
3269 
3270  return tx;
3271 }
3272 
3273 static void
3274 bna_tx_free(struct bna_tx *tx)
3275 {
3276  struct bna_tx_mod *tx_mod = &tx->bna->tx_mod;
3277  struct bna_txq *txq;
3278  struct list_head *prev_qe;
3279  struct list_head *qe;
3280 
3281  while (!list_empty(&tx->txq_q)) {
3282  bfa_q_deq(&tx->txq_q, &txq);
3283  bfa_q_qe_init(&txq->qe);
3284  txq->tcb = NULL;
3285  txq->tx = NULL;
3286  list_add_tail(&txq->qe, &tx_mod->txq_free_q);
3287  }
3288 
3289  list_for_each(qe, &tx_mod->tx_active_q) {
3290  if (qe == &tx->qe) {
3291  list_del(&tx->qe);
3292  bfa_q_qe_init(&tx->qe);
3293  break;
3294  }
3295  }
3296 
3297  tx->bna = NULL;
3298  tx->priv = NULL;
3299 
3300  prev_qe = NULL;
3301  list_for_each(qe, &tx_mod->tx_free_q) {
3302  if (((struct bna_tx *)qe)->rid < tx->rid)
3303  prev_qe = qe;
3304  else {
3305  break;
3306  }
3307  }
3308 
3309  if (prev_qe == NULL) {
3310  /* This is the first entry */
3311  bfa_q_enq_head(&tx_mod->tx_free_q, &tx->qe);
3312  } else if (bfa_q_next(prev_qe) == &tx_mod->tx_free_q) {
3313  /* This is the last entry */
3314  list_add_tail(&tx->qe, &tx_mod->tx_free_q);
3315  } else {
3316  /* Somewhere in the middle */
3317  bfa_q_next(&tx->qe) = bfa_q_next(prev_qe);
3318  bfa_q_prev(&tx->qe) = prev_qe;
3319  bfa_q_next(prev_qe) = &tx->qe;
3320  bfa_q_prev(bfa_q_next(&tx->qe)) = &tx->qe;
3321  }
3322 }
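
/*
 * bna_tx_free() returns the object to tx_free_q in ascending rid order, so
 * that bna_tx_get() (which dequeues from the head for regular Tx) hands out
 * the lowest free rid first. A standalone sketch of that sorted insert on a
 * circular doubly linked list (head initialized with next == prev == head);
 * names are illustrative.
 */
struct example_node {
	struct example_node *prev, *next;
	int rid;
};

static void example_free_list_put(struct example_node *head,
				  struct example_node *node)
{
	struct example_node *pos = head->next;

	/* Find the first entry with a rid greater than or equal to ours. */
	while (pos != head && pos->rid < node->rid)
		pos = pos->next;

	/* Link node just before pos (covers head, middle and tail cases). */
	node->prev = pos->prev;
	node->next = pos;
	pos->prev->next = node;
	pos->prev = node;
}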
3323 
3324 static void
3325 bna_tx_start(struct bna_tx *tx)
3326 {
3327  tx->flags |= BNA_TX_F_ENET_STARTED;
3328  if (tx->flags & BNA_TX_F_ENABLED)
3329  bfa_fsm_send_event(tx, TX_E_START);
3330 }
3331 
3332 static void
3333 bna_tx_stop(struct bna_tx *tx)
3334 {
3335  tx->stop_cbfn = bna_tx_mod_cb_tx_stopped;
3336  tx->stop_cbarg = &tx->bna->tx_mod;
3337 
3338  tx->flags &= ~BNA_TX_F_ENET_STARTED;
3339  bfa_fsm_send_event(tx, TX_E_STOP);
3340 }
3341 
3342 static void
3343 bna_tx_fail(struct bna_tx *tx)
3344 {
3345  tx->flags &= ~BNA_TX_F_ENET_STARTED;
3346  bfa_fsm_send_event(tx, TX_E_FAIL);
3347 }
3348 
3349 void
3350 bna_bfi_tx_enet_start_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr)
3351 {
3352  struct bfi_enet_tx_cfg_rsp *cfg_rsp = &tx->bfi_enet_cmd.cfg_rsp;
3353  struct bna_txq *txq = NULL;
3354  struct list_head *qe;
3355  int i;
3356 
3357  bfa_msgq_rsp_copy(&tx->bna->msgq, (u8 *)cfg_rsp,
3358  sizeof(struct bfi_enet_tx_cfg_rsp));
3359 
3360  tx->hw_id = cfg_rsp->hw_id;
3361 
3362  for (i = 0, qe = bfa_q_first(&tx->txq_q);
3363  i < tx->num_txq; i++, qe = bfa_q_next(qe)) {
3364  txq = (struct bna_txq *)qe;
3365 
3366  /* Setup doorbells */
3367  txq->tcb->i_dbell->doorbell_addr =
3368  tx->bna->pcidev.pci_bar_kva
3369  + ntohl(cfg_rsp->q_handles[i].i_dbell);
3370  txq->tcb->q_dbell =
3371  tx->bna->pcidev.pci_bar_kva
3372  + ntohl(cfg_rsp->q_handles[i].q_dbell);
3373  txq->hw_id = cfg_rsp->q_handles[i].hw_qid;
3374 
3375  /* Initialize producer/consumer indexes */
3376  (*txq->tcb->hw_consumer_index) = 0;
3377  txq->tcb->producer_index = txq->tcb->consumer_index = 0;
3378  }
3379 
3380  bfa_fsm_send_event(tx, TX_E_STARTED);
3381 }
3382 
3383 void
3384 bna_bfi_tx_enet_stopped_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr)
3385 {
3386  bfa_fsm_send_event(tx, TX_E_STOPPED);
3387 }
3388 
3389 void
3390 bna_bfi_bw_update_aen(struct bna_tx_mod *tx_mod)
3391 {
3392  struct bna_tx *tx;
3393  struct list_head *qe;
3394 
3395  list_for_each(qe, &tx_mod->tx_active_q) {
3396  tx = (struct bna_tx *)qe;
3397  bfa_fsm_send_event(tx, TX_E_BW_UPDATE);
3398  }
3399 }
3400 
3401 void
3402 bna_tx_res_req(int num_txq, int txq_depth, struct bna_res_info *res_info)
3403 {
3404  u32 q_size;
3405  u32 page_count;
3406  struct bna_mem_info *mem_info;
3407 
3409  mem_info = &res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info;
3410  mem_info->mem_type = BNA_MEM_T_KVA;
3411  mem_info->len = sizeof(struct bna_tcb);
3412  mem_info->num = num_txq;
3413 
3414  q_size = txq_depth * BFI_TXQ_WI_SIZE;
3415  q_size = ALIGN(q_size, PAGE_SIZE);
3416  page_count = q_size >> PAGE_SHIFT;
3417 
3419  mem_info = &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info;
3420  mem_info->mem_type = BNA_MEM_T_DMA;
3421  mem_info->len = page_count * sizeof(struct bna_dma_addr);
3422  mem_info->num = num_txq;
3423 
3425  mem_info = &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info;
3426  mem_info->mem_type = BNA_MEM_T_KVA;
3427  mem_info->len = page_count * sizeof(void *);
3428  mem_info->num = num_txq;
3429 
3431  mem_info = &res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info;
3432  mem_info->mem_type = BNA_MEM_T_DMA;
3433  mem_info->len = PAGE_SIZE;
3434  mem_info->num = num_txq * page_count;
3435 
3437  mem_info = &res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info;
3438  mem_info->mem_type = BNA_MEM_T_DMA;
3439  mem_info->len = BFI_IBIDX_SIZE;
3440  mem_info->num = num_txq;
3441 
3443  res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.intr_type =
3444  BNA_INTR_T_MSIX;
3445  res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.num = num_txq;
3446 }
3447 
3448 struct bna_tx *
3449 bna_tx_create(struct bna *bna, struct bnad *bnad,
3450  struct bna_tx_config *tx_cfg,
3451  const struct bna_tx_event_cbfn *tx_cbfn,
3452  struct bna_res_info *res_info, void *priv)
3453 {
3454  struct bna_intr_info *intr_info;
3455  struct bna_tx_mod *tx_mod = &bna->tx_mod;
3456  struct bna_tx *tx;
3457  struct bna_txq *txq;
3458  struct list_head *qe;
3459  int page_count;
3460  int page_size;
3461  int page_idx;
3462  int i;
3463 
3464  intr_info = &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
3465  page_count = (res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.num) /
3466  tx_cfg->num_txq;
3467  page_size = res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.len;
3468 
3473  if ((intr_info->num != 1) && (intr_info->num != tx_cfg->num_txq))
3474  return NULL;
3475 
3476  /* Tx */
3477 
3478  tx = bna_tx_get(tx_mod, tx_cfg->tx_type);
3479  if (!tx)
3480  return NULL;
3481  tx->bna = bna;
3482  tx->priv = priv;
3483 
3484  /* TxQs */
3485 
3486  INIT_LIST_HEAD(&tx->txq_q);
3487  for (i = 0; i < tx_cfg->num_txq; i++) {
3488  if (list_empty(&tx_mod->txq_free_q))
3489  goto err_return;
3490 
3491  bfa_q_deq(&tx_mod->txq_free_q, &txq);
3492  bfa_q_qe_init(&txq->qe);
3493  list_add_tail(&txq->qe, &tx->txq_q);
3494  txq->tx = tx;
3495  }
3496 
3497  /*
3498  * Initialize
3499  */
3500 
3501  /* Tx */
3502 
3503  tx->tcb_setup_cbfn = tx_cbfn->tcb_setup_cbfn;
3504  tx->tcb_destroy_cbfn = tx_cbfn->tcb_destroy_cbfn;
3505  /* Following callbacks are mandatory */
3506  tx->tx_stall_cbfn = tx_cbfn->tx_stall_cbfn;
3507  tx->tx_resume_cbfn = tx_cbfn->tx_resume_cbfn;
3508  tx->tx_cleanup_cbfn = tx_cbfn->tx_cleanup_cbfn;
3509 
3510  list_add_tail(&tx->qe, &tx_mod->tx_active_q);
3511 
3512  tx->num_txq = tx_cfg->num_txq;
3513 
3514  tx->flags = 0;
3515  if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_ENET_STARTED) {
3516  switch (tx->type) {
3517  case BNA_TX_T_REGULAR:
3518  if (!(tx->bna->tx_mod.flags &
3519  BNA_TX_MOD_F_ENET_LOOPBACK))
3520  tx->flags |= BNA_TX_F_ENET_STARTED;
3521  break;
3522  case BNA_TX_T_LOOPBACK:
3523  if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_ENET_LOOPBACK)
3524  tx->flags |= BNA_TX_F_ENET_STARTED;
3525  break;
3526  }
3527  }
3528 
3529  /* TxQ */
3530 
3531  i = 0;
3532  page_idx = 0;
3533  list_for_each(qe, &tx->txq_q) {
3534  txq = (struct bna_txq *)qe;
3535  txq->tcb = (struct bna_tcb *)
3536  res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info.mdl[i].kva;
3537  txq->tx_packets = 0;
3538  txq->tx_bytes = 0;
3539 
3540  /* IB */
3541  txq->ib.ib_seg_host_addr.lsb =
3542  res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb;
3543  txq->ib.ib_seg_host_addr.msb =
3544  res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb;
3545  txq->ib.ib_seg_host_addr_kva =
3546  res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva;
3547  txq->ib.intr_type = intr_info->intr_type;
3548  txq->ib.intr_vector = (intr_info->num == 1) ?
3549  intr_info->idl[0].vector :
3550  intr_info->idl[i].vector;
3551  if (intr_info->intr_type == BNA_INTR_T_INTX)
3552  txq->ib.intr_vector = (1 << txq->ib.intr_vector);
3553  txq->ib.coalescing_timeo = tx_cfg->coalescing_timeo;
3554  txq->ib.interpkt_timeo = 0; /* Not used */
3555  txq->ib.interpkt_count = BFI_TX_INTERPKT_COUNT;
3556 
3557  /* TCB */
3558 
3559  txq->tcb->q_depth = tx_cfg->txq_depth;
3560  txq->tcb->unmap_q = (void *)
3561  res_info[BNA_TX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[i].kva;
3562  txq->tcb->hw_consumer_index =
3563  (u32 *)txq->ib.ib_seg_host_addr_kva;
3564  txq->tcb->i_dbell = &txq->ib.door_bell;
3565  txq->tcb->intr_type = txq->ib.intr_type;
3566  txq->tcb->intr_vector = txq->ib.intr_vector;
3567  txq->tcb->txq = txq;
3568  txq->tcb->bnad = bnad;
3569  txq->tcb->id = i;
3570 
3571  /* QPT, SWQPT, Pages */
3572  bna_txq_qpt_setup(txq, page_count, page_size,
3573  &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info.mdl[i],
3574  &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info.mdl[i],
3575  &res_info[BNA_TX_RES_MEM_T_PAGE].
3576  res_u.mem_info.mdl[page_idx]);
3577  txq->tcb->page_idx = page_idx;
3578  txq->tcb->page_count = page_count;
3579  page_idx += page_count;
3580 
3581  /* Callback to bnad for setting up TCB */
3582  if (tx->tcb_setup_cbfn)
3583  (tx->tcb_setup_cbfn)(bna->bnad, txq->tcb);
3584 
3585  if (tx_cfg->num_txq == BFI_TX_MAX_PRIO)
3586  txq->priority = txq->tcb->id;
3587  else
3588  txq->priority = tx_mod->default_prio;
3589 
3590  i++;
3591  }
3592 
3593  tx->txf_vlan_id = 0;
3594 
3595  bfa_fsm_set_state(tx, bna_tx_sm_stopped);
3596 
3597  tx_mod->rid_mask |= (1 << tx->rid);
3598 
3599  return tx;
3600 
3601 err_return:
3602  bna_tx_free(tx);
3603  return NULL;
3604 }
3605 
3606 void
3607 bna_tx_destroy(struct bna_tx *tx)
3608 {
3609  struct bna_txq *txq;
3610  struct list_head *qe;
3611 
3612  list_for_each(qe, &tx->txq_q) {
3613  txq = (struct bna_txq *)qe;
3614  if (tx->tcb_destroy_cbfn)
3615  (tx->tcb_destroy_cbfn)(tx->bna->bnad, txq->tcb);
3616  }
3617 
3618  tx->bna->tx_mod.rid_mask &= ~(1 << tx->rid);
3619  bna_tx_free(tx);
3620 }
3621 
3622 void
3623 bna_tx_enable(struct bna_tx *tx)
3624 {
3625  if (tx->fsm != (bfa_sm_t)bna_tx_sm_stopped)
3626  return;
3627 
3628  tx->flags |= BNA_TX_F_ENABLED;
3629 
3630  if (tx->flags & BNA_TX_F_ENET_STARTED)
3631  bfa_fsm_send_event(tx, TX_E_START);
3632 }
3633 
3634 void
3635 bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type,
3636  void (*cbfn)(void *, struct bna_tx *))
3637 {
3638  if (type == BNA_SOFT_CLEANUP) {
3639  (*cbfn)(tx->bna->bnad, tx);
3640  return;
3641  }
3642 
3643  tx->stop_cbfn = cbfn;
3644  tx->stop_cbarg = tx->bna->bnad;
3645 
3646  tx->flags &= ~BNA_TX_F_ENABLED;
3647 
3648  bfa_fsm_send_event(tx, TX_E_STOP);
3649 }
3650 
3651 void
3652 bna_tx_cleanup_complete(struct bna_tx *tx)
3653 {
3654  bfa_fsm_send_event(tx, TX_E_CLEANUP_DONE);
3655 }
3656 
3657 static void
3658 bna_tx_mod_cb_tx_stopped(void *arg, struct bna_tx *tx)
3659 {
3660  struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;
3661 
3662  bfa_wc_down(&tx_mod->tx_stop_wc);
3663 }
3664 
3665 static void
3666 bna_tx_mod_cb_tx_stopped_all(void *arg)
3667 {
3668  struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;
3669 
3670  if (tx_mod->stop_cbfn)
3671  tx_mod->stop_cbfn(&tx_mod->bna->enet);
3672  tx_mod->stop_cbfn = NULL;
3673 }
3674 
3675 void
3676 bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna,
3677  struct bna_res_info *res_info)
3678 {
3679  int i;
3680 
3681  tx_mod->bna = bna;
3682  tx_mod->flags = 0;
3683 
3684  tx_mod->tx = (struct bna_tx *)
3685  res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.mdl[0].kva;
3686  tx_mod->txq = (struct bna_txq *)
3687  res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mdl[0].kva;
3688 
3689  INIT_LIST_HEAD(&tx_mod->tx_free_q);
3690  INIT_LIST_HEAD(&tx_mod->tx_active_q);
3691 
3692  INIT_LIST_HEAD(&tx_mod->txq_free_q);
3693 
3694  for (i = 0; i < bna->ioceth.attr.num_txq; i++) {
3695  tx_mod->tx[i].rid = i;
3696  bfa_q_qe_init(&tx_mod->tx[i].qe);
3697  list_add_tail(&tx_mod->tx[i].qe, &tx_mod->tx_free_q);
3698  bfa_q_qe_init(&tx_mod->txq[i].qe);
3699  list_add_tail(&tx_mod->txq[i].qe, &tx_mod->txq_free_q);
3700  }
3701 
3702  tx_mod->prio_map = BFI_TX_PRIO_MAP_ALL;
3703  tx_mod->default_prio = 0;
3704  tx_mod->iscsi_over_cee = BNA_STATUS_T_DISABLED;
3705  tx_mod->iscsi_prio = -1;
3706 }
3707 
3708 void
3709 bna_tx_mod_uninit(struct bna_tx_mod *tx_mod)
3710 {
3711  struct list_head *qe;
3712  int i;
3713 
3714  i = 0;
3715  list_for_each(qe, &tx_mod->tx_free_q)
3716  i++;
3717 
3718  i = 0;
3719  list_for_each(qe, &tx_mod->txq_free_q)
3720  i++;
3721 
3722  tx_mod->bna = NULL;
3723 }
3724 
3725 void
3726 bna_tx_mod_start(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
3727 {
3728  struct bna_tx *tx;
3729  struct list_head *qe;
3730 
3731  tx_mod->flags |= BNA_TX_MOD_F_ENET_STARTED;
3732  if (type == BNA_TX_T_LOOPBACK)
3733  tx_mod->flags |= BNA_TX_MOD_F_ENET_LOOPBACK;
3734 
3735  list_for_each(qe, &tx_mod->tx_active_q) {
3736  tx = (struct bna_tx *)qe;
3737  if (tx->type == type)
3738  bna_tx_start(tx);
3739  }
3740 }
3741 
3742 void
3743 bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
3744 {
3745  struct bna_tx *tx;
3746  struct list_head *qe;
3747 
3748  tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED;
3749  tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK;
3750 
3751  tx_mod->stop_cbfn = bna_enet_cb_tx_stopped;
3752 
3753  bfa_wc_init(&tx_mod->tx_stop_wc, bna_tx_mod_cb_tx_stopped_all, tx_mod);
3754 
3755  list_for_each(qe, &tx_mod->tx_active_q) {
3756  tx = (struct bna_tx *)qe;
3757  if (tx->type == type) {
3758  bfa_wc_up(&tx_mod->tx_stop_wc);
3759  bna_tx_stop(tx);
3760  }
3761  }
3762 
3763  bfa_wc_wait(&tx_mod->tx_stop_wc);
3764 }
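
/*
 * bna_tx_mod_stop() drives a wait counter: a completion callback is armed,
 * one reference is taken per Tx being stopped, each stop completion drops
 * one, and the final wait call drops the initial reference so the callback
 * fires once everything (or nothing) was pending. The sketch below is a
 * standalone reference-counted completion written to match that usage; the
 * internals of the real bfa_wc helpers are an assumption here.
 */
struct example_wc {
	int count;
	void (*done)(void *arg);
	void *arg;
};

static void example_wc_down(struct example_wc *wc)
{
	if (--wc->count == 0)
		wc->done(wc->arg);
}

static void example_wc_init(struct example_wc *wc,
			    void (*done)(void *), void *arg)
{
	wc->done = done;
	wc->arg = arg;
	wc->count = 1;		/* initial reference held until _wait() */
}

static void example_wc_up(struct example_wc *wc)
{
	wc->count++;		/* one outstanding item registered */
}

static void example_wc_wait(struct example_wc *wc)
{
	example_wc_down(wc);	/* drop the initial reference */
}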
3765 
3766 void
3767 bna_tx_mod_fail(struct bna_tx_mod *tx_mod)
3768 {
3769  struct bna_tx *tx;
3770  struct list_head *qe;
3771 
3772  tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED;
3773  tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK;
3774 
3775  list_for_each(qe, &tx_mod->tx_active_q) {
3776  tx = (struct bna_tx *)qe;
3777  bna_tx_fail(tx);
3778  }
3779 }
3780 
3781 void
3782 bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo)
3783 {
3784  struct bna_txq *txq;
3785  struct list_head *qe;
3786 
3787  list_for_each(qe, &tx->txq_q) {
3788  txq = (struct bna_txq *)qe;
3789  bna_ib_coalescing_timeo_set(&txq->ib, coalescing_timeo);
3790  }
3791 }