bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo)
#define bna_rxf_vlan_cfg_soft_reset(rxf) \
do { \
	(rxf)->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL; \
	(rxf)->vlan_strip_pending = true; \
} while (0)
#define bna_rxf_rss_cfg_soft_reset(rxf) \
do { \
	if ((rxf)->rss_status == BNA_STATUS_T_ENABLED) \
		(rxf)->rss_pending = (BNA_RSS_F_RIT_PENDING | \
				      BNA_RSS_F_CFG_PENDING | \
				      BNA_RSS_F_STATUS_PENDING); \
} while (0)
static int bna_rxf_cfg_apply(struct bna_rxf *rxf);
static void bna_rxf_cfg_reset(struct bna_rxf *rxf);
static int bna_rxf_fltr_clear(struct bna_rxf *rxf);
static int bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf);
static int bna_rxf_promisc_cfg_apply(struct bna_rxf *rxf);
static int bna_rxf_allmulti_cfg_apply(struct bna_rxf *rxf);
static int bna_rxf_vlan_strip_cfg_apply(struct bna_rxf *rxf);
static int bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf,
				   enum bna_cleanup_type cleanup);
static int bna_rxf_promisc_cfg_reset(struct bna_rxf *rxf,
				     enum bna_cleanup_type cleanup);
static int bna_rxf_allmulti_cfg_reset(struct bna_rxf *rxf,
				      enum bna_cleanup_type cleanup);
bna_rxf_sm_stopped_entry(struct bna_rxf *rxf)
bna_rxf_sm_paused_entry(struct bna_rxf *rxf)
bna_rxf_sm_cfg_wait_entry(struct bna_rxf *rxf)
if (!bna_rxf_cfg_apply(rxf)) {
bna_rxf_cfg_reset(rxf);
if (!bna_rxf_cfg_apply(rxf)) {
bna_rxf_sm_started_entry(struct bna_rxf *rxf)
bna_rxf_cfg_reset(rxf);
if (!bna_rxf_fltr_clear(rxf))
bna_rxf_sm_fltr_clr_wait_entry(struct bna_rxf *rxf)
bna_rxf_cfg_reset(rxf);
if (!bna_rxf_fltr_clear(rxf)) {
bna_rxf_sm_last_resp_wait_entry(struct bna_rxf *rxf)
bna_rxf_cfg_reset(rxf);
bna_bfi_rx_vlan_filter_set(struct bna_rxf *rxf, u8 block_idx)
bna_bfi_vlan_strip_enable(struct bna_rxf *rxf)
bna_bfi_rit_cfg(struct bna_rxf *rxf)
bna_bfi_rss_cfg(struct bna_rxf *rxf)
bna_bfi_rss_enable(struct bna_rxf *rxf)
bna_rxf_mchandle_get(struct bna_rxf *rxf, int handle)
if (mchandle->handle == handle)
bna_rxf_mchandle_attach(struct bna_rxf *rxf, u8 *mac_addr, int handle)
mcmac = bna_rxf_mcmac_get(rxf, mac_addr);
mchandle = bna_rxf_mchandle_get(rxf, handle);
if (mchandle == NULL) {
if (mchandle == NULL)
if (mchandle->refcnt == 0) {
bna_bfi_mcast_del_req(rxf, mchandle->handle);
bna_rxf_mcast_cfg_apply(struct bna_rxf *rxf)
bna_bfi_mcast_add_req(rxf, mac);
bna_rxf_vlan_cfg_apply(struct bna_rxf *rxf)
u8 vlan_pending_bitmask;
while (!(vlan_pending_bitmask & 0x1)) {
vlan_pending_bitmask >>= 1;
bna_bfi_rx_vlan_filter_set(rxf, block_idx);
ret = bna_rxf_mcast_del(rxf, mac, cleanup);
if (bna_rxf_mcast_del(rxf, mac, cleanup))
bna_rxf_rss_cfg_apply(struct bna_rxf *rxf)
bna_bfi_rit_cfg(rxf);
bna_bfi_rss_cfg(rxf);
bna_bfi_rss_enable(rxf);
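/* Pending RxF configuration is pushed to the firmware in a fixed order:
 * unicast, multicast, promiscuous, allmulti, VLAN filter, VLAN strip and
 * finally RSS, as the apply sequence below shows.
 */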
bna_rxf_cfg_apply(struct bna_rxf *rxf)
if (bna_rxf_ucast_cfg_apply(rxf))
if (bna_rxf_mcast_cfg_apply(rxf))
if (bna_rxf_promisc_cfg_apply(rxf))
if (bna_rxf_allmulti_cfg_apply(rxf))
if (bna_rxf_vlan_cfg_apply(rxf))
if (bna_rxf_vlan_strip_cfg_apply(rxf))
if (bna_rxf_rss_cfg_apply(rxf))
bna_rxf_fltr_clear(struct bna_rxf *rxf)
bna_rxf_cfg_reset(struct bna_rxf *rxf)
bna_rit_init(struct bna_rxf *rxf, int rit_size)
bna_rxf_mchandle_attach(rxf, (u8 *)&req->mac_addr,
bna_rxf_init(struct bna_rxf *rxf,
bna_rxf_uninit(struct bna_rxf *rxf)
if (rxf->rx->bna->promisc_rid == rxf->rx->rid)
if (rxf->rx->bna->default_mode_rid == rxf->rx->rid)
bna_rx_cb_rxf_started(struct bna_rx *rx)
bna_rxf_start(struct bna_rxf *rxf)
bna_rx_cb_rxf_stopped(struct bna_rx *rx)
bna_rxf_stop(struct bna_rxf *rxf)
bna_rxf_fail(struct bna_rxf *rxf)
cbfn(rx->bna->bnad, rx);
struct list_head *qe;
INIT_LIST_HEAD(&list_head);
for (i = 0, mcaddr = mclist; i < count; i++) {
while (!list_empty(&list_head)) {
while (!list_empty(&list_head)) {
bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf)
bna_bfi_ucast_req(rxf, mac,
bna_bfi_ucast_req(rxf, mac,
bna_rxf_promisc_cfg_apply(struct bna_rxf *rxf)
struct bna *bna = rxf->rx->bna;
bna_rxf_allmulti_cfg_apply(struct bna_rxf *rxf)
bna_rxf_promisc_enable(struct bna_rxf *rxf)
struct bna *bna = rxf->rx->bna;
bna_rxf_promisc_disable(struct bna_rxf *rxf)
struct bna *bna = rxf->rx->bna;
bna_rxf_allmulti_enable(struct bna_rxf *rxf)
bna_rxf_allmulti_disable(struct bna_rxf *rxf)
bna_rxf_vlan_strip_cfg_apply(struct bna_rxf *rxf)
bna_bfi_vlan_strip_enable(rxf);
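/* BNA_GET_RXQS() gives the number of RxQs implied by a queue config (one per
 * path for BNA_RXP_SINGLE, two per path otherwise); SIZE_TO_PAGES() rounds a
 * byte count up to whole pages.
 */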
#define BNA_GET_RXQS(qcfg)	(((qcfg)->rxp_type == BNA_RXP_SINGLE) ? \
	(qcfg)->num_paths : ((qcfg)->num_paths * 2))

#define SIZE_TO_PAGES(size)	(((size) >> PAGE_SHIFT) + ((((size) & \
	(PAGE_SIZE - 1)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT))
#define call_rx_stop_cbfn(rx) \
do { \
	if ((rx)->stop_cbfn) { \
		void (*cbfn)(void *, struct bna_rx *); \
		void *cbarg; \
		cbfn = (rx)->stop_cbfn; \
		cbarg = (rx)->stop_cbarg; \
		(rx)->stop_cbfn = NULL; \
		(rx)->stop_cbarg = NULL; \
		cbfn(cbarg, rx); \
	} \
} while (0)
#define call_rx_stall_cbfn(rx) \
do { \
	if ((rx)->rx_stall_cbfn) \
		(rx)->rx_stall_cbfn((rx)->bna->bnad, (rx)); \
} while (0)
#define bfi_enet_datapath_q_init(bfi_q, bna_qpt) \
do { \
	struct bna_dma_addr cur_q_addr = \
		*((struct bna_dma_addr *)((bna_qpt)->kv_qpt_ptr)); \
	(bfi_q)->pg_tbl.a32.addr_lo = (bna_qpt)->hw_qpt_ptr.lsb; \
	(bfi_q)->pg_tbl.a32.addr_hi = (bna_qpt)->hw_qpt_ptr.msb; \
	(bfi_q)->first_entry.a32.addr_lo = cur_q_addr.lsb; \
	(bfi_q)->first_entry.a32.addr_hi = cur_q_addr.msb; \
	(bfi_q)->pages = htons((u16)(bna_qpt)->page_count); \
	(bfi_q)->page_sz = htons((u16)(bna_qpt)->page_size); \
} while (0)
static void bna_bfi_rx_enet_start(struct bna_rx *rx);
static void bna_rx_enet_stop(struct bna_rx *rx);
static void bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx);
static void bna_rx_sm_stopped_entry(struct bna_rx *rx)
static void bna_rx_sm_stopped(struct bna_rx *rx,
static void bna_rx_sm_start_wait_entry(struct bna_rx *rx)
bna_bfi_rx_enet_start(rx);
bna_rx_enet_stop(rx);
static void bna_rx_sm_start_wait(struct bna_rx *rx,
static void bna_rx_sm_rxf_start_wait_entry(struct bna_rx *rx)
bna_rxf_start(&rx->rxf);
bna_rxf_fail(&rx->rxf);
bna_rxf_stop(&rx->rxf);
bna_rx_enet_stop(rx);
rxp = (struct bna_rxp *)qe_rxp;
bna_rxf_stop(&rx->rxf);
bna_rxf_fail(&rx->rxf);
static void bna_rx_sm_rxf_start_wait(struct bna_rx *rx,
bna_rxf_fail(&rx->rxf);
bna_rx_sm_failed_entry(struct bna_rx *rx)
bna_rx_sm_quiesce_wait_entry(struct bna_rx *rx)
bna_bfi_rx_enet_start(struct bna_rx *rx)
cfg_req->mh.num_entries = htons(
rxp = (struct bna_rxp *)rxp_qe;
switch (rxp->type) {
cfg_req->q_cfg[i].qs.rx_buffer_size =
cfg_req->q_cfg[i].ql.rx_buffer_size =
cfg_req->q_cfg[i].ib.index_addr.a32.addr_lo =
	rxp->cq.ib.ib_seg_host_addr.lsb;
cfg_req->q_cfg[i].ib.index_addr.a32.addr_hi =
	rxp->cq.ib.ib_seg_host_addr.msb;
cfg_req->q_cfg[i].ib.intr.msix_index =
cfg_req->ib_cfg.coalescing_timeout =
cfg_req->ib_cfg.inter_pkt_timeout =
cfg_req->ib_cfg.inter_pkt_count = (u8)rxp->cq.ib.interpkt_count;
switch (rxp->type) {
cfg_req->rx_cfg.hds.force_offset = rx->hds_cfg.forced_offset;
cfg_req->rx_cfg.hds.max_header_size = rx->hds_cfg.forced_offset;
cfg_req->rx_cfg.strip_vlan = rx->rxf.vlan_strip_status;
bna_bfi_rx_enet_stop(struct bna_rx *rx)
bna_rx_enet_stop(struct bna_rx *rx)
rxp = (struct bna_rxp *)qe_rxp;
bna_bfi_rx_enet_stop(rx);
rx = (struct bna_rx *)qe;
if (((struct bna_rx *)qe)->rid < rx->rid)
if (prev_qe == NULL) {
switch (rxp->type) {
bna_rxq_qpt_setup(struct bna_rxq *rxq,
rxq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
rxq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
rxq->qpt.kv_qpt_ptr = qpt_mem->kva;
rxq->qpt.page_count = page_count;
rxq->rcb->sw_qpt = (void **) swqpt_mem->kva;
for (i = 0; i < rxq->qpt.page_count; i++) {
	rxq->rcb->sw_qpt[i] = page_mem[i].kva;
		page_mem[i].dma.lsb;
		page_mem[i].dma.msb;
bna_rxp_cqpt_setup(struct bna_rxp *rxp,
rxp->cq.qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
rxp->cq.qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
rxp->cq.qpt.kv_qpt_ptr = qpt_mem->kva;
rxp->cq.qpt.page_count = page_count;
rxp->cq.ccb->sw_qpt = (void **) swqpt_mem->kva;
for (i = 0; i < rxp->cq.qpt.page_count; i++) {
	rxp->cq.ccb->sw_qpt[i] = page_mem[i].kva;
		page_mem[i].dma.lsb;
		page_mem[i].dma.msb;
bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx)
bna_rx_mod_cb_rx_stopped_all(void *arg)
bna_rx_start(struct bna_rx *rx)
bna_rx_stop(struct bna_rx *rx)
bna_rx_mod_cb_rx_stopped(&rx->bna->rx_mod, rx);
rx->stop_cbfn = bna_rx_mod_cb_rx_stopped;
bna_rx_fail(struct bna_rx *rx)
rx = (struct bna_rx *)qe;
if (rx->type == type)
bfa_wc_init(&rx_mod->rx_stop_wc, bna_rx_mod_cb_rx_stopped_all, rx_mod);
rx = (struct bna_rx *)qe;
if (rx->type == type) {
rx = (struct bna_rx *)qe;
for (index = 0; index < bna->ioceth.attr.num_rxp; index++) {
INIT_LIST_HEAD(&rx_ptr->rxp_q);
for (index = 0; index < bna->ioceth.attr.num_rxp; index++) {
for (index = 0; index < (bna->ioceth.attr.num_rxp * 2); index++) {
rxp = (struct bna_rxp *)rxp_qe;
rxp->cq.ccb->i_dbell->doorbell_addr =
	rx->bna->pcidev.pci_bar_kva
	rx->bna->pcidev.pci_bar_kva
	rx->bna->pcidev.pci_bar_kva
(*rxp->cq.ccb->hw_producer_index) = 0;
rxp->cq.ccb->producer_index = 0;
q0->rcb->producer_index = q0->rcb->consumer_index = 0;
q1->rcb->producer_index = q1->rcb->consumer_index = 0;
u32 cq_size, hq_size, dq_size;
u32 cpage_count, hpage_count, dpage_count;
cq_depth = dq_depth + hq_depth;
mem_info->len = cpage_count * sizeof(void *);
mem_info->len = dpage_count * sizeof(void *);
mem_info->len = hpage_count * sizeof(void *);
mem_info->num = (hpage_count ? (hpage_count * q_cfg->num_paths) : 0);
int i, cpage_idx = 0, dpage_idx = 0, hpage_idx = 0;
int dpage_count, hpage_count, rcb_idx;
if (!bna_rx_res_check(rx_mod, rx_cfg))
rx = bna_rx_get(rx_mod, rx_cfg->rx_type);
INIT_LIST_HEAD(&rx->rxp_q);
if (!(rx->bna->rx_mod.flags &
for (i = 0, rcb_idx = 0; i < rx->num_paths; i++) {
rxp = bna_rxp_get(rx_mod);
q0 = bna_rxq_get(rx_mod);
q1 = bna_rxq_get(rx_mod);
if (1 == intr_info->num)
rxp->cq.ib.ib_seg_host_addr.lsb =
rxp->cq.ib.ib_seg_host_addr.msb =
rxp->cq.ib.ib_seg_host_addr_kva =
rxp->cq.ib.intr_vector = rxp->vector;
rxp->cq.ib.intr_vector = (1 << rxp->vector);
bna_rxp_add_rxqs(rxp, q0, q1);
q0->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
q0->rcb->unmap_q = (void *)unmapq_mem[rcb_idx].kva;
bna_rxq_qpt_setup(q0, rxp, dpage_count, PAGE_SIZE,
	&dqpt_mem[i], &dsqpt_mem[i], &dpage_mem[dpage_idx]);
q0->rcb->page_idx = dpage_idx;
q0->rcb->page_count = dpage_count;
dpage_idx += dpage_count;
q1->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
q1->rcb->unmap_q = (void *)unmapq_mem[rcb_idx].kva;
bna_rxq_qpt_setup(q1, rxp, hpage_count, PAGE_SIZE,
	&hqpt_mem[i], &hsqpt_mem[i], &hpage_mem[hpage_idx]);
q1->rcb->page_idx = hpage_idx;
q1->rcb->page_count = hpage_count;
hpage_idx += hpage_count;
rxp->cq.ccb = (struct bna_ccb *) ccb_mem[i].kva;
rxp->cq.ccb->q_depth = rx_cfg->q_depth +
rxp->cq.ccb->cq = &rxp->cq;
rxp->cq.ccb->rcb[0] = q0->rcb;
q0->rcb->ccb = rxp->cq.ccb;
rxp->cq.ccb->rcb[1] = q1->rcb;
q1->rcb->ccb = rxp->cq.ccb;
rxp->cq.ccb->hw_producer_index =
	(u32 *)rxp->cq.ib.ib_seg_host_addr_kva;
rxp->cq.ccb->i_dbell = &rxp->cq.ib.door_bell;
rxp->cq.ccb->intr_type = rxp->cq.ib.intr_type;
rxp->cq.ccb->intr_vector = rxp->cq.ib.intr_vector;
rxp->cq.ccb->rx_coalescing_timeo = rxp->cq.ib.coalescing_timeo;
rxp->cq.ccb->pkt_rate.small_pkt_cnt = 0;
rxp->cq.ccb->pkt_rate.large_pkt_cnt = 0;
rxp->cq.ccb->bnad = bna->bnad;
rxp->cq.ccb->id = i;
bna_rxp_cqpt_setup(rxp, page_count, PAGE_SIZE,
	&cqpt_mem[i], &cswqpt_mem[i], &cpage_mem[cpage_idx]);
rxp->cq.ccb->page_idx = cpage_idx;
bna_rxf_init(&rx->rxf, rx, rx_cfg, res_info);
bna_rxf_uninit(&rx->rxf);
while (!list_empty(&rx->rxp_q)) {
bna_rxq_put(rx_mod, q0);
bna_rxq_put(rx_mod, q1);
bna_rxp_put(rx_mod, rxp);
if (qe == &rx->qe) {
bna_rx_put(rx_mod, rx);
void (*cbfn)(void *, struct bna_rx *))
(*cbfn)(rx->bna->bnad, rx);
void (*cbfn)(struct bnad *, struct bna_rx *))
int need_hw_config = 0;
(rx->bna->promisc_rid != rxf->rx->rid))
(rx->bna->default_mode_rid != rxf->rx->rid)) {
if (bna_rxf_promisc_enable(rxf))
if (bna_rxf_promisc_disable(rxf))
if (bna_rxf_allmulti_enable(rxf))
if (bna_rxf_allmulti_disable(rxf))
if (need_hw_config) {
(*cbfn)(rx->bna->bnad, rx);
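/* Dynamic interrupt moderation: the per-CQ small/large packet counters are
 * sampled, a load level and a small-vs-large bias are derived from the
 * totals, and the coalescing timeout is taken from
 * rx_mod.dim_vector[load][bias].
 */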
rxp->cq.ccb->rx_coalescing_timeo = coalescing_timeo;
bna_ib_coalescing_timeo_set(&rxp->cq.ib, coalescing_timeo);
struct bna *bna = ccb->cq->rx->bna;
u32 pkt_rt, small_rt, large_rt;
u8 coalescing_timeo;
if ((ccb->pkt_rate.small_pkt_cnt == 0) &&
    (ccb->pkt_rate.large_pkt_cnt == 0))
small_rt = ccb->pkt_rate.small_pkt_cnt;
large_rt = ccb->pkt_rate.large_pkt_cnt;
pkt_rt = small_rt + large_rt;
if (small_rt > (large_rt << 1))
coalescing_timeo = bna->rx_mod.dim_vector[load][bias];
bna_ib_coalescing_timeo_set(&ccb->cq->ib, coalescing_timeo);
#define call_tx_stop_cbfn(tx) \
do { \
	if ((tx)->stop_cbfn) { \
		void (*cbfn)(void *, struct bna_tx *); \
		void *cbarg; \
		cbfn = (tx)->stop_cbfn; \
		cbarg = (tx)->stop_cbarg; \
		(tx)->stop_cbfn = NULL; \
		(tx)->stop_cbarg = NULL; \
		cbfn(cbarg, (tx)); \
	} \
} while (0)
#define call_tx_prio_change_cbfn(tx) \
do { \
	if ((tx)->prio_change_cbfn) { \
		void (*cbfn)(struct bnad *, struct bna_tx *); \
		cbfn = (tx)->prio_change_cbfn; \
		(tx)->prio_change_cbfn = NULL; \
		cbfn((tx)->bna->bnad, (tx)); \
	} \
} while (0)
static void bna_tx_mod_cb_tx_stopped(void *tx_mod, struct bna_tx *tx);
static void bna_bfi_tx_enet_start(struct bna_tx *tx);
static void bna_tx_enet_stop(struct bna_tx *tx);
bna_tx_sm_stopped_entry(struct bna_tx *tx)
bna_tx_sm_start_wait_entry(struct bna_tx *tx)
bna_bfi_tx_enet_start(tx);
bna_tx_sm_started_entry(struct bna_tx *tx)
bna_tx_enet_stop(tx);
bna_tx_sm_stop_wait_entry(struct bna_tx *tx)
bna_tx_enet_stop(tx);
bna_tx_sm_cleanup_wait_entry(struct bna_tx *tx)
bna_tx_sm_prio_stop_wait_entry(struct bna_tx *tx)
bna_tx_enet_stop(tx);
bna_tx_sm_prio_cleanup_wait_entry(struct bna_tx *tx)
bna_tx_sm_failed_entry(struct bna_tx *tx)
bna_tx_sm_quiesce_wait_entry(struct bna_tx *tx)
bna_bfi_tx_enet_start(struct bna_tx *tx)
cfg_req->mh.num_entries = htons(
cfg_req->q_cfg[i].ib.index_addr.a32.addr_lo =
	txq->ib.ib_seg_host_addr.lsb;
cfg_req->q_cfg[i].ib.index_addr.a32.addr_hi =
	txq->ib.ib_seg_host_addr.msb;
cfg_req->q_cfg[i].ib.intr.msix_index =
cfg_req->ib_cfg.coalescing_timeout =
cfg_req->ib_cfg.inter_pkt_timeout =
cfg_req->ib_cfg.inter_pkt_count = (u8)txq->ib.interpkt_count;
bna_bfi_tx_enet_stop(struct bna_tx *tx)
bna_tx_enet_stop(struct bna_tx *tx)
bna_bfi_tx_enet_stop(tx);
bna_txq_qpt_setup(struct bna_txq *txq, int page_count, int page_size,
txq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
txq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
txq->qpt.kv_qpt_ptr = qpt_mem->kva;
txq->qpt.page_count = page_count;
txq->tcb->sw_qpt = (void **) swqpt_mem->kva;
for (i = 0; i < page_count; i++) {
	txq->tcb->sw_qpt[i] = page_mem[i].kva;
		page_mem[i].dma.lsb;
		page_mem[i].dma.msb;
tx = (struct bna_tx *)qe;
bna_tx_free(struct bna_tx *tx)
while (!list_empty(&tx->txq_q)) {
if (qe == &tx->qe) {
if (((struct bna_tx *)qe)->rid < tx->rid)
if (prev_qe == NULL) {
bna_tx_start(struct bna_tx *tx)
bna_tx_stop(struct bna_tx *tx)
tx->stop_cbfn = bna_tx_mod_cb_tx_stopped;
bna_tx_fail(struct bna_tx *tx)
txq->tcb->i_dbell->doorbell_addr =
	tx->bna->pcidev.pci_bar_kva
	tx->bna->pcidev.pci_bar_kva
(*txq->tcb->hw_consumer_index) = 0;
txq->tcb->producer_index = txq->tcb->consumer_index = 0;
tx = (struct bna_tx *)qe;
mem_info->num = num_txq;
mem_info->num = num_txq;
mem_info->len = page_count * sizeof(void *);
mem_info->num = num_txq;
mem_info->num = num_txq * page_count;
mem_info->num = num_txq;
tx = bna_tx_get(tx_mod, tx_cfg->tx_type);
INIT_LIST_HEAD(&tx->txq_q);
for (i = 0; i < tx_cfg->num_txq; i++) {
if (!(tx->bna->tx_mod.flags &
txq->ib.ib_seg_host_addr.lsb =
txq->ib.ib_seg_host_addr.msb =
txq->ib.ib_seg_host_addr_kva =
txq->ib.intr_vector = (intr_info->num == 1) ?
	intr_info->idl[0].vector :
	intr_info->idl[i].vector;
txq->ib.intr_vector = (1 << txq->ib.intr_vector);
txq->ib.interpkt_timeo = 0;
txq->tcb->unmap_q = (void *)
txq->tcb->hw_consumer_index =
	(u32 *)txq->ib.ib_seg_host_addr_kva;
txq->tcb->i_dbell = &txq->ib.door_bell;
txq->tcb->intr_type = txq->ib.intr_type;
txq->tcb->intr_vector = txq->ib.intr_vector;
bna_txq_qpt_setup(txq, page_count, page_size,
	res_u.mem_info.mdl[page_idx]);
tx->bna->tx_mod.rid_mask &= ~(1 << tx->rid);
void (*cbfn)(void *, struct bna_tx *))
(*cbfn)(tx->bna->bnad, tx);
bna_tx_mod_cb_tx_stopped(void *arg, struct bna_tx *tx)
bna_tx_mod_cb_tx_stopped_all(void *arg)
for (i = 0; i < bna->ioceth.attr.num_txq; i++) {
tx_mod->tx[i].rid = i;
tx = (struct bna_tx *)qe;
if (tx->type == type)
bfa_wc_init(&tx_mod->tx_stop_wc, bna_tx_mod_cb_tx_stopped_all, tx_mod);
tx = (struct bna_tx *)qe;
if (tx->type == type) {
tx = (struct bna_tx *)qe;
bna_ib_coalescing_timeo_set(&txq->ib, coalescing_timeo);