#include <linux/bitops.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/if_ether.h>
#include <linux/prefetch.h>
#include <linux/module.h>
static uint bnad_msix_disable;
static uint bnad_ioc_auto_recover = 1;
static uint bna_debugfs_enable = 1;
MODULE_PARM_DESC(bna_debugfs_enable, "Enables debugfs feature, default=1,"
		 " Range[false:0|true:1]");

static struct mutex bnad_list_mutex;

static const u8 bnad_bcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};

#define BNAD_TX_UNMAPQ_DEPTH	(bnad->txq_depth * 2)
#define BNAD_RX_UNMAPQ_DEPTH	(bnad->rxq_depth)
#define BNAD_GET_MBOX_IRQ(_bnad)				\
	(((_bnad)->cfg_flags & BNAD_CF_MSIX) ?			\
	 ((_bnad)->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector) : \
	 ((_bnad)->pcidev->irq))
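/*
 * With MSI-X enabled (BNAD_CF_MSIX set) the mailbox uses its dedicated
 * vector from msix_table; otherwise the legacy INTx line of the PCI
 * device is used.  A sketch of the sort of use made by the mailbox IRQ
 * request/free paths later in this file (illustrative only):
 *
 *	irq = BNAD_GET_MBOX_IRQ(bnad);
 *	free_irq(irq, bnad);
 */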
#define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _depth)	\
do {								\
	(_res_info)->res_type = BNA_RES_T_MEM;			\
	(_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA;	\
	(_res_info)->res_u.mem_info.num = (_num);		\
	(_res_info)->res_u.mem_info.len =			\
		sizeof(struct bnad_unmap_q) +			\
		(sizeof(struct bnad_skb_unmap) * ((_depth) - 1)); \
} while (0)
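/*
 * Usage sketch (illustrative; the exact res_info slot name is an
 * assumption, not taken from this file):
 *
 *	BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_TX_RES_MEM_T_UNMAPQ],
 *				 bnad->num_txq_per_tx, BNAD_TX_UNMAPQ_DEPTH);
 *
 * The computed length covers one struct bnad_unmap_q plus (_depth - 1)
 * extra struct bnad_skb_unmap entries beyond the one embedded in it.
 */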
bnad_remove_from_list(struct bnad *bnad)

	unsigned int wi_range, wis = 0, ccb_prod = 0;

	for (i = 0; i < ccb->q_depth; i++) {
			next_cmpl = cmpl + 1;
					    next_cmpl, wi_range);

	for (j = 0; j < frag; j++) {
			       skb_frag_size(&skb_shinfo(skb)->frags[j]),
bnad_txq_cleanup(struct bnad *bnad,

	for (q = 0; q < unmap_q->q_depth; q++) {
		skb = unmap_array[q].skb;
		unmap_cons = bnad_pci_unmap_skb(&bnad->pcidev->dev, unmap_array,
						unmap_cons, unmap_q->q_depth, skb,
						skb_shinfo(skb)->nr_frags);
bnad_txcmpl_process(struct bnad *bnad,

	u32 unmap_cons, sent_packets = 0, sent_bytes = 0;
	u16 wis, updated_hw_cons;
				updated_hw_cons, tcb->q_depth);

		prefetch(&unmap_array[unmap_cons + 1]);
		skb = unmap_array[unmap_cons].skb;
		sent_bytes += skb->len;
		unmap_cons = bnad_pci_unmap_skb(&bnad->pcidev->dev, unmap_array,
						unmap_cons, unmap_q->q_depth, skb,
						skb_shinfo(skb)->nr_frags);

	tcb->txq->tx_packets += sent_packets;
	tcb->txq->tx_bytes += sent_bytes;
bnad_tx_complete(struct bnad *bnad, struct bna_tcb *tcb)

	sent = bnad_txcmpl_process(bnad, tcb);

	if (netif_queue_stopped(netdev) &&
	    netif_carrier_ok(netdev) &&
			netif_wake_queue(netdev);
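/*
 * Tx completion: bnad_txcmpl_process() walks completed work items,
 * DMA-unmaps and frees the skbs, and folds sent_packets/sent_bytes into
 * the txq counters.  bnad_tx_complete() then wakes the netdev queue only
 * if it was stopped, the carrier is up and (per the elided condition)
 * enough descriptors have been freed for another frame.
 */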
bnad_msix_tx(int irq, void *data)

	struct bnad *bnad = tcb->bnad;

	bnad_tx_complete(bnad, tcb);

bnad_rcb_cleanup(struct bnad *bnad, struct bna_rcb *rcb)

bnad_rxq_cleanup(struct bnad *bnad, struct bna_rcb *rcb)

	for (unmap_cons = 0; unmap_cons < unmap_q->q_depth; unmap_cons++) {
		skb = unmap_array[unmap_cons].skb;
		unmap_array[unmap_cons].skb = NULL;
				 rcb->rxq->buffer_size,

	bnad_rcb_cleanup(bnad, rcb);
bnad_rxq_post(struct bnad *bnad, struct bna_rcb *rcb)

	u16 to_alloc, alloced, unmap_prod, wi_range;

		skb = netdev_alloc_skb_ip_align(bnad->netdev,
						rcb->rxq->buffer_size);
			rcb->rxq->rxbuf_alloc_failed++;
		unmap_array[unmap_prod].skb = skb;
					  rcb->rxq->buffer_size,

bnad_refill_rxq(struct bnad *bnad, struct bna_rcb *rcb)

		bnad_rxq_post(bnad, rcb);
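/*
 * Rx refill path: bnad_rxq_post() allocates IP-aligned skbs sized to
 * rxq->buffer_size, DMA-maps them into the unmap array and posts them to
 * the rcb; allocation failures are counted in rxq->rxbuf_alloc_failed.
 */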
bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)

	unsigned int wi_range, packets = 0, wis = 0;

	BUG_ON(!(wi_range <= ccb->q_depth));

	while (cmpl->valid && packets < budget) {
		skb = unmap_array[unmap_cons].skb;
		unmap_array[unmap_cons].skb = NULL;
				 rcb->rxq->buffer_size,
			next_cmpl = cmpl + 1;
					    next_cmpl, wi_range);
			BUG_ON(!(wi_range <= ccb->q_depth));

			rcb->rxq->rx_packets_with_error++;
			skb_checksum_none_assert(skb);

		rcb->rxq->rx_packets++;
		rcb->rxq->rx_bytes += skb->len;

		bnad_refill_rxq(bnad, ccb->rcb[0]);
		bnad_refill_rxq(bnad, ccb->rcb[1]);
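/*
 * bnad_cq_process() consumes completions up to the NAPI budget, hands
 * good frames up the stack (leaving the checksum as CHECKSUM_NONE when
 * hardware validation did not cover it), counts errored packets, and
 * finally refills rcb[0] and, when configured, rcb[1].
 */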
bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)

	if (likely(napi_schedule_prep(napi))) {

bnad_msix_rx(int irq, void *data)

	bnad_netif_rx_schedule_poll(ccb->bnad, ccb);

bnad_msix_mbox_handler(int irq, void *data)

	struct bnad *bnad = (struct bnad *)data;

	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
bnad_isr(int irq, void *data)

	struct bnad *bnad = (struct bnad *)data;

	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	for (i = 0; i < bnad->num_tx; i++) {
				bnad_tx_complete(bnad, bnad->tx_info[i].tcb[j]);

	for (i = 0; i < bnad->num_rx; i++) {
			bnad_netif_rx_schedule_poll(bnad,
bnad_enable_mbox_irq(struct bnad *bnad)

bnad_disable_mbox_irq(struct bnad *bnad)

bnad_set_netdev_perm_addr(struct bnad *bnad)

	if (is_zero_ether_addr(netdev->dev_addr))

	bnad_enable_mbox_irq(bnad);
	bnad_disable_mbox_irq(bnad);

bnad_cb_enet_disabled(void *arg)

	struct bnad *bnad = (struct bnad *)arg;

	if (!netif_carrier_ok(bnad->netdev)) {

	for (tx_id = 0; tx_id < bnad->num_tx; tx_id++) {
				bnad->tx_info[tx_id].tcb[tcb_id];

	if (netif_carrier_ok(bnad->netdev)) {

	struct bnad *bnad = (struct bnad *)arg;
bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb)

	tx_info->tcb[tcb->id] = tcb;

bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)

bnad_cb_rcb_setup(struct bnad *bnad, struct bna_rcb *rcb)

bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)

bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb)
bnad_cb_tx_stall(struct bnad *bnad, struct bna_tx *tx)

		tcb = tx_info->tcb[i];
		netif_stop_subqueue(bnad->netdev, txq_id);
			bnad->netdev->name, txq_id);

bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)

		tcb = tx_info->tcb[i];
		if (netif_carrier_ok(bnad->netdev)) {
				bnad->netdev->name, txq_id);
			netif_wake_subqueue(bnad->netdev, txq_id);

	bnad_set_netdev_perm_addr(bnad);
	struct bnad *bnad = NULL;

		tcb = tx_info->tcb[i];
		bnad_txq_cleanup(bnad, tcb);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx)

		tcb = tx_info->tcb[i];
bnad_cb_rx_stall(struct bnad *bnad, struct bna_rx *rx)

bnad_rx_cleanup(void *work)

	struct bnad *bnad = NULL;

		bnad = rx_ctrl->ccb->bnad;
		napi_disable(&rx_ctrl->napi);
		bnad_cq_cleanup(bnad, rx_ctrl->ccb);
		bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[0]);
		if (rx_ctrl->ccb->rcb[1])
			bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[1]);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);
bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx)

bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx)

		napi_enable(&rx_ctrl->napi);
		bnad_rxq_post(bnad, rcb);

bnad_cb_rx_disabled(void *arg, struct bna_rx *rx)

	struct bnad *bnad = (struct bnad *)arg;

bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx)

	if (!netif_running(bnad->netdev) ||

bnad_cb_enet_mtu_set(struct bnad *bnad)
bnad_mem_free(struct bnad *bnad,

	for (i = 0; i < mem_info->num; i++) {
		if (mem_info->mdl[i].kva != NULL) {
					  mem_info->mdl[i].len,
					  mem_info->mdl[i].kva, dma_pa);

bnad_mem_alloc(struct bnad *bnad,

	if ((mem_info->num == 0) || (mem_info->len == 0)) {

		for (i = 0; i < mem_info->num; i++) {
			mem_info->mdl[i].len = mem_info->len;
			mem_info->mdl[i].kva =
					   mem_info->len, &dma_pa,
			if (mem_info->mdl[i].kva == NULL)
					   &(mem_info->mdl[i].dma));

		for (i = 0; i < mem_info->num; i++) {
			mem_info->mdl[i].len = mem_info->len;
			mem_info->mdl[i].kva = kzalloc(mem_info->len,
			if (mem_info->mdl[i].kva == NULL)

	bnad_mem_free(bnad, mem_info);
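/*
 * bnad_mem_alloc() supports two backing types: DMA-coherent buffers
 * (allocated with the length/&dma_pa arguments shown above) and plain
 * kernel virtual memory from kzalloc().  A partial allocation is unwound
 * by the bnad_mem_free() call in the error path, which also releases the
 * coherent mappings using the recorded dma address and length.
 */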
bnad_mbox_irq_free(struct bnad *bnad)

	unsigned long flags;

	bnad_disable_mbox_irq(bnad);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

bnad_mbox_irq_alloc(struct bnad *bnad)

	unsigned long irq_flags, flags;

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	int i, vector_start = 0;
	unsigned long flags;

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

		intr_info->idl = kcalloc(intr_info->num,
		if (!intr_info->idl)

		for (i = 0; i < intr_info->num; i++)
			intr_info->idl[i].vector = vector_start + i;

		intr_info->idl = kcalloc(intr_info->num,
		if (!intr_info->idl)
bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,

	for (i = 0; i < num_txqs; i++) {
		vector_num = tx_info->tcb[i]->intr_vector;

bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
		      u32 tx_id, int num_txqs)

	for (i = 0; i < num_txqs; i++) {
		vector_num = tx_info->tcb[i]->intr_vector;
			tx_id + tx_info->tcb[i]->id);
				  tx_info->tcb[i]->name,

	bnad_tx_msix_unregister(bnad, tx_info, (i - 1));

bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,

	for (i = 0; i < num_rxps; i++) {
		vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;

bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
		      u32 rx_id, int num_rxps)

	for (i = 0; i < num_rxps; i++) {
		vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
			rx_id + rx_info->rx_ctrl[i].ccb->id);
				  rx_info->rx_ctrl[i].ccb->name,

	bnad_rx_msix_unregister(bnad, rx_info, (i - 1));
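/*
 * The Tx/Rx MSI-X registration helpers follow the usual rollback
 * pattern: if request_irq() fails for vector i, the (i - 1) vectors
 * registered so far are unregistered before the error is returned.
 */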
bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info)

			bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
			bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);

bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,

			err = bnad_mem_alloc(bnad,
					     &res_info[i].res_u.mem_info);
						  &res_info[i].res_u.intr_info);

	bnad_tx_res_free(bnad, res_info);

bnad_rx_res_free(struct bnad *bnad, struct bna_res_info *res_info)

			bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
			bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);

bnad_rx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,

			err = bnad_mem_alloc(bnad,
					     &res_info[i].res_u.mem_info);
						  &res_info[i].res_u.intr_info);

	bnad_rx_res_free(bnad, res_info);
bnad_ioc_timeout(unsigned long data)

	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

bnad_ioc_hb_check(unsigned long data)

	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

bnad_iocpf_timeout(unsigned long data)

	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

bnad_iocpf_sem_timeout(unsigned long data)

	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	spin_unlock_irqrestore(&bnad->bna_lock, flags);
bnad_dim_timeout(unsigned long data)

	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	if (!netif_carrier_ok(bnad->netdev))

	for (i = 0; i < bnad->num_rx; i++) {

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

bnad_stats_timeout(unsigned long data)

	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	if (!netif_running(bnad->netdev) ||

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

			    (unsigned long)bnad);

bnad_stats_timer_start(struct bnad *bnad)

	unsigned long flags;

			    (unsigned long)bnad);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

bnad_stats_timer_stop(struct bnad *bnad)

	unsigned long flags;

	spin_unlock_irqrestore(&bnad->bna_lock, flags);
bnad_netdev_mc_list_get(struct net_device *netdev, u8 *mc_list)

bnad_napi_poll_rx(struct napi_struct *napi, int budget)

	struct bnad *bnad = rx_ctrl->bnad;

	if (!netif_carrier_ok(bnad->netdev))

	rcvd = bnad_cq_process(bnad, rx_ctrl->ccb, budget);

#define BNAD_NAPI_POLL_QUOTA 64
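/*
 * BNAD_NAPI_POLL_QUOTA is the per-context NAPI weight.  bnad_napi_add()
 * presumably registers bnad_napi_poll_rx for each rx_ctrl roughly as
 * follows (sketch, not taken verbatim from this file):
 *
 *	netif_napi_add(bnad->netdev, &rx_ctrl->napi,
 *		       bnad_napi_poll_rx, BNAD_NAPI_POLL_QUOTA);
 */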
bnad_napi_add(struct bnad *bnad, u32 rx_id)

		rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];

bnad_napi_delete(struct bnad *bnad, u32 rx_id)

	unsigned long flags;

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	bnad_tx_msix_unregister(bnad, tx_info,

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	bnad_tx_res_free(bnad, res_info);

	.tcb_destroy_cbfn = bnad_cb_tcb_destroy,
	.tx_stall_cbfn = bnad_cb_tx_stall,
	.tx_resume_cbfn = bnad_cb_tx_resume,
	.tx_cleanup_cbfn = bnad_cb_tx_cleanup,

	unsigned long flags;

	tx_info->tx_id = tx_id;

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	err = bnad_tx_res_alloc(bnad, res_info, tx_id);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	err = bnad_tx_msix_register(bnad, tx_info,

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	bnad_tx_res_free(bnad, res_info);
bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)

		       sizeof(rx_config->rss_config.toeplitz_hash_key));

bnad_rx_ctrl_init(struct bnad *bnad, u32 rx_id)

		rx_info->rx_ctrl[i].bnad = bnad;

	unsigned long flags;

	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths);

	bnad_napi_delete(bnad, rx_id);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	bnad_rx_res_free(bnad, res_info);
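/*
 * Rx teardown mirrors setup in reverse: unregister the per-path MSI-X
 * vectors, delete the NAPI contexts, then release the BNA resources with
 * bnad_rx_res_free().
 */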
	.rcb_destroy_cbfn = NULL,
	.ccb_setup_cbfn = bnad_cb_ccb_setup,
	.ccb_destroy_cbfn = bnad_cb_ccb_destroy,
	.rx_stall_cbfn = bnad_cb_rx_stall,
	.rx_cleanup_cbfn = bnad_cb_rx_cleanup,
	.rx_post_cbfn = bnad_cb_rx_post,

	unsigned long flags;

	rx_info->rx_id = rx_id;

	bnad_init_rx_config(bnad, rx_config);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	err = bnad_rx_res_alloc(bnad, res_info, rx_id);

	bnad_rx_ctrl_init(bnad, rx_id);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	bnad_napi_add(bnad, rx_id);

	err = bnad_rx_msix_register(bnad, rx_info, rx_id,

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	for (i = 0; i < bnad->num_rx; i++) {

	if (!is_valid_ether_addr(mac_addr))

	unsigned long flags;

			       bnad_cb_rx_mcast_add);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	unsigned long flags;

	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	for (i = 0; i < bnad->num_rx; i++) {
			if (bnad->rx_info[i].rx_ctrl[j].ccb) {
					rx_ctrl[j].ccb->rcb[0]->rxq->rx_packets;
					rx_ctrl[j].ccb->rcb[0]->rxq->rx_bytes;
				if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
				    bnad->rx_info[i].rx_ctrl[j].ccb->
						ccb->rcb[1]->rxq->rx_packets;
						ccb->rcb[1]->rxq->rx_bytes;

	for (i = 0; i < bnad->num_tx; i++) {
			if (bnad->tx_info[i].tcb[j]) {
					bnad->tx_info[i].tcb[j]->txq->tx_packets;

	mac_stats = &bnad->stats.bna_stats->hw_stats.mac_stats;

	for (i = 0; bmap; i++) {
				bnad->stats.bna_stats->
					hw_stats.rxf_stats[i].frame_drops;

bnad_mbox_irq_sync(struct bnad *bnad)

	unsigned long flags;

	spin_unlock_irqrestore(&bnad->bna_lock, flags);
bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)

	if (skb_header_cloned(skb)) {

		struct iphdr *iph = ip_hdr(skb);

		tcp_hdr(skb)->check =

		struct ipv6hdr *ipv6h = ipv6_hdr(skb);

		tcp_hdr(skb)->check =

bnad_q_num_init(struct bnad *bnad)

bnad_q_num_adjust(struct bnad *bnad, int msix_vectors, int temp)

bnad_ioceth_disable(struct bnad *bnad)

	unsigned long flags;

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

bnad_ioceth_enable(struct bnad *bnad)

	unsigned long flags;

	spin_unlock_irqrestore(&bnad->bna_lock, flags);
bnad_res_free(struct bnad *bnad, struct bna_res_info *res_info,

	for (i = 0; i < res_val_max; i++)
		bnad_mem_free(bnad, &res_info[i].res_u.mem_info);

bnad_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,

	for (i = 0; i < res_val_max; i++) {
		err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info);

	bnad_res_free(bnad, res_info, res_val_max);

bnad_enable_msix(struct bnad *bnad)

	unsigned long flags;

	if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	for (i = 0; i < bnad->msix_num; i++)

		pr_warn("BNA: %d MSI-X vectors allocated < %d requested\n",

		spin_unlock_irqrestore(&bnad->bna_lock, flags);

	pr_warn("BNA: MSI-X enable failed - operating in INTx mode\n");

	bnad_q_num_init(bnad);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
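/*
 * If fewer MSI-X vectors are granted than requested, or enabling MSI-X
 * fails outright, the driver falls back to INTx operation and re-runs
 * bnad_q_num_init() so the queue counts match single-vector operation.
 */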
bnad_disable_msix(struct bnad *bnad)

	unsigned long flags;

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	if (cfg_flags & BNAD_CF_MSIX) {

	struct bnad *bnad = netdev_priv(netdev);
	unsigned long flags;

	pause_config.tx_pause = 0;
	pause_config.rx_pause = 0;

	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	bnad_stats_timer_start(bnad);

	struct bnad *bnad = netdev_priv(netdev);
	unsigned long flags;

	bnad_stats_timer_stop(bnad);

			    bnad_cb_enet_disabled);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	bnad_mbox_irq_sync(bnad);
	struct bnad *bnad = netdev_priv(netdev);

	u16 txq_prod, vlan_tag = 0;
	u32 unmap_prod, wis, wis_used, wi_range;

	if (unlikely(skb_headlen(skb) == 0)) {

	vectors = 1 + skb_shinfo(skb)->nr_frags;

		acked = bnad_txcmpl_process(bnad, tcb);

		netif_stop_queue(netdev);
			netif_wake_queue(netdev);

	txqent->hdr.wi.reserved = 0;
			(tcb->priority & 0x7) << 13 | (vlan_tag & 0x1fff);

	if (skb_is_gso(skb)) {
		gso_size = skb_shinfo(skb)->gso_size;

		if (unlikely((gso_size + skb_transport_offset(skb) +
			      tcp_hdrlen(skb)) >= skb->len)) {
			txqent->hdr.wi.opcode =
			txqent->hdr.wi.lso_mss = 0;

			txqent->hdr.wi.opcode =

		err = bnad_tso_prepare(bnad, skb);

		txqent->hdr.wi.l4_hdr_size_n_offset =
			      (tcp_hdrlen(skb) >> 2,
			       skb_transport_offset(skb)));
		txqent->hdr.wi.lso_mss = 0;

			proto = ip_hdr(skb)->protocol;
			proto = ipv6_hdr(skb)->nexthdr;

			txqent->hdr.wi.l4_hdr_size_n_offset =
				      (0, skb_transport_offset(skb)));

				    skb_transport_offset(skb) + tcp_hdrlen(skb))) {

			txqent->hdr.wi.l4_hdr_size_n_offset =
				      (0, skb_transport_offset(skb)));

				    skb_transport_offset(skb) +
				    sizeof(struct udphdr))) {

		txqent->hdr.wi.l4_hdr_size_n_offset = 0;

	len = skb_headlen(skb);

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {

		u16 size = skb_frag_size(frag);

			unmap_prod = bnad_pci_unmap_skb(&bnad->pcidev->dev,
					unmap_prod, unmap_q->q_depth, skb,

		dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag,

		unmap_prod = bnad_pci_unmap_skb(&bnad->pcidev->dev,
				skb_shinfo(skb)->nr_frags);
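/*
 * In the xmit path, a mapping or descriptor error is unwound with
 * bnad_pci_unmap_skb(), walking the unmap array back over the skb head
 * and however many fragments were already DMA-mapped.
 */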
	struct bnad *bnad = netdev_priv(netdev);
	unsigned long flags;

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	struct bnad *bnad = netdev_priv(netdev);
	u32 new_mask, valid_mask;
	unsigned long flags;

	new_mask = valid_mask = 0;

	bnad_netdev_mc_list_get(netdev, mcaddr_list);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	struct bnad *bnad = netdev_priv(netdev);
	unsigned long flags;

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

bnad_mtu_set(struct bnad *bnad, int mtu)

	unsigned long flags;

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

bnad_change_mtu(struct net_device *netdev, int new_mtu)

	int err, mtu = netdev->mtu;
	struct bnad *bnad = netdev_priv(netdev);

	netdev->mtu = new_mtu;

	err = bnad_mtu_set(bnad, mtu);

bnad_vlan_rx_add_vid(struct net_device *netdev,

	struct bnad *bnad = netdev_priv(netdev);
	unsigned long flags;

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

bnad_vlan_rx_kill_vid(struct net_device *netdev,

	struct bnad *bnad = netdev_priv(netdev);
	unsigned long flags;

	spin_unlock_irqrestore(&bnad->bna_lock, flags);
#ifdef CONFIG_NET_POLL_CONTROLLER

	struct bnad *bnad = netdev_priv(netdev);

	if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
		bnad_isr(bnad->pcidev->irq, netdev);

		for (i = 0; i < bnad->num_rx; i++) {
				bnad_netif_rx_schedule_poll(bnad,

	.ndo_open		= bnad_open,
	.ndo_stop		= bnad_stop,
	.ndo_start_xmit		= bnad_start_xmit,
	.ndo_get_stats64	= bnad_get_stats64,
	.ndo_set_mac_address	= bnad_set_mac_address,
	.ndo_change_mtu		= bnad_change_mtu,
	.ndo_vlan_rx_add_vid	= bnad_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= bnad_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= bnad_netpoll
bnad_netdev_init(struct bnad *bnad, bool using_dac)

bnad_init(struct bnad *bnad,

	unsigned long flags;

	pci_set_drvdata(pdev, netdev);

		dev_err(&pdev->dev, "ioremap for bar0 failed\n");
		pci_set_drvdata(pdev, NULL);

	pr_info("bar0 mapped to %p, len %llu\n", bnad->bar0,
		(unsigned long long) bnad->mmio_len);

	if (!bnad_msix_disable)

	bnad_q_num_init(bnad);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
bnad_uninit(struct bnad *bnad)

bnad_lock_init(struct bnad *bnad)

bnad_lock_uninit(struct bnad *bnad)

bnad_pci_init(struct bnad *bnad,
	      struct pci_dev *pdev, bool *using_dac)

		goto disable_device;
		goto release_regions;

bnad_pci_uninit(struct pci_dev *pdev)
bnad_pci_probe(struct pci_dev *pdev,

	unsigned long flags;

	pr_info("bnad_pci_probe : (0x%p, 0x%p) PCI Func : (%d)\n",

		pr_warn("Failed to load Firmware Image!\n");

	netdev = alloc_etherdev(sizeof(struct bnad));

	bnad = netdev_priv(netdev);
	bnad_lock_init(bnad);
	bnad_add_to_list(bnad);

	err = bnad_pci_init(bnad, pdev, &using_dac);

	err = bnad_init(bnad, pdev, netdev);

	bnad_netdev_init(bnad, using_dac);

	if (bna_debugfs_enable)

	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	bnad_enable_msix(bnad);
	err = bnad_mbox_irq_alloc(bnad);

		    ((unsigned long)bnad));
		    ((unsigned long)bnad));
	setup_timer(&bnad->bna.ioceth.ioc.iocpf_timer, bnad_iocpf_timeout,
		    ((unsigned long)bnad));
	setup_timer(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout,
		    ((unsigned long)bnad));
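/*
 * Four IOC timers are armed with the bnad pointer cast to unsigned long,
 * matching the (unsigned long data) handlers defined earlier.  The two
 * elided calls above presumably arm the ioc and heartbeat timers in the
 * same way, e.g. (sketch):
 *
 *	setup_timer(&bnad->bna.ioceth.ioc.ioc_timer, bnad_ioc_timeout,
 *		    ((unsigned long)bnad));
 */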
	err = bnad_ioceth_enable(bnad);

		pr_err("BNA: Initialization failed err=%d\n",

		bnad_q_num_adjust(bnad, bna_attr(bna)->num_txq - 1,

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

		goto disable_ioceth;

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

		goto disable_ioceth;

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	bnad_set_netdev_perm_addr(bnad);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

		pr_err("BNA : Registering with netdev failed\n");

	bnad_ioceth_disable(bnad);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	bnad_mbox_irq_free(bnad);
	bnad_disable_msix(bnad);

	bnad_pci_uninit(pdev);

	bnad_remove_from_list(bnad);
	bnad_lock_uninit(bnad);
bnad_pci_remove(struct pci_dev *pdev)

	struct net_device *netdev = pci_get_drvdata(pdev);

	unsigned long flags;

	bnad = netdev_priv(netdev);

	bnad_ioceth_disable(bnad);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	bnad_mbox_irq_free(bnad);
	bnad_disable_msix(bnad);
	bnad_pci_uninit(pdev);

	bnad_remove_from_list(bnad);
	bnad_lock_uninit(bnad);

		.class_mask = 0xffff00
		.class_mask = 0xffff00

	.id_table = bnad_pci_id_table,
	.probe = bnad_pci_probe,

bnad_module_init(void)

	pr_info("Brocade 10G Ethernet driver - version: %s\n",

	err = pci_register_driver(&bnad_pci_driver);

		pr_err("bna : PCI registration failed in module init "

bnad_module_exit(void)