18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
21 #include <linux/if_vlan.h>
26 #include <linux/prefetch.h>
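/* bnx2x_move_fp(): copy the contents of fastpath @from into fastpath @to (the fastpath struct, its slowpath objects and its statistics), used when queues are re-indexed. */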
46 static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
54 int old_max_eth_txqs, new_max_eth_txqs;
55 int old_txdata_index = 0, new_txdata_index = 0;
61 memcpy(to_fp, from_fp, sizeof(*to_fp));
65 memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
68 memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));
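/* bnx2x_free_tx_pkt(): unmap and free the skb of one completed TX packet at @idx, accumulating pkts_compl/bytes_compl for BQL accounting. */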
95 u16 idx, unsigned int *pkts_compl,
96 unsigned int *bytes_compl)
118 #ifdef BNX2X_STOP_ON_ERROR
153 (*bytes_compl) += skb->len;
167 unsigned int pkts_compl = 0, bytes_compl = 0;
169 #ifdef BNX2X_STOP_ON_ERROR
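/* Walk the TX ring from the software consumer up to the hardware consumer, freeing each completed packet, then report the totals to BQL below. */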
178 while (sw_cons != hw_cons) {
181 pkt_cons = TX_BD(sw_cons);
184 "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n",
185 txdata->txq_index, hw_cons, sw_cons, pkt_cons);
187 bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
188 &pkts_compl, &bytes_compl);
193 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
209 if (unlikely(netif_tx_queue_stopped(txq))) {
222 if ((netif_tx_queue_stopped(txq)) &&
225 netif_tx_wake_queue(txq);
227 __netif_tx_unlock(txq);
237 if (SUB_S16(idx, last_max) > 0)
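/* bnx2x_update_sge_prod(): track the highest SGE index consumed by an aggregation and clear the corresponding bits in the SGE mask before advancing rx_sge_prod. */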
241 static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
246 u16 last_max, last_elem, first_elem;
263 bnx2x_update_last_max_sge(fp,
271 if (last_elem + 1 != first_elem)
286 bnx2x_clear_sge_mask_next_elems(fp);
290 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
297 static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
329 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
343 bnx2x_reuse_rx_data(fp, cons, prod);
349 prod_rx_buf->data = first_buf->data;
356 *first_buf = *cons_rx_buf;
373 #ifdef BNX2X_STOP_ON_ERROR
374 fp->tpa_queue_used |= (1 << queue);
375 #ifdef _ASM_GENERIC_INT_L64_H
388 #define TPA_TSTAMP_OPT_LEN 12
411 hdrs_len += sizeof(struct ipv6hdr);
413 hdrs_len += sizeof(struct iphdr);
424 return len_on_bd - hdrs_len;
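/* bnx2x_alloc_rx_sge(): allocate a page for one RX scatter-gather element and map it for DMA. */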
427 static int bnx2x_alloc_rx_sge(struct bnx2x *bp,
465 u32 i, frag_len, frag_size;
466 int err, j, frag_id = 0;
468 u16 full_page = 0, gro_size = 0;
479 skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp,
484 skb_shinfo(skb)->gso_type =
492 #ifdef BNX2X_STOP_ON_ERROR
494 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
519 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
531 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
535 for (rem = frag_len; rem > 0; rem -= gro_size) {
536 int len = rem > gro_size ? gro_size : rem;
537 skb_fill_page_desc(skb, frag_id++,
538 old_rx_pg.page, offset, len);
540 get_page(old_rx_pg.page);
547 skb->len += frag_len;
549 frag_size -= frag_len;
588 #ifdef BNX2X_STOP_ON_ERROR
590 BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
605 if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
606 skb, cqe, cqe_idx)) {
608 __vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
612 "Failed to allocate new pages - dropping packet!\n");
618 rx_buf->data = new_data;
626 "Failed to allocate or map a new skb - dropping packet!\n");
630 static int bnx2x_alloc_rx_data(struct bnx2x *bp,
687 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
688 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
691 #ifdef BNX2X_STOP_ON_ERROR
704 bd_prod_fw = bd_prod;
714 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
715 fp->index, hw_comp_cons, sw_comp_cons);
717 while (sw_comp_cons != hw_comp_cons) {
728 #ifdef BNX2X_STOP_ON_ERROR
733 comp_ring_cons = RCQ_BD(sw_comp_cons);
734 bd_prod = RX_BD(bd_prod);
735 bd_cons = RX_BD(bd_cons);
743 "CQE type %x err %x status %x queue %x vlan %x len %u\n",
762 #ifdef BNX2X_STOP_ON_ERROR
767 BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
774 "calling tpa_start on queue %d\n",
777 bnx2x_tpa_start(fp, queue,
787 "calling tpa_stop on queue %d\n",
794 pages = (frag_size + tpa_info->full_page - 1) /
800 bnx2x_tpa_stop(bp, fp, tpa_info, pages,
802 #ifdef BNX2X_STOP_ON_ERROR
807 bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
822 "ERROR flags %x rx packet %u\n",
823 cqe_fp_flags, sw_comp_cons);
833 skb = netdev_alloc_skb_ip_align(bp->dev, len);
836 "ERROR packet dropped because of alloc failure\n");
841 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
843 if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod) == 0)) {
852 rx_skb_alloc_failed++;
855 skb_reserve(skb, pad);
858 "ERROR packet dropped because of alloc failure\n");
861 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
870 skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp, &l4_rxhash);
873 skb_checksum_none_assert(skb);
876 bnx2x_csum_validate(skb, cqe, fp,
879 skb_record_rx_queue(skb, fp->rx_queue);
883 __vlan_hwaccel_put_tag(skb,
899 if (rx_pkt == budget)
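/* bnx2x_msix_fp_int(): per-queue MSI-X interrupt handler - prefetch the queue's status block consumer indices and schedule NAPI. */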
918 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
925 "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
929 #ifdef BNX2X_STOP_ON_ERROR
938 prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
951 if (bp->port.need_hw_lock)
957 if (bp->port.need_hw_lock)
968 u16 maxCfg = bnx2x_extract_max_cfg(bp,
975 line_speed = (line_speed * maxCfg) / 100;
977 u16 vn_max_rate = maxCfg * 100;
979 if (vn_max_rate < line_speed)
980 line_speed = vn_max_rate;
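/* bnx2x_fill_report_data(): collect the current link state (speed, duplex, flow control) into a zeroed report structure used for link reporting. */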
995 static void bnx2x_fill_report_data(struct bnx2x *bp,
1000 memset(data, 0, sizeof(*data));
1057 bnx2x_fill_report_data(bp, &cur_data);
1077 netdev_err(bp->dev, "NIC Link is Down\n");
1100 flow = "ON - receive & transmit";
1102 flow = "ON - receive";
1104 flow = "ON - transmit";
1109 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1132 static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1137 for (i = 0; i < last; i++) {
1140 u8 *data = first_buf->data;
1178 if (!first_buf->data) {
1179 BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1181 bnx2x_free_tpa_pool(bp, fp, i);
1190 bnx2x_set_next_page_sgl(fp);
1193 bnx2x_init_sge_ring_bit_mask(fp);
1196 for (i = 0, ring_prod = 0;
1199 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
1200 BNX2X_ERR("was only able to allocate %d rx sges\n",
1202 BNX2X_ERR("disabling TPA for queue[%d]\n",
1205 bnx2x_free_rx_sge_range(bp, fp,
1207 bnx2x_free_tpa_pool(bp, fp,
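/* bnx2x_free_tx_skbs(): free any skbs still pending on the TX rings and reset the BQL state of each queue (used on unload/cleanup). */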
1247 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1256 unsigned pkts_compl = 0, bytes_compl = 0;
1261 while (sw_cons != sw_prod) {
1262 bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1263 &pkts_compl, &bytes_compl);
1266 netdev_tx_reset_queue(
1267 netdev_get_tx_queue(bp->dev,
1297 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1304 bnx2x_free_rx_bds(fp);
1313 bnx2x_free_tx_skbs(bp);
1314 bnx2x_free_rx_skbs(bp);
1322 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1340 static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
1344 if (nvecs == offset)
1351 if (nvecs == offset)
1357 if (nvecs == offset)
1378 int msix_vec = 0, i, rc, req_cnt;
1395 msix_vec, msix_vec, i);
1409 int diff = req_cnt - rc;
1426 } else if (rc > 0) {
1438 } else if (rc < 0) {
1455 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1457 int i, rc, offset = 0;
1476 bnx2x_msix_fp_int, 0, fp->name, fp);
1478 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
1480 bnx2x_free_msix_irqs(bp, offset);
1489 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
1492 i - 1, bp->msix_table[offset + i - 1].vector);
1501 rc = pci_enable_msi(bp->pdev);
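/* bnx2x_req_irq(): request the single INTx or MSI interrupt when MSI-X is not in use. */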
1511 static int bnx2x_req_irq(struct bnx2x *bp)
1513 unsigned long flags;
1524 irq = bp->pdev->irq;
1529 static int bnx2x_setup_irqs(struct bnx2x *bp)
1534 rc = bnx2x_req_msix_irqs(bp);
1539 rc = bnx2x_req_irq(bp);
1541 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1546 netdev_info(bp->dev, "using MSI IRQ %d\n",
1551 netdev_info(bp->dev, "using MSIX IRQ %d\n",
1559 static void bnx2x_napi_enable(struct bnx2x *bp)
1572 napi_disable(&bnx2x_fp(bp, i, napi));
1577 if (netif_running(bp->dev)) {
1578 bnx2x_napi_enable(bp);
1581 netif_tx_wake_all_queues(bp->dev);
1588 bnx2x_napi_disable(bp);
1593 struct bnx2x *bp = netdev_priv(dev);
1656 static int bnx2x_set_real_num_queues(struct bnx2x *bp)
1673 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1676 rc = netif_set_real_num_rx_queues(bp->dev, rx);
1678 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1688 static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
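/* bnx2x_init_rss_pf(): build the RSS indirection table (spreading flows across the ethernet queues by default) and hash key, then configure the RSS object. */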
1716 static int bnx2x_init_rss_pf(struct bnx2x *bp)
1724 for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
1727 ethtool_rxfh_indir_default(i, num_eth_queues);
1776 for (i = 0; i < sizeof(params.rss_key) / 4; i++)
1785 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
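/* bnx2x_squeeze_objects(): best-effort cleanup of ETH and UC-list MAC entries and the pending multicast configuration after a failed load. */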
1804 static void bnx2x_squeeze_objects(struct bnx2x *bp)
1823 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
1828 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
1831 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
1840 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
1847 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
1856 #ifndef BNX2X_STOP_ON_ERROR
1857 #define LOAD_ERROR_EXIT(bp, label) \
1859 (bp)->state = BNX2X_STATE_ERROR; \
1863 #define LOAD_ERROR_EXIT(bp, label) \
1865 (bp)->state = BNX2X_STATE_ERROR; \
1884 if (loaded_fw != my_fw) {
1886 BNX2X_ERR("bnx2x with FW %x was already loaded, which mismatches my %x FW. aborting\n",
1903 static void bnx2x_bz_fp(struct bnx2x *bp, int index)
1914 memset(fp, 0, sizeof(*fp));
1922 if (tmp_eth_q_stats)
1926 tmp_eth_q_stats_old =
1929 if (tmp_eth_q_stats_old)
1934 memset(fp, 0, sizeof(*fp));
1936 if (tmp_eth_q_stats) {
1939 kfree(tmp_eth_q_stats);
1942 if (tmp_eth_q_stats_old) {
1945 kfree(tmp_eth_q_stats_old);
1951 fp->napi = orig_napi;
1968 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
1977 bnx2x_mtu_allows_gro(bp->dev->mtu)));
1998 #ifdef BNX2X_STOP_ON_ERROR
2000 BNX2X_ERR("Can't load NIC when there is panic\n");
2030 bnx2x_set_rx_buf_size(bp);
2039 rc = bnx2x_set_real_num_queues(bp);
2041 BNX2X_ERR("Unable to set real_num_queues\n");
2052 bnx2x_add_all_napi(bp);
2053 bnx2x_napi_enable(bp);
2078 BNX2X_ERR("MCP response failure, aborting\n");
2134 rc = bnx2x_init_hw(bp, load_code);
2136 BNX2X_ERR("HW init failed, aborting\n");
2142 rc = bnx2x_setup_irqs(bp);
2153 bnx2x_init_bp_objs(bp);
2157 (bp->common.shmem2_base)) {
2171 rc = bnx2x_func_start(bp);
2182 BNX2X_ERR("MCP response failure, aborting\n");
2207 rc = bnx2x_init_rss_pf(bp);
2219 BNX2X_ERR("Setting Ethernet MAC failed\n");
2234 netif_addr_lock_bh(bp->dev);
2236 netif_addr_unlock_bh(bp->dev);
2239 switch (load_mode) {
2242 netif_tx_wake_all_queues(bp->dev);
2246 netif_tx_start_all_queues(bp->dev);
2270 bnx2x_setup_cnic_irq_info(bp);
2271 bnx2x_setup_cnic_info(bp);
2286 if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) {
2287 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
2298 #ifndef BNX2X_STOP_ON_ERROR
2308 bnx2x_squeeze_objects(bp);
2325 bnx2x_napi_disable(bp);
2339 bool global = false;
2364 BNX2X_ERR("Can't unload in closed or error state\n");
2377 bnx2x_tx_disable(bp);
2378 netdev_reset_tc(bp->dev);
2416 bnx2x_del_all_napi(bp);
2429 bnx2x_squeeze_objects(bp);
2498 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2513 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
2531 #ifdef BNX2X_STOP_ON_ERROR
2539 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
2543 if (bnx2x_has_rx_work(fp)) {
2547 if (work_done >= budget)
2552 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
2564 bnx2x_update_fpsb_idx(fp);
2580 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
2604 u16 bd_prod, int nbd)
2634 "TSO split data size is %d (%x:%x)\n",
2643 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
2646 csum = (u16) ~csum_fold(csum_sub(csum,
2650 csum = (u16) ~csum_fold(csum_add(csum,
2656 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
2676 if (skb_is_gso_v6(skb))
2678 else if (skb_is_gso(skb))
2684 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
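/* bnx2x_pkt_req_lin(): check whether the skb must be linearized because a window of fragments would exceed what the chip can fetch for a single packet/MSS. */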
2688 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
2693 int first_bd_sz = 0;
2699 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
2704 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
2710 hlen = (int)(skb_transport_header(skb) - skb->data) +
2714 first_bd_sz = skb_headlen(skb) - hlen;
2716 wnd_sum = first_bd_sz;
2719 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
2721 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
2724 if (first_bd_sz > 0) {
2730 wnd_sum -= first_bd_sz;
2735 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
2737 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
2744 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
2756 "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n",
2757 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
2758 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
2764 static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
2767 *parsing_data |= (skb_shinfo(skb)->gso_size <<
2782 static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
2800 &ipv6_hdr(skb)->daddr,
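/* bnx2x_set_pbd_csum_e2(): fill the checksum-offload fields of the E2 parsing data (transport header offset, TCP header length) and return the packet header length used for the TSO split. */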
2816 static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
2817 u32 *parsing_data, u32 xmit_type)
2820 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
2825 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
2829 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
2834 return skb_transport_header(skb) +
2835 sizeof(struct udphdr) - skb->data;
2838 static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2844 tx_start_bd->bd_flags.as_bitfield |=
2847 tx_start_bd->bd_flags.as_bitfield |=
2862 static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2866 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
2873 pbd->ip_hlen_w = (skb_transport_header(skb) -
2874 skb_network_header(skb)) >> 1;
2880 hlen += tcp_hdrlen(skb) / 2;
2882 hlen += sizeof(struct udphdr) / 2;
2887 if (xmit_type & XMIT_CSUM_TCP) {
2894 "hlen %d fix %d csum before fix %x\n",
2899 bnx2x_csum_fix(skb_transport_header(skb),
2915 struct bnx2x *bp = netdev_priv(dev);
2924 u32 pbd_e2_parsing_data = 0;
2925 u16 pkt_prod, bd_prod;
2928 u32 xmit_type = bnx2x_xmit_type(bp, skb);
2935 #ifdef BNX2X_STOP_ON_ERROR
2940 txq_index = skb_get_queue_mapping(skb);
2941 txq = netdev_get_tx_queue(dev, txq_index);
2956 if (unlikely(bnx2x_tx_avail(bp, txdata) <
2957 skb_shinfo(skb)->nr_frags +
2962 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
2964 netif_tx_stop_queue(txq);
2971 "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x\n",
2973 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
2979 if (is_broadcast_ether_addr(eth->h_dest))
2985 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2989 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
2992 if (skb_linearize(skb) != 0) {
2994 "SKB linearization failed - silently dropping this SKB\n");
3005 "SKB mapping failed - silently dropping this SKB\n");
3030 first_bd = tx_start_bd;
3046 "sending pkt %u @%p next_idx %u bd %u @%p\n",
3047 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
3052 tx_start_bd->bd_flags.as_bitfield |=
3061 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
3067 if (xmit_type & XMIT_CSUM)
3068 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3069 &pbd_e2_parsing_data,
3089 u16 global_data = 0;
3093 if (xmit_type & XMIT_CSUM)
3094 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
3106 pkt_size = tx_start_bd->nbytes;
3109 "first bd @%p addr (%x:%x) nbd %d nbytes %d flags %x vlan %x\n",
3115 if (xmit_type & XMIT_GSO) {
3118 "TSO packet len %d hlen %d total len %d tso size %d\n",
3119 skb->len, hlen, skb_headlen(skb),
3120 skb_shinfo(skb)->gso_size);
3124 if (unlikely(skb_headlen(skb) > hlen))
3125 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
3129 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
3132 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
3138 if (pbd_e2_parsing_data)
3141 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
3144 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3147 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
3150 unsigned int pkts_compl = 0, bytes_compl = 0;
3153 "Unable to map page - dropping packet...\n");
3161 bnx2x_free_tx_pkt(bp, txdata,
3163 &pkts_compl, &bytes_compl);
3169 if (total_pkt_bd == NULL)
3175 le16_add_cpu(&pkt_size, skb_frag_size(frag));
3179 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
3204 if (total_pkt_bd != NULL)
3209 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
3216 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
3223 netdev_tx_sent_queue(txq, skb->len);
3225 skb_tx_timestamp(skb);
3247 netif_tx_stop_queue(txq);
3256 netif_tx_wake_queue(txq);
3274 struct bnx2x *bp = netdev_priv(dev);
3281 netdev_reset_tc(dev);
3287 BNX2X_ERR("support for too many traffic classes requested: %d. max supported is %d\n",
3293 if (netdev_set_num_tc(dev, num_tc)) {
3294 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
3300 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
3302 "mapping priority %d to tc %d\n",
3318 for (cos = 0; cos < bp->max_cos; cos++) {
3321 netdev_set_tc_queue(dev, cos, count, offset);
3323 "mapping tc %d to offset %d count %d\n",
3324 cos, offset, count);
3334 struct bnx2x *bp = netdev_priv(dev);
3337 if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data)) {
3338 BNX2X_ERR("Requested MAC address is not valid\n");
3344 !is_zero_ether_addr(addr->sa_data)) {
3345 BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
3350 if (netif_running(dev)) {
3359 if (netif_running(dev))
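/* bnx2x_free_fp_mem_at(): free the status block, TX rings and RX descriptor/completion/SGE rings (and their buffers) of fastpath @fp_index. */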
3365 static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
3395 bnx2x_free_rx_bds(fp);
3422 "freeing tx memory of fp %d cos %d cid %d\n",
3423 fp_index, cos, txdata->cid);
3438 bnx2x_free_fp_mem_at(bp, i);
3441 static void set_sb_shortcuts(struct bnx2x *bp, int index)
3445 bnx2x_fp(bp, index, sb_index_values) =
3447 bnx2x_fp(bp, index, sb_running_index) =
3450 bnx2x_fp(bp, index, sb_index_values) =
3452 bnx2x_fp(bp, index, sb_running_index) =
3462 u16 ring_prod, cqe_ring_prod;
3463 int i, failure_cnt = 0;
3466 cqe_ring_prod = ring_prod = 0;
3472 if (bnx2x_alloc_rx_data(bp, fp, ring_prod) < 0) {
3478 WARN_ON(ring_prod <= (i - failure_cnt));
3482 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
3483 i - failure_cnt, fp->index);
3491 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
3493 return i - failure_cnt;
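/* bnx2x_alloc_fp_mem_at(): allocate the status block, TX rings and RX descriptor/completion/SGE rings for fastpath @index, then fill the RX ring. */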
3514 static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
3520 int rx_ring_size = 0;
3552 sb = &bnx2x_fp(bp, index, status_blk);
3573 set_sb_shortcuts(bp, index);
3582 "allocating tx memory of fp %d cos %d\n",
3597 sizeof(struct sw_rx_bd) * NUM_RX_BD);
3599 &bnx2x_fp(bp, index, rx_desc_mapping),
3603 &bnx2x_fp(bp, index, rx_comp_mapping),
3611 &bnx2x_fp(bp, index, rx_sge_mapping),
3614 bnx2x_set_next_page_rx_bd(fp);
3617 bnx2x_set_next_page_rx_cq(fp);
3620 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
3621 if (ring_size < rx_ring_size)
3629 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
3638 bnx2x_free_fp_mem_at(bp, index);
3656 if (bnx2x_alloc_fp_mem_at(bp, 0))
3662 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
3671 if (bnx2x_alloc_fp_mem_at(bp, i))
3690 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
3711 struct msix_entry *tbl;
3727 fp = kcalloc(fp_array_size, sizeof(*fp), GFP_KERNEL);
3730 for (i = 0; i < fp_array_size; i++) {
3734 if (!(fp[i].tpa_info))
3763 tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
3783 struct bnx2x *bp = netdev_priv(dev);
3794 u32 sel_phy_idx = 0;
3841 #if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC)
3844 struct bnx2x *bp = netdev_priv(dev);
3848 case NETDEV_FCOE_WWNN:
3852 case NETDEV_FCOE_WWPN:
3857 BNX2X_ERR("Wrong WWN type requested - %d\n", type);
3868 struct bnx2x *bp = netdev_priv(dev);
3871 BNX2X_ERR("Can't perform change MTU during parity recovery\n");
3877 BNX2X_ERR("Can't support requested MTU size\n");
3893 struct bnx2x *bp = netdev_priv(dev);
3906 struct bnx2x *bp = netdev_priv(dev);
3908 bool bnx2x_reload = false;
3923 bnx2x_reload = true;
3928 bnx2x_reload = true;
3932 if (flags ^ bp->flags) {
3934 bnx2x_reload = true;
3948 struct bnx2x *bp = netdev_priv(dev);
3950 #ifdef BNX2X_STOP_ON_ERROR
3965 struct net_device *dev = pci_get_drvdata(pdev);
3969 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
3972 bp = netdev_priv(dev);
3978 if (!netif_running(dev)) {
3996 struct net_device *dev = pci_get_drvdata(pdev);
4001 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4004 bp = netdev_priv(dev);
4007 BNX2X_ERR("Handling parity error recovery. Try again later\n");
4015 if (!netif_running(dev)) {
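/* storm_memset_hc_timeout()/storm_memset_hc_disable(): write host-coalescing parameters (timeout ticks, enable flag) for one status-block index into the chip's internal storm memory. */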
4044 static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
4045 u8 fw_sb_id, u8 sb_index,
4053 "port %x fw_sb_id %d sb_index %d ticks %d\n",
4054 port, fw_sb_id, sb_index, ticks);
4057 static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
4058 u16 fw_sb_id, u8 sb_index,
4061 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
4066 flags &= ~HC_INDEX_DATA_HC_ENABLED;
4067 flags |= enable_flag;
4070 "port %x fw_sb_id %d sb_index %d disable %d\n",
4071 port, fw_sb_id, sb_index, disable);
4080 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
4082 disable = disable ? 1 : (usec ? 0 : 1);
4083 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);