18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20 #include <linux/module.h>
22 #include <linux/kernel.h>
23 #include <linux/device.h>
25 #include <linux/errno.h>
27 #include <linux/slab.h>
29 #include <linux/pci.h>
31 #include <linux/netdevice.h>
35 #include <linux/bitops.h>
38 #include <asm/byteorder.h>
39 #include <linux/time.h>
40 #include <linux/ethtool.h>
41 #include <linux/mii.h>
42 #include <linux/if_vlan.h>
51 #include <linux/prefetch.h>
/* Expected firmware version string, built from the BCM_5710_FW_* version
 * components ("major.minor.revision.engineering").
 */
68 #define FW_FILE_VERSION \
69 __stringify(BCM_5710_FW_MAJOR_VERSION) "." \
70 __stringify(BCM_5710_FW_MINOR_VERSION) "." \
71 __stringify(BCM_5710_FW_REVISION_VERSION) "." \
72 __stringify(BCM_5710_FW_ENGINEERING_VERSION)
/* Firmware image file names for the E1, E1H and E2 chip families
 * (presumably passed to request_firmware() — confirm against loader code).
 */
73 #define FW_FILE_NAME_E1 "bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
74 #define FW_FILE_NAME_E1H "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
75 #define FW_FILE_NAME_E2 "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"
/* Number of padding bytes needed to round ETH_ALEN up to a u32 boundary */
77 #define MAC_LEADING_ZERO_CNT (ALIGN(ETH_ALEN, sizeof(u32)) - ETH_ALEN)
/* netdev watchdog transmit-timeout interval: 5 seconds */
80 #define TX_TIMEOUT (5*HZ)
83 "Broadcom NetXtreme II 5771x/578xx 10/20-Gigabit Ethernet Driver "
88 "BCM57710/57711/57711E/"
89 "57712/57712_MF/57800/57800_MF/57810/57810_MF/"
90 "57840/57840_MF Driver");
101 " Set number of queues (default is as a number of CPUs)");
/* NOTE(review): these statics look like module parameters (the matching
 * module_param()/MODULE_PARM_DESC() calls are outside this view — confirm). */
/* Non-zero disables TPA (transparent packet aggregation) — TODO confirm */
103 static int disable_tpa;
/* Interrupt-mode selector values (0 presumably means auto/MSI-X) */
107 #define INT_MODE_INTx 1
108 #define INT_MODE_MSI 2
/* Non-zero enables dropless flow control — TODO confirm semantics */
114 static int dropless_fc;
/* PCIe Max Read Request Size override; -1 presumably means "use default" */
118 static int mrrs = -1;
153 {
"Broadcom NetXtreme II BCM57710 10 Gigabit PCIe [Everest]" },
154 {
"Broadcom NetXtreme II BCM57711 10 Gigabit PCIe" },
155 {
"Broadcom NetXtreme II BCM57711E 10 Gigabit PCIe" },
156 {
"Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet" },
157 {
"Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Multi Function" },
158 {
"Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet" },
159 {
"Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Multi Function" },
160 {
"Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet" },
161 {
"Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Multi Function" },
162 {
"Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet" },
163 {
"Broadcom NetXtreme II BCM57840 10 Gigabit Ethernet" },
164 {
"Broadcom NetXtreme II BCM57840 20 Gigabit Ethernet" },
165 {
"Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function"},
166 {
"Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function"},
167 {
"Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet"},
168 {
"Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet Multi Function"},
171 #ifndef PCI_DEVICE_ID_NX2_57710
172 #define PCI_DEVICE_ID_NX2_57710 CHIP_NUM_57710
174 #ifndef PCI_DEVICE_ID_NX2_57711
175 #define PCI_DEVICE_ID_NX2_57711 CHIP_NUM_57711
177 #ifndef PCI_DEVICE_ID_NX2_57711E
178 #define PCI_DEVICE_ID_NX2_57711E CHIP_NUM_57711E
180 #ifndef PCI_DEVICE_ID_NX2_57712
181 #define PCI_DEVICE_ID_NX2_57712 CHIP_NUM_57712
183 #ifndef PCI_DEVICE_ID_NX2_57712_MF
184 #define PCI_DEVICE_ID_NX2_57712_MF CHIP_NUM_57712_MF
186 #ifndef PCI_DEVICE_ID_NX2_57800
187 #define PCI_DEVICE_ID_NX2_57800 CHIP_NUM_57800
189 #ifndef PCI_DEVICE_ID_NX2_57800_MF
190 #define PCI_DEVICE_ID_NX2_57800_MF CHIP_NUM_57800_MF
192 #ifndef PCI_DEVICE_ID_NX2_57810
193 #define PCI_DEVICE_ID_NX2_57810 CHIP_NUM_57810
195 #ifndef PCI_DEVICE_ID_NX2_57810_MF
196 #define PCI_DEVICE_ID_NX2_57810_MF CHIP_NUM_57810_MF
198 #ifndef PCI_DEVICE_ID_NX2_57840_O
199 #define PCI_DEVICE_ID_NX2_57840_O CHIP_NUM_57840_OBSOLETE
201 #ifndef PCI_DEVICE_ID_NX2_57840_4_10
202 #define PCI_DEVICE_ID_NX2_57840_4_10 CHIP_NUM_57840_4_10
204 #ifndef PCI_DEVICE_ID_NX2_57840_2_20
205 #define PCI_DEVICE_ID_NX2_57840_2_20 CHIP_NUM_57840_2_20
207 #ifndef PCI_DEVICE_ID_NX2_57840_MFO
208 #define PCI_DEVICE_ID_NX2_57840_MFO CHIP_NUM_57840_MF_OBSOLETE
210 #ifndef PCI_DEVICE_ID_NX2_57840_MF
211 #define PCI_DEVICE_ID_NX2_57840_MF CHIP_NUM_57840_MF
213 #ifndef PCI_DEVICE_ID_NX2_57811
214 #define PCI_DEVICE_ID_NX2_57811 CHIP_NUM_57811
216 #ifndef PCI_DEVICE_ID_NX2_57811_MF
217 #define PCI_DEVICE_ID_NX2_57811_MF CHIP_NUM_57811_MF
242 #define BNX2X_PREV_WAIT_NEEDED 1
249 static void __storm_memset_dma_mapping(
struct bnx2x *bp,
256 static void storm_memset_spq_addr(
struct bnx2x *bp,
262 __storm_memset_dma_mapping(bp, addr, mapping);
265 static void storm_memset_vf_to_pf(
struct bnx2x *bp,
u16 abs_fid,
278 static void storm_memset_func_en(
struct bnx2x *bp,
u16 abs_fid,
291 static void storm_memset_eq_data(
struct bnx2x *bp,
299 __storm_memset_struct(bp, addr, size, (
u32 *)eq_data);
302 static void storm_memset_eq_prod(
struct bnx2x *bp,
u16 eq_prod,
320 static u32 bnx2x_reg_rd_ind(
struct bnx2x *bp,
u32 addr)
/* printf-style format fragments for logging DMAE command source/destination
 * addresses: GRC addresses are a single u32, PCI addresses a hi:lo pair.
 */
332 #define DMAE_DP_SRC_GRC "grc src_addr [%08x]"
333 #define DMAE_DP_SRC_PCI "pci src_addr [%x:%08x]"
334 #define DMAE_DP_DST_GRC "grc dst_addr [%08x]"
335 #define DMAE_DP_DST_PCI "pci dst_addr [%x:%08x]"
336 #define DMAE_DP_DST_NONE "dst_addr [none]"
347 REG_WR(bp, cmd_offset + i*4, *(((
u32 *)dmae) + i));
349 REG_WR(bp, dmae_reg_go_c[idx], 1);
388 static void bnx2x_prep_dmae_with_comp(
struct bnx2x *bp,
390 u8 src_type,
u8 dst_type)
405 static int bnx2x_issue_dmae_with_comp(
struct bnx2x *bp,
458 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
460 bnx2x_init_str_wr(bp, dst_addr, data, len32);
475 bnx2x_issue_dmae_with_comp(bp, &dmae);
487 for (i = 0; i < len32; i++)
488 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
490 for (i = 0; i < len32; i++)
491 data[i] =
REG_RD(bp, src_addr + i*4);
507 bnx2x_issue_dmae_with_comp(bp, &dmae);
516 while (len > dmae_wr_max) {
518 addr + offset, dmae_wr_max);
519 offset += dmae_wr_max * 4;
526 static int bnx2x_mc_assert(
struct bnx2x *bp)
530 u32 row0, row1, row2, row3;
536 BNX2X_ERR(
"XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
551 BNX2X_ERR(
"XSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
552 i, row3, row2, row1, row0);
563 BNX2X_ERR(
"TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
578 BNX2X_ERR(
"TSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
579 i, row3, row2, row1, row0);
590 BNX2X_ERR(
"CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
605 BNX2X_ERR(
"CSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
606 i, row3, row2, row1, row0);
617 BNX2X_ERR(
"USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
632 BNX2X_ERR(
"USTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
633 i, row3, row2, row1, row0);
649 u32 trace_shmem_base;
655 (bp->
common.bc_ver & 0xff0000) >> 16,
656 (bp->
common.bc_ver & 0xff00) >> 8,
657 (bp->
common.bc_ver & 0xff));
661 BNX2X_ERR(
"%s" "MCP PC at 0x%x\n", lvl, val);
664 trace_shmem_base = bp->
common.shmem_base;
666 trace_shmem_base =
SHMEM2_RD(bp, other_shmem_base_addr);
667 addr = trace_shmem_base - 0x800;
672 BNX2X_ERR(
"Trace buffer signature is missing.");
680 + ((mark + 0x3) & ~0x3) - 0x08000000;
681 printk(
"%s" "begin fw dump (mark 0x%x)\n", lvl, mark);
684 for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) {
685 for (word = 0; word < 8; word++)
690 for (offset = addr + 4; offset <=
mark; offset += 0x8*4) {
691 for (word = 0; word < 8; word++)
696 printk(
"%s" "end of fw dump\n", lvl);
699 static void bnx2x_fw_dump(
struct bnx2x *bp)
710 #ifdef BNX2X_STOP_ON_ERROR
719 BNX2X_ERR(
"begin crash dump -----------------\n");
723 BNX2X_ERR(
"def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x) spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n",
726 BNX2X_ERR(
"DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n",
735 (i == HC_SP_SB_MAX_INDICES - 1) ?
") " :
" ");
742 pr_cont(
"igu_sb_id(0x%x) igu_seg_id(0x%x) pf_id(0x%x) vnic_id(0x%x) vf_id(0x%x) vf_valid (0x%x) state(0x%x)\n",
743 sp_sb_data.igu_sb_id,
744 sp_sb_data.igu_seg_id,
746 sp_sb_data.
p_func.vnic_id,
748 sp_sb_data.
p_func.vf_valid,
759 sb_data_e1x.
common.state_machine :
760 sb_data_e2.
common.state_machine;
770 BNX2X_ERR(
"fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x) rx_comp_prod(0x%x) rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
774 BNX2X_ERR(
" rx_sge_prod(0x%x) last_max_sge(0x%x) fp_hc_idx(0x%x)\n",
782 BNX2X_ERR(
"fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x) tx_bd_prod(0x%x) tx_bd_cons(0x%x) *tx_cons_sb(0x%x)\n",
802 (j == HC_SB_MAX_SM - 1) ?
")" :
" ");
805 for (j = 0; j < loop; j++)
808 (j == loop - 1) ?
")" :
" ");
812 sizeof(struct hc_status_block_data_e2);
813 data_size /= sizeof(u32);
815 (u32 *)&sb_data_e1x :
818 for (j = 0; j < data_size; j++)
819 *(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
820 CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
823 if (!CHIP_IS_E1x(bp)) {
824 pr_cont(
"pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) vnic_id(0x%x) same_igu_sb_1b(0x%x) state(0x%x)\n",
825 sb_data_e2.
common.p_func.pf_id,
826 sb_data_e2.
common.p_func.vf_id,
827 sb_data_e2.
common.p_func.vf_valid,
828 sb_data_e2.
common.p_func.vnic_id,
829 sb_data_e2.
common.same_igu_sb_1b,
832 pr_cont(
"pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) vnic_id(0x%x) same_igu_sb_1b(0x%x) state(0x%x)\n",
833 sb_data_e1x.
common.p_func.pf_id,
834 sb_data_e1x.
common.p_func.vf_id,
835 sb_data_e1x.
common.p_func.vf_valid,
836 sb_data_e1x.
common.p_func.vnic_id,
837 sb_data_e1x.
common.same_igu_sb_1b,
838 sb_data_e1x.
common.state);
843 pr_cont(
"SM[%d] __flags (0x%x) igu_sb_id (0x%x) igu_seg_id(0x%x) time_to_expire (0x%x) timer_value(0x%x)\n",
844 j, hc_sm_p[j].__flags,
845 hc_sm_p[j].igu_sb_id,
846 hc_sm_p[j].igu_seg_id,
847 hc_sm_p[j].time_to_expire,
848 hc_sm_p[j].timer_value);
852 for (j = 0; j < loop; j++) {
853 pr_cont(
"INDEX[%d] flags (0x%x) timeout (0x%x)\n", j,
859 #ifdef BNX2X_STOP_ON_ERROR
867 for (j = start; j !=
end; j =
RX_BD(j + 1)) {
871 BNX2X_ERR(
"fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
877 for (j = start; j !=
end; j =
RX_SGE(j + 1)) {
881 BNX2X_ERR(
"fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
882 i, j, rx_sge[1], rx_sge[0], sw_page->page);
887 for (j = start; j !=
end; j =
RCQ_BD(j + 1)) {
890 BNX2X_ERR(
"fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
891 i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
903 for (j = start; j !=
end; j =
TX_BD(j + 1)) {
907 BNX2X_ERR(
"fp%d: txdata %d, packet[%x]=[%p,%x]\n",
908 i, cos, j, sw_bd->
skb,
914 for (j = start; j !=
end; j =
TX_BD(j + 1)) {
917 BNX2X_ERR(
"fp%d: txdata %d, tx_bd[%x]=[%x:%x:%x:%x]\n",
918 i, cos, j, tx_bd[0], tx_bd[1],
926 BNX2X_ERR(
"end crash dump -----------------\n");
/* FLR (Function Level Reset) polling budget: wait up to 10000 usec total,
 * sampling every 50 usec => FLR_POLL_CNT = 200 poll iterations.
 */
935 #define FLR_WAIT_USEC 10000
936 #define FLR_WAIT_INTERVAL 50
937 #define FLR_POLL_CNT (FLR_WAIT_USEC/FLR_WAIT_INTERVAL)
952 static void bnx2x_pbf_pN_buf_flushed(
struct bnx2x *bp,
956 u32 init_crd, crd, crd_start, crd_freed, crd_freed_start;
957 u32 cur_cnt = poll_count;
967 while ((crd != init_crd) && ((
u32)
SUB_S32(crd_freed, crd_freed_start) <
968 (init_crd - crd_start))) {
979 regs->
pN, crd_freed);
987 static void bnx2x_pbf_pN_cmd_flushed(
struct bnx2x *bp,
991 u32 occup, to_free, freed, freed_start;
992 u32 cur_cnt = poll_count;
1000 while (occup && ((
u32)
SUB_S32(freed, freed_start) < to_free)) {
1020 u32 expected,
u32 poll_count)
1022 u32 cur_cnt = poll_count;
1025 while ((val =
REG_RD(bp, reg)) != expected && cur_cnt--)
1031 static int bnx2x_flr_clnup_poll_hw_counter(
struct bnx2x *bp,
u32 reg,
1034 u32 val = bnx2x_flr_clnup_reg_poll(bp, reg, 0, poll_cnt);
1036 BNX2X_ERR(
"%s usage count=%d\n", msg, val);
1042 static u32 bnx2x_flr_clnup_poll_count(
struct bnx2x *bp)
1054 static void bnx2x_tx_hw_flushed(
struct bnx2x *bp,
u32 poll_count)
1111 bnx2x_pbf_pN_cmd_flushed(bp, &cmd_regs[i], poll_count);
1116 bnx2x_pbf_pN_buf_flushed(bp, &buf_regs[i], poll_count);
/* Field builders for an SDM "op gen" command word: shift the value into its
 * field position and mask off anything that does not fit the field.
 */
1119 #define OP_GEN_PARAM(param) \
1120 (((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM)
1122 #define OP_GEN_TYPE(type) \
1123 (((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE)
1125 #define OP_GEN_AGG_VECT(index) \
1126 (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)
1129 static int bnx2x_send_final_clnup(
struct bnx2x *bp,
u8 clnup_func,
1138 if (
REG_RD(bp, comp_addr)) {
1139 BNX2X_ERR(
"Cleanup complete was not 0 before sending\n");
1151 if (bnx2x_flr_clnup_reg_poll(bp, comp_addr, 1, poll_cnt) != 1) {
1152 BNX2X_ERR(
"FW final cleanup did not succeed\n");
1154 (
REG_RD(bp, comp_addr)));
1158 REG_WR(bp, comp_addr, 0);
1173 static int bnx2x_poll_hw_usage_counters(
struct bnx2x *bp,
u32 poll_cnt)
1177 if (bnx2x_flr_clnup_poll_hw_counter(bp,
1179 "CFC PF usage counter timed out",
1185 if (bnx2x_flr_clnup_poll_hw_counter(bp,
1187 "DQ PF usage counter timed out",
1192 if (bnx2x_flr_clnup_poll_hw_counter(bp,
1194 "QM PF usage counter timed out",
1199 if (bnx2x_flr_clnup_poll_hw_counter(bp,
1201 "Timers VNIC usage counter timed out",
1204 if (bnx2x_flr_clnup_poll_hw_counter(bp,
1206 "Timers NUM_SCANS usage counter timed out",
1211 if (bnx2x_flr_clnup_poll_hw_counter(bp,
1213 "DMAE dommand register timed out",
1220 static void bnx2x_hw_enable_status(
struct bnx2x *bp)
1237 DP(
BNX2X_MSG_SP,
"IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val);
1240 DP(
BNX2X_MSG_SP,
"PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val);
1243 DP(
BNX2X_MSG_SP,
"PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val);
1246 DP(
BNX2X_MSG_SP,
"PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n",
1250 static int bnx2x_pf_flr_clnup(
struct bnx2x *bp)
1252 u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp);
1261 if (bnx2x_poll_hw_usage_counters(bp, poll_cnt))
1267 if (bnx2x_send_final_clnup(bp, (
u8)
BP_FUNC(bp), poll_cnt))
1273 bnx2x_tx_hw_flushed(bp, poll_cnt);
1279 if (bnx2x_is_pcie_pending(bp->
pdev))
1280 BNX2X_ERR(
"PCIE Transactions still pending\n");
1283 bnx2x_hw_enable_status(bp);
1294 static void bnx2x_hc_int_enable(
struct bnx2x *bp)
1323 "write %x to HC %d (addr 0x%x)\n", val, port, addr);
1335 "write %x to HC %d (addr 0x%x) mode %s\n", val, port, addr,
1336 (msix ?
"MSI-X" : (msi ?
"MSI" :
"INTx")));
1348 val = (0xee0f | (1 << (
BP_VN(bp) + 4)));
1363 static void bnx2x_igu_int_enable(
struct bnx2x *bp)
1396 val, (msix ?
"MSI-X" : (msi ?
"MSI" :
"INTx")));
1407 val = (0xee0f | (1 << (
BP_VN(bp) + 4)));
1424 bnx2x_hc_int_enable(bp);
1426 bnx2x_igu_int_enable(bp);
1429 static void bnx2x_hc_int_disable(
struct bnx2x *bp)
1457 "write %x to HC %d (addr 0x%x)\n",
1464 if (
REG_RD(bp, addr) != val)
1465 BNX2X_ERR(
"BUG! proper val not read from IGU!\n");
1468 static void bnx2x_igu_int_disable(
struct bnx2x *bp)
1473 IGU_PF_CONF_INT_LINE_EN |
1483 BNX2X_ERR(
"BUG! proper val not read from IGU!\n");
1489 bnx2x_hc_int_disable(bp);
1491 bnx2x_igu_int_disable(bp);
1533 u32 hw_lock_control_reg;
1536 "Trying to take a lock on resource %d\n", resource);
1541 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1549 hw_lock_control_reg =
1553 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1554 lock_status =
REG_RD(bp, hw_lock_control_reg);
1555 if (lock_status & resource_bit)
1559 "Failed to get a lock on resource %d\n", resource);
1571 static int bnx2x_get_leader_lock_resource(
struct bnx2x *bp)
1586 static bool bnx2x_trylock_leader_lock(
struct bnx2x *bp)
1588 return bnx2x_trylock_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
1592 static void bnx2x_cnic_cfc_comp(
struct bnx2x *bp,
int cid,
u8 err);
1604 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
1640 BNX2X_ERR(
"unexpected MC reply (%d) on fp[%d]\n",
1641 command, fp->
index);
1654 #ifdef BNX2X_STOP_ON_ERROR
1692 u16 bd_prod,
u16 rx_comp_prod,
u16 rx_sge_prod)
1696 bnx2x_update_rx_prod_gen(bp, fp, bd_prod, rx_comp_prod, rx_sge_prod,
1702 struct bnx2x *bp = netdev_priv(dev_instance);
1715 #ifdef BNX2X_STOP_ON_ERROR
1724 if (status & mask) {
1737 if (status & (mask | 0x1)) {
1778 u32 hw_lock_control_reg;
1783 BNX2X_ERR(
"resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1791 hw_lock_control_reg =
1796 lock_status =
REG_RD(bp, hw_lock_control_reg);
1797 if (lock_status & resource_bit) {
1798 BNX2X_ERR(
"lock_status 0x%x resource_bit 0x%x\n",
1799 lock_status, resource_bit);
1804 for (cnt = 0; cnt < 1000; cnt++) {
1806 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1807 lock_status =
REG_RD(bp, hw_lock_control_reg);
1808 if (lock_status & resource_bit)
1827 u32 hw_lock_control_reg;
1831 BNX2X_ERR(
"resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1839 hw_lock_control_reg =
1844 lock_status =
REG_RD(bp, hw_lock_control_reg);
1845 if (!(lock_status & resource_bit)) {
1846 BNX2X_ERR(
"lock_status 0x%x resource_bit 0x%x. unlock was called but lock wasn't taken!\n",
1847 lock_status, resource_bit);
1851 REG_WR(bp, hw_lock_control_reg, resource_bit);
1861 int gpio_shift = gpio_num +
1863 u32 gpio_mask = (1 << gpio_shift);
1868 BNX2X_ERR(
"Invalid GPIO %d\n", gpio_num);
1876 if ((gpio_reg & gpio_mask) == gpio_mask)
1891 int gpio_shift = gpio_num +
1893 u32 gpio_mask = (1 << gpio_shift);
1897 BNX2X_ERR(
"Invalid GPIO %d\n", gpio_num);
1908 "Set GPIO %d (shift %d) -> output low\n",
1909 gpio_num, gpio_shift);
1917 "Set GPIO %d (shift %d) -> output high\n",
1918 gpio_num, gpio_shift);
1926 "Set GPIO %d (shift %d) -> input\n",
1927 gpio_num, gpio_shift);
1976 BNX2X_ERR(
"Invalid GPIO mode assignment %d\n", mode);
1994 int gpio_shift = gpio_num +
1996 u32 gpio_mask = (1 << gpio_shift);
2000 BNX2X_ERR(
"Invalid GPIO %d\n", gpio_num);
2011 "Clear GPIO INT %d (shift %d) -> output low\n",
2012 gpio_num, gpio_shift);
2020 "Set GPIO INT %d (shift %d) -> output high\n",
2021 gpio_num, gpio_shift);
2037 static int bnx2x_set_spio(
struct bnx2x *bp,
int spio_num,
u32 mode)
2039 u32 spio_mask = (1 << spio_num);
2044 BNX2X_ERR(
"Invalid SPIO %d\n", spio_num);
2161 BNX2X_ERR(
"Bootcode is missing - can not initialize link\n");
2174 BNX2X_ERR(
"Bootcode is missing - can not set link\n");
2177 static void bnx2x__link_reset(
struct bnx2x *bp)
2184 BNX2X_ERR(
"Bootcode is missing - can not reset link\n");
2204 BNX2X_ERR(
"Bootcode is missing - can not test link\n");
2219 static void bnx2x_calc_vn_min(
struct bnx2x *bp,
2234 else if (!vn_min_rate)
2244 input->
flags.cmng_enables &=
2247 }
else if (all_zero) {
2248 input->
flags.cmng_enables &=
2251 "All MIN values are zeroes fairness will be disabled\n");
2253 input->
flags.cmng_enables |=
2257 static void bnx2x_calc_vn_max(
struct bnx2x *bp,
int vn,
2263 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2266 u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg);
2270 vn_max_rate = (bp->
link_vars.line_speed * maxCfg) / 100;
2273 vn_max_rate = maxCfg * 100;
2282 static int bnx2x_get_cmng_fns_mode(
struct bnx2x *bp)
2328 static void bnx2x_cmng_fns_init(
struct bnx2x *bp,
u8 read_cfg,
u8 cmng_type)
2343 bnx2x_calc_vn_min(bp, &input);
2348 bnx2x_calc_vn_max(bp, vn, &input);
2351 input.
flags.cmng_enables |=
2354 bnx2x_init_cmng(&input, &bp->
cmng);
2360 "rate shaping and fairness are disabled\n");
2363 static void storm_memset_cmng(
struct bnx2x *bp,
2373 __storm_memset_struct(bp, addr, size, (
u32 *)&cmng->
port);
2376 int func = func_by_vn(bp, vn);
2381 __storm_memset_struct(bp, addr, size,
2382 (
u32 *)&cmng->
vnic.vnic_max_rate[vn]);
2387 __storm_memset_struct(bp, addr, size,
2388 (
u32 *)&cmng->
vnic.vnic_min_rate[vn]);
2393 static void bnx2x_link_attn(
struct bnx2x *bp)
2405 u32 pause_enabled = 0;
2428 int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
2431 bnx2x_cmng_fns_init(bp,
false, cmng_fns);
2436 "single function mode without fairness\n");
2442 bnx2x_link_sync_notify(bp);
2464 static int bnx2x_afex_func_update(
struct bnx2x *bp,
u16 vifid,
2465 u16 vlan_val,
u8 allowed_prio)
2478 f_update_params->
vif_id = vifid;
2489 static int bnx2x_afex_handle_vif_list_cmd(
struct bnx2x *bp,
u8 cmd_type,
2490 u16 vif_index,
u8 func_bit_map)
2500 BNX2X_ERR(
"BUG! afex_handle_vif_list_cmd invalid type 0x%x\n",
2527 static void bnx2x_handle_afex_cmd(
struct bnx2x *bp,
u32 cmd)
2537 u32 addr_to_write, vifid, addrs, stats_type,
i;
2542 "afex: got MCP req LISTGET_REQ for vifid 0x%x\n", vifid);
2550 "afex: got MCP req LISTSET_REQ for vifid 0x%x addrs 0x%x\n",
2563 "afex: got MCP req STATSGET_REQ, write to addr 0x%x\n",
2570 REG_WR(bp, addr_to_write + i*
sizeof(
u32),
2581 "afex: got MCP req VIFSET_REQ, mf_config 0x%x\n",
2595 bnx2x_calc_vn_max(bp,
BP_VN(bp), &cmng_input);
2596 m_rs_vn.vn_counter.rate =
2597 cmng_input.vnic_max_rate[
BP_VN(bp)];
2598 m_rs_vn.vn_counter.quota =
2599 (m_rs_vn.vn_counter.rate *
2602 __storm_memset_struct(bp, addr, size, (
u32 *)&m_rs_vn);
2606 (
MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
2610 (
MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
2613 vlan_prio = (mf_config &
2619 func_mf_config[func].afex_config) &
2624 func_mf_config[func].afex_config) &
2629 if (bnx2x_afex_func_update(bp, vif_id, vlan_val,
2640 bnx2x_afex_func_update(bp, 0xFFFF, 0, 0);
2648 static void bnx2x_pmf_update(
struct bnx2x *bp)
2668 val = (0xff0f | (1 << (
BP_VN(bp) + 4)));
2699 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
2700 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));
2703 (command | seq), param);
2709 rc =
SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);
2715 cnt*delay, rc, seq);
2718 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2732 static void storm_memset_func_cfg(
struct bnx2x *bp,
2741 __storm_memset_struct(bp, addr, size, (
u32 *)tcfg);
2749 storm_memset_func_cfg(bp, &tcfg, p->
func_id);
2754 storm_memset_func_en(bp, p->
func_id, 1);
2773 static unsigned long bnx2x_get_common_flags(
struct bnx2x *bp,
2777 unsigned long flags = 0;
2795 static unsigned long bnx2x_get_q_flags(
struct bnx2x *bp,
2799 unsigned long flags = 0;
2831 return flags | bnx2x_get_common_flags(bp, fp,
true);
2834 static void bnx2x_pf_q_prep_general(
struct bnx2x *bp,
2838 gen_init->
stat_id = bnx2x_stats_id(fp);
2845 gen_init->
mtu = bp->
dev->mtu;
2847 gen_init->
cos = cos;
2850 static void bnx2x_pf_rx_q_prep(
struct bnx2x *bp,
2856 u16 tpa_agg_size = 0;
2941 static void bnx2x_pf_tx_q_prep(
struct bnx2x *bp,
2962 static void bnx2x_pf_init(
struct bnx2x *bp)
3008 bnx2x_cmng_fns_init(bp,
true, bnx2x_get_cmng_fns_mode(bp));
3017 eq_data.producer = bp->
eq_prod;
3020 storm_memset_eq_data(bp, &eq_data,
BP_FUNC(bp));
3024 static void bnx2x_e1h_disable(
struct bnx2x *bp)
3028 bnx2x_tx_disable(bp);
3033 static void bnx2x_e1h_enable(
struct bnx2x *bp)
3040 netif_tx_wake_all_queues(bp->
dev);
3048 #define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3
3050 static void bnx2x_drv_info_ether_stat(
struct bnx2x *bp)
3053 &bp->
slowpath->drv_info_to_mcp.ether_stat;
3058 bp->
sp_objs[0].mac_obj.get_n_elements(bp, &bp->
sp_objs[0].mac_obj,
3076 static void bnx2x_drv_info_fcoe_stat(
struct bnx2x *bp)
3081 &bp->
slowpath->drv_info_to_mcp.fcoe_stat;
3093 tstorm_queue_statistics;
3097 xstorm_queue_statistics;
3103 fw_fcoe_stat->
rx_stat0.fcoe_rx_byte_cnt);
3121 fw_fcoe_stat->
rx_stat0.fcoe_rx_pkt_cnt);
3133 fw_fcoe_stat->
tx_stat.fcoe_tx_byte_cnt);
3151 fw_fcoe_stat->
tx_stat.fcoe_tx_pkt_cnt);
3168 static void bnx2x_drv_info_iscsi_stat(
struct bnx2x *bp)
3173 &bp->
slowpath->drv_info_to_mcp.iscsi_stat;
3176 bp->cnic_eth_dev.iscsi_mac,
ETH_ALEN);
3191 static void bnx2x_config_mf_bw(
struct bnx2x *bp)
3195 bnx2x_link_sync_notify(bp);
3200 static void bnx2x_set_mf_bw(
struct bnx2x *bp)
3202 bnx2x_config_mf_bw(bp);
3206 static void bnx2x_handle_eee_event(
struct bnx2x *bp)
3212 static void bnx2x_handle_drv_info_req(
struct bnx2x *bp)
3231 bnx2x_drv_info_ether_stat(bp);
3234 bnx2x_drv_info_fcoe_stat(bp);
3237 bnx2x_drv_info_iscsi_stat(bp);
3256 static void bnx2x_dcc_event(
struct bnx2x *bp,
u32 dcc_event)
3271 bnx2x_e1h_disable(bp);
3276 bnx2x_e1h_enable(bp);
3278 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
3281 bnx2x_config_mf_bw(bp);
3282 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
3293 static struct eth_spe *bnx2x_sp_get_next(
struct bnx2x *bp)
3309 static void bnx2x_sp_prod_update(
struct bnx2x *bp)
3331 static bool bnx2x_is_contextless_ramrod(
int cmd,
int cmd_type)
3362 u32 data_hi,
u32 data_lo,
int cmd_type)
3366 bool common = bnx2x_is_contextless_ramrod(command, cmd_type);
3368 #ifdef BNX2X_STOP_ON_ERROR
3370 BNX2X_ERR(
"Can't post SP when there is panic\n");
3391 spe = bnx2x_sp_get_next(bp);
3394 spe->
hdr.conn_and_cmd_data =
3420 "SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x data (%x:%x) type(0x%x) left (CQ, EQ) (%x,%x)\n",
3424 HW_CID(bp, cid), data_hi, data_lo, type,
3427 bnx2x_sp_prod_update(bp);
3433 static int bnx2x_acquire_alr(
struct bnx2x *bp)
3439 for (j = 0; j < 1000; j++) {
3443 if (val & (1L << 31))
3448 if (!(val & (1L << 31))) {
3449 BNX2X_ERR(
"Cannot acquire MCP access lock register\n");
3457 static void bnx2x_release_alr(
struct bnx2x *bp)
/* Bit flags describing which default status-block indices changed
 * (presumably returned by bnx2x_update_dsb_idx() declared just below —
 * confirm against its body, which is outside this view).
 */
3462 #define BNX2X_DEF_SB_ATT_IDX 0x0001
3463 #define BNX2X_DEF_SB_IDX 0x0002
3465 static u16 bnx2x_update_dsb_idx(
struct bnx2x *bp)
3490 static void bnx2x_attn_int_asserted(
struct bnx2x *bp,
u32 asserted)
3505 aeu_mask =
REG_RD(bp, aeu_addr);
3508 aeu_mask, asserted);
3509 aeu_mask &= ~(asserted & 0x3ff);
3512 REG_WR(bp, aeu_addr, aeu_mask);
3525 nig_mask =
REG_RD(bp, nig_int_mask_addr);
3531 REG_WR(bp, nig_int_mask_addr, 0);
3533 bnx2x_link_attn(bp);
3586 DP(
NETIF_MSG_HW,
"about to mask 0x%08x at %s addr 0x%x\n", asserted,
3588 REG_WR(bp, reg_addr, asserted);
3591 if (asserted & ATTN_NIG_FOR_FUNC) {
3592 REG_WR(bp, nig_int_mask_addr, nig_mask);
3597 static void bnx2x_fan_failure(
struct bnx2x *bp)
3604 dev_info.port_hw_config[port].external_phy_config);
3612 netdev_err(bp->
dev,
"Fan Failure on Network Controller has caused the driver to shutdown the card to prevent permanent damage.\n"
3613 "Please contact OEM Support for assistance\n");
3627 static void bnx2x_attn_int_deasserted0(
struct bnx2x *bp,
u32 attn)
3638 val =
REG_RD(bp, reg_offset);
3639 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
3640 REG_WR(bp, reg_offset, val);
3646 bnx2x_fan_failure(bp);
3657 val =
REG_RD(bp, reg_offset);
3659 REG_WR(bp, reg_offset, val);
3661 BNX2X_ERR(
"FATAL HW block attention set0 0x%x\n",
3662 (
u32)(attn & HW_INTERRUT_ASSERT_SET_0));
3667 static void bnx2x_attn_int_deasserted1(
struct bnx2x *bp,
u32 attn)
3674 BNX2X_ERR(
"DB hw attention 0x%x\n", val);
3688 val =
REG_RD(bp, reg_offset);
3690 REG_WR(bp, reg_offset, val);
3692 BNX2X_ERR(
"FATAL HW block attention set1 0x%x\n",
3693 (
u32)(attn & HW_INTERRUT_ASSERT_SET_1));
3698 static void bnx2x_attn_int_deasserted2(
struct bnx2x *bp,
u32 attn)
3705 BNX2X_ERR(
"CFC hw attention 0x%x\n", val);
3713 BNX2X_ERR(
"PXP hw attention-0 0x%x\n", val);
3720 BNX2X_ERR(
"PXP hw attention-1 0x%x\n", val);
3732 val =
REG_RD(bp, reg_offset);
3734 REG_WR(bp, reg_offset, val);
3736 BNX2X_ERR(
"FATAL HW block attention set2 0x%x\n",
3737 (
u32)(attn & HW_INTERRUT_ASSERT_SET_2));
3742 static void bnx2x_attn_int_deasserted3(
struct bnx2x *bp,
u32 attn)
3759 (val & DRV_STATUS_DCC_EVENT_MASK));
3762 bnx2x_set_mf_bw(bp);
3765 bnx2x_handle_drv_info_req(bp);
3767 bnx2x_pmf_update(bp);
3776 bnx2x_handle_afex_cmd(bp,
3777 val & DRV_STATUS_AFEX_EVENT_MASK);
3779 bnx2x_handle_eee_event(bp);
3788 bnx2x_link_sync_notify(bp);
3798 bnx2x_mc_assert(bp);
3812 BNX2X_ERR(
"Unknown HW assert! (attn 0x%x)\n", attn);
3816 BNX2X_ERR(
"LATCHED attention 0x%08x (masked)\n", attn);
3820 BNX2X_ERR(
"GRC time-out 0x%08x\n", val);
3825 BNX2X_ERR(
"GRC reserved 0x%08x\n", val);
/* Recovery state is kept in MISC_REG_GENERIC_POR_1; bit layout (from the
 * masks below): bits 0-7 path-0 load count, bits 8-15 path-1 load count,
 * bit 16 path-0 reset-in-progress, bit 17 path-1 reset-in-progress,
 * bit 18 global reset.
 */
3845 #define BNX2X_RECOVERY_GLOB_REG MISC_REG_GENERIC_POR_1
3847 #define BNX2X_PATH0_LOAD_CNT_MASK 0x000000ff
3848 #define BNX2X_PATH0_LOAD_CNT_SHIFT 0
3849 #define BNX2X_PATH1_LOAD_CNT_MASK 0x0000ff00
3850 #define BNX2X_PATH1_LOAD_CNT_SHIFT 8
3851 #define BNX2X_PATH0_RST_IN_PROG_BIT 0x00010000
3852 #define BNX2X_PATH1_RST_IN_PROG_BIT 0x00020000
3853 #define BNX2X_GLOBAL_RESET_BIT 0x00040000
3874 static void bnx2x_clear_reset_global(
struct bnx2x *bp)
3888 static bool bnx2x_reset_is_global(
struct bnx2x *bp)
3901 static void bnx2x_set_reset_done(
struct bnx2x *bp)
3946 return (val & bit) ?
false :
true;
3968 val1 = (val &
mask) >> shift;
3971 val1 |= (1 << bp->
pf_num);
3977 val |= ((val1 << shift) & mask);
4005 val1 = (val &
mask) >> shift;
4008 val1 &= ~(1 << bp->
pf_num);
4014 val |= ((val1 << shift) & mask);
4026 static bool bnx2x_get_load_status(
struct bnx2x *bp,
int engine)
4036 val = (val &
mask) >> shift;
4044 static void _print_next_block(
int idx,
const char *
blk)
4046 pr_cont(
"%s%s", idx ?
", " :
"", blk);
4049 static int bnx2x_check_blocks_with_parity0(
u32 sig,
int par_num,
4054 for (i = 0;
sig; i++) {
4055 cur_bit = ((
u32)0x1 << i);
4056 if (sig & cur_bit) {
4060 _print_next_block(par_num++,
"BRB");
4064 _print_next_block(par_num++,
"PARSER");
4068 _print_next_block(par_num++,
"TSDM");
4072 _print_next_block(par_num++,
4077 _print_next_block(par_num++,
"TCM");
4081 _print_next_block(par_num++,
"TSEMI");
4085 _print_next_block(par_num++,
"XPB");
4097 static int bnx2x_check_blocks_with_parity1(
u32 sig,
int par_num,
4098 bool *global,
bool print)
4102 for (i = 0;
sig; i++) {
4103 cur_bit = ((
u32)0x1 << i);
4104 if (sig & cur_bit) {
4108 _print_next_block(par_num++,
"PBF");
4112 _print_next_block(par_num++,
"QM");
4116 _print_next_block(par_num++,
"TM");
4120 _print_next_block(par_num++,
"XSDM");
4124 _print_next_block(par_num++,
"XCM");
4128 _print_next_block(par_num++,
"XSEMI");
4132 _print_next_block(par_num++,
4137 _print_next_block(par_num++,
"NIG");
4141 _print_next_block(par_num++,
4147 _print_next_block(par_num++,
"DEBUG");
4151 _print_next_block(par_num++,
"USDM");
4155 _print_next_block(par_num++,
"UCM");
4159 _print_next_block(par_num++,
"USEMI");
4163 _print_next_block(par_num++,
"UPB");
4167 _print_next_block(par_num++,
"CSDM");
4171 _print_next_block(par_num++,
"CCM");
4183 static int bnx2x_check_blocks_with_parity2(
u32 sig,
int par_num,
4188 for (i = 0;
sig; i++) {
4189 cur_bit = ((
u32)0x1 << i);
4190 if (sig & cur_bit) {
4194 _print_next_block(par_num++,
"CSEMI");
4198 _print_next_block(par_num++,
"PXP");
4202 _print_next_block(par_num++,
4203 "PXPPCICLOCKCLIENT");
4207 _print_next_block(par_num++,
"CFC");
4211 _print_next_block(par_num++,
"CDU");
4215 _print_next_block(par_num++,
"DMAE");
4219 _print_next_block(par_num++,
"IGU");
4223 _print_next_block(par_num++,
"MISC");
4235 static int bnx2x_check_blocks_with_parity3(
u32 sig,
int par_num,
4236 bool *global,
bool print)
4240 for (i = 0;
sig; i++) {
4241 cur_bit = ((
u32)0x1 << i);
4242 if (sig & cur_bit) {
4246 _print_next_block(par_num++,
"MCP ROM");
4251 _print_next_block(par_num++,
4257 _print_next_block(par_num++,
4263 _print_next_block(par_num++,
4277 static int bnx2x_check_blocks_with_parity4(
u32 sig,
int par_num,
4282 for (i = 0;
sig; i++) {
4283 cur_bit = ((
u32)0x1 << i);
4284 if (sig & cur_bit) {
4288 _print_next_block(par_num++,
"PGLUE_B");
4292 _print_next_block(par_num++,
"ATC");
4304 static bool bnx2x_parity_attn(
struct bnx2x *bp,
bool *global,
bool print,
4313 DP(
NETIF_MSG_HW,
"Was parity error: HW block parity attention:\n"
4314 "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n",
4315 sig[0] & HW_PRTY_ASSERT_SET_0,
4316 sig[1] & HW_PRTY_ASSERT_SET_1,
4317 sig[2] & HW_PRTY_ASSERT_SET_2,
4318 sig[3] & HW_PRTY_ASSERT_SET_3,
4319 sig[4] & HW_PRTY_ASSERT_SET_4);
4322 "Parity errors detected in blocks: ");
4323 par_num = bnx2x_check_blocks_with_parity0(
4324 sig[0] & HW_PRTY_ASSERT_SET_0, par_num, print);
4325 par_num = bnx2x_check_blocks_with_parity1(
4326 sig[1] & HW_PRTY_ASSERT_SET_1, par_num, global, print);
4327 par_num = bnx2x_check_blocks_with_parity2(
4328 sig[2] & HW_PRTY_ASSERT_SET_2, par_num, print);
4329 par_num = bnx2x_check_blocks_with_parity3(
4330 sig[3] & HW_PRTY_ASSERT_SET_3, par_num, global, print);
4331 par_num = bnx2x_check_blocks_with_parity4(
4332 sig[4] & HW_PRTY_ASSERT_SET_4, par_num, print);
4372 return bnx2x_parity_attn(bp, global, print, attn.
sig);
4376 static void bnx2x_attn_int_deasserted4(
struct bnx2x *bp,
u32 attn)
4382 BNX2X_ERR(
"PGLUE hw attention 0x%x\n", val);
4384 BNX2X_ERR(
"PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR\n");
4386 BNX2X_ERR(
"PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR\n");
4388 BNX2X_ERR(
"PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN\n");
4390 BNX2X_ERR(
"PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN\n");
4393 BNX2X_ERR(
"PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN\n");
4396 BNX2X_ERR(
"PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN\n");
4398 BNX2X_ERR(
"PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN\n");
4400 BNX2X_ERR(
"PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN\n");
4402 BNX2X_ERR(
"PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW\n");
4406 BNX2X_ERR(
"ATC hw attention 0x%x\n", val);
4408 BNX2X_ERR(
"ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
4410 BNX2X_ERR(
"ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND\n");
4412 BNX2X_ERR(
"ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS\n");
4414 BNX2X_ERR(
"ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT\n");
4416 BNX2X_ERR(
"ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
4418 BNX2X_ERR(
"ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU\n");
4423 BNX2X_ERR(
"FATAL parity attention set4 0x%x\n",
4430 static void bnx2x_attn_int_deasserted(
struct bnx2x *bp,
u32 deasserted)
4438 bool global =
false;
4442 bnx2x_acquire_alr(bp);
4445 #ifndef BNX2X_STOP_ON_ERROR
4456 bnx2x_release_alr(bp);
4471 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);
4474 if (deasserted & (1 << index)) {
4479 group_mask->
sig[0], group_mask->
sig[1],
4480 group_mask->
sig[2], group_mask->
sig[3],
4481 group_mask->
sig[4]);
4483 bnx2x_attn_int_deasserted4(bp,
4484 attn.sig[4] & group_mask->
sig[4]);
4485 bnx2x_attn_int_deasserted3(bp,
4486 attn.sig[3] & group_mask->
sig[3]);
4487 bnx2x_attn_int_deasserted1(bp,
4488 attn.sig[1] & group_mask->
sig[1]);
4489 bnx2x_attn_int_deasserted2(bp,
4490 attn.sig[2] & group_mask->
sig[2]);
4491 bnx2x_attn_int_deasserted0(bp,
4492 attn.sig[0] & group_mask->
sig[0]);
4496 bnx2x_release_alr(bp);
4507 REG_WR(bp, reg_addr, val);
4516 aeu_mask =
REG_RD(bp, reg_addr);
4519 aeu_mask, deasserted);
4520 aeu_mask |= (deasserted & 0x3ff);
4523 REG_WR(bp, reg_addr, aeu_mask);
4531 static void bnx2x_attn_int(
struct bnx2x *bp)
4541 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
4542 u32 deasserted = ~attn_bits & attn_ack & attn_state;
4545 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
4546 attn_bits, attn_ack, asserted, deasserted);
4548 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
4553 bnx2x_attn_int_asserted(bp, asserted);
4556 bnx2x_attn_int_deasserted(bp, deasserted);
4564 bnx2x_igu_ack_sb_gen(bp, igu_sb_id, segment, index, op, update,
4568 static void bnx2x_update_eq_prod(
struct bnx2x *bp,
u16 prod)
4571 storm_memset_eq_prod(bp, prod,
BP_FUNC(bp));
4576 static int bnx2x_cnic_handle_cfc_del(
struct bnx2x *bp,
u32 cid,
4581 if (!bp->cnic_eth_dev.starting_cid ||
4583 cid != bp->cnic_eth_dev.iscsi_l2_cid))
4590 BNX2X_ERR(
"got delete ramrod for CNIC CID %d with error!\n",
4594 bnx2x_cnic_cfc_comp(bp, cid, err);
4599 static void bnx2x_handle_mcast_eqe(
struct bnx2x *bp)
4604 memset(&rparam, 0,
sizeof(rparam));
4608 netif_addr_lock_bh(bp->
dev);
4617 BNX2X_ERR(
"Failed to send pending mcast commands: %d\n",
4621 netif_addr_unlock_bh(bp->
dev);
4624 static void bnx2x_handle_classification_eqe(
struct bnx2x *bp,
4640 vlan_mac_obj = &bp->iscsi_l2_mac_obj;
4651 bnx2x_handle_mcast_eqe(bp);
4654 BNX2X_ERR(
"Unsupported classification command: %d\n",
4655 elem->
message.data.eth_event.echo);
4659 rc = vlan_mac_obj->
complete(bp, vlan_mac_obj, elem, &ramrod_flags);
4662 BNX2X_ERR(
"Failed to schedule new commands: %d\n", rc);
4669 static void bnx2x_set_iscsi_eth_rx_mode(
struct bnx2x *bp,
bool start);
4672 static void bnx2x_handle_rx_mode_eqe(
struct bnx2x *bp)
4674 netif_addr_lock_bh(bp->
dev);
4684 bnx2x_set_iscsi_eth_rx_mode(bp,
true);
4687 bnx2x_set_iscsi_eth_rx_mode(bp,
false);
4690 netif_addr_unlock_bh(bp->
dev);
4693 static void bnx2x_after_afex_vif_lists(
struct bnx2x *bp,
4698 "afex: ramrod completed VIF LIST_GET, addrs 0x%x\n",
4699 elem->
message.data.vif_list_event.func_bit_map);
4701 elem->
message.data.vif_list_event.func_bit_map);
4702 }
else if (elem->
message.data.vif_list_event.echo ==
4710 static void bnx2x_after_function_update(
struct bnx2x *bp)
4746 BNX2X_ERR(
"Failed to config silent vlan rem for Q %d\n",
4766 BNX2X_ERR(
"Failed to config silent vlan rem for Q %d\n",
4792 static void bnx2x_eq_int(
struct bnx2x *bp)
4794 u16 hw_cons, sw_cons, sw_prod;
4820 DP(
BNX2X_MSG_SP,
"EQ: hw_cons %u sw_cons %u bp->eq_spq_left %x\n",
4823 for (; sw_cons != hw_cons;
4830 opcode = elem->
message.opcode;
4837 "got statistics comp event %d\n",
4849 "got delete ramrod for MULTI[%d]\n", cid);
4851 if (!bnx2x_cnic_handle_cfc_del(bp, cid, elem))
4854 q_obj = bnx2x_cid_to_q_obj(bp, cid);
4880 "AFEX: ramrod completed FUNCTION_UPDATE\n");
4898 bnx2x_after_afex_vif_lists(bp, elem);
4902 "got FUNC_START ramrod\n");
4910 "got FUNC_STOP ramrod\n");
4917 switch (opcode | bp->
state) {
4922 cid = elem->
message.data.eth_event.echo &
4940 bnx2x_handle_classification_eqe(bp, elem);
4950 bnx2x_handle_mcast_eqe(bp);
4960 bnx2x_handle_rx_mode_eqe(bp);
4964 BNX2X_ERR(
"Unknown EQ event %d, bp->state 0x%x\n",
4980 bnx2x_update_eq_prod(bp, bp->
eq_prod);
4988 status = bnx2x_update_dsb_idx(bp);
4992 DP(
BNX2X_MSG_SP,
"got a slowpath interrupt (status 0x%x)\n", status);
4997 status &= ~BNX2X_DEF_SB_ATT_IDX;
5006 (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
5022 status &= ~BNX2X_DEF_SB_IDX;
5043 struct bnx2x *bp = netdev_priv(dev);
5048 #ifdef BNX2X_STOP_ON_ERROR
5079 static void bnx2x_timer(
unsigned long data)
5083 if (!netif_running(bp->
dev))
5097 mcp_pulse = (
SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
5102 if ((drv_pulse != mcp_pulse) &&
5105 BNX2X_ERR(
"drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
5106 drv_pulse, mcp_pulse);
5127 if (!(len%4) && !(addr%4))
5128 for (i = 0; i < len; i += 4)
5129 REG_WR(bp, addr + i, fill);
5131 for (i = 0; i < len; i++)
5137 static void bnx2x_wr_fp_sb_data(
struct bnx2x *bp,
5143 for (index = 0; index <
data_size; index++)
5147 *(sb_data_p + index));
5150 static void bnx2x_zero_fp_sb(
struct bnx2x *bp,
int fw_sb_id)
5161 sb_data_e2.common.p_func.vf_valid =
false;
5162 sb_data_p = (
u32 *)&sb_data_e2;
5168 sb_data_e1x.common.p_func.vf_valid =
false;
5169 sb_data_p = (
u32 *)&sb_data_e1x;
5172 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
5183 static void bnx2x_wr_sp_sb_data(
struct bnx2x *bp,
5192 *((
u32 *)sp_sb_data + i));
5195 static void bnx2x_zero_sp_sb(
struct bnx2x *bp)
5202 sp_sb_data.
p_func.vf_valid =
false;
5204 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
5217 int igu_sb_id,
int igu_seg_id)
5219 hc_sm->igu_sb_id = igu_sb_id;
5220 hc_sm->igu_seg_id = igu_seg_id;
5221 hc_sm->timer_value = 0xFF;
5227 static void bnx2x_map_sb_state_machines(
struct hc_index_data *index_data)
5242 SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5246 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5248 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5250 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5252 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5255 static void bnx2x_init_sb(
struct bnx2x *bp,
dma_addr_t mapping,
int vfid,
5256 u8 vf_valid,
int fw_sb_id,
int igu_sb_id)
5271 bnx2x_zero_fp_sb(bp, fw_sb_id);
5276 sb_data_e2.common.p_func.pf_id =
BP_FUNC(bp);
5277 sb_data_e2.common.p_func.vf_id = vfid;
5278 sb_data_e2.common.p_func.vf_valid = vf_valid;
5279 sb_data_e2.common.p_func.vnic_id =
BP_VN(bp);
5280 sb_data_e2.common.same_igu_sb_1b =
true;
5281 sb_data_e2.common.host_sb_addr.hi =
U64_HI(mapping);
5282 sb_data_e2.common.host_sb_addr.lo =
U64_LO(mapping);
5283 hc_sm_p = sb_data_e2.common.state_machine;
5284 sb_data_p = (
u32 *)&sb_data_e2;
5286 bnx2x_map_sb_state_machines(sb_data_e2.index_data);
5291 sb_data_e1x.common.p_func.pf_id =
BP_FUNC(bp);
5292 sb_data_e1x.common.p_func.vf_id = 0xff;
5293 sb_data_e1x.common.p_func.vf_valid =
false;
5294 sb_data_e1x.common.p_func.vnic_id =
BP_VN(bp);
5295 sb_data_e1x.common.same_igu_sb_1b =
true;
5296 sb_data_e1x.common.host_sb_addr.hi =
U64_HI(mapping);
5297 sb_data_e1x.common.host_sb_addr.lo =
U64_LO(mapping);
5298 hc_sm_p = sb_data_e1x.common.state_machine;
5299 sb_data_p = (
u32 *)&sb_data_e1x;
5301 bnx2x_map_sb_state_machines(sb_data_e1x.index_data);
5304 bnx2x_setup_ndsb_state_machine(&hc_sm_p[
SM_RX_ID],
5305 igu_sb_id, igu_seg_id);
5306 bnx2x_setup_ndsb_state_machine(&hc_sm_p[
SM_TX_ID],
5307 igu_sb_id, igu_seg_id);
5312 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
5315 static void bnx2x_update_coalesce_sb(
struct bnx2x *bp,
u8 fw_sb_id,
5316 u16 tx_usec,
u16 rx_usec)
5331 static void bnx2x_init_def_sb(
struct bnx2x *bp)
5335 int igu_sp_sb_index;
5339 int reg_offset, reg_offset_en5;
5355 atten_status_block);
5367 for (sindex = 0; sindex < 4; sindex++)
5369 REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);
5378 reg_offset_en5 + 0x4*index);
5397 bnx2x_zero_sp_sb(bp);
5402 sp_sb_data.igu_sb_id = igu_sp_sb_index;
5403 sp_sb_data.igu_seg_id = igu_seg_id;
5406 sp_sb_data.
p_func.vf_id = 0xff;
5408 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
5418 bnx2x_update_coalesce_sb(bp, bp->
fp[i].fw_sb_id,
5422 static void bnx2x_init_sp_ring(
struct bnx2x *bp)
5433 static void bnx2x_init_eq_ring(
struct bnx2x *bp)
5458 unsigned long rx_mode_flags,
5459 unsigned long rx_accept_flags,
5460 unsigned long tx_accept_flags,
5461 unsigned long ramrod_flags)
5466 memset(&ramrod_param, 0,
sizeof(ramrod_param));
5469 ramrod_param.
cid = 0;
5574 static void bnx2x_init_internal_common(
struct bnx2x *bp)
5592 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
5602 static void bnx2x_init_internal(
struct bnx2x *bp,
u32 load_code)
5604 switch (load_code) {
5607 bnx2x_init_internal_common(bp);
5620 BNX2X_ERR(
"Unknown load_code (0x%x) from MCP\n", load_code);
5640 return bnx2x_fp_igu_sb_id(fp);
5643 static void bnx2x_init_eth_fp(
struct bnx2x *bp,
int fp_idx)
5647 unsigned long q_type = 0;
5651 fp->
cl_id = bnx2x_fp_cl_id(fp);
5652 fp->
fw_sb_id = bnx2x_fp_fw_sb_id(fp);
5687 DP(
NETIF_MSG_IFUP,
"queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n",
5693 bnx2x_update_fpsb_idx(fp);
5723 static void bnx2x_init_tx_rings(
struct bnx2x *bp)
5730 bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[cos]);
5738 bnx2x_init_eth_fp(bp, i);
5741 bnx2x_init_fcoe_fp(bp);
5743 bnx2x_init_sb(bp, bp->cnic_sb_mapping,
5745 bnx2x_cnic_fw_sb_id(bp), bnx2x_cnic_igu_sb_id(bp));
5751 bp->common.shmem_base, bp->common.shmem2_base,
5756 bnx2x_init_def_sb(bp);
5757 bnx2x_update_dsb_idx(bp);
5759 bnx2x_init_tx_rings(bp);
5760 bnx2x_init_sp_ring(bp);
5761 bnx2x_init_eq_ring(bp);
5762 bnx2x_init_internal(bp, load_code);
5773 bnx2x_attn_int_deasserted0(bp,
5775 AEU_INPUTS_ATTN_BITS_SPIO5);
5784 static int bnx2x_gunzip_init(
struct bnx2x *bp)
5811 BNX2X_ERR(
"Cannot allocate firmware buffer for un-compression\n");
5815 static void bnx2x_gunzip_end(
struct bnx2x *bp)
5830 static int bnx2x_gunzip(
struct bnx2x *bp,
const u8 *zbuf,
int len)
5835 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] !=
Z_DEFLATED)) {
5844 if (zbuf[3] & FNAME)
5845 while ((zbuf[n++] != 0) && (n < len));
5848 bp->
strm->avail_in = len -
n;
5858 netdev_err(bp->
dev,
"Firmware decompression error: %s\n",
5864 "Firmware decompression error: gunzip_outlen (%d) not aligned\n",
5883 static void bnx2x_lb_pckt(
struct bnx2x *bp)
5888 wb_write[0] = 0x55555555;
5889 wb_write[1] = 0x55555555;
5894 wb_write[0] = 0x09000000;
5895 wb_write[1] = 0x55555555;
5904 static int bnx2x_int_mem_test(
struct bnx2x *bp)
5931 count = 1000 * factor;
5943 BNX2X_ERR(
"NIG timeout val = 0x%x\n", val);
5948 count = 1000 * factor;
5958 BNX2X_ERR(
"PRS timeout val = 0x%x\n", val);
5982 for (i = 0; i < 10; i++)
5987 count = 1000 * factor;
5999 BNX2X_ERR(
"NIG timeout val = 0x%x\n", val);
6006 BNX2X_ERR(
"PRS timeout val = 0x%x\n", val);
6016 BNX2X_ERR(
"PRS timeout val = 0x%x\n", val);
6019 for (i = 0; i < 11; i++)
6050 static void bnx2x_enable_blocks_attention(
struct bnx2x *bp)
6111 static void bnx2x_reset_common(
struct bnx2x *bp)
6127 static void bnx2x_setup_dmae(
struct bnx2x *bp)
6133 static void bnx2x_init_pxp(
struct bnx2x *bp)
6136 int r_order, w_order;
6148 bnx2x_init_pxp_arb(bp, r_order, w_order);
6151 static void bnx2x_setup_fan_failure_detection(
struct bnx2x *bp)
6184 if (is_required == 0)
6203 static void bnx2x_pretend_func(
struct bnx2x *bp,
u8 pretend_func_num)
6241 REG_WR(bp, offset, pretend_func_num);
6256 static void bnx2x__common_init_phy(
struct bnx2x *bp)
6258 u32 shmem_base[2], shmem2_base[2];
6259 shmem_base[0] = bp->
common.shmem_base;
6260 shmem2_base[0] = bp->
common.shmem2_base;
6278 static int bnx2x_init_hw_common(
struct bnx2x *bp)
6290 bnx2x_reset_common(bp);
6314 for (abs_func_id =
BP_PATH(bp);
6323 bnx2x_pretend_func(bp, abs_func_id);
6464 bnx2x_pretend_func(bp, (
BP_PATH(bp) + 6));
6465 bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli,
INITOP_CLEAR);
6488 }
while (factor-- && (val != 1));
6630 dev_alert(&bp->
pdev->dev,
6631 "please adjust the size of cdu_context(%ld)\n",
6635 val = (4 << 24) + (0 << 12) + 1024;
6655 REG_WR(bp, 0x2814, 0xffffffff);
6656 REG_WR(bp, 0x3820, 0xffffffff);
6710 if ((val == 0) && bnx2x_int_mem_test(bp)) {
6711 BNX2X_ERR(
"internal mem self test failed\n");
6716 bnx2x_setup_fan_failure_detection(bp);
6721 bnx2x_enable_blocks_attention(bp);
6722 bnx2x_enable_blocks_parity(bp);
6726 bnx2x__common_init_phy(bp);
6728 BNX2X_ERR(
"Bootcode is missing - can not initialize link\n");
6738 static int bnx2x_init_hw_common_chip(
struct bnx2x *bp)
6740 int rc = bnx2x_init_hw_common(bp);
6747 bnx2x__common_init_phy(bp);
6752 static int bnx2x_init_hw_port(
struct bnx2x *bp)
6764 bnx2x_init_block(bp,
BLOCK_MISC, init_phase);
6765 bnx2x_init_block(bp,
BLOCK_PXP, init_phase);
6766 bnx2x_init_block(bp,
BLOCK_PXP2, init_phase);
6776 bnx2x_init_block(bp,
BLOCK_ATC, init_phase);
6777 bnx2x_init_block(bp,
BLOCK_DMAE, init_phase);
6779 bnx2x_init_block(bp,
BLOCK_QM, init_phase);
6781 bnx2x_init_block(bp,
BLOCK_TCM, init_phase);
6782 bnx2x_init_block(bp,
BLOCK_UCM, init_phase);
6783 bnx2x_init_block(bp,
BLOCK_CCM, init_phase);
6784 bnx2x_init_block(bp,
BLOCK_XCM, init_phase);
6790 bnx2x_init_block(bp,
BLOCK_TM, init_phase);
6795 bnx2x_init_block(bp,
BLOCK_DORQ, init_phase);
6797 bnx2x_init_block(bp,
BLOCK_BRB1, init_phase);
6803 else if (bp->
dev->mtu > 4096) {
6809 low = 96 + (val/64) +
6810 ((val % 64) ? 1 : 0);
6825 bnx2x_init_block(bp,
BLOCK_PRS, init_phase);
6850 bnx2x_init_block(bp,
BLOCK_TSDM, init_phase);
6851 bnx2x_init_block(bp,
BLOCK_CSDM, init_phase);
6852 bnx2x_init_block(bp,
BLOCK_USDM, init_phase);
6853 bnx2x_init_block(bp,
BLOCK_XSDM, init_phase);
6855 bnx2x_init_block(bp,
BLOCK_TSEM, init_phase);
6856 bnx2x_init_block(bp,
BLOCK_USEM, init_phase);
6857 bnx2x_init_block(bp,
BLOCK_CSEM, init_phase);
6858 bnx2x_init_block(bp,
BLOCK_XSEM, init_phase);
6860 bnx2x_init_block(bp,
BLOCK_UPB, init_phase);
6861 bnx2x_init_block(bp,
BLOCK_XPB, init_phase);
6863 bnx2x_init_block(bp,
BLOCK_PBF, init_phase);
6872 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
6881 bnx2x_init_block(bp,
BLOCK_SRC, init_phase);
6883 bnx2x_init_block(bp,
BLOCK_CDU, init_phase);
6884 bnx2x_init_block(bp,
BLOCK_CFC, init_phase);
6890 bnx2x_init_block(bp,
BLOCK_HC, init_phase);
6892 bnx2x_init_block(bp,
BLOCK_IGU, init_phase);
6899 val =
IS_MF(bp) ? 0xF7 : 0x7;
6904 bnx2x_init_block(bp,
BLOCK_NIG, init_phase);
6961 val =
REG_RD(bp, reg_addr);
6963 REG_WR(bp, reg_addr, val);
6984 static void bnx2x_igu_clear_sb_gen(
struct bnx2x *bp,
u8 func,
6985 u8 idu_sb_id,
bool is_Pf)
6991 u32 sb_bit = 1 << (idu_sb_id%32);
7009 data, igu_addr_data);
7010 REG_WR(bp, igu_addr_data, data);
7015 REG_WR(bp, igu_addr_ctl, ctl);
7020 while (!(
REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt)
7024 if (!(
REG_RD(bp, igu_addr_ack) & sb_bit)) {
7026 "Unable to finish IGU cleanup: idu_sb_id %d offset %d bit %d (cnt %d)\n",
7027 idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt);
7031 static void bnx2x_igu_clear_sb(
struct bnx2x *bp,
u8 idu_sb_id)
7033 bnx2x_igu_clear_sb_gen(bp,
BP_FUNC(bp), idu_sb_id,
true );
7036 static void bnx2x_clear_func_ilt(
struct bnx2x *bp,
u32 func)
7040 bnx2x_ilt_wr(bp, i, 0);
7043 static int bnx2x_init_hw_func(
struct bnx2x *bp)
7051 u32 main_mem_base, main_mem_size, main_mem_prty_clr;
7052 int i, main_mem_width,
rc;
7058 rc = bnx2x_pf_flr_clnup(bp);
7071 bnx2x_init_block(bp,
BLOCK_PXP, init_phase);
7072 bnx2x_init_block(bp,
BLOCK_PXP2, init_phase);
7079 ilt->
lines[cdu_ilt_start +
i].page_mapping =
7086 bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);
7129 bnx2x_init_block(bp,
BLOCK_ATC, init_phase);
7130 bnx2x_init_block(bp,
BLOCK_DMAE, init_phase);
7131 bnx2x_init_block(bp,
BLOCK_NIG, init_phase);
7132 bnx2x_init_block(bp,
BLOCK_SRC, init_phase);
7133 bnx2x_init_block(bp,
BLOCK_MISC, init_phase);
7134 bnx2x_init_block(bp,
BLOCK_TCM, init_phase);
7135 bnx2x_init_block(bp,
BLOCK_UCM, init_phase);
7136 bnx2x_init_block(bp,
BLOCK_CCM, init_phase);
7137 bnx2x_init_block(bp,
BLOCK_XCM, init_phase);
7138 bnx2x_init_block(bp,
BLOCK_TSEM, init_phase);
7139 bnx2x_init_block(bp,
BLOCK_USEM, init_phase);
7140 bnx2x_init_block(bp,
BLOCK_CSEM, init_phase);
7141 bnx2x_init_block(bp,
BLOCK_XSEM, init_phase);
7152 bnx2x_init_block(bp,
BLOCK_QM, init_phase);
7154 bnx2x_init_block(bp,
BLOCK_TM, init_phase);
7155 bnx2x_init_block(bp,
BLOCK_DORQ, init_phase);
7156 bnx2x_init_block(bp,
BLOCK_BRB1, init_phase);
7157 bnx2x_init_block(bp,
BLOCK_PRS, init_phase);
7158 bnx2x_init_block(bp,
BLOCK_TSDM, init_phase);
7159 bnx2x_init_block(bp,
BLOCK_CSDM, init_phase);
7160 bnx2x_init_block(bp,
BLOCK_USDM, init_phase);
7161 bnx2x_init_block(bp,
BLOCK_XSDM, init_phase);
7162 bnx2x_init_block(bp,
BLOCK_UPB, init_phase);
7163 bnx2x_init_block(bp,
BLOCK_XPB, init_phase);
7164 bnx2x_init_block(bp,
BLOCK_PBF, init_phase);
7168 bnx2x_init_block(bp,
BLOCK_CDU, init_phase);
7170 bnx2x_init_block(bp,
BLOCK_CFC, init_phase);
7190 bnx2x_init_block(bp,
BLOCK_HC, init_phase);
7193 int num_segs, sb_idx, prod_offset;
7202 bnx2x_init_block(bp,
BLOCK_IGU, init_phase);
7229 for (sb_idx = 0; sb_idx < bp->
igu_sb_cnt; sb_idx++) {
7233 for (i = 0; i < num_segs; i++) {
7235 (prod_offset +
i) * 4;
7241 bnx2x_igu_clear_sb(bp,
7252 dsb_idx =
BP_VN(bp);
7265 (prod_offset +
i)*4;
7300 REG_WR(bp, 0x2114, 0xffffffff);
7301 REG_WR(bp, 0x2120, 0xffffffff);
7306 BP_PORT(bp) * (main_mem_size * 4);
7310 val =
REG_RD(bp, main_mem_prty_clr);
7313 "Hmmm... Parity errors in HC block during function init (0x%x)!\n",
7317 for (i = main_mem_base;
7318 i < main_mem_base + main_mem_size * 4;
7319 i += main_mem_width) {
7322 i, main_mem_width / 4);
7325 REG_RD(bp, main_mem_prty_clr);
7328 #ifdef BNX2X_STOP_ON_ERROR
7387 static int bnx2x_alloc_fw_stats_mem(
struct bnx2x *bp)
7390 int is_fcoe_stats =
NO_FCOE(bp) ? 0 : 1;
7399 bp->
fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
7451 int i, allocated, context_size;
7475 bp->cnic_eth_dev.addr_drv_info_to_mcp = &bp->
slowpath->drv_info_to_mcp;
7479 if (bnx2x_alloc_fw_stats_mem(bp))
7497 for (i = 0, allocated = 0; allocated < context_size; i++) {
7499 (context_size - allocated));
7538 int mac_type,
unsigned long *ramrod_flags)
7543 memset(&ramrod_param, 0,
sizeof(ramrod_param));
7569 BNX2X_ERR(
"%s MAC failed\n", (
set ?
"Set" :
"Del"));
7576 int mac_type,
bool wait_for_comp)
7579 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
7588 rc = mac_obj->
delete_all(bp, mac_obj, &vlan_mac_flags, &ramrod_flags);
7590 BNX2X_ERR(
"Failed to delete MACs: %d\n", rc);
7597 unsigned long ramrod_flags = 0;
7600 if (is_zero_ether_addr(bp->
dev->dev_addr) &&
7603 "Ignoring Zero MAC for STORAGE SD mode\n");
7646 BNX2X_DEV_INFO(
"Failed to enable multiple MSI-X (%d), set number of queues to %d\n",
7661 static inline u16 bnx2x_cid_ilt_lines(
struct bnx2x *bp)
7680 ilt_client->
start = line;
7681 line += bnx2x_cid_ilt_lines(bp);
7683 line += CNIC_ILT_LINES;
7685 ilt_client->
end = line - 1;
7687 DP(
NETIF_MSG_IFUP,
"ilt client[CDU]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
7699 ilt_client->
flags = 0;
7700 ilt_client->
start = line;
7706 ilt_client->
end = line - 1;
7709 "ilt client[QM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
7721 ilt_client->
page_size = SRC_ILT_PAGE_SZ;
7722 ilt_client->
flags = 0;
7723 ilt_client->
start = line;
7724 line += SRC_ILT_LINES;
7725 ilt_client->
end = line - 1;
7728 "ilt client[SRC]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
7744 ilt_client->
flags = 0;
7745 ilt_client->
start = line;
7746 line += TM_ILT_LINES;
7747 ilt_client->
end = line - 1;
7750 "ilt client[TM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
7774 static void bnx2x_pf_q_prep_init(
struct bnx2x *bp,
7779 int cxt_index, cxt_offset;
7799 init_params->
rx.fw_sb_id = init_params->
tx.fw_sb_id =
7819 cxt_offset = fp->
txdata_ptr[cos]->cid - (cxt_index *
7821 init_params->
cxts[cos] =
7822 &bp->
context[cxt_index].vcxt[cxt_offset].eth;
7829 int tx_index,
bool leading)
7831 memset(tx_only_params, 0,
sizeof(*tx_only_params));
7837 tx_only_params->
flags = bnx2x_get_common_flags(bp, fp,
false);
7843 bnx2x_pf_q_prep_general(bp, fp, &tx_only_params->
gen_params, tx_index);
7846 bnx2x_pf_tx_q_prep(bp, fp, &tx_only_params->
txq_params, tx_index);
7849 "preparing to send tx-only ramrod for connection: cos %d, primary cid %d, cid %d, client id %d, sp-client id %d, flags %lx\n",
7851 q_params->
q_obj->cids[tx_index], q_params->
q_obj->cl_id,
7893 bnx2x_pf_q_prep_init(bp, fp, &q_params.
params.
init);
7909 memset(setup_params, 0,
sizeof(*setup_params));
7912 setup_params->
flags = bnx2x_get_q_flags(bp, fp, leading);
7915 bnx2x_pf_q_prep_general(bp, fp, &setup_params->
gen_params,
7918 bnx2x_pf_rx_q_prep(bp, fp, &setup_params->
pause_params,
7921 bnx2x_pf_tx_q_prep(bp, fp, &setup_params->
txq_params,
7941 tx_only_params, tx_index, leading);
7943 BNX2X_ERR(
"Queue(%d.%d) TX_ONLY_SETUP failed\n",
7944 fp->
index, tx_index);
7952 static int bnx2x_stop_queue(
struct bnx2x *bp,
int index)
8020 static void bnx2x_reset_func(
struct bnx2x *bp)
8071 for (i = 0; i < 200; i++) {
8078 bnx2x_clear_func_ilt(bp, func);
8091 bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0,
INITOP_CLEAR);
8101 static void bnx2x_reset_port(
struct bnx2x *bp)
8107 bnx2x__link_reset(bp);
8125 "BRB1 is not empty %d blocks are occupied\n", val);
8130 static int bnx2x_reset_hw(
struct bnx2x *bp,
u32 load_code)
8145 static int bnx2x_func_stop(
struct bnx2x *bp)
8163 #ifdef BNX2X_STOP_ON_ERROR
8166 BNX2X_ERR(
"FUNC_STOP ramrod failed. Running a dry transaction\n");
8206 val = (mac_addr[0] << 8) | mac_addr[1];
8209 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
8210 (mac_addr[4] << 8) | mac_addr[5];
8263 static int bnx2x_func_wait_started(
struct bnx2x *bp)
8299 #ifdef BNX2X_STOP_ON_ERROR
8310 "Hmmm... unexpected function state! Forcing STARTED-->TX_ST0PPED-->STARTED\n");
8342 rc = bnx2x_clean_tx_queue(bp, fp->
txdata_ptr[cos]);
8343 #ifdef BNX2X_STOP_ON_ERROR
8356 BNX2X_ERR(
"Failed to delete all ETH macs: %d\n", rc);
8362 BNX2X_ERR(
"Failed to schedule DEL commands for UC MACs list: %d\n",
8373 netif_addr_lock_bh(bp->
dev);
8384 BNX2X_ERR(
"Failed to send DEL multicast command: %d\n", rc);
8386 netif_addr_unlock_bh(bp->
dev);
8401 rc = bnx2x_func_wait_started(bp);
8403 BNX2X_ERR(
"bnx2x_func_wait_started failed\n");
8404 #ifdef BNX2X_STOP_ON_ERROR
8413 if (bnx2x_stop_queue(bp, i))
8414 #ifdef BNX2X_STOP_ON_ERROR
8422 if (!bnx2x_wait_sp_comp(bp, ~0x0UL))
8423 BNX2X_ERR(
"Hmmm... Common slow path ramrods got stuck!\n");
8425 #ifndef BNX2X_STOP_ON_ERROR
8428 rc = bnx2x_func_stop(bp);
8431 #ifdef BNX2X_STOP_ON_ERROR
8439 bnx2x_del_all_napi(bp);
8445 rc = bnx2x_reset_hw(bp, reset_code);
8477 static void bnx2x_set_234_gates(
struct bnx2x *bp,
bool close)
8512 close ?
"closing" :
"opening");
8516 #define SHARED_MF_CLP_MAGIC 0x80000000
8518 static void bnx2x_clp_reset_prep(
struct bnx2x *bp,
u32 *magic_val)
8523 MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
8532 static void bnx2x_clp_reset_done(
struct bnx2x *bp,
u32 magic_val)
8548 static void bnx2x_reset_mcp_prep(
struct bnx2x *bp,
u32 *magic_val)
8551 u32 validity_offset;
8557 bnx2x_clp_reset_prep(bp, magic_val);
8565 REG_WR(bp, shmem + validity_offset, 0);
8568 #define MCP_TIMEOUT 5000
8569 #define MCP_ONE_TIMEOUT 100
8576 static void bnx2x_mcp_wait_one(
struct bnx2x *bp)
8589 static int bnx2x_init_shmem(
struct bnx2x *bp)
8596 if (bp->
common.shmem_base) {
8602 bnx2x_mcp_wait_one(bp);
8606 BNX2X_ERR(
"BAD MCP validity signature\n");
8611 static int bnx2x_reset_mcp_comp(
struct bnx2x *bp,
u32 magic_val)
8613 int rc = bnx2x_init_shmem(bp);
8617 bnx2x_clp_reset_done(bp, magic_val);
8622 static void bnx2x_pxp_prep(
struct bnx2x *bp)
8641 static void bnx2x_process_kill_chip_reset(
struct bnx2x *bp,
bool global)
8643 u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
8644 u32 global_bits2, stay_reset2;
8687 reset_mask1 = 0xffffffff;
8690 reset_mask2 = 0xffff;
8692 reset_mask2 = 0x1ffff;
8694 reset_mask2 = 0xfffff;
8696 reset_mask2 = 0x3ffffff;
8700 reset_mask2 &= ~global_bits2;
8717 reset_mask2 & (~not_reset_mask2));
8720 reset_mask1 & (~not_reset_mask1));
8726 reset_mask2 & (~stay_reset2));
8744 static int bnx2x_er_poll_igu_vq(
struct bnx2x *bp)
8756 }
while (cnt-- > 0);
8759 BNX2X_ERR(
"Still pending IGU requests pend_bits=%x!\n",
8767 static int bnx2x_process_kill(
struct bnx2x *bp,
bool global)
8771 u32 sr_cnt,
blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
8781 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
8782 ((port_is_idle_0 & 0x1) == 0x1) &&
8783 ((port_is_idle_1 & 0x1) == 0x1) &&
8784 (pgl_exp_rom2 == 0xffffffff))
8787 }
while (cnt-- > 0);
8790 BNX2X_ERR(
"Tetris buffer didn't get empty or there are still outstanding read requests after 1s!\n");
8791 BNX2X_ERR(
"sr_cnt=0x%08x, blk_cnt=0x%08x, port_is_idle_0=0x%08x, port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
8792 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
8800 bnx2x_set_234_gates(bp,
true);
8824 bnx2x_reset_mcp_prep(bp, &val);
8831 bnx2x_process_kill_chip_reset(bp, global);
8836 if (global && bnx2x_reset_mcp_comp(bp, val))
8845 bnx2x_set_234_gates(bp,
false);
8856 bool global = bnx2x_reset_is_global(bp);
8866 BNX2X_ERR(
"MCP response failure, aborting\n");
8868 goto exit_leader_reset;
8872 BNX2X_ERR(
"MCP unexpected resp, aborting\n");
8874 goto exit_leader_reset2;
8878 BNX2X_ERR(
"MCP response failure, aborting\n");
8880 goto exit_leader_reset2;
8885 if (bnx2x_process_kill(bp, global)) {
8886 BNX2X_ERR(
"Something bad had happen on engine %d! Aii!\n",
8889 goto exit_leader_reset2;
8896 bnx2x_set_reset_done(bp);
8898 bnx2x_clear_reset_global(bp);
8913 static void bnx2x_recovery_failed(
struct bnx2x *bp)
8915 netdev_err(bp->
dev,
"Recovery has failed. Power cycle is needed.\n");
8939 static void bnx2x_parity_recover(
struct bnx2x *bp)
8941 bool global =
false;
8942 u32 error_recovered, error_unrecovered;
8954 if (bnx2x_trylock_leader_lock(bp)) {
8985 int other_engine =
BP_PATH(bp) ? 0 : 1;
8986 bool other_load_status =
8987 bnx2x_get_load_status(bp, other_engine);
8989 bnx2x_get_load_status(bp,
BP_PATH(bp));
8990 global = bnx2x_reset_is_global(bp);
9001 (global && other_load_status)) {
9015 bnx2x_recovery_failed(bp);
9034 if (bnx2x_trylock_leader_lock(bp)) {
9051 if (bnx2x_reset_is_global(bp)) {
9065 error_unrecovered++;
9067 "Recovery failed. Power cycle needed\n");
9099 static void bnx2x_sp_rtnl_task(
struct work_struct *work)
9105 if (!netif_running(bp->
dev))
9109 #ifdef BNX2X_STOP_ON_ERROR
9110 BNX2X_ERR(
"recovery flow called but STOP_ON_ERROR defined so reset not done to allow debug dump,\n"
9111 "you will need to reboot when done\n");
9112 goto sp_rtnl_not_reset;
9123 bnx2x_parity_recover(bp);
9141 #ifdef BNX2X_STOP_ON_ERROR
9147 bnx2x_after_function_update(bp);
9156 bnx2x_close(bp->
dev);
9165 static void bnx2x_period_task(
struct work_struct *work)
9169 if (!netif_running(bp->
dev))
9170 goto period_task_exit;
9173 BNX2X_ERR(
"period task called on emulation, ignoring\n");
9174 goto period_task_exit;
9200 static u32 bnx2x_get_pretend_reg(
struct bnx2x *bp)
9207 static void bnx2x_undi_int_disable_e1h(
struct bnx2x *bp)
9209 u32 reg = bnx2x_get_pretend_reg(bp);
9229 static inline void bnx2x_undi_int_disable(
struct bnx2x *bp)
9234 bnx2x_undi_int_disable_e1h(bp);
9237 static void __devinit bnx2x_prev_unload_close_mac(
struct bnx2x *bp)
9240 bool mac_stopped =
false;
9248 if ((mask & reset_reg) && val) {
9262 wb_data[0] =
REG_RD(bp, base_addr + offset);
9263 wb_data[1] =
REG_RD(bp, base_addr + offset + 0x4);
9265 REG_WR(bp, base_addr + offset, wb_data[0]);
9266 REG_WR(bp, base_addr + offset + 0x4, wb_data[1]);
9286 if (mask & reset_reg) {
9299 #define BNX2X_PREV_UNDI_PROD_ADDR(p) (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4))
9300 #define BNX2X_PREV_UNDI_RCQ(val) ((val) & 0xffff)
9301 #define BNX2X_PREV_UNDI_BD(val) ((val) >> 16 & 0xffff)
9302 #define BNX2X_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq))
9304 static void __devinit bnx2x_prev_unload_undi_inc(
struct bnx2x *bp,
u8 port,
9316 BNX2X_DEV_INFO(
"UNDI producer [%d] rings bd -> 0x%04x, rcq -> 0x%04x\n",
9325 BNX2X_ERR(
"MCP response failure, aborting\n");
9332 static bool __devinit bnx2x_prev_is_path_marked(
struct bnx2x *bp)
9342 bp->
pdev->bus->number == tmp_list->
bus &&
9345 BNX2X_DEV_INFO(
"Path %d was already cleaned from previous drivers\n",
9351 up(&bnx2x_prev_sem);
9363 BNX2X_ERR(
"Failed to allocate 'bnx2x_prev_path_list'\n");
9367 tmp_list->
bus = bp->
pdev->bus->number;
9373 BNX2X_ERR(
"Received %d when tried to take lock\n", rc);
9378 list_add(&tmp_list->
list, &bnx2x_prev_list);
9379 up(&bnx2x_prev_sem);
9399 BNX2X_ERR(
"FLR not supported by BC_VER: 0x%x\n",
9405 for (i = 0; i < 4; i++) {
9407 msleep((1 << (i - 1)) * 100);
9415 "transaction is not cleared; proceeding with reset anyway\n");
9425 static int __devinit bnx2x_prev_unload_uncommon(
struct bnx2x *bp)
9432 if (bnx2x_prev_is_path_marked(bp))
9433 return bnx2x_prev_mcp_done(bp);
9444 rc = bnx2x_do_flr(bp);
9456 rc = bnx2x_prev_mcp_done(bp);
9472 if (bnx2x_prev_is_path_marked(bp))
9473 return bnx2x_prev_mcp_done(bp);
9479 u32 timer_count = 1000;
9480 bool prev_undi =
false;
9483 bnx2x_prev_unload_close_mac(bp);
9491 if (tmp_reg == 0x7) {
9500 while (timer_count) {
9501 u32 prev_brb = tmp_reg;
9510 if (prev_brb > tmp_reg)
9517 bnx2x_prev_unload_undi_inc(bp,
BP_PORT(bp), 1);
9523 BNX2X_ERR(
"Failed to empty BRB, hope for the best\n");
9528 bnx2x_reset_common(bp);
9530 rc = bnx2x_prev_mark_path(bp);
9532 bnx2x_prev_mcp_done(bp);
9536 return bnx2x_prev_mcp_done(bp);
9546 static void __devinit bnx2x_prev_interrupted_dmae(
struct bnx2x *bp)
9550 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) {
9551 BNX2X_ERR(
"was error bit was found to be set in pglueb upon startup. Clearing");
9560 int time_counter = 10;
9561 u32 rc,
fw, hw_lock_reg, hw_lock_val;
9567 bnx2x_prev_interrupted_dmae(bp);
9570 hw_lock_reg = (
BP_FUNC(bp) <= 5) ?
9574 hw_lock_val = (
REG_RD(bp, hw_lock_reg));
9583 REG_WR(bp, hw_lock_reg, 0xffffffff);
9597 BNX2X_ERR(
"MCP response failure, aborting\n");
9603 rc = bnx2x_prev_unload_common(bp);
9608 rc = bnx2x_prev_unload_uncommon(bp);
9613 }
while (--time_counter);
9615 if (!time_counter || rc) {
9616 BNX2X_ERR(
"Failed unloading previous driver, aborting\n");
9627 u32 val, val2, val3, val4,
id, boot_mode;
9633 id = ((val & 0xffff) << 16);
9635 id |= ((val & 0xf) << 12);
9637 id |= ((val & 0xff) << 4);
9646 (bp->
common.chip_id & 0x0000FFFF);
9649 (bp->
common.chip_id & 0x0000FFFF);
9650 bp->
common.chip_id |= 0x1;
9661 val = (val >> 1) & 1;
9681 val = (
REG_RD(bp, 0x2874) & 0x55);
9682 if ((bp->
common.chip_id & 0x1) ||
9694 bnx2x_init_shmem(bp);
9707 if (!bp->
common.shmem_base) {
9735 BNX2X_ERR(
"This driver needs bc_ver %X but found %X, please upgrade BC\n",
9762 switch (boot_mode) {
9789 val, val2, val3, val4);
9792 #define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
9793 #define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
9800 u8 fid, igu_sb_cnt = 0;
9836 #ifdef CONFIG_PCI_MSI
9846 if (igu_sb_cnt == 0)
9850 static void __devinit bnx2x_link_settings_supported(
struct bnx2x *bp,
9856 bp->
port.supported[0] = 0;
9857 bp->
port.supported[1] = 0;
9870 bp->
port.supported[1] =
9872 bp->
port.supported[0] =
9875 bp->
port.supported[0] =
9877 bp->
port.supported[1] =
9884 if (!(bp->
port.supported[0] || bp->
port.supported[1])) {
9885 BNX2X_ERR(
"NVRAM config error. BAD phy config. PHY1 config 0x%x, PHY2 config 0x%x\n",
9887 dev_info.port_hw_config[port].external_phy_config),
9889 dev_info.port_hw_config[port].external_phy_config2));
9896 switch (switch_cfg) {
9906 BNX2X_ERR(
"BAD switch_cfg link_config 0x%x\n",
9907 bp->
port.link_config[0]);
9946 bp->
port.supported[1]);
9949 static void __devinit bnx2x_link_settings_requested(
struct bnx2x *bp)
9952 bp->
port.advertising[0] = 0;
9953 bp->
port.advertising[1] = 0;
9963 for (idx = 0; idx <
cfg_size; idx++) {
9965 link_config = bp->
port.link_config[
idx];
9997 BNX2X_ERR(
"NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
10014 BNX2X_ERR(
"NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
10022 if (bp->
port.supported[idx] &
10030 BNX2X_ERR(
"NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
10038 if (bp->
port.supported[idx] &
10048 BNX2X_ERR(
"NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
10056 if (bp->
port.supported[idx] &
10064 BNX2X_ERR(
"NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
10072 if (bp->
port.supported[idx] &
10080 BNX2X_ERR(
"NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
10088 if (bp->
port.supported[idx] &
10096 BNX2X_ERR(
"NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
10107 BNX2X_ERR(
"NVRAM config error. BAD link speed link_config 0x%x\n",
10125 BNX2X_DEV_INFO(
"req_line_speed %d req_duplex %d req_flow_ctrl 0x%x advertising 0x%x\n",
10129 bp->
port.advertising[idx]);
10137 memcpy(mac_buf, &mac_hi,
sizeof(mac_hi));
10138 memcpy(mac_buf +
sizeof(mac_hi), &mac_lo,
sizeof(mac_lo));
10145 u32 ext_phy_type, ext_phy_config, eee_mode;
10155 dev_info.port_hw_config[port].speed_capability_mask);
10158 dev_info.port_hw_config[port].speed_capability_mask2);
10159 bp->
port.link_config[0] =
10162 bp->
port.link_config[1] =
10174 BNX2X_DEV_INFO(
"lane_config 0x%08x speed_cap_mask0 0x%08x link_config0 0x%08x\n",
10177 bp->
port.link_config[0]);
10182 bnx2x_link_settings_supported(bp, bp->
link_params.switch_cfg);
10184 bnx2x_link_settings_requested(bp);
10192 dev_info.port_hw_config[port].external_phy_config);
10195 bp->
mdio.prtad = bp->
port.phy_addr;
10207 bp->
port.need_hw_lock = 1;
10211 bp->
common.shmem2_base);
10215 port_feature_config[port].eee_power_mode)) &
10234 drv_lic_key[port].max_iscsi_conn);
10237 bp->cnic_eth_dev.max_iscsi_conn =
10242 bp->cnic_eth_dev.max_iscsi_conn);
10248 if (!bp->cnic_eth_dev.max_iscsi_conn)
10249 bp->
flags |= no_flags;
10251 bp->
flags |= no_flags;
10256 static void __devinit bnx2x_get_ext_wwn_info(
struct bnx2x *bp,
int func)
10259 bp->cnic_eth_dev.fcoe_wwn_port_name_hi =
10260 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_port_name_upper);
10261 bp->cnic_eth_dev.fcoe_wwn_port_name_lo =
10262 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_port_name_lower);
10265 bp->cnic_eth_dev.fcoe_wwn_node_name_hi =
10266 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_upper);
10267 bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
10268 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_lower);
10278 drv_lic_key[port].max_fcoe_conn);
10281 bp->cnic_eth_dev.max_fcoe_conn =
10288 bp->cnic_eth_dev.fcoe_wwn_port_name_hi =
10291 fcoe_wwn_port_name_upper);
10292 bp->cnic_eth_dev.fcoe_wwn_port_name_lo =
10295 fcoe_wwn_port_name_lower);
10298 bp->cnic_eth_dev.fcoe_wwn_node_name_hi =
10301 fcoe_wwn_node_name_upper);
10302 bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
10305 fcoe_wwn_node_name_lower);
10311 if (BNX2X_MF_EXT_PROTOCOL_FCOE(bp) && !
CHIP_IS_E1x(bp))
10312 bnx2x_get_ext_wwn_info(bp, func);
10314 }
else if (IS_MF_FCOE_SD(bp))
10315 bnx2x_get_ext_wwn_info(bp, func);
10317 BNX2X_DEV_INFO(
"max_fcoe_conn 0x%x\n", bp->cnic_eth_dev.max_fcoe_conn);
10323 if (!bp->cnic_eth_dev.max_fcoe_conn)
10338 bnx2x_get_fcoe_info(bp);
10347 u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
10348 u8 *fip_mac = bp->fip_mac;
10355 BNX2X_ERROR(
"warning: random MAC workaround active\n");
10356 eth_hw_addr_random(bp->
dev);
10357 }
else if (
IS_MF(bp)) {
10358 val2 =
MF_CFG_RD(bp, func_mf_config[func].mac_upper);
10359 val =
MF_CFG_RD(bp, func_mf_config[func].mac_lower);
10362 bnx2x_set_mac_buf(bp->
dev->dev_addr, val, val2);
10375 val2 =
MF_CFG_RD(bp, func_ext_config[func].
10376 iscsi_mac_addr_upper);
10377 val =
MF_CFG_RD(bp, func_ext_config[func].
10378 iscsi_mac_addr_lower);
10379 bnx2x_set_mac_buf(iscsi_mac, val, val2);
10386 val2 =
MF_CFG_RD(bp, func_ext_config[func].
10387 fcoe_mac_addr_upper);
10388 val =
MF_CFG_RD(bp, func_ext_config[func].
10389 fcoe_mac_addr_lower);
10390 bnx2x_set_mac_buf(fip_mac, val, val2);
10400 if (IS_MF_STORAGE_SD(bp)) {
10401 if (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp)) {
10430 bnx2x_set_mac_buf(bp->
dev->dev_addr, val, val2);
10437 bnx2x_set_mac_buf(iscsi_mac, val, val2);
10440 fcoe_fip_mac_upper);
10442 fcoe_fip_mac_lower);
10443 bnx2x_set_mac_buf(fip_mac, val, val2);
10454 if (!is_valid_ether_addr(iscsi_mac)) {
10462 if (!is_valid_ether_addr(fip_mac)) {
10468 if (!bnx2x_is_valid_ether_addr(bp, bp->
dev->dev_addr))
10470 "bad Ethernet MAC address configuration: %pM\n"
10471 "change it manually before bringing up the appropriate network interface\n",
10472 bp->
dev->dev_addr);
10484 bnx2x_get_common_hwinfo(bp);
10518 "FORCING Normal Mode failed!!!\n");
10523 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
10529 bnx2x_get_igu_cam_info(bp);
10582 dev_info.shared_feature_config.config);
10587 val =
MF_CFG_RD(bp, func_mf_config[func].
10590 if (val != 0xffff) {
10593 func_mf_config[func].config);
10600 mac_upper) != 0xffff) &&
10602 afex_driver_support))) {
10605 func_mf_config[func].config);
10613 func_mf_config[
FUNC_0].e1hov_tag);
10619 func_mf_config[func].config);
10631 IS_MF(bp) ?
"multi" :
"single");
10635 val =
MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
10645 "No valid MF OV for func %d, aborting\n",
10660 "VN %d is in a single function mode, aborting\n",
10677 u8 other_func =
BP_PATH(bp) + 2*other_port;
10679 func_mf_config[other_func].e1hov_tag);
10690 bnx2x_get_port_hwinfo(bp);
10693 bnx2x_get_mac_hwinfo(bp);
10695 bnx2x_get_cnic_info(bp);
10702 int cnt,
i, block_end, rodi;
10707 char *vpd_extended_data =
NULL;
10714 goto out_not_found;
10722 goto out_not_found;
10725 pci_vpd_lrdt_size(&vpd_start[i]);
10731 if (vpd_extended_data ==
NULL)
10732 goto out_not_found;
10740 goto out_not_found;
10741 vpd_data = vpd_extended_data;
10743 vpd_data = vpd_start;
10750 goto out_not_found;
10752 len = pci_vpd_info_field_size(&vpd_data[rodi]);
10755 goto out_not_found;
10768 len = pci_vpd_info_field_size(&vpd_data[rodi]);
10777 kfree(vpd_extended_data);
10781 kfree(vpd_extended_data);
10827 #if defined(__LITTLE_ENDIAN)
10850 rc = bnx2x_get_hwinfo(bp);
10854 bnx2x_set_modes_bitmap(bp);
10860 bnx2x_read_fwinfo(bp);
10872 bnx2x_prev_unload(bp);
10880 dev_err(&bp->
pdev->dev,
"MCP disabled, must load devices in order!\n");
10915 bp->
timer.function = bnx2x_timer;
10948 static int bnx2x_open(
struct net_device *dev)
10950 struct bnx2x *bp = netdev_priv(dev);
10951 bool global =
false;
10952 int other_engine =
BP_PATH(bp) ? 0 : 1;
10953 bool other_load_status, load_status;
10961 other_load_status = bnx2x_get_load_status(bp, other_engine);
10962 load_status = bnx2x_get_load_status(bp,
BP_PATH(bp));
10988 if ((!load_status &&
10989 (!global || !other_load_status)) &&
10990 bnx2x_trylock_leader_lock(bp) &&
10992 netdev_info(bp->
dev,
"Recovered in open\n");
11000 BNX2X_ERR(
"Recovery flow hasn't been properly completed yet. Try again later.\n"
11001 "If you still see this message after a few retries then power cycle is required.\n");
11011 static int bnx2x_close(
struct net_device *dev)
11013 struct bnx2x *bp = netdev_priv(dev);
11024 static int bnx2x_init_mcast_macs_list(
struct bnx2x *bp,
11029 kzalloc(
sizeof(*mc_mac) * mc_count,
GFP_ATOMIC);
11048 static void bnx2x_free_mcast_macs_list(
11066 static int bnx2x_set_uc_list(
struct bnx2x *bp)
11072 unsigned long ramrod_flags = 0;
11077 BNX2X_ERR(
"Failed to schedule DELETE operations: %d\n", rc);
11086 "Failed to schedule ADD operations: %d\n", rc);
11090 }
else if (rc < 0) {
11092 BNX2X_ERR(
"Failed to schedule ADD operations: %d\n",
11104 static int bnx2x_set_mc_list(
struct bnx2x *bp)
11115 BNX2X_ERR(
"Failed to clear multicast configuration: %d\n", rc);
11121 rc = bnx2x_init_mcast_macs_list(bp, &rparam);
11123 BNX2X_ERR(
"Failed to create multicast MACs list: %d\n",
11132 BNX2X_ERR(
"Failed to set a new multicast configuration: %d\n",
11135 bnx2x_free_mcast_macs_list(&rparam);
11145 struct bnx2x *bp = netdev_priv(dev);
11163 if (bnx2x_set_mc_list(bp) < 0)
11166 if (bnx2x_set_uc_list(bp) < 0)
11173 if (IS_MF_ISCSI_SD(bp))
11187 static int bnx2x_mdio_read(
struct net_device *netdev,
int prtad,
11188 int devad,
u16 addr)
11190 struct bnx2x *bp = netdev_priv(netdev);
11195 prtad, devad, addr);
11211 static int bnx2x_mdio_write(
struct net_device *netdev,
int prtad,
int devad,
11214 struct bnx2x *bp = netdev_priv(netdev);
11218 "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x, value 0x%x\n",
11219 prtad, devad, addr, value);
11231 static int bnx2x_ioctl(
struct net_device *dev,
struct ifreq *ifr,
int cmd)
11233 struct bnx2x *bp = netdev_priv(dev);
11239 if (!netif_running(dev))
11245 #ifdef CONFIG_NET_POLL_CONTROLLER
11246 static void poll_bnx2x(
struct net_device *dev)
11248 struct bnx2x *bp = netdev_priv(dev);
11258 static int bnx2x_validate_addr(
struct net_device *dev)
11260 struct bnx2x *bp = netdev_priv(dev);
11262 if (!bnx2x_is_valid_ether_addr(bp, dev->
dev_addr)) {
11263 BNX2X_ERR(
"Non-valid Ethernet address\n");
11270 .ndo_open = bnx2x_open,
11271 .ndo_stop = bnx2x_close,
11276 .ndo_validate_addr = bnx2x_validate_addr,
11277 .ndo_do_ioctl = bnx2x_ioctl,
11282 #ifdef CONFIG_NET_POLL_CONTROLLER
11283 .ndo_poll_controller = poll_bnx2x,
11287 #if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC)
11288 .ndo_fcoe_get_wwn = bnx2x_fcoe_get_wwn,
11292 static int bnx2x_set_coherency_mask(
struct bnx2x *bp)
11299 dev_err(dev,
"dma_set_coherent_mask failed, aborting\n");
11303 dev_err(dev,
"System does not support DMA, aborting\n");
11317 bool chip_is_e1x = (board_type ==
BCM57710 ||
11322 bp = netdev_priv(dev);
11331 "Cannot enable PCI device, aborting\n");
11337 "Cannot find PCI device base address, aborting\n");
11339 goto err_out_disable;
11343 dev_err(&bp->
pdev->dev,
"Cannot find second PCI device"
11344 " base address, aborting\n");
11346 goto err_out_disable;
11353 "Cannot obtain PCI resources, aborting\n");
11354 goto err_out_disable;
11364 "Cannot find power management capability, aborting\n");
11366 goto err_out_release;
11369 if (!pci_is_pcie(pdev)) {
11370 dev_err(&bp->
pdev->dev,
"Not PCI Express, aborting\n");
11372 goto err_out_release;
11375 rc = bnx2x_set_coherency_mask(bp);
11377 goto err_out_release;
11388 "Cannot map register space, aborting\n");
11390 goto err_out_release;
11401 pci_read_config_dword(bp->
pdev,
11459 dev->dcbnl_ops = &bnx2x_dcbnl_ops;
11467 bp->
mdio.mdio_read = bnx2x_mdio_read;
11468 bp->
mdio.mdio_write = bnx2x_mdio_write;
11478 pci_set_drvdata(pdev,
NULL);
11484 static void __devinit bnx2x_get_pcie_width_speed(
struct bnx2x *bp,
11485 int *
width,
int *speed)
11495 static int bnx2x_check_firmware(
struct bnx2x *bp)
11515 for (i = 0; i <
sizeof(*fw_hdr) /
sizeof(*sections); i++) {
11518 if (offset + len > firmware->
size) {
11519 BNX2X_ERR(
"Section %d length is out of bounds\n", i);
11526 ops_offsets = (
u16 *)(firmware->
data + offset);
11531 BNX2X_ERR(
"Section offset %d is out of bounds\n", i);
11543 BNX2X_ERR(
"Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
11544 fw_ver[0], fw_ver[1], fw_ver[2], fw_ver[3],
11555 static void be32_to_cpu_n(
const u8 *_source,
u8 *_target,
u32 n)
11561 for (i = 0; i < n/4; i++)
11569 static void bnx2x_prep_ops(
const u8 *_source,
u8 *_target,
u32 n)
11575 for (i = 0, j = 0; i < n/8; i++, j += 2) {
11577 target[
i].
op = (tmp >> 24) & 0xff;
11578 target[
i].
offset = tmp & 0xffffff;
11586 static void bnx2x_prep_iro(
const u8 *_source,
u8 *_target,
u32 n)
11589 struct iro *target = (
struct iro *)_target;
11592 for (i = 0, j = 0; i < n/
sizeof(
struct iro); i++) {
11596 target[
i].
m1 = (tmp >> 16) & 0xffff;
11597 target[
i].
m2 = tmp & 0xffff;
11600 target[
i].
m3 = (tmp >> 16) & 0xffff;
11601 target[
i].
size = tmp & 0xffff;
11606 static void be16_to_cpu_n(
const u8 *_source,
u8 *_target,
u32 n)
11609 u16 *target = (
u16 *)_target;
11612 for (i = 0; i < n/2; i++)
11616 #define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
11618 u32 len = be32_to_cpu(fw_hdr->arr.len); \
11619 bp->arr = kmalloc(len, GFP_KERNEL); \
11622 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
11623 (u8 *)bp->arr, len); \
11626 static int bnx2x_init_firmware(
struct bnx2x *bp)
11628 const char *fw_file_name;
11642 BNX2X_ERR(
"Unsupported chip revision\n");
11649 BNX2X_ERR(
"Can't load firmware file %s\n",
11651 goto request_firmware_exit;
11654 rc = bnx2x_check_firmware(bp);
11656 BNX2X_ERR(
"Corrupt firmware file %s\n", fw_file_name);
11657 goto request_firmware_exit;
11697 init_offsets_alloc_err:
11699 init_ops_alloc_err:
11701 request_firmware_exit:
11708 static void bnx2x_release_firmware(
struct bnx2x *bp)
11719 .init_hw_cmn_chip = bnx2x_init_hw_common_chip,
11720 .init_hw_cmn = bnx2x_init_hw_common,
11721 .init_hw_port = bnx2x_init_hw_port,
11722 .init_hw_func = bnx2x_init_hw_func,
11724 .reset_hw_cmn = bnx2x_reset_common,
11725 .reset_hw_port = bnx2x_reset_port,
11726 .reset_hw_func = bnx2x_reset_func,
11728 .gunzip_init = bnx2x_gunzip_init,
11729 .gunzip_end = bnx2x_gunzip_end,
11731 .init_fw = bnx2x_init_firmware,
11732 .release_fw = bnx2x_release_firmware,
11738 bnx2x_setup_dmae(bp);
11745 &bnx2x_func_sp_drv);
11749 static int bnx2x_set_qm_cid_count(
struct bnx2x *bp)
11754 cid_count += CNIC_CID_MAX;
11765 static int bnx2x_get_num_non_def_sbs(
struct pci_dev *pdev)
11794 int pcie_width, pcie_speed;
11795 int rc, max_non_def_sbs;
11796 int rx_count, tx_count, rss_count, doorbell_size;
11806 u8 max_cos_est = 0;
11835 pr_err(
"Unknown board_type (%ld), aborting\n",
11840 max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev);
11861 bp = netdev_priv(dev);
11865 pci_set_drvdata(pdev, dev);
11867 rc = bnx2x_init_dev(pdev, dev, ent->
driver_data);
11875 BNX2X_DEV_INFO(
"Allocated netdev with %d tx and %d rx queues\n",
11876 tx_count, rx_count);
11878 rc = bnx2x_init_bp(bp);
11880 goto init_one_exit;
11889 "Cannot map doorbells, bar size too small, aborting\n");
11891 goto init_one_exit;
11897 "Cannot map doorbell space, aborting\n");
11899 goto init_one_exit;
11931 dev_err(&pdev->
dev,
"Cannot register net device\n");
11932 goto init_one_exit;
11944 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
11947 "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
11953 "5GHz (Gen2)" :
"2.5GHz",
11971 pci_set_drvdata(pdev,
NULL);
11978 struct net_device *dev = pci_get_drvdata(pdev);
11982 dev_err(&pdev->
dev,
"BAD net device from bnx2x_init_one\n");
11985 bp = netdev_priv(dev);
11998 bnx2x_dcbnl_update_applist(bp,
true);
12007 bnx2x_disable_msi(bp);
12021 bnx2x_release_firmware(bp);
12031 pci_set_drvdata(pdev,
NULL);
12034 static int bnx2x_eeh_nic_unload(
struct bnx2x *bp)
12046 bnx2x_tx_disable(bp);
12050 bnx2x_del_all_napi(bp);
12063 bnx2x_free_rx_sge_range(bp, bp->fp + i,
NUM_RX_SGE);
12084 BNX2X_ERR(
"BAD MCP validity signature\n");
12098 struct net_device *dev = pci_get_drvdata(pdev);
12099 struct bnx2x *bp = netdev_priv(dev);
12110 if (netif_running(dev))
12111 bnx2x_eeh_nic_unload(bp);
12129 struct net_device *dev = pci_get_drvdata(pdev);
12130 struct bnx2x *bp = netdev_priv(dev);
12136 "Cannot re-enable PCI device after reset\n");
12144 if (netif_running(dev))
12159 static void bnx2x_io_resume(
struct pci_dev *pdev)
12161 struct net_device *dev = pci_get_drvdata(pdev);
12162 struct bnx2x *bp = netdev_priv(dev);
12165 netdev_err(bp->
dev,
"Handling parity error recovery. Try again later\n");
12171 bnx2x_eeh_recover(bp);
12173 if (netif_running(dev))
12182 .error_detected = bnx2x_io_error_detected,
12183 .slot_reset = bnx2x_io_slot_reset,
12184 .resume = bnx2x_io_resume,
12187 static struct pci_driver bnx2x_pci_driver = {
12189 .id_table = bnx2x_pci_tbl,
12190 .probe = bnx2x_init_one,
12194 .err_handler = &bnx2x_err_handler,
12197 static int __init bnx2x_init(
void)
12204 if (bnx2x_wq ==
NULL) {
12205 pr_err(
"Cannot create workqueue\n");
12209 ret = pci_register_driver(&bnx2x_pci_driver);
12211 pr_err(
"Cannot register driver\n");
12217 static void __exit bnx2x_cleanup(
void)
12251 static int bnx2x_set_iscsi_eth_mac_addr(
struct bnx2x *bp)
12253 unsigned long ramrod_flags = 0;
12257 &bp->iscsi_l2_mac_obj,
true,
12262 static void bnx2x_cnic_sp_post(
struct bnx2x *bp,
int count)
12265 int cxt_index, cxt_offset;
12267 #ifdef BNX2X_STOP_ON_ERROR
12273 BUG_ON(bp->cnic_spq_pending < count);
12274 bp->cnic_spq_pending -=
count;
12277 for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) {
12281 u8 cmd = (
le32_to_cpu(bp->cnic_kwq_cons->hdr.conn_and_cmd_data)
12295 vcxt[cxt_offset].eth,
12318 if (bp->cnic_spq_pending >=
12319 bp->cnic_eth_dev.max_kwqe_pending)
12322 bp->cnic_spq_pending++;
12324 BNX2X_ERR(
"Unknown SPE type: %d\n", type);
12329 spe = bnx2x_sp_get_next(bp);
12330 *spe = *bp->cnic_kwq_cons;
12333 bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
12335 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
12336 bp->cnic_kwq_cons = bp->cnic_kwq;
12338 bp->cnic_kwq_cons++;
12340 bnx2x_sp_prod_update(bp);
12344 static int bnx2x_cnic_sp_queue(
struct net_device *dev,
12347 struct bnx2x *bp = netdev_priv(dev);
12350 #ifdef BNX2X_STOP_ON_ERROR
12352 BNX2X_ERR(
"Can't post to SP queue while panic\n");
12359 BNX2X_ERR(
"Handling parity error recovery. Try again later\n");
12365 for (i = 0; i <
count; i++) {
12371 *bp->cnic_kwq_prod = *spe;
12373 bp->cnic_kwq_pending++;
12376 spe->
hdr.conn_and_cmd_data, spe->
hdr.type,
12379 bp->cnic_kwq_pending);
12381 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
12382 bp->cnic_kwq_prod = bp->cnic_kwq;
12384 bp->cnic_kwq_prod++;
12389 if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
12390 bnx2x_cnic_sp_post(bp, 0);
12402 lockdep_is_held(&bp->cnic_mutex));
12404 rc = c_ops->
cnic_ctl(bp->cnic_data, ctl);
12418 rc = c_ops->
cnic_ctl(bp->cnic_data, ctl);
12427 int bnx2x_cnic_notify(
struct bnx2x *bp,
int cmd)
12433 return bnx2x_cnic_ctl_send(bp, &ctl);
12436 static void bnx2x_cnic_cfc_comp(
struct bnx2x *bp,
int cid,
u8 err)
12445 bnx2x_cnic_ctl_send_bh(bp, &ctl);
12446 bnx2x_cnic_sp_post(bp, 0);
12455 static void bnx2x_set_iscsi_eth_rx_mode(
struct bnx2x *bp,
bool start)
12457 unsigned long accept_flags = 0, ramrod_flags = 0;
12493 struct bnx2x *bp = netdev_priv(dev);
12496 switch (ctl->
cmd) {
12501 bnx2x_ilt_wr(bp, index, addr);
12508 bnx2x_cnic_sp_post(bp, count);
12515 unsigned long sp_bits = 0;
12528 rc = bnx2x_set_iscsi_eth_mac_addr(bp);
12537 netif_addr_lock_bh(dev);
12538 bnx2x_set_iscsi_eth_rx_mode(bp,
true);
12539 netif_addr_unlock_bh(dev);
12545 if (!bnx2x_wait_sp_comp(bp, sp_bits))
12546 BNX2X_ERR(
"rx_mode completion timed out!\n");
12553 unsigned long sp_bits = 0;
12556 netif_addr_lock_bh(dev);
12557 bnx2x_set_iscsi_eth_rx_mode(bp,
false);
12558 netif_addr_unlock_bh(dev);
12564 if (!bnx2x_wait_sp_comp(bp, sp_bits))
12565 BNX2X_ERR(
"rx_mode completion timed out!\n");
12592 u32 scratch_offset;
12600 SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);
12608 scratch_offset =
SHMEM2_RD(bp, ncsi_oem_data_addr);
12609 if (!scratch_offset)
12612 fcoe_features[path][port]);
12617 REG_WR(bp, scratch_offset + i,
12618 *(host_addr + i/4));
12630 cap =
SHMEM2_RD(bp, drv_capabilities_flag[idx]);
12635 SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);
12648 void bnx2x_setup_cnic_irq_info(
struct bnx2x *bp)
12661 cp->
irq_arr[0].status_blk = (
void *)bp->cnic_sb.e2_sb;
12663 cp->
irq_arr[0].status_blk = (
void *)bp->cnic_sb.e1x_sb;
12665 cp->
irq_arr[0].status_blk_num = bnx2x_cnic_fw_sb_id(bp);
12666 cp->
irq_arr[0].status_blk_num2 = bnx2x_cnic_igu_sb_id(bp);
12674 void bnx2x_setup_cnic_info(
struct bnx2x *bp)
12680 bnx2x_cid_ilt_lines(bp);
12692 struct bnx2x *bp = netdev_priv(dev);
12704 bp->cnic_kwq_cons = bp->cnic_kwq;
12705 bp->cnic_kwq_prod = bp->cnic_kwq;
12708 bp->cnic_spq_pending = 0;
12709 bp->cnic_kwq_pending = 0;
12711 bp->cnic_data =
data;
12717 bnx2x_setup_cnic_irq_info(bp);
12724 static int bnx2x_unregister_cnic(
struct net_device *dev)
12726 struct bnx2x *bp = netdev_priv(dev);
12734 kfree(bp->cnic_kwq);
12735 bp->cnic_kwq =
NULL;
12742 struct bnx2x *bp = netdev_priv(dev);
12760 bnx2x_cid_ilt_lines(bp);
12782 "page_size %d, tbl_offset %d, tbl_lines %d, starting cid %d\n",