#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

#include "bnx2x.h"
#define BNX2X_PCI_FREE(x, y, size) \
        do { \
                dma_free_coherent(&bp->pdev->dev, size, (void *)x, y); \
                x = NULL; \
        } while (0)

#define BNX2X_FREE(x) \
        do { \
                kfree((void *)x); \
                x = NULL; \
        } while (0)

#define BNX2X_PCI_ALLOC(x, y, size) \
        do { \
                x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
                if (x == NULL) \
                        goto alloc_mem_err; \
                memset((void *)x, 0, size); \
        } while (0)

#define BNX2X_ALLOC(x, size) \
        do { \
                x = kzalloc(size, GFP_KERNEL); \
                if (x == NULL) \
                        goto alloc_mem_err; \
        } while (0)
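/* Illustrative usage (not part of this header): the alloc/free macros
 * assume a local 'struct bnx2x *bp' and, for the alloc flavours, an
 * 'alloc_mem_err' label in the calling function, roughly:
 *
 *      BNX2X_PCI_ALLOC(fp->rx_desc_ring, &fp->rx_desc_mapping,
 *                      sizeof(struct eth_rx_bd) * NUM_RX_BD);
 *      ...
 * alloc_mem_err:
 *      BNX2X_PCI_FREE(fp->rx_desc_ring, fp->rx_desc_mapping,
 *                     sizeof(struct eth_rx_bd) * NUM_RX_BD);
 *      return -ENOMEM;
 */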
int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);

void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);

void bnx2x_setup_cnic_info(struct bnx2x *bp);
void bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
                         unsigned long rx_mode_flags,
                         unsigned long rx_accept_flags,
                         unsigned long tx_accept_flags,
                         unsigned long ramrod_flags);
void bnx2x_update_rx_prod(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                          u16 bd_prod, u16 rx_comp_prod, u16 rx_sge_prod);
#if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC)
int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type);
#endif
static inline void bnx2x_update_rx_prod_gen(struct bnx2x *bp,
                        struct bnx2x_fastpath *fp, u16 bd_prod,
                        u16 rx_comp_prod, u16 rx_sge_prod, u32 start)
{
        struct ustorm_eth_rx_producers rx_prods = {0};
        u32 i;

        /* Update producers */
        rx_prods.bd_prod = bd_prod;
        rx_prods.cqe_prod = rx_comp_prod;
        rx_prods.sge_prod = rx_sge_prod;

        /* Make sure the BD and SGE data is updated before updating the
         * producers, since the FW may read the BD/SGE right after the
         * producer is updated.
         */
        wmb();

        for (i = 0; i < sizeof(rx_prods)/4; i++)
                REG_WR(bp, start + i*4, ((u32 *)&rx_prods)[i]);

        mmiowb(); /* keep prod updates ordered */

        DP(NETIF_MSG_RX_STATUS,
           "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
           fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}
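/* Typical call site (illustrative): the RX fast path refills the BD, CQE
 * and SGE rings and then publishes all three producers in one shot via
 * the exported wrapper, e.g.:
 *
 *      bnx2x_update_rx_prod(bp, fp, bd_prod, sw_comp_prod,
 *                           fp->rx_sge_prod);
 */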
static inline void bnx2x_igu_ack_sb_gen(struct bnx2x *bp, u8 igu_sb_id,
                                        u8 segment, u16 index, u8 op,
                                        u8 update, u32 igu_addr)
{
        struct igu_regular cmd_data = {0};

        cmd_data.sb_id_and_flags =
                        ((index << IGU_REGULAR_SB_INDEX_SHIFT) |
                         (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
                         (update << IGU_REGULAR_BUPDATE_SHIFT) |
                         (op << IGU_REGULAR_ENABLE_INT_SHIFT));

        REG_WR(bp, igu_addr, cmd_data.sb_id_and_flags);

        /* Make sure that ACK is written */
        mmiowb();
        barrier();
}

static inline void bnx2x_hc_ack_sb(struct bnx2x *bp, u8 sb_id,
                                   u8 storm, u16 index, u8 op, u8 update)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_INT_ACK);
        struct igu_ack_register igu_ack;

        igu_ack.status_block_index = index;
        igu_ack.sb_id_and_flags =
                        ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
                         (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
                         (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
                         (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

        REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

        /* Make sure that ACK is written */
        mmiowb();
        barrier();
}
static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 storm,
                                u16 index, u8 op, u8 update)
{
        if (bp->common.int_block == INT_BLOCK_HC)
                bnx2x_hc_ack_sb(bp, igu_sb_id, storm, index, op, update);
        else {
                u8 segment;

                if (CHIP_INT_MODE_IS_BC(bp))
                        segment = storm;
                else if (igu_sb_id != bp->igu_dsb_id)
                        segment = IGU_SEG_ACCESS_DEF;
                else if (storm == ATTENTION_ID)
                        segment = IGU_SEG_ACCESS_ATTN;
                else
                        segment = IGU_SEG_ACCESS_NORM;
                bnx2x_igu_ack_sb(bp, igu_sb_id, segment, index, op, update);
        }
}
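/* Illustrative call (mirrors the NAPI poll path): re-enable the status
 * block interrupt once polling is done, e.g.:
 *
 *      bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
 *                   le16_to_cpu(fp->fp_hc_idx), IGU_INT_ENABLE, 1);
 */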
static inline u16 bnx2x_hc_ack_int(struct bnx2x *bp)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_SIMD_MASK);
        u32 result = REG_RD(bp, hc_addr);

        barrier();
        return result;
}

static inline u16 bnx2x_igu_ack_int(struct bnx2x *bp)
{
        u32 igu_addr = (BAR_IGU_INTMEM + IGU_REG_SISR_MDPC_WMASK_LSB_UPPER*8);
        u32 result = REG_RD(bp, igu_addr);

        barrier();
        return result;
}

static inline u16 bnx2x_ack_int(struct bnx2x *bp)
{
        barrier();
        if (bp->common.int_block == INT_BLOCK_HC)
                return bnx2x_hc_ack_int(bp);
        else
                return bnx2x_igu_ack_int(bp);
}
static inline int bnx2x_has_tx_work_unload(struct bnx2x_fp_txdata *txdata)
{
        /* Tell compiler that consumer and producer can change */
        barrier();
        return txdata->tx_pkt_prod != txdata->tx_pkt_cons;
}
static inline u16 bnx2x_tx_avail(struct bnx2x *bp,
                                 struct bnx2x_fp_txdata *txdata)
{
        s16 used;
        u16 prod = txdata->tx_bd_prod;
        u16 cons = txdata->tx_bd_cons;

        used = SUB_S16(prod, cons);

#ifdef BNX2X_STOP_ON_ERROR
        WARN_ON(used < 0);
        WARN_ON(used > txdata->tx_ring_size);
        WARN_ON((txdata->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

        return (s16)(txdata->tx_ring_size) - used;
}
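/* Worked example (illustrative): the BD indices are free-running u16
 * counters, so the subtraction must be wrap-safe.  With tx_bd_prod = 10
 * and tx_bd_cons = 65530, SUB_S16() yields 16 BDs in flight, leaving
 * 'tx_ring_size - 16' BDs available.
 */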
static inline int bnx2x_tx_queue_has_work(struct bnx2x_fp_txdata *txdata)
{
        u16 hw_cons;

        /* Tell compiler that status block fields can change */
        barrier();
        hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
        return hw_cons != txdata->tx_pkt_cons;
}

static inline bool bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
        u8 cos;

        for_each_cos_in_tx_queue(fp, cos)
                if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
                        return true;
        return false;
}
static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
{
        u16 rx_cons_sb;

        /* Tell compiler that status block fields can change */
        barrier();
        rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
        if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
                rx_cons_sb++;
        return (fp->rx_comp_cons != rx_cons_sb);
}
static inline void bnx2x_tx_disable(struct bnx2x *bp)
{
        netif_tx_disable(bp->dev);
        netif_carrier_off(bp->dev);
}
static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct page *page = sw_buf->page;
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

        /* Skip "next page" elements */
        if (!page)
                return;

        dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
                       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
        __free_pages(page, PAGES_PER_SGE_SHIFT);

        sw_buf->page = NULL;
        sge->addr_hi = 0;
        sge->addr_lo = 0;
}
static inline void bnx2x_add_all_napi(struct bnx2x *bp)
{
        int i;

        bp->num_napi_queues = bp->num_queues;

        /* Add NAPI objects */
        for_each_rx_queue(bp, i)
                netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
                               bnx2x_poll, BNX2X_NAPI_WEIGHT);
}
static inline int bnx2x_calc_num_queues(struct bnx2x *bp)
{
        return  num_queues ?
                 min_t(int, num_queues, BNX2X_MAX_QUEUES(bp)) :
                 min_t(int, netif_get_num_default_rss_queues(),
                       BNX2X_MAX_QUEUES(bp));
}
static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
        int i, j;

        for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
                int idx = RX_SGE_CNT * i - 1;

                for (j = 0; j < 2; j++) {
                        BIT_VEC64_CLEAR_BIT(fp->sge_mask, idx);
                        idx--;
                }
        }
}
static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
        /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
        memset(fp->sge_mask, 0xff, sizeof(fp->sge_mask));

        /* Clear the two last indices in the page to 1: these are the
         * indices that correspond to the "next" element, hence will never
         * be indicated and should be removed from the calculations.
         */
        bnx2x_clear_sge_mask_next_elems(fp);
}
/* Move an RX buffer from the consumer to the producer slot: no new buffer
 * and no new mapping are created, so there is no dma_mapping_error() check.
 */
static inline void bnx2x_reuse_rx_data(struct bnx2x_fastpath *fp,
                                       u16 cons, u16 prod)
{
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

        dma_unmap_addr_set(prod_rx_buf, mapping,
                           dma_unmap_addr(cons_rx_buf, mapping));
        prod_rx_buf->data = cons_rx_buf->data;
        *prod_bd = *cons_bd;
}
static inline int func_by_vn(struct bnx2x *bp, int vn)
{
        return 2 * vn + BP_PORT(bp);
}
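/* Example (illustrative): functions are interleaved across the two ports,
 * so func = 2 * vn + port; on port 1, vn values 0..3 map to functions
 * 1, 3, 5 and 7.
 */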
static inline int bnx2x_config_rss_eth(struct bnx2x *bp, bool config_hash)
{
        return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, config_hash);
}
/* Init function: must be called before sending CLIENT_SETUP for the
 * first client.
 */
static inline int bnx2x_func_start(struct bnx2x *bp)
{
        struct bnx2x_func_state_params func_params = {NULL};
        struct bnx2x_func_start_params *start_params =
                &func_params.params.start;

        /* Prepare parameters for function state transitions */
        __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);

        func_params.f_obj = &bp->func_obj;
        func_params.cmd = BNX2X_F_CMD_START;

        /* Function parameters */
        start_params->mf_mode = bp->mf_mode;
        start_params->sd_vlan_tag = bp->mf_ov;

        if (CHIP_IS_E2(bp) || CHIP_IS_E3(bp))
                start_params->network_cos_mode = STATIC_COS;
        else /* CHIP_IS_E1X */
                start_params->network_cos_mode = FW_WRR;

        return bnx2x_func_state_change(bp, &func_params);
}
static inline void bnx2x_set_fw_mac_addr(u16 *fw_hi, u16 *fw_mid, u16 *fw_lo,
                                         u8 *mac)
{
        ((u8 *)fw_hi)[0]  = mac[1];
        ((u8 *)fw_hi)[1]  = mac[0];
        ((u8 *)fw_mid)[0] = mac[3];
        ((u8 *)fw_mid)[1] = mac[2];
        ((u8 *)fw_lo)[0]  = mac[5];
        ((u8 *)fw_lo)[1]  = mac[4];
}
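/* Worked example (illustrative): for mac = 00:11:22:33:44:55 the bytes
 * are swapped pairwise, so on a little-endian host *fw_hi reads 0x0011,
 * *fw_mid 0x2233 and *fw_lo 0x4455, i.e. each u16 holds two MAC bytes in
 * the big-endian order the FW expects.
 */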
static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
                                           struct bnx2x_fastpath *fp, int last)
{
        int i;

        if (fp->disable_tpa)
                return;

        for (i = 0; i < last; i++)
                bnx2x_free_rx_sge(bp, fp, i);
}
static inline void bnx2x_set_next_page_rx_bd(struct bnx2x_fastpath *fp)
{
        int i;

        /* Point the "next page" BD at the end of each RX page to the
         * first BD of the following page.
         */
        for (i = 1; i <= NUM_RX_RINGS; i++) {
                struct eth_rx_bd *rx_bd;

                rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
                rx_bd->addr_hi =
                        cpu_to_le32(U64_HI(fp->rx_desc_mapping +
                                    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
                rx_bd->addr_lo =
                        cpu_to_le32(U64_LO(fp->rx_desc_mapping +
                                    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
        }
}
static inline u8 bnx2x_stats_id(struct bnx2x_fastpath *fp)
{
        struct bnx2x *bp = fp->bp;

        if (!CHIP_IS_E1x(bp)) {
                /* there are special statistics counters for FCoE 136..140 */
                if (IS_FCOE_FP(fp))
                        return bp->cnic_base_cl_id + (bp->pf_num >> 1);
                return fp->cl_id;
        }
        return fp->cl_id + BP_PORT(bp) * FP_SB_MAX_E1x;
}
static inline void bnx2x_init_vlan_mac_fp_objs(struct bnx2x_fastpath *fp,
                                               bnx2x_obj_type obj_type)
{
        struct bnx2x *bp = fp->bp;

        /* Configure classification DBs */
        bnx2x_init_mac_obj(bp, &bnx2x_sp_obj(bp, fp).mac_obj, fp->cl_id,
                           fp->cid, BP_FUNC(bp), bnx2x_sp(bp, mac_rdata),
                           bnx2x_sp_mapping(bp, mac_rdata),
                           BNX2X_FILTER_MAC_PENDING,
                           &bp->sp_state, obj_type,
                           &bp->macs_pool);
}
/* Get the number of functions enabled on the current PATH/PORT. */
static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
{
        u8 func_num = 0, i;

        /* 57710 has only one function per-port */
        if (CHIP_IS_E1(bp))
                return 1;

        for (i = 0; i < E1H_FUNC_MAX / 2; i++) {
                u32 func_config =
                        MF_CFG_RD(bp,
                                  func_mf_config[BP_PORT(bp) + 2 * i].
                                  config);
                func_num +=
                        ((func_config & FUNC_MF_CFG_FUNC_HIDE) ? 0 : 1);
        }

        WARN_ON(!func_num);

        return func_num;
}
static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
{
        /* RX_MODE controlling object */
        bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj);

        /* Setup CAM credit pools: the MAC pool is shared per path, so
         * size it by the number of functions enabled on this path.
         */
        bnx2x_init_mac_credit_pool(bp, &bp->macs_pool, BP_FUNC(bp),
                                   bnx2x_get_path_func_num(bp));
}
static inline void bnx2x_init_txdata(struct bnx2x *bp,
                                     struct bnx2x_fp_txdata *txdata, u32 cid,
                                     int txq_index, __le16 *tx_cons_sb,
                                     struct bnx2x_fastpath *fp)
{
        txdata->cid = cid;
        txdata->txq_index = txq_index;
        txdata->tx_cons_sb = tx_cons_sb;
        txdata->parent_fp = fp;
        txdata->tx_ring_size = IS_FCOE_FP(fp) ? MAX_TX_AVAIL : bp->tx_ring_size;

        DP(NETIF_MSG_IFUP, "created tx data cid %d, txq %d\n",
           txdata->cid, txdata->txq_index);
}
static inline u8 bnx2x_cnic_eth_cl_id(struct bnx2x *bp, u8 cl_idx)
{
        return bp->cnic_base_cl_id + cl_idx +
                (bp->pf_num >> 1) * BNX2X_MAX_CNIC_ETH_CL_ID_IDX;
}
static inline u8 bnx2x_cnic_fw_sb_id(struct bnx2x *bp)
{
        /* the 'first' id is allocated for the cnic */
        return bp->base_fw_ndsb;
}
static inline u8 bnx2x_cnic_igu_sb_id(struct bnx2x *bp)
{
        return bp->igu_base_sb;
}
static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp)
{
        struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
        unsigned long q_type = 0;

        bnx2x_fcoe(bp, rx_queue) = BNX2X_NUM_ETH_QUEUES(bp);
        bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp,
                                                     BNX2X_FCOE_ETH_CL_ID_IDX);

        /* init shortcut */
        fp->ustorm_rx_prods_offset = bnx2x_rx_ustorm_prods_offset(fp);

        /* Configure Queue State object */
        __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
        __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);

        DP(NETIF_MSG_IFUP,
           "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n",
           fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
           fp->igu_sb_id);
}
static inline int bnx2x_clean_tx_queue(struct bnx2x *bp,
                                       struct bnx2x_fp_txdata *txdata)
{
        int cnt = 1000;

        while (bnx2x_has_tx_work_unload(txdata)) {
                if (!cnt) {
                        BNX2X_ERR("timeout waiting for queue[%d]: txdata->tx_pkt_prod(%d) != txdata->tx_pkt_cons(%d)\n",
                                  txdata->txq_index, txdata->tx_pkt_prod,
                                  txdata->tx_pkt_cons);
#ifdef BNX2X_STOP_ON_ERROR
                        bnx2x_panic();
                        return -EBUSY;
#else
                        break;
#endif
                }
                cnt--;
                usleep_range(1000, 1000);
        }

        return 0;
}
static inline void __storm_memset_struct(struct bnx2x *bp,
                                         u32 addr, size_t size, u32 *data)
{
        int i;

        for (i = 0; i < size/4; i++)
                REG_WR(bp, addr + (i * 4), data[i]);
}
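/* Illustrative usage (names assumed, not from this file): copy a
 * host-side structure into storm internal memory one u32 at a time:
 *
 *      struct tstorm_eth_mac_filter_config cfg = { 0 };
 *      __storm_memset_struct(bp, addr, sizeof(cfg), (u32 *)&cfg);
 */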
/**
 * bnx2x_wait_sp_comp - wait for the outstanding SP commands.
 *
 * @bp:		driver handle
 * @mask:	bits that need to be cleared
 */
static inline bool bnx2x_wait_sp_comp(struct bnx2x *bp, unsigned long mask)
{
        int tout = 5000; /* wait for the current command to complete */

        while (tout--) {
                smp_mb();
                netif_addr_lock_bh(bp->dev);
                if (!(bp->sp_state & mask)) {
                        netif_addr_unlock_bh(bp->dev);
                        return true;
                }
                netif_addr_unlock_bh(bp->dev);

                usleep_range(1000, 1000);
        }

        smp_mb();

        netif_addr_lock_bh(bp->dev);
        if (bp->sp_state & mask) {
                BNX2X_ERR("Filtering completion timed out. sp_state 0x%lx, mask 0x%lx\n",
                          bp->sp_state, mask);
                netif_addr_unlock_bh(bp->dev);
                return false;
        }
        netif_addr_unlock_bh(bp->dev);

        return true;
}
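/* Illustrative usage: callers block until the selected pending bits in
 * bp->sp_state clear, e.g. when draining filtering ramrods:
 *
 *      if (!bnx2x_wait_sp_comp(bp, mask))
 *              BNX2X_ERR("sp completion timed out\n");
 */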
1265 "Max BW configured to 0 - using 100 instead\n");
static inline bool bnx2x_mtu_allows_gro(int mtu)
{
        /* gro frags per page */
        u32 fpp = SGE_PAGE_SIZE / (mtu - ETH_MAX_TPA_HEADER_SIZE);

        /*
         * 1. the number of frags must not grow above MAX_SKB_FRAGS
         * 2. each frag must fit the page
         */
        return mtu <= SGE_PAGE_SIZE && (U_ETH_SGL_SIZE * fpp) <= MAX_SKB_FRAGS;
}
static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
{
        int func;
        int vn;

        /* Set the attention towards other drivers on the same port */
        for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
                if (vn == BP_VN(bp))
                        continue;

                func = func_by_vn(bp, vn);
                REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
                       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
        }
}
static inline void bnx2x_update_drv_flags(struct bnx2x *bp, u32 flags, u32 set)
{
        if (SHMEM2_HAS(bp, drv_flags)) {
                u32 drv_flags;

                bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_DRV_FLAGS);
                drv_flags = SHMEM2_RD(bp, drv_flags);

                if (set)
                        SET_FLAGS(drv_flags, flags);
                else
                        RESET_FLAGS(drv_flags, flags);

                SHMEM2_WR(bp, drv_flags, drv_flags);
                bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_DRV_FLAGS);
        }
}
static inline bool bnx2x_is_valid_ether_addr(struct bnx2x *bp, u8 *addr)
{
        if (is_valid_ether_addr(addr))
                return true;
#ifdef BCM_CNIC
        /* Storage-only functions may legitimately run with a zero MAC */
        if (is_zero_ether_addr(addr) &&
            (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)))
                return true;
#endif
        return false;
}