Go to the documentation of this file.
/* IOC heartbeat/timer tick period, in milliseconds. */
#define BNA_IOC_TIMER_FREQ		200

/* Maximum length, in bytes, of a formatted driver message. */
#define BNA_MESSAGE_SIZE		256

/* Odd-numbered RxQ ids are the small-buffer queues of a queue pair. */
#define bna_is_small_rxq(_id) ((_id) & 0x1)

/* Byte-wise equality test of two MAC addresses. */
#define BNA_MAC_IS_EQUAL(_mac1, _mac2) \
	(!memcmp((_mac1), (_mac2), sizeof(mac_t)))

/*
 * True iff x is a power of two.
 * The (x) != 0 guard fixes the classic edge case: the bare
 * ((x) & ((x) - 1)) == 0 test wrongly accepts 0, which is not a
 * power of two and would later yield a zero wrap mask.
 */
#define BNA_POWER_OF_2(x) (((x) != 0) && (((x) & ((x) - 1)) == 0))
42 #define BNA_TO_POWER_OF_2(x) \
45 while ((x) && (x) != 1) { \
52 #define BNA_TO_POWER_OF_2_HIGH(x) \
64 #define BNA_SET_DMA_ADDR(_addr, _bna_dma_addr) \
67 cpu_to_be64((u64)(_addr)); \
68 (_bna_dma_addr)->msb = ((struct bna_dma_addr *)&tmp_addr)->msb; \
69 (_bna_dma_addr)->lsb = ((struct bna_dma_addr *)&tmp_addr)->lsb; \
76 #define BNA_GET_DMA_ADDR(_bna_dma_addr, _addr) \
78 (_addr) = ((((u64)ntohl((_bna_dma_addr)->msb))) << 32) \
79 | ((ntohl((_bna_dma_addr)->lsb) & 0xffffffff)); \
/*
 * containing_rec - recover a pointer to the enclosing structure from a
 * pointer to one of its members (an open-coded container_of()).
 *
 * NOTE(review): the (type *)0 member-offset idiom is the traditional
 * hand-rolled offsetof(); prefer offsetof(type, field) where the
 * standard header providing it is available — confirm against the
 * file's include set.
 */
#define containing_rec(addr, type, field) \
((type *)((unsigned char *)(addr) - \
(unsigned char *)(&((type *)0)->field)))

/*
 * Number of TxQ work items needed for a frame carrying _vectors
 * scatter-gather entries: one work item holds up to 4 vectors,
 * so this is ceil(_vectors / 4).
 */
#define BNA_TXQ_WI_NEEDED(_vectors) (((_vectors) + 3) >> 2)
/* TxQ entries per page: each TxQ entry is 64 bytes (2^6). */
#define BNA_TXQ_PAGE_INDEX_MAX (PAGE_SIZE >> 6)
/* log2 of the above, used to turn an entry index into a page index. */
#define BNA_TXQ_PAGE_INDEX_MAX_SHIFT (PAGE_SHIFT - 6)
92 #define BNA_TXQ_QPGE_PTR_GET(_qe_idx, _qpt_ptr, _qe_ptr, _qe_ptr_range) \
94 unsigned int page_index; \
96 page_index = (_qe_idx) & (BNA_TXQ_PAGE_INDEX_MAX - 1); \
97 (_qe_ptr_range) = (BNA_TXQ_PAGE_INDEX_MAX - page_index); \
98 page_addr = (_qpt_ptr)[((_qe_idx) >> BNA_TXQ_PAGE_INDEX_MAX_SHIFT)];\
99 (_qe_ptr) = &((struct bna_txq_entry *)(page_addr))[page_index]; \
/* RxQ entries per page: each RxQ entry is 8 bytes (2^3). */
#define BNA_RXQ_PAGE_INDEX_MAX (PAGE_SIZE >> 3)
/* log2 of the above, used to turn an entry index into a page index. */
#define BNA_RXQ_PAGE_INDEX_MAX_SHIFT (PAGE_SHIFT - 3)
106 #define BNA_RXQ_QPGE_PTR_GET(_qe_idx, _qpt_ptr, _qe_ptr, _qe_ptr_range) \
108 unsigned int page_index; \
110 page_index = (_qe_idx) & (BNA_RXQ_PAGE_INDEX_MAX - 1); \
111 (_qe_ptr_range) = (BNA_RXQ_PAGE_INDEX_MAX - page_index); \
112 page_addr = (_qpt_ptr)[((_qe_idx) >> \
113 BNA_RXQ_PAGE_INDEX_MAX_SHIFT)]; \
114 (_qe_ptr) = &((struct bna_rxq_entry *)(page_addr))[page_index]; \
/* CQ entries per page: each completion-queue entry is 16 bytes (2^4). */
#define BNA_CQ_PAGE_INDEX_MAX (PAGE_SIZE >> 4)
/* log2 of the above, used to turn an entry index into a page index. */
#define BNA_CQ_PAGE_INDEX_MAX_SHIFT (PAGE_SHIFT - 4)
121 #define BNA_CQ_QPGE_PTR_GET(_qe_idx, _qpt_ptr, _qe_ptr, _qe_ptr_range) \
123 unsigned int page_index; \
126 page_index = (_qe_idx) & (BNA_CQ_PAGE_INDEX_MAX - 1); \
127 (_qe_ptr_range) = (BNA_CQ_PAGE_INDEX_MAX - page_index); \
128 page_addr = (_qpt_ptr)[((_qe_idx) >> \
129 BNA_CQ_PAGE_INDEX_MAX_SHIFT)]; \
130 (_qe_ptr) = &((struct bna_cq_entry *)(page_addr))[page_index];\
/* Address of queue element _qe_idx in the array starting at _q_base. */
#define BNA_QE_INDX_2_PTR(_cast, _qe_idx, _q_base) \
	(((_cast *)(_q_base)) + (_qe_idx))

/* Count of contiguous elements from _qe_idx to the end of the queue. */
#define BNA_QE_INDX_RANGE(_qe_idx, _q_depth) \
	((_q_depth) - (_qe_idx))

/* Advance _qe_idx by _qe_num, wrapping on the power-of-two _q_depth. */
#define BNA_QE_INDX_ADD(_qe_idx, _qe_num, _q_depth) \
	((_qe_idx) = ((_qe_idx) + (_qe_num)) & ((_q_depth) - 1))

/* Forward distance from _old_idx to _updated_idx, modulo _q_depth. */
#define BNA_Q_INDEX_CHANGE(_old_idx, _updated_idx, _q_depth) \
	(((_updated_idx) - (_old_idx)) & ((_q_depth) - 1))
144 #define BNA_QE_FREE_CNT(_q_ptr, _q_depth) \
145 (((_q_ptr)->consumer_index - (_q_ptr)->producer_index - 1) & \
148 #define BNA_QE_IN_USE_CNT(_q_ptr, _q_depth) \
149 ((((_q_ptr)->producer_index - (_q_ptr)->consumer_index)) & \
/* Current consumer (read) index of the queue embedded in _q_ptr. */
#define BNA_Q_GET_CI(_q_ptr) ((_q_ptr)->q.consumer_index)

/* Current producer (write) index of the queue embedded in _q_ptr. */
#define BNA_Q_GET_PI(_q_ptr) ((_q_ptr)->q.producer_index)

/*
 * Advance the producer index by _num, wrapping on q_depth.
 * Assumes q_depth is a power of two so the AND mask is a valid modulo.
 */
#define BNA_Q_PI_ADD(_q_ptr, _num) \
(_q_ptr)->q.producer_index = \
(((_q_ptr)->q.producer_index + (_num)) & \
((_q_ptr)->q.q_depth - 1))

/*
 * Advance the consumer index by _num, wrapping on q_depth.
 * Same power-of-two assumption as BNA_Q_PI_ADD.
 */
#define BNA_Q_CI_ADD(_q_ptr, _num) \
(_q_ptr)->q.consumer_index = \
(((_q_ptr)->q.consumer_index + (_num)) \
& ((_q_ptr)->q.q_depth - 1))

/* Free-slot count wrapper: applies BNA_QE_FREE_CNT to _q_ptr's queue. */
#define BNA_Q_FREE_COUNT(_q_ptr) \
(BNA_QE_FREE_CNT(&((_q_ptr)->q), (_q_ptr)->q.q_depth))

/* In-use-slot count wrapper: applies BNA_QE_IN_USE_CNT likewise. */
#define BNA_Q_IN_USE_COUNT(_q_ptr) \
(BNA_QE_IN_USE_CNT(&(_q_ptr)->q, (_q_ptr)->q.q_depth))

/* Packets longer than this count as "large" in packet statistics. */
#define BNA_LARGE_PKT_SIZE 1000
174 #define BNA_UPDATE_PKT_CNT(_pkt, _len) \
176 if ((_len) > BNA_LARGE_PKT_SIZE) { \
177 (_pkt)->large_pkt_cnt++; \
179 (_pkt)->small_pkt_cnt++; \
183 #define call_rxf_stop_cbfn(rxf) \
185 if ((rxf)->stop_cbfn) { \
186 void (*cbfn)(struct bna_rx *); \
187 struct bna_rx *cbarg; \
188 cbfn = (rxf)->stop_cbfn; \
189 cbarg = (rxf)->stop_cbarg; \
190 (rxf)->stop_cbfn = NULL; \
191 (rxf)->stop_cbarg = NULL; \
196 #define call_rxf_start_cbfn(rxf) \
198 if ((rxf)->start_cbfn) { \
199 void (*cbfn)(struct bna_rx *); \
200 struct bna_rx *cbarg; \
201 cbfn = (rxf)->start_cbfn; \
202 cbarg = (rxf)->start_cbarg; \
203 (rxf)->start_cbfn = NULL; \
204 (rxf)->start_cbarg = NULL; \
209 #define call_rxf_cam_fltr_cbfn(rxf) \
211 if ((rxf)->cam_fltr_cbfn) { \
212 void (*cbfn)(struct bnad *, struct bna_rx *); \
213 struct bnad *cbarg; \
214 cbfn = (rxf)->cam_fltr_cbfn; \
215 cbarg = (rxf)->cam_fltr_cbarg; \
216 (rxf)->cam_fltr_cbfn = NULL; \
217 (rxf)->cam_fltr_cbarg = NULL; \
218 cbfn(cbarg, rxf->rx); \
222 #define call_rxf_pause_cbfn(rxf) \
224 if ((rxf)->oper_state_cbfn) { \
225 void (*cbfn)(struct bnad *, struct bna_rx *); \
226 struct bnad *cbarg; \
227 cbfn = (rxf)->oper_state_cbfn; \
228 cbarg = (rxf)->oper_state_cbarg; \
229 (rxf)->oper_state_cbfn = NULL; \
230 (rxf)->oper_state_cbarg = NULL; \
231 cbfn(cbarg, rxf->rx); \
/* Resuming a paused rx-filter reuses the pause completion callback path. */
#define call_rxf_resume_cbfn(rxf) call_rxf_pause_cbfn(rxf)

/*
 * An rxmode bit "xxx" is being enabled when it is selected in bitmask
 * and set in the requested mode; being disabled when selected in
 * bitmask but clear in mode.
 *
 * Fix: every macro argument is now parenthesized so that expression
 * arguments (e.g. "a | b") cannot change evaluation via operator
 * precedence inside the expansion.
 */
#define is_xxx_enable(mode, bitmask, xxx) \
	(((bitmask) & (xxx)) && ((mode) & (xxx)))

#define is_xxx_disable(mode, bitmask, xxx) \
	(((bitmask) & (xxx)) && !((mode) & (xxx)))
241 #define xxx_enable(mode, bitmask, xxx) \
247 #define xxx_disable(mode, bitmask, xxx) \
253 #define xxx_inactive(mode, bitmask, xxx) \
/*
 * Promiscuous-mode helpers: test/apply the BNA_RXMODE_PROMISC bit via
 * the generic is_xxx_* / xxx_* macros.
 */
#define is_promisc_enable(mode, bitmask) \
is_xxx_enable(mode, bitmask, BNA_RXMODE_PROMISC)

#define is_promisc_disable(mode, bitmask) \
is_xxx_disable(mode, bitmask, BNA_RXMODE_PROMISC)

#define promisc_enable(mode, bitmask) \
xxx_enable(mode, bitmask, BNA_RXMODE_PROMISC)

#define promisc_disable(mode, bitmask) \
xxx_disable(mode, bitmask, BNA_RXMODE_PROMISC)

#define promisc_inactive(mode, bitmask) \
xxx_inactive(mode, bitmask, BNA_RXMODE_PROMISC)

/* Default-mode helpers: same pattern for the BNA_RXMODE_DEFAULT bit. */
#define is_default_enable(mode, bitmask) \
is_xxx_enable(mode, bitmask, BNA_RXMODE_DEFAULT)

#define is_default_disable(mode, bitmask) \
is_xxx_disable(mode, bitmask, BNA_RXMODE_DEFAULT)

#define default_enable(mode, bitmask) \
xxx_enable(mode, bitmask, BNA_RXMODE_DEFAULT)

#define default_disable(mode, bitmask) \
xxx_disable(mode, bitmask, BNA_RXMODE_DEFAULT)

#define default_inactive(mode, bitmask) \
xxx_inactive(mode, bitmask, BNA_RXMODE_DEFAULT)

/* All-multicast helpers: same pattern for the BNA_RXMODE_ALLMULTI bit. */
#define is_allmulti_enable(mode, bitmask) \
is_xxx_enable(mode, bitmask, BNA_RXMODE_ALLMULTI)

#define is_allmulti_disable(mode, bitmask) \
is_xxx_disable(mode, bitmask, BNA_RXMODE_ALLMULTI)

#define allmulti_enable(mode, bitmask) \
xxx_enable(mode, bitmask, BNA_RXMODE_ALLMULTI)

#define allmulti_disable(mode, bitmask) \
xxx_disable(mode, bitmask, BNA_RXMODE_ALLMULTI)

#define allmulti_inactive(mode, bitmask) \
xxx_inactive(mode, bitmask, BNA_RXMODE_ALLMULTI)
304 #define GET_RXQS(rxp, q0, q1) do { \
305 switch ((rxp)->type) { \
306 case BNA_RXP_SINGLE: \
307 (q0) = rxp->rxq.single.only; \
311 (q0) = rxp->rxq.slr.large; \
312 (q1) = rxp->rxq.slr.small; \
315 (q0) = rxp->rxq.hds.data; \
316 (q1) = rxp->rxq.hds.hdr; \
/* Bitmask of Tx resource ids owned by this bna instance. */
#define bna_tx_rid_mask(_bna) ((_bna)->tx_mod.rid_mask)

/* Bitmask of Rx resource ids owned by this bna instance. */
#define bna_rx_rid_mask(_bna) ((_bna)->rx_mod.rid_mask)
325 #define bna_tx_from_rid(_bna, _rid, _tx) \
327 struct bna_tx_mod *__tx_mod = &(_bna)->tx_mod; \
328 struct bna_tx *__tx; \
329 struct list_head *qe; \
331 list_for_each(qe, &__tx_mod->tx_active_q) { \
332 __tx = (struct bna_tx *)qe; \
333 if (__tx->rid == (_rid)) { \
340 #define bna_rx_from_rid(_bna, _rid, _rx) \
342 struct bna_rx_mod *__rx_mod = &(_bna)->rx_mod; \
343 struct bna_rx *__rx; \
344 struct list_head *qe; \
346 list_for_each(qe, &__rx_mod->rx_active_q) { \
347 __rx = (struct bna_rx *)qe; \
348 if (__rx->rid == (_rid)) { \
/* Shortcut to the attribute block held by this bna's ioceth. */
#define bna_attr(_bna) (&(_bna)->ioceth.attr)
442 void (*cbfn)(
void *,
struct bna_tx *));
477 void (*cbfn)(
void *,
struct bna_rx *));
484 void (*cbfn)(
struct bnad *,
struct bna_rx *));
487 void (*cbfn)(
struct bnad *,
struct bna_rx *));
490 void (*cbfn)(
struct bnad *,
struct bna_rx *));
493 void (*cbfn)(
struct bnad *,
struct bna_rx *));
496 void (*cbfn)(
struct bnad *,
struct bna_rx *));
500 void (*cbfn)(
struct bnad *,
struct bna_rx *));
516 void (*cbfn)(
void *));
519 void (*cbfn)(
struct bnad *));
521 void (*cbfn)(
struct bnad *));