14 #ifndef __LIBCXGBI_H__
15 #define __LIBCXGBI_H__
17 #include <linux/kernel.h>
18 #include <linux/errno.h>
19 #include <linux/types.h>
21 #include <linux/list.h>
22 #include <linux/netdevice.h>
23 #include <linux/if_vlan.h>
/*
 * Conditional debug logging: emit via pr_info() only when the
 * corresponding bit is set in the module's dbg_level mask.
 * Wrapped in do { } while (0) so the macro expands to a single
 * statement and is safe in an unbraced if/else.
 */
#define log_debug(level, fmt, ...)	\
	do {	\
		if (dbg_level & (level)) \
			pr_info(fmt, ##__VA_ARGS__); \
	} while (0)
/* Maximum number of iSCSI connections supported per adapter. */
#define CXGBI_MAX_CONN		16384

/* Worst-case iSCSI PDU header on transmit: the basic header segment
 * plus the maximum additional header segment (AHS) space.
 */
#define SKB_TX_ISCSI_PDU_HEADER_MAX	\
	(sizeof(struct iscsi_hdr) + ISCSI_MAX_AHS_SIZE)

/* Fixed per-PDU non-payload overhead, in bytes. */
#define ISCSI_PDU_NONPAYLOAD_LEN	312

/* Round a PDU size down, in place, to a multiple of 512 bytes. */
#define cxgbi_align_pdu_size(n) do { n = (n) & (~511); } while (0)

/* ULP submode value selecting iSCSI offload processing. */
#define ULP2_MODE_ISCSI		2

/* Largest packet the ULP2 engine handles, and the PDU payload that
 * remains once the non-payload overhead is subtracted from it.
 */
#define ULP2_MAX_PKT_SIZE	16224
#define ULP2_MAX_PDU_PAYLOAD	\
	(ULP2_MAX_PKT_SIZE - ISCSI_PDU_NONPAYLOAD_LEN)
/*
 * Extra bytes the ULP2 engine appends to a PDU for each digest
 * submode; indexed by the low two submode bits (presumably
 * none / header CRC / data CRC / both — each CRC32C digest is
 * 4 bytes).
 */
static const unsigned int ulp2_extra_len[] = { 0, 4, 4, 8 };

/*
 * cxgbi_ulp_extra_len - digest overhead, in bytes, for a submode.
 * @submode: ULP submode; only the low two bits select a table entry.
 */
static inline unsigned int cxgbi_ulp_extra_len(int submode)
{
	return ulp2_extra_len[submode & 3];
}
/* Bit positions of the per-PDU status flags carried in a
 * CPL_RX_DDP_STATUS message: DDP placement, pad error, header-CRC
 * error and data-CRC error (named after the flag each bit carries).
 */
#define CPL_RX_DDP_STATUS_DDP_SHIFT	16
#define CPL_RX_DDP_STATUS_PAD_SHIFT	19
#define CPL_RX_DDP_STATUS_HCRC_SHIFT	20
#define CPL_RX_DDP_STATUS_DCRC_SHIFT	21

/* Each page pod (ppod) covers up to 4 pages (hence the shift of 2). */
#define PPOD_PAGES_MAX			4

/* Number of DDP page-size indices, and the minimum transfer length
 * for which setting up DDP is considered worthwhile.
 */
#define DDP_PGIDX_MAX			4
#define DDP_THRESHOLD			2048

#define PPOD_PAGES_SHIFT		2

/* A page pod is sizeof(struct cxgbi_pagepod) bytes; the shift (6,
 * i.e. 64 bytes) must agree with that size.
 */
#define PPOD_SIZE			sizeof(struct cxgbi_pagepod)
#define PPOD_SIZE_SHIFT			6

/* Upper bounds on how many ppods can be written per request, for
 * each write mechanism (DSGL, immediate data, PCIe memory window).
 */
#define ULPMEM_DSGL_MAX_NPPODS		16
#define ULPMEM_IDATA_MAX_NPPODS		4
#define PCIE_MEMWIN_MAX_NPPODS		16

/* DDP tag field layout: color in the low bits, pod index above it,
 * and a validity bit at bit 24.
 */
#define PPOD_COLOR_SHIFT		0
#define PPOD_COLOR(x)			((x) << PPOD_COLOR_SHIFT)

#define PPOD_IDX_SHIFT			6
#define PPOD_IDX_MAX_SIZE		24

#define PPOD_TID_SHIFT			0
#define PPOD_TID(x)			((x) << PPOD_TID_SHIFT)

#define PPOD_TAG_SHIFT			6
#define PPOD_TAG(x)			((x) << PPOD_TAG_SHIFT)

#define PPOD_VALID_SHIFT		24
#define PPOD_VALID(x)			((x) << PPOD_VALID_SHIFT)
#define PPOD_VALID_FLAG			PPOD_VALID(1U)
/* Per-skb driver private data lives in skb->cb[]; overlay it with
 * struct cxgbi_skb_cb and access the fields via the macros below.
 */
#define CXGBI_SKB_CB(skb)	((struct cxgbi_skb_cb *)&((skb)->cb[0]))
#define cxgbi_skcb_flags(skb)		(CXGBI_SKB_CB(skb)->flags)
#define cxgbi_skcb_ulp_mode(skb)	(CXGBI_SKB_CB(skb)->ulp_mode)
#define cxgbi_skcb_tcp_seq(skb)		(CXGBI_SKB_CB(skb)->seq)
#define cxgbi_skcb_rx_ddigest(skb)	(CXGBI_SKB_CB(skb)->rx.ddigest)
#define cxgbi_skcb_rx_pdulen(skb)	(CXGBI_SKB_CB(skb)->rx.pdulen)
#define cxgbi_skcb_tx_wr_next(skb)	(CXGBI_SKB_CB(skb)->tx.wr_next)
292 static inline void cxgbi_skcb_set_flag(
struct sk_buff *
skb,
298 static inline void cxgbi_skcb_clear_flag(
struct sk_buff *
skb,
304 static inline int cxgbi_skcb_test_flag(
struct sk_buff *skb,
310 static inline void cxgbi_sock_set_flag(
struct cxgbi_sock *csk,
315 "csk 0x%p,%u,0x%lx, bit %d.\n",
319 static inline void cxgbi_sock_clear_flag(
struct cxgbi_sock *csk,
324 "csk 0x%p,%u,0x%lx, bit %d.\n",
328 static inline int cxgbi_sock_flag(
struct cxgbi_sock *csk,
336 static inline void cxgbi_sock_set_state(
struct cxgbi_sock *csk,
int state)
339 "csk 0x%p,%u,0x%lx, state -> %u.\n",
344 static inline void cxgbi_sock_free(
struct kref *
kref)
351 "free csk 0x%p, state %u, flags 0x%lx\n",
357 static inline void __cxgbi_sock_put(
const char *
fn,
struct cxgbi_sock *csk)
360 "%s, put csk 0x%p, ref %u-1.\n",
362 kref_put(&csk->
refcnt, cxgbi_sock_free);
364 #define cxgbi_sock_put(csk) __cxgbi_sock_put(__func__, csk)
366 static inline void __cxgbi_sock_get(
const char *fn,
struct cxgbi_sock *csk)
369 "%s, get csk 0x%p, ref %u+1.\n",
373 #define cxgbi_sock_get(csk) __cxgbi_sock_get(__func__, csk)
375 static inline int cxgbi_sock_is_closing(
struct cxgbi_sock *csk)
380 static inline int cxgbi_sock_is_established(
struct cxgbi_sock *csk)
385 static inline void cxgbi_sock_purge_write_queue(
struct cxgbi_sock *csk)
/*
 * cxgbi_sock_compute_wscale - TCP window-scale factor for a window.
 * @win: desired receive window size in bytes.
 *
 * Returns the smallest scale such that 65535 << scale covers @win,
 * capped at 14 (the maximum window-scale shift, per RFC 7323).
 */
static inline unsigned int cxgbi_sock_compute_wscale(unsigned int win)
{
	unsigned int wscale = 0;

	while (wscale < 14 && (65535 << wscale) < win)
		wscale++;
	return wscale;
}
404 struct sk_buff *skb = alloc_skb(wrlen + dlen, gfp);
407 __skb_put(skb, wrlen);
410 pr_info(
"alloc cpl wr skb %u+%u, OOM.\n", wrlen, dlen);
/* Capacity of the per-socket pending work-request list: sized from
 * the maximum number of skb fragments, with two extra slots.
 */
#define SKB_WR_LIST_SIZE	(MAX_SKB_FRAGS + 2)
423 static inline void cxgbi_sock_reset_wr_list(
struct cxgbi_sock *csk)
428 static inline void cxgbi_sock_enqueue_wr(
struct cxgbi_sock *csk,
447 static inline int cxgbi_sock_count_pending_wrs(
const struct cxgbi_sock *csk)
/* cxgbi_device flag bits (bit mask in the device's flags word). */
#define CXGBI_FLAG_DEV_T3		0x1	/* T3 adapter */
#define CXGBI_FLAG_DEV_T4		0x2	/* T4 adapter */
#define CXGBI_FLAG_ADAPTER_RESET	0x4	/* adapter reset flagged */
#define CXGBI_FLAG_IPV4_SET		0x10	/* ipv4 address configured */
539 unsigned int,
unsigned int,
542 unsigned int,
unsigned int,
unsigned int);
/* Driver private data hung off a cxgbi_device. */
#define cxgbi_cdev_priv(cdev)	((cdev)->dd_data)

/* Upper bound on 512-byte chunks needed for one maximum-size PDU
 * payload (ceiling division by 512).
 */
#define MAX_PDU_FRAGS	((ULP2_MAX_PDU_PAYLOAD + 512 - 1) / 512)

/* Per-task cxgbi data follows the iscsi_tcp_task area in dd_data. */
#define iscsi_task_cxgbi_data(task) \
	((task)->dd_data + sizeof(struct iscsi_tcp_task))
604 if (sw_tag && (sw_tag & ~mask)) {
605 u32 v1 = sw_tag & ((1 << shift) - 1);
606 u32 v2 = (sw_tag >> (shift - 1)) << shift;
608 return v2 |
v1 | 1 << shift;
611 return sw_tag | 1 << shift;
619 if (sw_tag && (sw_tag & ~mask)) {
634 if (cxgbi_is_ddp_tag(tformat, tag))
646 if (cxgbi_is_ddp_tag(tformat, tag)) {
648 v2 = (tag >> (shift + 1)) << tformat->
rsvd_shift;
650 u32 mask = (1 << shift) - 1;
651 tag &= ~(1 << shift);
653 v2 = (tag >> 1) & ~mask;
658 static inline void *cxgbi_alloc_big_mem(
unsigned int size,
669 static inline void cxgbi_free_big_mem(
void *
addr)
671 if (is_vmalloc_addr(addr))
682 pr_info(
"set iscsi ipv4 NOT supported, using %s ipv4.\n",
701 unsigned int max_conn);
737 unsigned int,
unsigned int);