14 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
19 #include <linux/pci.h>
20 #include <scsi/scsi.h>
23 #include <linux/if_vlan.h>
28 #include <linux/module.h>
/* Debug verbosity level for this library's log macros; presumably exposed
 * as a module parameter — TODO confirm (definition site not in this view). */
31 static unsigned int dbg_level;
/* Driver identity strings (name/description/version/release date);
 * likely consumed by MODULE_* metadata macros and banner logging —
 * usage sites are outside this view. */
35 #define DRV_MODULE_NAME "libcxgbi"
36 #define DRV_MODULE_DESC "Chelsio iSCSI driver library"
37 #define DRV_MODULE_VERSION "0.9.0"
38 #define DRV_MODULE_RELDATE "Jun. 2010"
57 unsigned int max_conn)
61 pmap->
port_csk = cxgbi_alloc_big_mem(max_conn *
65 pr_warn(
"cdev 0x%p, portmap OOM %u.\n", cdev, max_conn);
87 "csk 0x%p, cdev 0x%p, offload down.\n",
89 spin_lock_bh(&csk->
lock);
92 spin_unlock_bh(&csk->
lock);
102 "cdev 0x%p, p# %u.\n", cdev, cdev->
nports);
111 if (cdev->
pmap.max_connect)
112 cxgbi_free_big_mem(cdev->
pmap.port_csk);
121 cdev = kzalloc(
sizeof(*cdev) + extra + nports *
126 pr_warn(
"nport %d, OOM.\n", nports);
142 "cdev 0x%p, p# %u.\n", cdev, nports);
150 "cdev 0x%p, p# %u,%s.\n",
155 cxgbi_device_destroy(cdev);
165 if ((cdev->
flags & flag) == flag) {
167 "cdev 0x%p, p# %u,%s.\n",
169 cdev->
ports[0]->name :
"");
171 cxgbi_device_destroy(cdev);
184 if (cdev->
lldev == lldev) {
191 "lldev 0x%p, NO match found.\n", lldev);
207 "vlan dev %s -> %s.\n", vdev->
name, ndev->
name);
212 for (i = 0; i < cdev->
nports; i++) {
213 if (ndev == cdev->
ports[i]) {
214 cdev->
hbas[
i]->vdev = vdev;
224 "ndev 0x%p, %s, NO match found.\n", ndev, ndev->
name);
234 "cdev 0x%p, p#%u.\n", cdev, cdev->
nports);
236 for (i = 0; i < cdev->
nports; i++) {
237 chba = cdev->
hbas[
i];
258 for (i = 0; i < cdev->
nports; i++) {
261 pr_info(
"0x%p, p%d, %s, host alloc failed.\n",
262 cdev, i, cdev->
ports[i]->name);
279 "cdev 0x%p, p#%d %s: chba 0x%p.\n",
280 cdev, i, cdev->
ports[i]->name, chba);
285 pr_info(
"cdev 0x%p, p#%d %s, host add failed.\n",
286 cdev, i, cdev->
ports[i]->name);
292 cdev->
hbas[
i] = chba;
317 static int sock_get_port(
struct cxgbi_sock *csk)
325 pr_err(
"cdev 0x%p, p#%u %s, NO port map.\n",
330 if (csk->
saddr.sin_port) {
331 pr_err(
"source port NON-ZERO %u.\n",
336 spin_lock_bh(&pmap->
lock);
338 spin_unlock_bh(&pmap->
lock);
339 pr_info(
"cdev 0x%p, p#%u %s, ALL ports used.\n",
344 start = idx = pmap->
next;
350 csk->
saddr.sin_port =
354 spin_unlock_bh(&pmap->
lock);
357 "cdev 0x%p, p#%u %s, p %u, %u.\n",
363 }
while (idx != start);
364 spin_unlock_bh(&pmap->
lock);
367 pr_warn(
"cdev 0x%p, p#%u %s, next %u?\n",
373 static void sock_put_port(
struct cxgbi_sock *csk)
378 if (csk->
saddr.sin_port) {
381 csk->
saddr.sin_port = 0;
383 pr_err(
"cdev 0x%p, p#%u %s, port %u OOR.\n",
390 spin_lock_bh(&pmap->
lock);
393 spin_unlock_bh(&pmap->
lock);
396 "cdev 0x%p, p#%u %s, release %u.\n",
429 pr_info(
"alloc csk %zu failed.\n",
sizeof(*csk));
434 pr_info(
"csk 0x%p, alloc cpls failed.\n", csk);
454 static struct rtable *find_route_ipv4(
struct flowi4 *fl4,
460 rt = ip_route_output_ports(&
init_net, fl4,
NULL, daddr, saddr,
478 unsigned int mtu = 0;
483 pr_info(
"address family 0x%x NOT supported.\n",
489 rt = find_route_ipv4(&fl4, 0, daddr->
sin_addr.s_addr, 0, daddr->
sin_port, 0);
491 pr_info(
"no route to ipv4 0x%x, port %u.\n",
497 n = dst_neigh_lookup(dst, &daddr->
sin_addr.s_addr);
505 pr_info(
"multi-cast route %pI4, port %u, dev %s.\n",
515 pr_info(
"rt dev %s, loopback -> %s, mtu %u.\n",
516 n->
dev->name, ndev->
name, mtu);
519 cdev = cxgbi_device_find_by_netdev(ndev, &port);
521 pr_info(
"dst %pI4, %s, NOT cxgbi device.\n",
527 "route to %pI4 :%u, ndev p#%d,%s, cdev 0x%p.\n",
529 port, ndev->
name, cdev);
531 csk = cxgbi_sock_create(cdev);
563 dst_confirm(csk->
dst);
569 static void cxgbi_inform_iscsi_conn_closing(
struct cxgbi_sock *csk)
572 "csk 0x%p, state %u, flags 0x%lx, conn 0x%p.\n",
591 if (csk->
saddr.sin_port)
595 csk->
cdev->csk_release_offload_resources(csk);
597 cxgbi_inform_iscsi_conn_closing(csk);
602 static void need_active_close(
struct cxgbi_sock *csk)
609 spin_lock_bh(&csk->
lock);
610 dst_confirm(csk->
dst);
626 csk->
cdev->csk_send_abort_req(csk);
628 csk->
cdev->csk_send_close_req(csk);
631 spin_unlock_bh(&csk->
lock);
636 pr_info(
"csk 0x%p,%u,%lx, %pI4:%u-%pI4:%u, err %d.\n",
638 &csk->
saddr.sin_addr.s_addr, csk->
saddr.sin_port,
639 &csk->
daddr.sin_addr.s_addr, csk->
daddr.sin_port,
655 spin_lock_bh(&csk->
lock);
658 spin_unlock_bh(&csk->
lock);
667 spin_lock_bh(&csk->
lock);
675 pr_err(
"csk 0x%p,%u,0x%lx,%u,ABT_RPL_RSS.\n",
680 spin_unlock_bh(&csk->
lock);
690 spin_lock_bh(&csk->
lock);
695 switch (csk->
state) {
708 pr_err(
"csk 0x%p,%u,0x%lx,%u, bad state.\n",
711 cxgbi_inform_iscsi_conn_closing(csk);
713 spin_unlock_bh(&csk->
lock);
723 spin_lock_bh(&csk->
lock);
729 switch (csk->
state) {
740 pr_err(
"csk 0x%p,%u,0x%lx,%u, bad state.\n",
744 spin_unlock_bh(&csk->
lock);
750 unsigned int snd_una,
int seq_chk)
753 "csk 0x%p,%u,0x%lx,%u, cr %u,%u+%u, snd_una %u,%d.\n",
757 spin_lock_bh(&csk->
lock);
764 struct sk_buff *
p = cxgbi_sock_peek_wr(csk);
767 pr_err(
"csk 0x%p,%u,0x%lx,%u, cr %u,%u+%u, empty.\n",
774 pr_warn(
"csk 0x%p,%u,0x%lx,%u, cr %u,%u+%u, < %u.\n",
781 cxgbi_sock_dequeue_wr(csk);
791 pr_warn(
"csk 0x%p,%u,0x%lx,%u, snd_una %u/%u.",
799 dst_confirm(csk->
dst);
804 if (csk->
cdev->csk_push_tx_frames(csk, 0))
809 spin_unlock_bh(&csk->
lock);
813 static unsigned int cxgbi_sock_find_best_mtu(
struct cxgbi_sock *csk,
818 while (i < csk->cdev->
nmtus - 1 && csk->
cdev->mtus[i + 1] <= mtu)
829 csk->
advmss = dst_metric_advmss(dst);
831 if (csk->
advmss > pmtu - 40)
835 idx = cxgbi_sock_find_best_mtu(csk, csk->
advmss + 40);
852 while ((skb = cxgbi_sock_dequeue_wr(csk)) !=
NULL)
859 int pending = cxgbi_sock_count_pending_wrs(csk);
862 pr_err(
"csk 0x%p, tid %u, credit %u + %u != %u.\n",
873 spin_lock_bh(&csk->
lock);
877 "csk 0x%p,%u,0x%lx,%u, EAGAIN.\n",
885 "csk 0x%p,%u,0x%lx,%u, EPIPE %d.\n",
893 "csk 0x%p,%u,0x%lx,%u, FULL %u-%u >= %u.\n",
901 int frags = skb_shinfo(skb)->nr_frags +
905 pr_err(
"csk 0x%p, skb head %u < %u.\n",
912 pr_err(
"csk 0x%p, frags %d, %u,%u >%u.\n",
913 csk, skb_shinfo(skb)->nr_frags, skb->
len,
931 spin_unlock_bh(&csk->
lock);
935 if (copied == 0 && err == -
EPIPE)
/* DDP (direct data placement) page-size tables, indexed 0..DDP_PGIDX_MAX-1.
 * ddp_page_shift entries {12, 13, 14, 16} correspond to 4KB, 8KB, 16KB and
 * 64KB pages (1UL << shift); ddp_page_order holds the matching orders
 * relative to a 4KB base page. ddp_adjust_page_table() rewrites
 * ddp_page_order when PAGE_SIZE is larger than the base (see below). */
953 static unsigned char ddp_page_order[
DDP_PGIDX_MAX] = {0, 1, 2, 4};
954 static unsigned char ddp_page_shift[
DDP_PGIDX_MAX] = {12, 13, 14, 16};
/* Bit widths of the software tag's index and age fields; zero until
 * initialized — the assigning code is not visible in this view. */
957 static unsigned char sw_tag_idx_bits;
958 static unsigned char sw_tag_age_bits;
963 static int ddp_adjust_page_table(
void)
966 unsigned int base_order,
order;
969 pr_info(
"PAGE_SIZE 0x%lx too small, min 0x%lx\n",
979 ddp_page_order[
i] = order - base_order +
i;
985 static int ddp_find_page_index(
unsigned long pgsz)
990 if (pgsz == (1
UL << ddp_page_shift[i]))
993 pr_info(
"ddp page size %lu not supported.\n", pgsz);
997 static void ddp_setup_host_page_size(
void)
999 if (page_idx == DDP_PGIDX_MAX) {
1000 page_idx = ddp_find_page_index(
PAGE_SIZE);
1002 if (page_idx == DDP_PGIDX_MAX) {
1004 if (ddp_adjust_page_table() < 0) {
1008 page_idx = ddp_find_page_index(
PAGE_SIZE);
1019 pgsz_factor[i] = ddp_page_order[i];
1033 memcpy(ppod, hdr,
sizeof(*hdr));
1043 memset(ppod, 0,
sizeof(*ppod));
1047 static inline int ddp_find_unused_entries(
struct cxgbi_ddp_info *ddp,
1048 unsigned int start,
unsigned int max,
1052 unsigned int i,
j,
k;
1055 if ((max - start) < count) {
1057 "NOT enough entries %u+%u < %u.\n", start, count, max);
1063 for (i = start; i <
max;) {
1064 for (j = 0, k = i; j <
count; j++, k++) {
1069 for (j = 0, k = i; j <
count; j++, k++)
1078 "NO suitable entries %u available.\n", count);
1082 static inline void ddp_unmark_entries(
struct cxgbi_ddp_info *ddp,
1083 int start,
int count)
1091 static inline void ddp_gl_unmap(
struct pci_dev *pdev,
1096 for (i = 0; i < gl->
nelem; i++)
1101 static inline int ddp_gl_map(
struct pci_dev *pdev,
1106 for (i = 0; i < gl->
nelem; i++) {
1112 "page %d 0x%p, 0x%p dma mapping err.\n",
1113 i, gl->
pages[i], pdev);
1123 ddp_gl_unmap(pdev, gl);
1132 ddp_gl_unmap(pdev, gl);
1144 struct page *sgpage = sg_page(sg);
1145 unsigned int sglen = sg->
length;
1146 unsigned int sgoffset = sg->
offset;
1147 unsigned int npages = (xferlen + sgoffset +
PAGE_SIZE - 1) >>
1153 "xfer %u < threshold %u, no ddp.\n",
1160 sizeof(
struct page *)), gfp);
1163 "xfer %u, %u pages, OOM.\n", xferlen, npages);
1168 "xfer %u, sgl %u, gl max %u.\n", xferlen, sgcnt, npages);
1174 gl->
pages[0] = sgpage;
1176 for (i = 1, sg =
sg_next(sgl), j = 0; i < sgcnt;
1180 if (sgpage == page && sg->offset == sgoffset + sglen)
1181 sglen += sg->length;
1187 if ((j && sgoffset) || ((i != sgcnt - 1) &&
1190 "page %d/%u, %u + %u.\n",
1191 i, sgcnt, sgoffset, sglen);
1196 if (j == gl->
nelem || sg->offset) {
1198 "page %d/%u, offset %u.\n",
1199 j, gl->
nelem, sg->offset);
1204 sgoffset = sg->offset;
1210 if (ddp_gl_map(pdev, gl) < 0)
1231 if (!gl || !gl->
nelem) {
1232 pr_warn(
"tag 0x%x, idx %u, gl 0x%p, %u.\n",
1233 tag, idx, gl, gl ? gl->
nelem : 0);
1238 "tag 0x%x, release idx %u, npods %u.\n",
1241 ddp_unmark_entries(ddp, idx, npods);
1242 ddp_release_gl(gl, ddp->
pdev);
1244 pr_warn(
"tag 0x%x, idx %u > max %u.\n", tag, idx, ddp->
nppods);
1247 static int ddp_tag_reserve(
struct cxgbi_sock *csk,
unsigned int tid,
1262 idx = ddp_find_unused_entries(ddp, 0, ddp->
nppods,
1265 idx = ddp_find_unused_entries(ddp, ddp->
idx_last + 1,
1268 if (idx < 0 && ddp->idx_last >= npods) {
1269 idx = ddp_find_unused_entries(ddp, 0,
1276 "xferlen %u, gl %u, npods %u NO DDP.\n",
1281 tag = cxgbi_ddp_tag_base(tformat, sw_tag);
1292 goto unmark_entries;
1296 "xfer %u, gl %u,%u, tid 0x%x, tag 0x%x->0x%x(%u,%u).\n",
1303 ddp_unmark_entries(ddp, idx, npods);
1308 unsigned int sw_tag,
unsigned int xferlen,
1316 if (page_idx >= DDP_PGIDX_MAX || !cdev->
ddp ||
1319 "pgidx %u, xfer %u, NO ddp.\n", page_idx, xferlen);
1323 if (!cxgbi_sw_tag_usable(tformat, sw_tag)) {
1325 "sw_tag 0x%x NOT usable.\n", sw_tag);
1329 gl = ddp_make_gl(xferlen, sgl, sgcnt, cdev->
pdev, gfp);
1333 err = ddp_tag_reserve(csk, csk->
tid, sw_tag, tagp, gl, gfp);
1335 ddp_release_gl(gl, cdev->
pdev);
1340 static void ddp_destroy(
struct kref *
kref)
1348 pr_info(
"kref 0, destroy ddp 0x%p, cdev 0x%p.\n", ddp, cdev);
1350 while (i < ddp->nppods) {
1356 pr_info(
"cdev 0x%p, ddp %d + %d.\n", cdev, i, npods);
1362 cxgbi_free_big_mem(ddp);
1370 "cdev 0x%p, release ddp 0x%p.\n", cdev, ddp);
1373 return kref_put(&ddp->
refcnt, ddp_destroy);
1383 unsigned int ppmax,
bits;
1386 bits = __ilog2_u32(ppmax) + 1;
1389 ppmax = (1 << (bits - 1)) - 1;
1396 pr_warn(
"cdev 0x%p, ddp ppmax %u OOM.\n", cdev, ppmax);
1417 cdev->
tag_format.sw_bits = sw_tag_idx_bits + sw_tag_age_bits;
1422 pr_info(
"%s tag format, sw %u, rsvd %u,%u, mask 0x%x.\n",
1433 "%s max payload size: %u/%u, %u/%u.\n",
/* Zero-filled scratch bytes (static storage => zero-initialized);
 * presumably used as the source for iSCSI PDU 4-byte pad — TODO confirm,
 * the referencing code is outside this view. */
1444 static unsigned char padding[4];
1456 "cdev 0x%p, release tag 0x%x.\n", chba->
cdev, tag);
1459 cxgbi_is_ddp_tag(tformat, tag))
1460 ddp_tag_release(chba, tag);
1480 scsi_in(sc)->
table.sgl,
1481 scsi_in(sc)->
table.nents,
1485 "csk 0x%p, R task 0x%p, %u,%u, no ddp.\n",
1486 cconn->
cep->csk, task, scsi_in(sc)->
length,
1487 scsi_in(sc)->
table.nents);
1491 tag = cxgbi_set_non_ddp_tag(tformat, sw_tag);
1496 "cdev 0x%p, task 0x%p, 0x%x(0x%x,0x%x)->0x%x/0x%x.\n",
1497 chba->
cdev, task, sw_tag, task->
itt, sess->
age, tag, *hdr_itt);
1509 sw_bits = cxgbi_tag_nonrsvd_bits(&cdev->
tag_format, tag);
1516 "cdev 0x%p, tag 0x%x/0x%x, -> 0x%x(0x%x,0x%x).\n",
1517 cdev, tag, itt, sw_bits, idx ? *idx : 0xFFFFF,
1528 "csk 0x%p, cid %d.\n", csk, conn->
id);
1537 static inline int read_pdu_skb(
struct iscsi_conn *conn,
1548 pr_info(
"skb 0x%p, off %u, %d, TCP_ERR.\n",
1549 skb, offset, offloaded);
1553 "skb 0x%p, off %u, %d, TCP_SUSPEND, rc %d.\n",
1554 skb, offset, offloaded, bytes_read);
1558 pr_info(
"skb 0x%p, off %u, %d, TCP_SKB_DONE.\n",
1559 skb, offset, offloaded);
1568 "skb 0x%p, off %u, %d, TCP_SEG_DONE, rc %d.\n",
1569 skb, offset, offloaded, bytes_read);
1572 pr_info(
"skb 0x%p, off %u, %d, invalid status %d.\n",
1573 skb, offset, offloaded, status);
1583 "conn 0x%p, skb 0x%p, len %u, flag 0x%lx.\n",
1587 pr_info(
"conn 0x%p, skb 0x%p, not hdr.\n", conn, skb);
1594 pr_info(
"conn 0x%p, skb 0x%p, hcrc.\n", conn, skb);
1599 return read_pdu_skb(conn, skb, 0, 0);
1603 struct sk_buff *skb,
unsigned int offset)
1610 "conn 0x%p, skb 0x%p, len %u, flag 0x%lx.\n",
1615 pr_info(
"conn 0x%p, skb 0x%p, dcrc 0x%lx.\n",
1633 "skb 0x%p, op 0x%x, itt 0x%x, %u %s ddp'ed.\n",
1634 skb, opcode,
ntohl(tcp_conn->
in.hdr->itt),
1635 tcp_conn->
in.datalen, offloaded ?
"is" :
"not");
1637 return read_pdu_skb(conn, skb, offset, offloaded);
1640 static void csk_return_rx_credits(
struct cxgbi_sock *csk,
int copied)
1647 "csk 0x%p,%u,0x%lu,%u, seq %u, wup %u, thre %u, %u.\n",
1661 must_send = credits + 16384 >= cdev->
rcv_win;
1671 unsigned int read = 0;
1675 "csk 0x%p, conn 0x%p.\n", csk, conn);
1679 "csk 0x%p, conn 0x%p, id %d, suspend_rx %lu!\n",
1680 csk, conn, conn ? conn->
id : 0xFF,
1691 "skb 0x%p, NOT ready 0x%lx.\n",
1699 "csk 0x%p, skb 0x%p,%u,f 0x%lx, pdu len %u.\n",
1704 err = skb_read_pdu_bhs(conn, skb);
1706 pr_err(
"coalesced bhs, csk 0x%p, skb 0x%p,%u, "
1707 "f 0x%lx, plen %u.\n",
1713 err = skb_read_pdu_data(conn, skb, skb,
1716 pr_err(
"coalesced data, csk 0x%p, skb 0x%p,%u, "
1717 "f 0x%lx, plen %u.\n",
1722 err = skb_read_pdu_bhs(conn, skb);
1724 pr_err(
"bhs, csk 0x%p, skb 0x%p,%u, "
1725 "f 0x%lx, plen %u.\n",
1737 pr_err(
"csk 0x%p, skb 0x%p,%u, f 0x%lx,"
1738 " plen %u, NO data.\n",
1747 err = skb_read_pdu_data(conn, skb, dskb, 0);
1749 pr_err(
"data, csk 0x%p, skb 0x%p,%u, "
1750 "f 0x%lx, plen %u, dskb 0x%p,"
1758 err = skb_read_pdu_data(conn, skb, skb, 0);
1770 csk_return_rx_credits(csk, read);
1775 pr_info(
"csk 0x%p, 0x%p, rx failed %d, read %u.\n",
1776 csk, conn, err, read);
1782 static int sgl_seek_offset(
struct scatterlist *sgl,
unsigned int sgcnt,
1783 unsigned int offset,
unsigned int *off,
1800 static int sgl_read_to_frags(
struct scatterlist *sg,
unsigned int sgoffset,
1805 unsigned int sglen = sg->
length - sgoffset;
1806 struct page *page = sg_page(sg);
1816 pr_warn(
"sg %d NULL, len %u/%u.\n",
1825 copy =
min(datalen, sglen);
1826 if (i && page == frags[i - 1].page &&
1829 frags[i - 1].
size += copy;
1831 if (i >= frag_max) {
1832 pr_warn(
"too many pages %u, dlen %u.\n",
1839 frags[
i].
size = copy;
1869 headroom +=
min_t(
unsigned int,
1877 ndev->
stats.tx_dropped++;
1887 task_reserve_itt(task, &task->
hdr->itt);
1890 "task 0x%p, op 0x%x, skb 0x%p,%u+%u/%u, itt 0x%x.\n",
1898 static inline void tx_skb_setmode(
struct sk_buff *skb,
int hcrc,
int dcrc)
1918 unsigned int datalen =
count;
1919 int i, padlen = iscsi_padding(count);
1923 "task 0x%p,0x%p, skb 0x%p, 0x%x,0x%x,0x%x, %u+%u.\n",
1939 err = sgl_seek_offset(
1943 pr_warn(
"tpdu, sgl %u, bad offset %u/%u.\n",
1950 pr_warn(
"tpdu, sgl %u, bad offset %u + %u.\n",
1962 for (i = 0; i < tdata->
nr_frags; i++, frag++) {
1976 for (i = 0; i < tdata->
nr_frags; i++) {
1977 __skb_fill_page_desc(skb, i,
1978 tdata->
frags[i].page,
1979 tdata->
frags[i].offset,
1980 tdata->
frags[i].size);
1981 skb_frag_ref(skb, i);
1983 skb_shinfo(skb)->nr_frags = tdata->
nr_frags;
2001 i = skb_shinfo(skb)->nr_frags;
2002 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
2026 "task 0x%p, skb NULL.\n", task);
2032 err = cxgbi_sock_send_pdus(cconn->
cep->csk, skb);
2037 "task 0x%p,0x%p, skb 0x%p, len %u/%u, rv %d.\n",
2040 if (task->
conn->hdrdgst_en)
2043 if (datalen && task->
conn->datadgst_en)
2046 task->
conn->txdata_octets += pdulen;
2052 "task 0x%p, skb 0x%p, len %u/%u, %d EAGAIN.\n",
2061 "itt 0x%x, skb 0x%p, len %u/%u, xmit err %d.\n",
2074 "task 0x%p, skb 0x%p, itt 0x%x.\n",
2080 memset(tdata, 0,
sizeof(*tdata));
2082 task_release_itt(task, task->
hdr_itt);
2104 strcpy(stats->custom[0].desc,
"eh_abort_cnt");
2109 static int cxgbi_conn_max_xmit_dlength(
struct iscsi_conn *conn)
2116 unsigned int max =
max(max_def, headroom);
2118 max =
min(cconn->
chba->cdev->tx_max_size, max);
2128 static int cxgbi_conn_max_recv_dlength(
struct iscsi_conn *conn)
2132 unsigned int max = cconn->
chba->cdev->rx_max_size;
2138 pr_err(
"MaxRecvDataSegmentLength %u > %u.\n",
2160 "cls_conn 0x%p, param %d, buf(%d) %s.\n",
2161 cls_conn, param, buflen, buf);
2167 err = csk->
cdev->csk_ddp_setup_digest(csk, csk->
tid,
2174 err = csk->
cdev->csk_ddp_setup_digest(csk, csk->
tid,
2183 err = cxgbi_conn_max_recv_dlength(conn);
2188 err = cxgbi_conn_max_xmit_dlength(conn);
2205 "cls_conn 0x%p, param %d.\n", ep, param);
2218 &csk->
daddr, param, buf);
2241 cconn->
iconn = conn;
2244 "cid %u(0x%x), cls 0x%p,0x%p, conn 0x%p,0x%p,0x%p.\n",
2245 cid, cid, cls_session, cls_conn, conn, tcp_conn, cconn);
2270 err = csk->
cdev->csk_ddp_setup_pgidx(csk, csk->
tid, page_idx, 0);
2288 cxgbi_conn_max_xmit_dlength(conn);
2289 cxgbi_conn_max_recv_dlength(conn);
2292 "cls 0x%p,0x%p, ep 0x%p, cconn 0x%p, csk 0x%p.\n",
2293 cls_session, cls_conn, ep, cconn, csk);
2312 pr_err(
"missing endpoint.\n");
2318 shost = chba->
shost;
2330 session = cls_session->
dd_data;
2332 goto remove_session;
2335 "ep 0x%p, cls sess 0x%p.\n", ep, cls_session);
2347 "cls sess 0x%p.\n", cls_session);
2361 "netdev for host not set.\n");
2366 "shost 0x%p, hba 0x%p,%s, param %d, buf(%d) %s.\n",
2367 shost, chba, chba->
ndev->name, param, buflen, buf);
2374 "hba %s, req. ipv4 %pI4.\n", chba->
ndev->name, &addr);
2375 cxgbi_set_iscsi_ipv4(chba, addr);
2395 "netdev for host not set.\n");
2400 "shost 0x%p, hba 0x%p,%s, param %d.\n",
2401 shost, chba, chba->
ndev->name, param);
2414 addr = cxgbi_get_iscsi_ipv4(chba);
2415 len =
sprintf(buf,
"%pI4", &addr);
2417 "hba %s, ipv4 %pI4.\n", chba->
ndev->name, &addr);
2439 "shost 0x%p, non_blocking %d, dst_addr 0x%p.\n",
2440 shost, non_blocking, dst_addr);
2445 pr_info(
"shost 0x%p, priv NULL.\n", shost);
2450 csk = cxgbi_check_route(dst_addr);
2458 pr_info(
"Could not connect through requested host %u"
2459 "hba 0x%p != 0x%p (%u).\n",
2466 err = sock_get_port(csk);
2471 err = csk->
cdev->csk_init_act_open(csk);
2475 if (cxgbi_sock_is_closing(csk)) {
2477 pr_info(
"csk 0x%p is closing.\n", csk);
2484 pr_info(
"iscsi alloc ep, OOM.\n");
2493 "ep 0x%p, cep 0x%p, csk 0x%p, hba 0x%p,%s.\n",
2494 ep, cep, csk, hba, hba->
ndev->name);
2501 return ERR_PTR(err);
2510 if (!cxgbi_sock_is_established(csk))
2523 "ep 0x%p, cep 0x%p, cconn 0x%p, csk 0x%p,%u,0x%lx.\n",
2524 ep, cep, cconn, csk, csk->
state, csk->
flags);
2526 if (cconn && cconn->
iconn) {
2536 need_active_close(csk);
2549 pr_err(
"unable to register %s transport 0x%p.\n",
2554 "%s, registered iscsi transport 0x%p.\n",
2565 "de-register transport 0x%p, %s, stt 0x%p.\n",
2566 itp, itp->
name, *stt);
2575 switch (param_type) {
2629 static int __init libcxgbi_init_module(
void)
2634 pr_info(
"tag itt 0x%x, %u bits, age 0x%x, %u bits.\n",
2638 ddp_setup_host_page_size();
2642 static void __exit libcxgbi_exit_module(
void)