15 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
17 #include <linux/module.h>
/* Runtime debug verbosity; presumably exposed as a module parameter — TODO confirm (declaration elided). */
30 static unsigned int dbg_level;
31 #include "../libcxgbi.h"
33 #define DRV_MODULE_NAME "cxgb3i"
34 #define DRV_MODULE_DESC "Chelsio T3 iSCSI Driver"
35 #define DRV_MODULE_VERSION "2.0.0"
36 #define DRV_MODULE_RELDATE "Jun. 2010"
/* Advertised TCP receive window for offloaded connections, in bytes. */
50 static int cxgb3i_rcv_win = 256 * 1024;
52 MODULE_PARM_DESC(cxgb3i_rcv_win,
"TCP receive window in bytes (default=256KB)");
/* TCP send window for offloaded connections, in bytes. */
54 static int cxgb3i_snd_win = 128 * 1024;
56 MODULE_PARM_DESC(cxgb3i_snd_win,
"TCP send window in bytes (default=128KB)");
/* Byte threshold at which accumulated RX credits are returned to the HW. */
58 static int cxgb3i_rx_credit_thres = 10 * 1024;
61 "RX credits return threshold in bytes (default=10KB)");
/*
 * Upper bound on concurrent offloaded iSCSI connections.
 * Fix: the parameter description said "default=8092", but the actual
 * default is 8 * 1024 = 8192.
 */
63 static unsigned int cxgb3i_max_connect = 8 * 1024;
65 MODULE_PARM_DESC(cxgb3i_max_connect,
"Max. # of connections (default=8192)");
/* First local TCP source port used for offloaded connections. */
67 static unsigned int cxgb3i_sport_base = 20000;
/* Forward declarations for the cxgb3 LLD client callbacks (add/remove/event). */
71 static void cxgb3i_dev_open(
struct t3cdev *);
72 static void cxgb3i_dev_close(
struct t3cdev *);
73 static void cxgb3i_dev_event_handler(
struct t3cdev *,
u32,
u32);
78 .add = cxgb3i_dev_open,
79 .remove = cxgb3i_dev_close,
80 .event_handler = cxgb3i_dev_event_handler,
91 .max_sectors = 0xFFFF,
/* Forward declaration: flush csk's queued TX skbs to the hardware (defined below). */
153 static int push_tx_frames(
struct cxgbi_sock *csk,
int req_completion);
158 unsigned int wscale = cxgbi_sock_compute_wscale(cxgb3i_rcv_win);
177 "csk 0x%p,%u,0x%lx,%u, %pI4:%u-%pI4:%u, %u,%u,%u.\n",
183 l2t_send(csk->
cdev->lldev, skb, csk->
l2t);
/*
 * ARP-resolution failure handler armed on active-open request skbs
 * (see set_arp_failure_handler calls below); body not visible in this view.
 */
186 static inline void act_open_arp_failure(
struct t3cdev *
dev,
struct sk_buff *skb)
/*
 * Send a connection-close request for @csk's tid, then kick the TX path
 * with push_tx_frames().  NOTE(review): several lines are elided in this view.
 */
197 static void send_close_req(
struct cxgbi_sock *csk)
201 unsigned int tid = csk->
tid;
204 "csk 0x%p,%u,0x%lx,%u.\n",
215 push_tx_frames(csk, 1);
/*
 * ARP failure callback for abort-request skbs; logs the event.
 * NOTE(review): remainder of the body is elided in this view.
 */
225 static void abort_arp_failure(
struct t3cdev *tdev,
struct sk_buff *skb)
230 "t3dev 0x%p, tid %u, skb 0x%p.\n",
/*
 * Issue an abort request for @csk: discard any unsent write-queue data,
 * arm the abort ARP-failure handler, and hand the skb to l2t_send().
 * NOTE(review): several lines are elided in this view.
 */
236 static void send_abort_req(
struct cxgbi_sock *csk)
246 cxgbi_sock_purge_write_queue(csk);
251 set_arp_failure_handler(skb, abort_arp_failure);
260 "csk 0x%p,%u,0x%lx,%u, snd_nxt %u, 0x%x.\n",
264 l2t_send(csk->
cdev->lldev, skb, csk->
l2t);
/* Reply to a peer abort, echoing @rst_status in the reply CPL's cmd field. */
272 static void send_abort_rpl(
struct cxgbi_sock *csk,
int rst_status)
278 "csk 0x%p,%u,0x%lx,%u, status %d.\n",
286 rpl->
cmd = rst_status;
302 "csk 0x%p,%u,0x%lx,%u, credit %u, dack %u.\n",
307 pr_info(
"csk 0x%p, credit %u, OOM.\n", csk, credits);
/*
 * Precompute skb_wrs[]: work requests needed per fragment count, given the
 * hardware work-request length @wr_len.  sgl_len is the scatter-gather list
 * length for i fragments; one WR if it fits, otherwise ceil of the remainder.
 */
332 static void init_wr_tab(
unsigned int wr_len)
339 int sgl_len = (3 *
i) / 2 + (i & 1);
342 skb_wrs[
i] = (sgl_len <= wr_len
343 ? 1 : 1 + (sgl_len - 2) / (wr_len - 1));
349 int len,
int req_completion)
354 skb_reset_transport_header(skb);
355 req = (
struct tx_data_wr *)__skb_push(skb,
sizeof(*req));
/* ARP failure handler for data-path skbs; presumably just frees the skb — body not visible. */
387 static void arp_failure_skb_discard(
struct t3cdev *
dev,
struct sk_buff *skb)
/*
 * Drain @csk's write queue to the hardware: per skb, compute the number of
 * work requests from the fragment count, stop when WR credits run out,
 * optionally prepend a TX_DATA work request, and transmit via l2t_send().
 * @req_completion requests a completion notification from the HW.
 * NOTE(review): many original lines are elided in this view; return value
 * semantics not visible here.
 */
392 static int push_tx_frames(
struct cxgbi_sock *csk,
int req_completion)
400 "csk 0x%p,%u,0x%lx,%u, in closing state.\n",
/* Fragment count, including the linear area when it carries payload. */
407 int frags = skb_shinfo(skb)->nr_frags + (len != skb->
data_len);
408 int wrs_needed = skb_wrs[frags];
/* If payload plus WR header fits in a single WR, one is enough. */
410 if (wrs_needed > 1 && len +
sizeof(
struct tx_data_wr) <= wrlen)
413 WARN_ON(frags >= SKB_WR_LIST_SIZE || wrs_needed < 1);
/* Not enough work-request credits left for this skb: stop pushing. */
415 if (csk->
wr_cred < wrs_needed) {
417 "csk 0x%p, skb len %u/%u, frag %u, wr %d<%u.\n",
/* skb->csum is reused (driver-private) to record this skb's WR count. */
425 skb->
csum = wrs_needed;
428 cxgbi_sock_enqueue_wr(csk, skb);
431 "csk 0x%p, enqueue, skb len %u/%u, frag %u, wr %d, "
432 "left %u, unack %u.\n",
437 if ((req_completion &&
444 make_tx_data_wr(csk, skb, len, req_completion);
450 "csk 0x%p, tid 0x%x, send skb 0x%p.\n",
/* ARP failure on the data path drops the skb rather than retrying. */
452 set_arp_failure_handler(skb, arp_failure_skb_discard);
453 l2t_send(csk->
cdev->lldev, skb, csk->
l2t);
/* Presumably releases the active-open tid held by @csk — body not visible; TODO confirm. */
464 static inline void free_atid(
struct cxgbi_sock *csk)
/*
 * CPL handler: hardware reports the active open is established.  Logs the
 * atid/tid transition and initial sequence number, then kicks the TX path
 * under csk->lock.  NOTE(review): several lines are elided in this view.
 */
473 static int do_act_establish(
struct t3cdev *tdev,
struct sk_buff *skb,
void *
ctx)
482 "atid 0x%x,tid 0x%x, csk 0x%p,%u,0x%lx, isn %u.\n",
483 atid, atid, csk, csk->
state, csk->
flags, rcv_isn);
494 spin_lock_bh(&csk->
lock);
501 pr_info(
"csk 0x%p,%u,0x%lx,%u, got EST.\n",
515 push_tx_frames(csk, 1);
519 spin_unlock_bh(&csk->
lock);
/* Map a CPL active-open reply status code to an errno; body not visible in this view. */
528 static int act_open_rpl_status_to_errno(
int status)
/*
 * Timer callback retrying a failed active open: under csk->lock, rebuild the
 * act-open skb and resend it via send_act_open_req().
 * NOTE(review): several lines are elided in this view.
 */
546 static void act_open_retry_timer(
unsigned long data)
552 "csk 0x%p,%u,0x%lx,%u.\n",
556 spin_lock_bh(&csk->
lock);
/* skb->sk is reused to carry the csk pointer for the ARP-failure path. */
561 skb->
sk = (
struct sock *)csk;
562 set_arp_failure_handler(skb, act_open_arp_failure);
563 send_act_open_req(csk, skb, csk->
l2t);
565 spin_unlock_bh(&csk->
lock);
/*
 * CPL handler for an active-open reply (failure path).  Unless the retry
 * timer is already armed with act_open_retry_timer, translate the HW status
 * to an errno and tear the connection down, under csk->lock.
 * NOTE(review): several lines are elided in this view.
 */
569 static int do_act_open_rpl(
struct t3cdev *tdev,
struct sk_buff *skb,
void *ctx)
574 pr_info(
"csk 0x%p,%u,0x%lx,%u, status %u, %pI4:%u-%pI4:%u.\n",
585 spin_lock_bh(&csk->
lock);
587 csk->
retry_timer.function != act_open_retry_timer) {
592 act_open_rpl_status_to_errno(rpl->
status));
594 spin_unlock_bh(&csk->
lock);
609 "csk 0x%p,%u,0x%lx,%u.\n",
628 "csk 0x%p,%u,0x%lx,%u, snxt %u.\n",
/*
 * Translate a CPL abort reason into an errno for @csk; switch cases and the
 * trailing out-parameter are elided in this view.
 */
642 static int abort_status_to_errno(
struct cxgbi_sock *csk,
int abort_reason,
645 switch (abort_reason) {
/*
 * CPL handler for a peer/HW abort request: under csk->lock, send the abort
 * reply and record the translated errno on the socket.
 * NOTE(review): several lines are elided in this view.
 */
659 static int do_abort_req(
struct t3cdev *cdev,
struct sk_buff *skb,
void *ctx)
666 "csk 0x%p,%u,0x%lx,%u.\n",
675 spin_lock_bh(&csk->
lock);
684 send_abort_rpl(csk, rst_status);
687 csk->
err = abort_status_to_errno(csk, req->
status, &rst_status);
692 spin_unlock_bh(&csk->
lock);
/* CPL handler for the abort-reply completion; most of the body is elided in this view. */
706 static int do_abort_rpl(
struct t3cdev *cdev,
struct sk_buff *skb,
void *ctx)
712 "status 0x%x, csk 0x%p, s %u, 0x%lx.\n",
/*
 * CPL handler for a received iSCSI PDU header.  Under csk->lock: validate
 * the header length against the skb, copy out the trailing ddp status CPL
 * (and, when the PDU was not DDP'ed, the data CPL), trim the skb to the PDU
 * length, and pass it up.  NOTE(review): many lines are elided in this view.
 */
742 static int do_iscsi_hdr(
struct t3cdev *t3dev,
struct sk_buff *skb,
void *ctx)
753 "csk 0x%p,%u,0x%lx,%u, skb 0x%p,%u.\n",
756 spin_lock_bh(&csk->
lock);
760 "csk 0x%p,%u,0x%lx,%u, bad state.\n",
771 skb_reset_transport_header(skb);
/* PDU header length reported by the CPL (network byte order). */
774 len = hdr_len =
ntohs(hdr_cpl->
len);
/* Sanity: skb must hold more than just the header. */
776 if (skb->
len <= hdr_len) {
777 pr_err(
"%s: tid %u, CPL_ISCSI_HDR, skb len %u < %u.\n",
787 pr_err(
"%s: tid %u, copy cpl_ddp %u-%zu failed %d.\n",
789 skb->
len,
sizeof(ddp_cpl), err);
796 status =
ntohl(ddp_cpl.ddp_status);
799 "csk 0x%p, skb 0x%p,%u, pdulen %u, status 0x%x.\n",
/* Data beyond header + ddp CPL => PDU payload was not DDP'ed; copy the data CPL. */
809 if (skb->
len > (hdr_len +
sizeof(ddp_cpl))) {
810 err =
skb_copy_bits(skb, hdr_len, &data_cpl,
sizeof(data_cpl));
812 pr_err(
"%s: tid %u, cp %zu/%u failed %d.\n",
814 csk->
tid,
sizeof(data_cpl), skb->
len, err);
817 data_len =
ntohs(data_cpl.len);
819 "skb 0x%p, pdu not ddp'ed %u/%u, status 0x%x.\n",
821 len +=
sizeof(data_cpl) + data_len;
826 __pskb_trim(skb, len);
830 spin_unlock_bh(&csk->
lock);
836 spin_unlock_bh(&csk->
lock);
/* CPL handler for work-request acknowledgements (credit return); body mostly elided here. */
846 static int do_wr_ack(
struct t3cdev *cdev,
struct sk_buff *skb,
void *ctx)
852 "csk 0x%p,%u,0x%lx,%u, cr %u.\n",
897 l2t_release(t3dev, csk->
l2t);
/*
 * Free per-connection offload state for @csk (resets the pending-WR list,
 * among other teardown elided in this view).
 */
903 static void release_offload_resources(
struct cxgbi_sock *csk)
908 "csk 0x%p,%u,0x%lx,%u.\n",
916 cxgbi_sock_reset_wr_list(csk);
/*
 * Synchronize chba->ipv4addr into the private-address slots of the VLAN
 * device (vdev) and the physical port (ndev); clears both slots when no
 * address applies.  NOTE(review): the conditional structure is partly elided
 * in this view — confirm branch conditions against the full source.
 */
930 static void update_address(
struct cxgbi_hba *chba)
934 chba->
ipv4addr != cxgb3i_get_private_ipv4addr(chba->
vdev)) {
935 cxgb3i_set_private_ipv4addr(chba->
vdev, chba->
ipv4addr);
936 cxgb3i_set_private_ipv4addr(chba->
ndev, 0);
940 cxgb3i_get_private_ipv4addr(chba->
ndev)) {
941 cxgb3i_set_private_ipv4addr(chba->
ndev, chba->
ipv4addr);
945 }
else if (cxgb3i_get_private_ipv4addr(chba->
ndev)) {
/* Fallback branch: clear both private address slots. */
947 cxgb3i_set_private_ipv4addr(chba->
vdev, 0);
948 cxgb3i_set_private_ipv4addr(chba->
ndev, 0);
/*
 * Start an active TCP open for @csk: refresh the device address mapping,
 * resolve an L2T entry and an atid (logging on failure), build the act-open
 * skb with the ARP-failure handler armed, reset the WR list, and send the
 * request.  NOTE(review): many lines, including error returns, are elided.
 */
952 static int init_act_open(
struct cxgbi_sock *csk)
962 "csk 0x%p,%u,0x%lx.\n", csk, csk->
state, csk->
flags);
964 update_address(chba);
970 &csk->
daddr.sin_addr.s_addr);
972 pr_err(
"NO l2t available.\n");
979 pr_err(
"NO atid available.\n");
/* skb->sk carries the csk pointer for the ARP-failure handler. */
988 skb->
sk = (
struct sock *)csk;
989 set_arp_failure_handler(skb, act_open_arp_failure);
994 cxgbi_sock_reset_wr_list(csk);
998 "csk 0x%p,%u,0x%lx, %pI4:%u-%pI4:%u.\n",
1004 send_act_open_req(csk, skb, csk->
l2t);
1033 unsigned int wr_len;
1039 pr_warn(
"t3 0x%p, offload up, ioctl failed.\n", t3dev);
1047 cxgb3i_max_connect);
1051 init_wr_tab(wr_len);
1060 pr_info(
"cdev 0x%p, offload up, added.\n", cdev);
/*
 * Prepare the ULP memory-write request header in @skb for target address
 * @addr; zeroes the request first.  Field setup is elided in this view.
 */
1067 static inline void ulp_mem_io_set_hdr(
struct sk_buff *skb,
unsigned int addr)
1071 memset(req, 0,
sizeof(*req));
1081 unsigned int idx,
unsigned int npods,
1090 "csk 0x%p, idx %u, npods %u, gl 0x%p.\n",
1091 csk, idx, npods, gl);
1093 for (i = 0; i < npods; i++, idx++, pm_addr +=
PPOD_SIZE) {
1100 ulp_mem_io_set_hdr(skb, pm_addr);
/*
 * Clear @npods DDP page-pod entries starting at @idx: one ULP memory-write
 * skb per pod, advancing pm_addr by PPOD_SIZE each iteration; logs and bails
 * on skb allocation failure.  NOTE(review): some lines are elided here.
 */
1110 static void ddp_clear_map(
struct cxgbi_hba *chba,
unsigned int tag,
1111 unsigned int idx,
unsigned int npods)
1119 "cdev 0x%p, idx %u, npods %u, tag 0x%x.\n",
1120 cdev, idx, npods, tag);
1122 for (i = 0; i < npods; i++, idx++, pm_addr +=
PPOD_SIZE) {
1127 pr_err(
"tag 0x%x, 0x%x, %d/%u, skb OOM.\n",
1128 tag, idx, i, npods);
1131 ulp_mem_io_set_hdr(skb, pm_addr);
/*
 * Program the DDP page-size index @pg_idx for connection @tid; @reply
 * requests a HW completion.  Body mostly elided in this view.
 */
1137 static int ddp_setup_conn_pgidx(
struct cxgbi_sock *csk,
1138 unsigned int tid,
int pg_idx,
bool reply)
1146 "csk 0x%p, tid %u, pg_idx %d.\n", csk, tid, pg_idx);
/*
 * Enable/disable iSCSI CRC offload for connection @tid: bit 0 of val is the
 * header digest (@hcrc), bit 1 the data digest (@dcrc).  Body mostly elided.
 */
1174 static int ddp_setup_conn_digest(
struct cxgbi_sock *csk,
unsigned int tid,
1175 int hcrc,
int dcrc,
int reply)
1180 u64 val = (hcrc ? 1 : 0) | (dcrc ? 2 : 0);
1183 "csk 0x%p, tid %u, crc %d,%d.\n", csk, tid, hcrc, dcrc);
1214 pr_info(
"t3dev 0x%p, ulp_iscsi no more user.\n", tdev);
1229 unsigned int pgsz_factor[4];
1234 pr_warn(
"t3dev 0x%p, ddp 0x%p already set up.\n",
1242 pr_err(
"%s, failed to get iscsi param err=%d.\n",
1248 uinfo.max_txsz, uinfo.max_rxsz);
1256 for (i = 0; i < 4; i++)
1257 uinfo.pgsz_factor[i] = pgsz_factor[i];
1262 pr_warn(
"%s unable to set iscsi param err=%d, ddp disabled.\n",
1274 pr_info(
"tdev 0x%p, nppods %u, bits %u, mask 0x%x,0x%x pkt %u/%u, "
/* cxgb3 client .remove callback: tear down the cxgbi device bound to @t3dev; body mostly elided. */
1282 static void cxgb3i_dev_close(
struct t3cdev *t3dev)
1287 pr_info(
"0x%p close, f 0x%x.\n", cdev, cdev ? cdev->
flags : 0);
/*
 * cxgb3 client .add callback: register a cxgbi device for @t3dev, wire up
 * the send/receive window parameters and iSCSI transport, initialize DDP
 * and offload handling, and seed each HBA with its port's private IPv4
 * address.  NOTE(review): error-handling lines are elided in this view.
 */
1298 static void cxgb3i_dev_open(
struct t3cdev *t3dev)
1305 pr_info(
"0x%p, updating.\n", cdev);
1311 pr_warn(
"device 0x%p register failed.\n", t3dev);
1316 cdev->
lldev = t3dev;
1322 cdev->
snd_win = cxgb3i_snd_win;
1323 cdev->
rcv_win = cxgb3i_rcv_win;
1328 cdev->
itp = &cxgb3i_iscsi_transport;
1330 err = cxgb3i_ddp_init(cdev);
1332 pr_info(
"0x%p ddp init failed\n", cdev);
1338 pr_info(
"0x%p offload init failed\n", cdev);
1343 &cxgb3i_host_template, cxgb3i_stt);
/* Copy each port's currently-configured private IPv4 address to its HBA. */
1347 for (i = 0; i < cdev->
nports; i++)
1348 cdev->
hbas[i]->ipv4addr =
1349 cxgb3i_get_private_ipv4addr(cdev->
ports[i]);
1351 pr_info(
"cdev 0x%p, f 0x%x, t3dev 0x%p open, err %d.\n",
1352 cdev, cdev ? cdev->
flags : 0, t3dev, err);
1364 "0x%p, cdev 0x%p, event 0x%x, port 0x%x.\n",
1365 t3dev, cdev, event, port);
/* Module entry point; presumably registers the transport and cxgb3 client — body not visible. */
1385 static int __init cxgb3i_init_module(
void)
/* Module exit point; presumably unregisters what init registered — body not visible. */
1405 static void __exit cxgb3i_exit_module(
void)