#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <asm/unaligned.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/sunrpc/svc_rdma.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT
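
/*
 * Build the svc_rqst argument buffer from an RDMA_RECV completion: the
 * first receive SGE backs the XDR head, the remaining byte count is
 * accounted to the page list, and leftover receive pages are released.
 */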
static void rdma_build_arg_xdr(struct svc_rqst *rqstp,
                               struct svc_rdma_op_ctxt *ctxt, u32 byte_count)
        page = ctxt->pages[0];

        rqstp->rq_arg.head[0].iov_len = min(byte_count, ctxt->sge[0].length);
        rqstp->rq_arg.len = byte_count;
        rqstp->rq_arg.buflen = byte_count;
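
        /* Bytes beyond the head are carried in the page list */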
        bc = byte_count - rqstp->rq_arg.head[0].iov_len;

        rqstp->rq_arg.page_base = 0;
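
        /* Walk the remaining receive SGEs, charging each length to the buffer */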
        while (bc && sge_no < ctxt->count) {
                page = ctxt->pages[sge_no];
                bc -= min(bc, ctxt->sge[sge_no].length);
                rqstp->rq_arg.buflen += ctxt->sge[sge_no].length;
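
        /* Release any receive pages the message did not consume */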
        while (sge_no < ctxt->count) {
                page = ctxt->pages[sge_no++];

        rqstp->rq_arg.tail[0].iov_len = 0;
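
/*
 * From map_read_chunks(): the header context keeps its own copy of the
 * rq_arg head and tail and accounts the read-chunk bytes, while each
 * chunk is mapped onto a run of SGEs recorded in rpl_map and chl_map.
 */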
        head->arg.head[0] = rqstp->rq_arg.head[0];
        head->arg.tail[0] = rqstp->rq_arg.tail[0];
        head->arg.page_base = 0;
        head->arg.page_len = ch_bytes;
        head->arg.len = rqstp->rq_arg.len + ch_bytes;
        head->arg.buflen = rqstp->rq_arg.buflen + ch_bytes;

        chl_map->ch[0].start = 0;
        rpl_map->sge[sge_no].iov_base =
                page_address(rqstp->rq_arg.pages[page_no]) + page_off;
        rpl_map->sge[sge_no].iov_len = sge_bytes;

        head->arg.pages[page_no] = rqstp->rq_arg.pages[page_no];

        byte_count -= sge_bytes;
        ch_bytes -= sge_bytes;

        chl_map->ch[ch_no].count = sge_no - chl_map->ch[ch_no].start;
        chl_map->ch[ch_no].start = sge_no;

        head->arg.page_len += ch_bytes;
        head->arg.len += ch_bytes;
        head->arg.buflen += ch_bytes;

        if ((sge_bytes + page_off) == PAGE_SIZE) {
                page_no++;
                page_off = 0;
        } else
                page_off += sge_bytes;
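
/*
 * fast_reg_read_chunks() maps the read chunk list by DMA-mapping the
 * data sink pages and exposing them through a single fast-register MR
 * (FRMR), so each chunk needs only one SGE in the reply map.
 */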
static int fast_reg_read_chunks(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
                                struct svc_rdma_op_ctxt *head, struct rpcrdma_msg *rmsgp,
                                struct svc_rdma_req_map *rpl_map, struct svc_rdma_req_map *chl_map,
                                int ch_count, int byte_count)

        head->arg.head[0] = rqstp->rq_arg.head[0];
        head->arg.tail[0] = rqstp->rq_arg.tail[0];
        head->arg.page_base = 0;
        head->arg.page_len = byte_count;
        head->arg.len = rqstp->rq_arg.len + byte_count;
        head->arg.buflen = rqstp->rq_arg.buflen + byte_count;
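
        /* DMA-map each data sink page into the FRMR page list */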
        for (page_no = 0; page_no < frmr->page_list_len; page_no++) {
                frmr->page_list->page_list[page_no] =
                        ib_dma_map_page(xprt->sc_cm_id->device,
                                        rqstp->rq_arg.pages[page_no], 0,
                                        PAGE_SIZE, DMA_FROM_DEVICE);
                if (ib_dma_mapping_error(xprt->sc_cm_id->device,
                                         frmr->page_list->page_list[page_no]))
                        goto fatal_err;
                head->arg.pages[page_no] = rqstp->rq_arg.pages[page_no];
        }
        head->count += page_no;
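
        /* Build the reply and chunk maps: one SGE per read chunk */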
        for (ch_no = 0; ch_no < ch_count; ch_no++) {
                int len = ntohl(ch->rc_target.rs_length);

                rpl_map->sge[ch_no].iov_len = len;
                chl_map->ch[ch_no].count = 1;
                chl_map->ch[ch_no].start = ch_no;

 fatal_err:
        printk("svcrdma: error fast registering xdr for xprt %p", xprt);

        /*
         * In rdma_set_ctxt_sge(): load the context's SGE array from a kvec
         * array. Without an FRMR each kvec element is DMA-mapped on its own;
         * with an FRMR the kvec already holds addresses within the registered
         * region, so only the FRMR's lkey is needed.
         */
        for (i = 0; i < count; i++) {
                ctxt->sge[i].length = 0;
                if (!frmr) {
                        ctxt->sge[i].addr =
                                ib_dma_map_page(xprt->sc_cm_id->device,
                                                virt_to_page(vec[i].iov_base),
                                                offset_in_page(vec[i].iov_base),
                                                vec[i].iov_len, DMA_FROM_DEVICE);
                        if (ib_dma_mapping_error(xprt->sc_cm_id->device,
                                                 ctxt->sge[i].addr))
                                return -EINVAL;
                        ctxt->sge[i].lkey = xprt->sc_dma_lkey;
                } else {
                        ctxt->sge[i].addr = (unsigned long)vec[i].iov_base;
                        ctxt->sge[i].lkey = frmr->mr->lkey;
                }
                ctxt->sge[i].length = vec[i].iov_len;
                *sgl_offset = *sgl_offset + vec[i].iov_len;
        }
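
/*
 * In rdma_read_xdr(): when the RPC header carries a read chunk list,
 * the chunks are mapped and one or more RDMA_READ work requests are
 * posted to pull the data from the client before the RPC is processed.
 */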
        ch = svc_rdma_get_read_chunk(rmsgp);

        sge_count = map_read_chunks(xprt, rqstp, hdr_ctxt, rmsgp,
                                    rpl_map, chl_map, ch_count, byte_count);
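
        /* Alternatively, when the transport can use fast-register MRs: */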
        sge_count = fast_reg_read_chunks(xprt, rqstp, hdr_ctxt, rmsgp,
                                         rpl_map, chl_map, ch_count, byte_count);
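
        /* Build an RDMA_READ work request for the next portion of this chunk */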
        memset(&read_wr, 0, sizeof read_wr);
        read_wr.wr_id = (unsigned long)ctxt;
        ctxt->wr_op = read_wr.opcode;
        read_wr.wr.rdma.remote_addr = rs_offset + sgl_offset;
        read_wr.sg_list = ctxt->sge;
        read_wr.num_sge =
                rdma_read_max_sge(xprt, chl_map->ch[ch_no].count);
        err = rdma_set_ctxt_sge(xprt, ctxt, hdr_ctxt->frmr,
                                &rpl_map->sge[chl_map->ch[ch_no].start],
                                &sgl_offset, read_wr.num_sge);
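
        /*
         * Last chunk and the work request covers all of it: the data sink
         * FRMR, if one is in use, is invalidated either by the read itself
         * or by a chained invalidate work request.
         */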
        if (((ch+1)->rc_discrim == 0) &&
            (read_wr.num_sge == chl_map->ch[ch_no].count)) {
                if (hdr_ctxt->frmr) {
                        ctxt->wr_op = read_wr.opcode;
                        read_wr.ex.invalidate_rkey =
                                ctxt->frmr->mr->lkey;

                        memset(&inv_wr, 0, sizeof inv_wr);
                        inv_wr.ex.invalidate_rkey =
                                hdr_ctxt->frmr->mr->lkey;
                        read_wr.next = &inv_wr;

        if (read_wr.num_sge < chl_map->ch[ch_no].count) {
                chl_map->ch[ch_no].count -= read_wr.num_sge;
                chl_map->ch[ch_no].start += read_wr.num_sge;
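
/*
 * rdma_read_complete() runs once the deferred RDMA_READs have finished:
 * it rebuilds rq_arg from the copy saved in the header context so the
 * RPC can be processed normally.
 */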
static int rdma_read_complete(struct svc_rqst *rqstp,
                              struct svc_rdma_op_ctxt *head)

        for (page_no = 0; page_no < head->count; page_no++) {

        rqstp->rq_arg.page_len = head->arg.page_len;
        rqstp->rq_arg.page_base = head->arg.page_base;

        rqstp->rq_arg.head[0] = head->arg.head[0];
        rqstp->rq_arg.tail[0] = head->arg.tail[0];

        ret = rqstp->rq_arg.head[0].iov_len
                + rqstp->rq_arg.page_len
                + rqstp->rq_arg.tail[0].iov_len;
        dprintk("svcrdma: deferred read ret=%d, rq_arg.len =%d, "
                "rq_arg.head[0].iov_base=%p, rq_arg.head[0].iov_len = %zd\n",
                ret, rqstp->rq_arg.len, rqstp->rq_arg.head[0].iov_base,
                rqstp->rq_arg.head[0].iov_len);
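
/*
 * The receive path (svc_rdma_recvfrom) first hands any completed
 * deferred read back to rdma_read_complete(); otherwise it takes the
 * next receive context off the dto queue and decodes it.
 */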
        dprintk("svcrdma: rqstp=%p\n", rqstp);

        list_del_init(&ctxt->dto_q);
        return rdma_read_complete(rqstp, ctxt);
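
        /* No deferred read pending: take the next receive context off the dto queue */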
        list_del_init(&ctxt->dto_q);

        dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
                ctxt, rdma_xprt, rqstp, ctxt->wc_status);

        rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);
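
        /* Post RDMA_READs for any read list; the request is deferred until they complete */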
        ret = rdma_read_xdr(rdma_xprt, rmsgp, rqstp, ctxt);

        ret = rqstp->rq_arg.head[0].iov_len
                + rqstp->rq_arg.page_len
                + rqstp->rq_arg.tail[0].iov_len;

        dprintk("svcrdma: ret = %d, rq_arg.len =%d, "
                "rq_arg.head[0].iov_base=%p, rq_arg.head[0].iov_len = %zd\n",
                ret, rqstp->rq_arg.len,
                rqstp->rq_arg.head[0].iov_base,
                rqstp->rq_arg.head[0].iov_len);

        dprintk("svcrdma: transport %p is closing\n", xprt);