#include <linux/sunrpc/debug.h>
#include <asm/unaligned.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT
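/*
 * From fast_reg_xdr(): build the request map for a reply that will be
 * sent through a fast-register MR (FRMR). The head, page list, and tail
 * of the xdr_buf are DMA-mapped into the FRMR's page list so the whole
 * reply is covered by a single registration.
 */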
        /* Map the head */
        frva = (void *)((unsigned long)(xdr->head[0].iov_base) & PAGE_MASK);
        vec->sge[sge_no].iov_base = xdr->head[0].iov_base;
        vec->sge[sge_no].iov_len = xdr->head[0].iov_len;
        sge_no++;

        /* Map the XDR head */
        frmr->kva = frva;
        frmr->direction = DMA_TO_DEVICE;
        frmr->access_flags = 0;
        frmr->map_len = PAGE_SIZE;
        frmr->page_list_len = 1;
        page_off = (unsigned long)xdr->head[0].iov_base & ~PAGE_MASK;
        frmr->page_list->page_list[page_no] =
                ib_dma_map_page(xprt->sc_cm_id->device,
                                virt_to_page(xdr->head[0].iov_base),
                                page_off,
                                PAGE_SIZE - page_off,
                                DMA_TO_DEVICE);
        if (ib_dma_mapping_error(xprt->sc_cm_id->device,
                                 frmr->page_list->page_list[page_no]))
                goto fatal_err;

        /* Map the XDR page list */
        page_off = xdr->page_base;
        page_bytes = xdr->page_len + page_off;
        if (!page_bytes)
                goto encode_tail;

        /* Map the pages */
        vec->sge[sge_no].iov_base = frva + frmr->map_len + page_off;
        vec->sge[sge_no].iov_len = page_bytes;
        sge_no++;
        do {
                struct page *page;

                page = xdr->pages[page_no++];
                sge_bytes = min_t(u32, page_bytes, (PAGE_SIZE - page_off));
                page_bytes -= sge_bytes;

                frmr->page_list->page_list[page_no] =
                        ib_dma_map_page(xprt->sc_cm_id->device,
                                        page, page_off,
                                        sge_bytes, DMA_TO_DEVICE);
                if (ib_dma_mapping_error(xprt->sc_cm_id->device,
                                         frmr->page_list->page_list[page_no]))
                        goto fatal_err;

                page_off = 0; /* reset for next time through loop */
                frmr->map_len += PAGE_SIZE;
                frmr->page_list_len++;
        } while (page_bytes);

 encode_tail:
        /* Map the tail */
        if (0 == xdr->tail[0].iov_len)
                goto done;

        vec->sge[sge_no].iov_len = xdr->tail[0].iov_len;

        if (((unsigned long)xdr->tail[0].iov_base & PAGE_MASK) ==
            ((unsigned long)xdr->head[0].iov_base & PAGE_MASK)) {
                /* Head and tail share a page; no need to map it again. */
                vec->sge[sge_no].iov_base = xdr->tail[0].iov_base;
        } else {
                void *va;

                /* Map another page for the tail */
                page_off = (unsigned long)xdr->tail[0].iov_base & ~PAGE_MASK;
                va = (void *)((unsigned long)xdr->tail[0].iov_base & PAGE_MASK);
                vec->sge[sge_no].iov_base = frva + frmr->map_len + page_off;

                frmr->page_list->page_list[page_no] =
                        ib_dma_map_page(xprt->sc_cm_id->device,
                                        virt_to_page(va), page_off,
                                        PAGE_SIZE, DMA_TO_DEVICE);
                if (ib_dma_mapping_error(xprt->sc_cm_id->device,
                                         frmr->page_list->page_list[page_no]))
                        goto fatal_err;
        }

 done:
        if (svc_rdma_fastreg(xprt, frmr))
                goto fatal_err;

        return 0;

 fatal_err:
        printk("svcrdma: Error fast registering memory for xprt %p\n", xprt);
        vec->frmr = NULL;
        svc_rdma_put_frmr(xprt, frmr);
        return -EIO;
/*
 * From map_xdr(): build an iovec-style map (vec->sge[]) of the reply
 * xdr_buf -- head, then page list, then tail -- starting at sge 1 so that
 * sge 0 stays reserved for the RPC/RDMA header. Devices that support fast
 * registration take the fast_reg_xdr() path instead.
 */
        BUG_ON(xdr->len !=
               (xdr->head[0].iov_len + xdr->page_len + xdr->tail[0].iov_len));

        if (xprt->sc_frmr_pg_list_len)
                return fast_reg_xdr(xprt, xdr, vec);

        /* Head SGE */
        vec->sge[sge_no].iov_base = xdr->head[0].iov_base;
        vec->sge[sge_no].iov_len = xdr->head[0].iov_len;
        sge_no++;

        /* Pages SGE */
        page_no = 0;
        page_bytes = xdr->page_len;
        page_off = xdr->page_base;
        while (page_bytes) {
                vec->sge[sge_no].iov_base =
                        page_address(xdr->pages[page_no]) + page_off;
                sge_bytes = min_t(u32, page_bytes, (PAGE_SIZE - page_off));
                page_bytes -= sge_bytes;
                vec->sge[sge_no].iov_len = sge_bytes;

                sge_no++;
                page_no++;
                page_off = 0; /* reset for next time through loop */
        }

        /* Tail SGE */
        if (xdr->tail[0].iov_len) {
                vec->sge[sge_no].iov_base = xdr->tail[0].iov_base;
                vec->sge[sge_no].iov_len = xdr->tail[0].iov_len;
                sge_no++;
        }

        dprintk("svcrdma: map_xdr: sge_no %d page_no %d "
                "page_base %u page_len %u head_len %zu tail_len %zu\n",
                sge_no, page_no, xdr->page_base, xdr->page_len,
                xdr->head[0].iov_len, xdr->tail[0].iov_len);
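/*
 * dma_map_xdr(): translate a byte offset into the reply xdr_buf (head,
 * page list, or tail) to the backing page and the offset within that
 * page, then DMA-map at most one page of it for the device.
 */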
static dma_addr_t dma_map_xdr(struct svcxprt_rdma *xprt,
                              struct xdr_buf *xdr,
                              u32 xdr_off, size_t len, int dir)
{
        struct page *page;
        dma_addr_t dma_addr;

        if (xdr_off < xdr->head[0].iov_len) {
                /* This offset is in the head */
                xdr_off += (unsigned long)xdr->head[0].iov_base & ~PAGE_MASK;
                page = virt_to_page(xdr->head[0].iov_base);
        } else {
                xdr_off -= xdr->head[0].iov_len;
                if (xdr_off < xdr->page_len) {
                        /* This offset is in the page list */
                        page = xdr->pages[xdr_off >> PAGE_SHIFT];
                        xdr_off &= ~PAGE_MASK;
                } else {
                        /* This offset is in the tail */
                        xdr_off -= xdr->page_len;
                        xdr_off += (unsigned long)
                                xdr->tail[0].iov_base & ~PAGE_MASK;
                        page = virt_to_page(xdr->tail[0].iov_base);
                }
        }
        dma_addr = ib_dma_map_page(xprt->sc_cm_id->device, page, xdr_off,
                                   min_t(size_t, PAGE_SIZE, len), dir);
        return dma_addr;
}
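/*
 * send_write(): post a single RDMA_WRITE work request that pushes up to
 * write_len bytes of the already-mapped reply, starting at byte offset
 * xdr_off, into the client memory described by (rmr, to).
 */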
static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
                      u32 rmr, u64 to,
                      u32 xdr_off, int write_len,
                      struct svc_rdma_req_map *vec)
{
        struct ib_send_wr write_wr;
        struct ib_sge *sge;
        int xdr_sge_no;
        int sge_no;
        int sge_bytes;
        int sge_off;
        int bc;
        struct svc_rdma_op_ctxt *ctxt;

        dprintk("svcrdma: RDMA_WRITE rmr=%x, to=%llx, xdr_off=%d, "
                "write_len=%d, vec->sge=%p, vec->count=%lu\n",
                rmr, (unsigned long long)to, xdr_off,
                write_len, vec->sge, vec->count);

        ctxt = svc_rdma_get_context(xprt);
        ctxt->direction = DMA_TO_DEVICE;
        sge = ctxt->sge;

        /* Find the SGE associated with xdr_off */
        for (bc = xdr_off, xdr_sge_no = 1; bc && xdr_sge_no < vec->count;
             xdr_sge_no++) {
                if (vec->sge[xdr_sge_no].iov_len > bc)
                        break;
                bc -= vec->sge[xdr_sge_no].iov_len;
        }

        sge_off = bc;
        bc = write_len;
        sge_no = 0;

        /* Copy the remaining SGE */
        while (bc != 0) {
                sge_bytes = min_t(size_t,
                          bc, vec->sge[xdr_sge_no].iov_len-sge_off);
                sge[sge_no].length = sge_bytes;
                if (!vec->frmr) {
                        sge[sge_no].addr =
                                dma_map_xdr(xprt, &rqstp->rq_res, xdr_off,
                                            sge_bytes, DMA_TO_DEVICE);
                        xdr_off += sge_bytes;
                        if (ib_dma_mapping_error(xprt->sc_cm_id->device,
                                                 sge[sge_no].addr))
                                goto err;
                        sge[sge_no].lkey = xprt->sc_dma_lkey;
                } else {
                        sge[sge_no].addr = (unsigned long)
                                vec->sge[xdr_sge_no].iov_base + sge_off;
                        sge[sge_no].lkey = vec->frmr->mr->lkey;
                }
                sge_off = 0;
                sge_no++;
                xdr_sge_no++;
                bc -= sge_bytes;
        }

        /* Prepare the RDMA_WRITE work request */
        memset(&write_wr, 0, sizeof write_wr);
        write_wr.wr_id = (unsigned long)ctxt;
        write_wr.sg_list = &sge[0];
        write_wr.num_sge = sge_no;
        write_wr.opcode = IB_WR_RDMA_WRITE;
        write_wr.send_flags = IB_SEND_SIGNALED;
        write_wr.wr.rdma.rkey = rmr;
        write_wr.wr.rdma.remote_addr = to;
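/*
 * send_write_chunks(): walk the write list from the client's request and
 * RDMA-write the reply data that follows the head (page list and tail)
 * into the client-supplied chunks, splitting each chunk into writes of
 * at most max_write bytes.
 */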
        arg_ary = svc_rdma_get_write_array(rdma_argp);
        if (!arg_ary)
                return 0;

        if (vec->frmr)
                max_write = vec->frmr->map_len;
        else
                max_write = xprt->sc_max_sge * PAGE_SIZE;

        /* Write chunks start at the pagelist */
        nchunks = ntohl(arg_ary->wc_nchunks);
        for (xdr_off = rqstp->rq_res.head[0].iov_len, chunk_no = 0;
             xfer_len && chunk_no < nchunks;
             chunk_no++) {
                arg_ch = &arg_ary->wc_array[chunk_no].wc_target;
                write_len = min(xfer_len, ntohl(arg_ch->rs_length));
                xdr_decode_hyper((__be32 *)&arg_ch->rs_offset, &rs_offset);

                chunk_off = 0;
                while (write_len) {
                        this_write = min(write_len, max_write);
                        ret = send_write(xprt, rqstp,
                                         ntohl(arg_ch->rs_handle),
                                         rs_offset + chunk_off,
                                         xdr_off, this_write, vec);
                        if (ret) {
                                dprintk("svcrdma: RDMA_WRITE failed, ret=%d\n",
                                        ret);
                                return -EIO;
                        }
                        chunk_off += this_write;
                        xdr_off += this_write;
                        xfer_len -= this_write;
                        write_len -= this_write;
                }
        }

        return rqstp->rq_res.page_len + rqstp->rq_res.tail[0].iov_len;
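/*
 * send_reply_chunks(): same pattern for a reply chunk list -- the entire
 * reply, starting at xdr offset 0, is RDMA-written into the
 * client-supplied reply chunks.
 */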
        arg_ary = svc_rdma_get_reply_array(rdma_argp);
        if (!arg_ary)
                return 0;

        if (vec->frmr)
                max_write = vec->frmr->map_len;
        else
                max_write = xprt->sc_max_sge * PAGE_SIZE;

        /* xdr offset starts at RPC message */
        nchunks = ntohl(arg_ary->wc_nchunks);
        for (xdr_off = 0, chunk_no = 0;
             xfer_len && chunk_no < nchunks;
             chunk_no++) {
                ch = &arg_ary->wc_array[chunk_no].wc_target;
                write_len = min(xfer_len, ntohl(ch->rs_length));
                xdr_decode_hyper((__be32 *)&ch->rs_offset, &rs_offset);
                chunk_off = 0;
                while (write_len) {
                        this_write = min(write_len, max_write);
                        ret = send_write(xprt, rqstp,
                                         ntohl(ch->rs_handle),
                                         rs_offset + chunk_off,
                                         xdr_off, this_write, vec);
                        if (ret) {
                                dprintk("svcrdma: RDMA_WRITE failed, ret=%d\n",
                                        ret);
                                return -EIO;
                        }
                        chunk_off += this_write;
                        xdr_off += this_write;
                        xfer_len -= this_write;
                        write_len -= this_write;
                }
        }
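/*
 * From send_reply(): post the final RDMA_SEND carrying the RPC/RDMA
 * header and any inline portion of the reply. When an FRMR was used for
 * the reply, a LOCAL_INV work request is chained after the SEND so the
 * rkey is invalidated.
 */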
        /* Post a recv buffer to handle the reply for another request. */
        ret = svc_rdma_post_recv(rdma);
        if (ret) {
                printk(KERN_INFO
                       "svcrdma: could not post a receive buffer, err=%d. "
                       "Closing transport %p.\n", ret, rdma);
                return -ENOTCONN;
        }

        /* Prepare the SGE for the RPC/RDMA header */
        ctxt->sge[0].lkey = rdma->sc_dma_lkey;
        ctxt->sge[0].length = svc_rdma_xdr_get_reply_hdr_len(rdma_resp);
        ctxt->sge[0].addr =
                ib_dma_map_page(rdma->sc_cm_id->device, page, 0,
                                ctxt->sge[0].length, DMA_TO_DEVICE);
        if (ib_dma_mapping_error(rdma->sc_cm_id->device, ctxt->sge[0].addr))
                goto err;

        /* Map the payload indicated by 'byte_count' */
        for (sge_no = 1; byte_count && sge_no < vec->count; sge_no++) {
                sge_bytes = min_t(size_t, vec->sge[sge_no].iov_len, byte_count);
                byte_count -= sge_bytes;
                if (!vec->frmr) {
                        ctxt->sge[sge_no].addr =
                                dma_map_xdr(rdma, &rqstp->rq_res, xdr_off,
                                            sge_bytes, DMA_TO_DEVICE);
                        xdr_off += sge_bytes;
                        if (ib_dma_mapping_error(rdma->sc_cm_id->device,
                                                 ctxt->sge[sge_no].addr))
                                goto err;
                        ctxt->sge[sge_no].lkey = rdma->sc_dma_lkey;
                } else {
                        ctxt->sge[sge_no].addr = (unsigned long)
                                vec->sge[sge_no].iov_base;
                        ctxt->sge[sge_no].lkey = vec->frmr->mr->lkey;
                }
                ctxt->sge[sge_no].length = sge_bytes;
        }

        /* Save all respages in the ctxt and remove them from the
         * respages array. They are ours until the I/O completes.
         */
        for (page_no = 0; page_no < rqstp->rq_resused; page_no++) {
                ctxt->pages[page_no+1] = rqstp->rq_respages[page_no];
                ctxt->count++;
                rqstp->rq_respages[page_no] = NULL;
                /*
                 * If there are more pages than SGE, terminate SGE
                 * list so that svc_rdma_unmap_dma doesn't attempt to
                 * unmap garbage.
                 */
                if (page_no+1 >= sge_no)
                        ctxt->sge[page_no+1].length = 0;
        }

        /* Prepare the SEND WR, chaining a LOCAL_INV behind it when an
         * FRMR must be invalidated. */
        memset(&send_wr, 0, sizeof send_wr);
        send_wr.wr_id = (unsigned long)ctxt;
        send_wr.sg_list = ctxt->sge;
        send_wr.num_sge = sge_no;
        send_wr.opcode = IB_WR_SEND;
        send_wr.send_flags = IB_SEND_SIGNALED;
        if (vec->frmr) {
                /* Prepare INVALIDATE WR */
                memset(&inv_wr, 0, sizeof inv_wr);
                inv_wr.opcode = IB_WR_LOCAL_INV;
                inv_wr.send_flags = IB_SEND_SIGNALED;
                inv_wr.ex.invalidate_rkey =
                        vec->frmr->mr->lkey;
                send_wr.next = &inv_wr;
        }
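/*
 * xdr_start(): back up from head[0].iov_base by the number of header
 * bytes consumed during receive processing, recovering a pointer to the
 * start of the client's RPC/RDMA header in the receive buffer.
 */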
static void *xdr_start(struct xdr_buf *xdr)
{
        return xdr->head[0].iov_base -
                (xdr->len -
                 xdr->page_len -
                 xdr->tail[0].iov_len -
                 xdr->head[0].iov_len);
}
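/*
 * From svc_rdma_sendto(): the transport's sendto method. It locates the
 * client's RPC/RDMA header, maps the reply, pushes write-chunk and
 * reply-chunk data with RDMA_WRITEs, and finishes with an RDMA_SEND of
 * the reply header plus any remaining inline bytes.
 */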
        struct page *res_page;

        dprintk("svcrdma: sending response for rqstp=%p\n", rqstp);

        /* Get the RPC/RDMA request header. */
        rdma_argp = xdr_start(&rqstp->rq_arg);

        /* Build an SGE map of the reply XDR buffer. */
        ret = map_xdr(rdma, &rqstp->rq_res, vec);
        inline_bytes = rqstp->rq_res.len;

        /* Create the RDMA response header. */
        reply_ary = svc_rdma_get_reply_array(rdma_argp);
        if (reply_ary)
                reply_type = RDMA_NOMSG;
        else
                reply_type = RDMA_MSG;
        svc_rdma_xdr_encode_reply_header(rdma, rdma_argp,
                                         rdma_resp, reply_type);

        /* Send any write-chunk data and build the response write list. */
        ret = send_write_chunks(rdma, rdma_argp, rdma_resp,
                                rqstp, vec);
        inline_bytes -= ret;

        /* Send any reply-chunk data and build the response reply list. */
        ret = send_reply_chunks(rdma, rdma_argp, rdma_resp,
                                rqstp, vec);
        inline_bytes -= ret;

        ret = send_reply(rdma, rqstp, res_page, rdma_resp, ctxt, vec,
                         inline_bytes);
        dprintk("svcrdma: send_reply returns %d\n", ret);