#define RPCDBG_FACILITY	RPCDBG_TRANS
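/*
 * This excerpt appears to come from the kernel's RPC/RDMA transport
 * marshaling code (net/sunrpc/xprtrdma/rpc_rdma.c).  The table below
 * gives human-readable names for the chunking strategies, indexed by
 * enum rpcrdma_chunktype; it is used for the dprintk output in
 * rpcrdma_marshal_req() further down.
 */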
static const char transfertypes[][12] = {
	"pure inline",	/* no chunks */
	" read chunk",	/* some argument via rdma read */
	"*read chunk",	/* entire request via rdma read */
	"write chunk",	/* some result via rdma write */
	"reply chunk"	/* entire reply via rdma write */
};
static int
rpcrdma_convert_iovs(struct xdr_buf *xdrbuf, unsigned int pos,
	enum rpcrdma_chunktype type, struct rpcrdma_mr_seg *seg, int nsegs)
{
	int len, n = 0, p;
	int page_base;
	struct page **ppages;

	if (pos == 0 && xdrbuf->head[0].iov_len) {
		seg[n].mr_page = NULL;
		seg[n].mr_offset = xdrbuf->head[0].iov_base;
		seg[n].mr_len = xdrbuf->head[0].iov_len;
		++n;
	}

	len = xdrbuf->page_len;
	ppages = xdrbuf->pages + (xdrbuf->page_base >> PAGE_SHIFT);
	page_base = xdrbuf->page_base & ~PAGE_MASK;
	p = 0;
	while (len && n < nsegs) {
		seg[n].mr_page = ppages[p];
		seg[n].mr_offset = (void *)(unsigned long) page_base;
		/* ... remainder of the page walk elided ... */
	}

	/* ran out of segments with page data still unmapped? fail */
	if (len && n == nsegs)
		return 0;

	if (xdrbuf->tail[0].iov_len) {
		/* ... */
		seg[n].mr_page = NULL;
		seg[n].mr_offset = xdrbuf->tail[0].iov_base;
		seg[n].mr_len = xdrbuf->tail[0].iov_len;
		++n;
	}

	return n;
}
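/*
 * Note on rpcrdma_convert_iovs(): an xdr_buf carries its data in three
 * pieces -- a head iovec, a page list, and a tail iovec.  This helper
 * flattens whichever pieces lie at or beyond byte offset "pos" into an
 * array of rpcrdma_mr_seg entries (page, offset, length) suitable for
 * memory registration.  It returns the number of segments built, or 0
 * if the buffer would need more than "nsegs" segments.
 */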
static unsigned int
rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target,
	struct rpcrdma_msg *headerp, enum rpcrdma_chunktype type)
{
	int nsegs, nchunks = 0;
	/* ... local declarations and chunk-list setup elided ... */

	if (type == rpcrdma_replych || type == rpcrdma_areadch)
		pos = 0;
	else
		pos = target->head[0].iov_len;

	nsegs = rpcrdma_convert_iovs(target, pos, type, seg, RPCRDMA_MAX_SEGS);
	if (nsegs == 0)
		return 0;

	do {
		/* register the memory, then build a chunk from the result */
		int n = rpcrdma_register_external(seg, nsegs,
						cur_wchunk != NULL, r_xprt);
		if (n <= 0)
			goto out;
		if (cur_rchunk) {	/* read */
			/* ... encode position, handle, offset, length ... */
			dprintk("RPC:       %s: read chunk "
				"elem %d@0x%llx:0x%x pos %u (%s)\n", __func__,
				seg->mr_len, (unsigned long long)seg->mr_base,
				seg->mr_rkey, pos, n < nsegs ? "more" : "last");
			cur_rchunk++;
			r_xprt->rx_stats.read_chunk_count++;
		} else {		/* write/reply */
			/* ... encode handle, offset, length ... */
			dprintk("RPC:       %s: %s chunk "
				"elem %d@0x%llx:0x%x (%s)\n", __func__,
				(type == rpcrdma_replych) ? "reply" : "write",
				seg->mr_len, (unsigned long long)seg->mr_base,
				seg->mr_rkey, n < nsegs ? "more" : "last");
			cur_wchunk++;
			if (type == rpcrdma_replych)
				r_xprt->rx_stats.reply_chunk_count++;
			else
				r_xprt->rx_stats.write_chunk_count++;
		}
		nchunks++;
		seg += n;
		nsegs -= n;
	} while (nsegs);

	/* finish off the header */
	if (cur_rchunk)
		iptr = (__be32 *) cur_rchunk;
	else
		iptr = (__be32 *) cur_wchunk;
	/* ... terminating list discriminators elided ... */

	/* return the length of the completed RDMA header */
	return (unsigned char *)iptr - (unsigned char *)headerp;

out:
	for (pos = 0; nchunks--;)
		pos += rpcrdma_deregister_external(
				&req->rl_segments[pos], r_xprt);
	return 0;
}
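/*
 * Note on rpcrdma_create_chunks(): each registered segment becomes one
 * chunk-list element carrying the segment's steering tag (seg->mr_rkey),
 * its 64-bit base address (seg->mr_base) and its length.  On the wire
 * the 64-bit address is split across two XDR words, roughly:
 *
 *	*p++ = htonl((u32)(seg->mr_base >> 32));	/- high word -/
 *	*p++ = htonl((u32)seg->mr_base);		/- low word -/
 *
 * Read chunks also carry a "position", the byte offset within the RPC
 * call body where the chunk's data logically belongs.  The return value
 * is the number of header bytes consumed, which becomes hdrlen in
 * rpcrdma_marshal_req() below; 0 signals failure, after which all prior
 * registrations are undone via rpcrdma_deregister_external().
 */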
static ssize_t
rpcrdma_inline_pullup(struct rpc_rqst *rqst, int pad)
{
	int i, npages, curlen;
	int copy_len;
	unsigned char *srcp, *destp;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
	int page_base;
	struct page **ppages;

	destp = rqst->rq_svec[0].iov_base;
	curlen = rqst->rq_svec[0].iov_len;
	destp += curlen;
	/*
	 * Do optional padding where it makes sense.  Alignment of write
	 * payload can help the server, if our setting is accurate.
	 */
	pad -= (curlen + 36 /* sizeof(struct rpcrdma_msg_padded) */);
	if (pad < 0 || rqst->rq_slen - curlen < RPCRDMA_INLINE_PAD_THRESH)
		pad = 0;	/* don't pad this request */

	dprintk("RPC:       %s: pad %d destp 0x%p len %d hdrlen %d\n",
		__func__, pad, destp, rqst->rq_slen, curlen);

	copy_len = rqst->rq_snd_buf.page_len;

	if (rqst->rq_snd_buf.tail[0].iov_len) {
		curlen = rqst->rq_snd_buf.tail[0].iov_len;
		if (destp + copy_len != rqst->rq_snd_buf.tail[0].iov_base) {
			memmove(destp + copy_len,
				rqst->rq_snd_buf.tail[0].iov_base, curlen);
			r_xprt->rx_stats.pullup_copy_count += curlen;
		}
		dprintk("RPC:       %s: tail destp 0x%p len %d\n",
			__func__, destp + copy_len, curlen);
		rqst->rq_svec[0].iov_len += curlen;
	}

	r_xprt->rx_stats.pullup_copy_count += copy_len;

	page_base = rqst->rq_snd_buf.page_base;
	ppages = rqst->rq_snd_buf.pages + (page_base >> PAGE_SHIFT);
	page_base &= ~PAGE_MASK;
	npages = PAGE_ALIGN(page_base + copy_len) >> PAGE_SHIFT;
	for (i = 0; copy_len && i < npages; i++) {
		curlen = PAGE_SIZE - page_base;
		if (curlen > copy_len)
			curlen = copy_len;
		dprintk("RPC:       %s: page %d destp 0x%p len %d curlen %d\n",
			__func__, i, destp, copy_len, curlen);
		srcp = kmap_atomic(ppages[i]);
		memcpy(destp, srcp + page_base, curlen);
		kunmap_atomic(srcp);
		rqst->rq_svec[0].iov_len += curlen;
		/* ... advance destp/copy_len, reset page_base ... */
	}
	/* header now contains the entire send message */
	return pad;
}
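/*
 * Note on rpcrdma_inline_pullup(): when the call will go without
 * chunks, the page list and tail of rq_snd_buf are copied (pulled up)
 * behind the bytes already in rq_svec[0], so the entire RPC call fits
 * in the single pre-registered send buffer and can be transmitted as
 * one inline SEND.  Bytes moved this way are accounted in
 * rx_stats.pullup_copy_count.  The return value is the write-alignment
 * padding to insert, or 0 when padding does not apply.
 */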
int
rpcrdma_marshal_req(struct rpc_rqst *rqst)
{
	struct rpc_xprt *xprt = rqst->rq_task->tk_xprt;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	char *base;
	size_t hdrlen, rpclen, padlen;
	enum rpcrdma_chunktype rtype, wtype;
	struct rpcrdma_msg *headerp;

	/* rpclen gets the amount of data in the first, pre-registered buffer */
	base = rqst->rq_svec[0].iov_base;
	rpclen = rqst->rq_svec[0].iov_len;

	/* build the RDMA header in the private area at the front */
	headerp = (struct rpcrdma_msg *) req->rl_base;
	/* don't byte-swap XID, it's already done in the request */
	headerp->rm_xid = rqst->rq_xid;
	/* ... rm_vers, rm_credit, rm_type elided ... */

	/* Chunks needed for results? */
	if (rqst->rq_rcv_buf.buflen <= RPCRDMA_INLINE_READ_THRESHOLD(rqst))
		wtype = rpcrdma_noch;
	else if (rqst->rq_rcv_buf.page_len == 0)
		wtype = rpcrdma_replych;
	else if (rqst->rq_rcv_buf.flags & XDRBUF_READ)
		wtype = rpcrdma_writech;
	else
		wtype = rpcrdma_replych;

	/* Chunks needed for arguments? */
	if (rqst->rq_snd_buf.len <= RPCRDMA_INLINE_WRITE_THRESHOLD(rqst))
		rtype = rpcrdma_noch;
	else if (rqst->rq_snd_buf.page_len == 0)
		rtype = rpcrdma_areadch;
	else
		rtype = rpcrdma_readch;

	/* ... error path (guard elided): */
	dprintk("RPC:       %s: too much data (%d/%d) for inline\n",
		__func__, rqst->rq_rcv_buf.len, rqst->rq_snd_buf.len);

	hdrlen = 28;	/* fixed header, empty chunk lists */
	padlen = 0;

	/*
	 * Pull up any extra send data into the pre-registered buffer.
	 * When padding is in use and applies, insert it and change the
	 * message type.
	 */
	if (rtype == rpcrdma_noch) {
		padlen = rpcrdma_inline_pullup(rqst,
						RPCRDMA_INLINE_PAD_VALUE(rqst));
		if (padlen) {
			/* ... switch to RDMA_MSGP, fill padded header ... */
			hdrlen += 2 * sizeof(u32);	/* extra words in padhdr */
		}
		/* ... */
		rpclen = rqst->rq_svec[0].iov_len;	/* new length after pullup */
	}

	if (rtype != rpcrdma_noch) {
		hdrlen = rpcrdma_create_chunks(rqst,
					&rqst->rq_snd_buf, headerp, rtype);
		wtype = rtype;	/* simplify dprintk */
	} else if (wtype != rpcrdma_noch) {
		hdrlen = rpcrdma_create_chunks(rqst,
					&rqst->rq_rcv_buf, headerp, wtype);
	}

	dprintk("RPC:       %s: %s: hdrlen %zd rpclen %zd padlen %zd"
		" headerp 0x%p base 0x%p lkey 0x%x\n",
		__func__, transfertypes[wtype], hdrlen, rpclen, padlen,
		headerp, base, req->rl_iov.lkey);

	/* set up the send iovecs; iov[3] carries any trailing write data */
	/* ... send_iov[0..2] setup elided ... */
	req->rl_send_iov[3].length = rqst->rq_slen - rpclen;
	/* ... */

	return 0;
}
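/*
 * Note on the header built above: on the wire (see RFC 5666) it starts
 * with four 32-bit words -- rm_xid, rm_vers, rm_credit, rm_type --
 * followed by the three chunk lists (read list, write list, reply
 * chunk).  With all three lists empty that is 4 + 3 = 7 XDR words,
 * i.e. the 28 bytes that the reply path below skips with
 * "(unsigned char *)headerp + 28".  RDMA_MSGP adds two more words
 * (rm_align, rm_thresh) for the padded-inline case, hence the
 * "hdrlen += 2 * sizeof(u32)" above.
 */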
		dprintk("RPC:       %s: chunk %d@0x%llx:0x%x\n",
			__func__,
			ntohl(seg->rs_length),
			(unsigned long long)off,
			ntohl(seg->rs_handle));
		/* ... */

	/* check for a properly terminated write chunk list */
	if (*w++ != xdr_zero)
		return -1;
	/* ... */

	*iptrp = (__be32 *) cur_wchunk;
	return total_len;
}
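/*
 * Note on rpcrdma_count_chunks(): this walks a write or reply chunk
 * list in a received header, summing each element's rs_length to learn
 * how many bytes the server transferred by RDMA Write, and advances
 * *iptrp past the list so the caller can find any inline data behind
 * it.  A negative return flags a malformed or improperly terminated
 * list.
 */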
static void
rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
{
	int i, npages, curlen, olen;
	char *destp;
	struct page **ppages;
	int page_base;

	curlen = rqst->rq_rcv_buf.head[0].iov_len;
	if (curlen > copy_len) {	/* write chunk header fixup */
		curlen = copy_len;
		rqst->rq_rcv_buf.head[0].iov_len = curlen;
	}

	dprintk("RPC:       %s: srcp 0x%p len %d hdrlen %d\n",
		__func__, srcp, copy_len, curlen);

	/* Shift pointer for first receive segment only */
	rqst->rq_rcv_buf.head[0].iov_base = srcp;
	srcp += curlen;
	copy_len -= curlen;

	olen = copy_len;
	i = 0;
	rpcx_to_rdmax(rqst->rq_xprt)->rx_stats.fixup_copy_count += olen;
	page_base = rqst->rq_rcv_buf.page_base;
	ppages = rqst->rq_rcv_buf.pages + (page_base >> PAGE_SHIFT);
	page_base &= ~PAGE_MASK;

	if (copy_len && rqst->rq_rcv_buf.page_len) {
		npages = PAGE_ALIGN(page_base + copy_len) >> PAGE_SHIFT;
		for (; i < npages; i++) {
			curlen = PAGE_SIZE - page_base;
			if (curlen > copy_len)
				curlen = copy_len;
			dprintk("RPC:       %s: page %d"
				" srcp 0x%p len %d curlen %d\n",
				__func__, i, srcp, copy_len, curlen);
			destp = kmap_atomic(ppages[i]);
			memcpy(destp + page_base, srcp, curlen);
			flush_dcache_page(ppages[i]);
			kunmap_atomic(destp);
			srcp += curlen;
			copy_len -= curlen;
			if (copy_len == 0)
				break;
			page_base = 0;
		}
		rqst->rq_rcv_buf.page_len = olen - copy_len;
	} else
		rqst->rq_rcv_buf.page_len = 0;

	if (copy_len && rqst->rq_rcv_buf.tail[0].iov_len) {
		curlen = copy_len;
		if (curlen > rqst->rq_rcv_buf.tail[0].iov_len)
			curlen = rqst->rq_rcv_buf.tail[0].iov_len;
		if (rqst->rq_rcv_buf.tail[0].iov_base != srcp)
			memmove(rqst->rq_rcv_buf.tail[0].iov_base, srcp, curlen);
		dprintk("RPC:       %s: tail srcp 0x%p len %d curlen %d\n",
			__func__, srcp, copy_len, curlen);
		rqst->rq_rcv_buf.tail[0].iov_len = curlen;
		copy_len -= curlen; ++i;
	} else
		rqst->rq_rcv_buf.tail[0].iov_len = 0;

	if (pad) {
		/* implicit padding on terminal chunk */
		unsigned char *p = rqst->rq_rcv_buf.tail[0].iov_base;
		while (pad--)
			p[rqst->rq_rcv_buf.tail[0].iov_len++] = 0;
	}

	if (copy_len)
		dprintk("RPC:       %s: %d bytes in"
			" %d extra segments (%d lost)\n",
			__func__, olen, i, copy_len);

	/* TBD avoid a warning from call_decode() */
	rqst->rq_private_buf = rqst->rq_rcv_buf;
}
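/*
 * Note on rpcrdma_inline_fixup(): the receive-side inverse of the
 * pullup above.  The reply arrived as one contiguous inline buffer
 * (srcp); this routine scatters it back over rq_rcv_buf.  The head is
 * redirected by swapping iov_base rather than copying; page and tail
 * data are copied, counted in rx_stats.fixup_copy_count.  The "pad"
 * argument zero-fills XDR round-up bytes that a terminal write chunk
 * was allowed to omit.
 */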
void
rpcrdma_conn_func(struct rpcrdma_ep *ep)
{
	struct rpc_xprt *xprt = ep->rep_xprt;

	spin_lock_bh(&xprt->transport_lock);
	if (++xprt->connect_cookie == 0)	/* maintain a reserved value */
		++xprt->connect_cookie;
	if (ep->rep_connected > 0) {
		if (!xprt_test_and_set_connected(xprt))
			xprt_wake_pending_tasks(xprt, 0);
	} else {
		if (xprt_test_and_clear_connected(xprt))
			xprt_wake_pending_tasks(xprt, -ENOTCONN);
	}
	spin_unlock_bh(&xprt->transport_lock);
}
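/*
 * Note on rpcrdma_conn_func(): connection state changes are published
 * to the generic RPC layer under transport_lock.  connect_cookie is
 * bumped (skipping the reserved value 0) so that requests queued
 * across a reconnect can detect it, and pending tasks are woken with
 * either success or -ENOTCONN depending on the new state.
 */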
void
rpcrdma_reply_handler(struct rpcrdma_rep *rep)
{
	struct rpcrdma_msg *headerp;
	struct rpcrdma_req *req;
	struct rpc_rqst *rqst;
	struct rpc_xprt *xprt = rep->rr_xprt;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	__be32 *iptr;
	int rdmalen, status;

	/* Check status.  If bad, signal disconnect and return rep to pool */
	if (rep->rr_len == ~0U) {
		rpcrdma_recv_buffer_put(rep);
		if (r_xprt->rx_ep.rep_connected == 1) {
			r_xprt->rx_ep.rep_connected = -EIO;
			rpcrdma_conn_func(&r_xprt->rx_ep);
		}
		return;
	}
	if (rep->rr_len < 28) {
		dprintk("RPC:       %s: short/invalid reply\n", __func__);
		goto repost;
	}
	headerp = (struct rpcrdma_msg *) rep->rr_base;
	if (headerp->rm_vers != xdr_one) {
		dprintk("RPC:       %s: invalid version %d\n",
			__func__, ntohl(headerp->rm_vers));
		goto repost;
	}

	/* Get XID and try for a match. */
	spin_lock(&xprt->transport_lock);
	rqst = xprt_lookup_rqst(xprt, headerp->rm_xid);
	if (rqst == NULL) {
		spin_unlock(&xprt->transport_lock);
		dprintk("RPC:       %s: reply 0x%p failed "
			"to match any request xid 0x%08x len %d\n",
			__func__, rep, headerp->rm_xid, rep->rr_len);
repost:
		/* ... re-post the receive buffer and return ... */
		return;
	}

	req = rpcr_to_rdmar(rqst);
	if (req->rl_reply) {
		spin_unlock(&xprt->transport_lock);
		dprintk("RPC:       %s: duplicate reply 0x%p to RPC "
			"request 0x%p: xid 0x%08x\n", __func__, rep, req,
			headerp->rm_xid);
		goto repost;
	}

	dprintk("RPC:       %s: reply 0x%p completes request 0x%p\n"
		"                   RPC request 0x%p xid 0x%08x\n",
		__func__, rep, req, rqst, headerp->rm_xid);
	/* check for expected message types */
	switch (headerp->rm_type) {
	case htonl(RDMA_MSG):
		/* ... chunk-list sanity checks elided ... */
		if (headerp->rm_body.rm_chunks[1] != xdr_zero) {
			/* count any expected write chunks in read reply */
			iptr = &headerp->rm_body.rm_chunks[2];
			rdmalen = rpcrdma_count_chunks(rep,
						req->rl_nchunks, 1, &iptr);
			/* check for validity, and no reply chunk after */
			if (rdmalen < 0 || *iptr++ != xdr_zero)
				goto badheader;
			rep->rr_len -=
			    ((unsigned char *)iptr - (unsigned char *)headerp);
			status = rep->rr_len + rdmalen;
			r_xprt->rx_stats.total_rdma_reply += rdmalen;
			/* special case - last chunk may omit padding */
			if (rdmalen &= 3) {
				rdmalen = 4 - rdmalen;
				status += rdmalen;
			}
		} else {
			/* else ordinary inline */
			rdmalen = 0;
			iptr = (__be32 *)((unsigned char *)headerp + 28);
			rep->rr_len -= 28;	/* sizeof *headerp */
			status = rep->rr_len;
		}
		/* Fix up the rpc results for upper layer */
		rpcrdma_inline_fixup(rqst, (char *)iptr, rep->rr_len, rdmalen);
		break;
	case htonl(RDMA_NOMSG):
		/* never expect read or write chunks, always reply chunks */
		/* ... chunk-list sanity checks elided ... */
		iptr = (__be32 *)((unsigned char *)headerp + 28);
		rdmalen = rpcrdma_count_chunks(rep, req->rl_nchunks, 0, &iptr);
		if (rdmalen < 0)
			goto badheader;
		r_xprt->rx_stats.total_rdma_reply += rdmalen;
		/* Reply chunk buffer already is the reply vector - no fixup */
		status = rdmalen;
		break;

badheader:
	default:
		dprintk("%s: invalid rpcrdma reply header (type %d):"
				" chunks[012] == %d %d %d"
				" expected chunks <= %d\n",
				__func__, ntohl(headerp->rm_type),
				headerp->rm_body.rm_chunks[0],
				headerp->rm_body.rm_chunks[1],
				headerp->rm_body.rm_chunks[2],
				req->rl_nchunks);
		status = -EIO;
		break;
	}
	/* ... optional memory-window unbind before completing ... */
	rep->rr_func = rpcrdma_unbind_func;
	/* ... */

	dprintk("RPC:       %s: xprt_complete_rqst(0x%p, 0x%p, %d)\n",
			__func__, xprt, rqst, status);
	xprt_complete_rqst(rqst->rq_task, status);
	spin_unlock(&xprt->transport_lock);
}
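/*
 * Note on the reply path as a whole: replies are matched to requests
 * by XID under transport_lock, just as in the socket transports.  The
 * "status" passed to xprt_complete_rqst() is a byte count: the inline
 * bytes still in the receive buffer plus any bytes the server placed
 * directly by RDMA Write (rdmalen), including the 1-3 bytes of XDR
 * round-up that a terminal chunk may have omitted.
 */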