24 #include <linux/tcp.h>
25 #include <linux/slab.h>
27 #include <linux/export.h>
31 #define RPCDBG_FACILITY RPCDBG_TRANS
38 static inline int xprt_need_to_requeue(
struct rpc_xprt *xprt)
40 return xprt->bc_alloc_count > 0;
43 static inline void xprt_inc_alloc_count(
struct rpc_xprt *xprt,
unsigned int n)
45 xprt->bc_alloc_count +=
n;
48 static inline int xprt_dec_alloc_count(
struct rpc_xprt *xprt,
unsigned int n)
50 return xprt->bc_alloc_count -=
n;
57 static void xprt_free_allocation(
struct rpc_rqst *
req)
59 struct xdr_buf *xbufp;
61 dprintk(
"RPC: free allocations for req= %p\n", req);
63 xbufp = &req->rq_private_buf;
64 free_page((
unsigned long)xbufp->head[0].iov_base);
65 xbufp = &req->rq_snd_buf;
66 free_page((
unsigned long)xbufp->head[0].iov_base);
/*
 * Fragment of the backchannel setup path: preallocates min_reqs
 * rpc_rqst structures (plus receive/send buffer pages) on a local
 * list, then splices them onto xprt->bc_pa_list under bc_pa_lock.
 *
 * NOTE(review): this excerpt is incomplete — the function signature,
 * the page allocations that produce page_rcv/page_snd, the
 * kzalloc-failure check, and the loop/branch closings are not
 * visible here.  Comments describe only the visible lines.
 */
92 struct xdr_buf *xbufp =
NULL;
93 struct rpc_rqst *
req, *
tmp;
97 dprintk(
"RPC: setup backchannel transport\n");
/* Build the new requests on a private list first so the transport's
 * list is touched only once, under the lock, at the end. */
107 INIT_LIST_HEAD(&tmp_list);
108 for (i = 0; i < min_reqs; i++) {
/* One zeroed rpc_rqst per preallocated backchannel slot. */
110 req = kzalloc(
sizeof(
struct rpc_rqst),
GFP_KERNEL);
117 dprintk(
"RPC: adding req= %p\n", req);
118 list_add(&req->rq_bc_pa_list, &tmp_list);
121 INIT_LIST_HEAD(&req->rq_list);
122 INIT_LIST_HEAD(&req->rq_bc_list);
/* Receive buffer setup; page_rcv presumably allocated on a line not
 * shown in this excerpt — NULL here means allocation failure. */
126 if (page_rcv ==
NULL) {
130 xbufp = &req->rq_rcv_buf;
133 xbufp->tail[0].iov_base =
NULL;
134 xbufp->tail[0].iov_len = 0;
/* Send buffer setup, mirroring the receive-buffer pattern above. */
141 if (page_snd ==
NULL) {
146 xbufp = &req->rq_snd_buf;
148 xbufp->head[0].iov_len = 0;
149 xbufp->tail[0].iov_base =
NULL;
150 xbufp->tail[0].iov_len = 0;
/* Publish all new requests and update the accounting while holding
 * bc_pa_lock (bottom-half-safe, matching the free/alloc paths). */
159 spin_lock_bh(&xprt->bc_pa_lock);
160 list_splice(&tmp_list, &xprt->bc_pa_list);
161 xprt_inc_alloc_count(xprt, min_reqs);
162 spin_unlock_bh(&xprt->bc_pa_lock);
164 dprintk(
"RPC: setup backchannel transport done\n");
/* Failure path: dispose of the partially built request. */
172 xprt_free_allocation(req);
174 dprintk(
"RPC: setup backchannel transport failed\n");
/*
 * Fragment of the backchannel teardown path: drops max_reqs from the
 * preallocation accounting and frees preallocated requests, all under
 * bc_pa_lock.
 *
 * NOTE(review): this excerpt is incomplete — the function signature
 * and the loop that walks bc_pa_list to obtain each req are not
 * visible here.
 */
192 dprintk(
"RPC: destroy backchannel transport\n");
195 spin_lock_bh(&xprt->bc_pa_lock);
196 xprt_dec_alloc_count(xprt, max_reqs);
/* Release a preallocated request taken off the list (list walk not
 * shown in this excerpt). */
199 xprt_free_allocation(req);
203 spin_unlock_bh(&xprt->bc_pa_lock);
205 dprintk(
"RPC: backchannel list empty= %s\n",
206 list_empty(&xprt->bc_pa_list) ?
"true" :
"false");
/*
 * Fragment of the backchannel request-allocation path: takes a
 * preallocated rpc_rqst off xprt->bc_pa_list (when one is available),
 * marks it in use, and resets its per-call state.
 *
 * NOTE(review): this excerpt is incomplete — the function signature
 * and the statements inside the if-branch that actually remove req
 * from the list are not visible here.
 */
223 struct rpc_rqst *
req;
225 dprintk(
"RPC: allocate a backchannel request\n");
226 spin_lock(&xprt->bc_pa_lock);
227 if (!list_empty(&xprt->bc_pa_list)) {
234 spin_unlock(&xprt->bc_pa_lock);
/* Mark the slot busy and clear per-call bookkeeping. */
237 set_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
238 req->rq_reply_bytes_recvd = 0;
239 req->rq_bytes_sent = 0;
/* The private receive buffer starts as a copy of rq_rcv_buf. */
240 memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
241 sizeof(req->rq_private_buf));
243 dprintk(
"RPC: backchannel req=%p\n", req);
/*
 * Fragment of the backchannel request-release path: clears the in-use
 * bit, then either frees the request outright (when the transport no
 * longer accounts for preallocated slots) or requeues it on
 * xprt->bc_pa_list for reuse.
 *
 * NOTE(review): this excerpt is incomplete — the function signature
 * and the control flow (return/else) separating the free branch from
 * the requeue branch are not visible here.
 */
253 struct rpc_xprt *xprt = req->rq_xprt;
255 dprintk(
"RPC: free backchannel req=%p\n", req);
259 clear_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
/* No more accounted slots: the request is not wanted back. */
262 if (!xprt_need_to_requeue(xprt)) {
269 dprintk(
"RPC: Last session removed req=%p\n", req);
270 xprt_free_allocation(req);
/* Otherwise return the slot to the preallocation list under the
 * bottom-half-safe lock, matching the setup path. */
278 spin_lock_bh(&xprt->bc_pa_lock);
279 list_add(&req->rq_bc_pa_list, &xprt->bc_pa_list);
280 spin_unlock_bh(&xprt->bc_pa_lock);