backchannel_rqst.c

/******************************************************************************

(c) 2007 Network Appliance, Inc.  All Rights Reserved.
(c) 2009 NetApp.  All Rights Reserved.

NetApp provides this source code under the GPL v2 License.
The GPL v2 license is available at
http://opensource.org/licenses/gpl-license.php.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/

#include <linux/tcp.h>
#include <linux/slab.h>
#include <linux/sunrpc/xprt.h>
#include <linux/export.h>
#include <linux/sunrpc/bc_xprt.h>

#ifdef RPC_DEBUG
#define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

/*
 * Helper routines that track the number of preallocation elements
 * on the transport.
 */
static inline int xprt_need_to_requeue(struct rpc_xprt *xprt)
{
	return xprt->bc_alloc_count > 0;
}

static inline void xprt_inc_alloc_count(struct rpc_xprt *xprt, unsigned int n)
{
	xprt->bc_alloc_count += n;
}

static inline int xprt_dec_alloc_count(struct rpc_xprt *xprt, unsigned int n)
{
	return xprt->bc_alloc_count -= n;
}

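/*
 * Note on the counter above (editorial addition, not in the original
 * file): bc_alloc_count records how many preallocated slots the
 * sessions still expect to exist.  xprt_setup_backchannel() raises it,
 * xprt_destroy_backchannel() lowers it, and xprt_free_bc_request()
 * calls xprt_need_to_requeue() to decide whether a slot returning from
 * a callback should go back on bc_pa_list or be freed outright.
 */
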
/*
 * Free the preallocated rpc_rqst structure and the memory
 * buffers hanging off of it.
 */
static void xprt_free_allocation(struct rpc_rqst *req)
{
	struct xdr_buf *xbufp;

	dprintk("RPC: free allocations for req= %p\n", req);
	BUG_ON(test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state));
	xbufp = &req->rq_private_buf;
	free_page((unsigned long)xbufp->head[0].iov_base);
	xbufp = &req->rq_snd_buf;
	free_page((unsigned long)xbufp->head[0].iov_base);
	list_del(&req->rq_bc_pa_list);
	kfree(req);
}

/*
 * Preallocate up to min_reqs structures and related buffers for use
 * by the backchannel.  This function can be called multiple times
 * when creating new sessions that use the same rpc_xprt.  The
 * preallocated buffers are added to the pool of resources used by
 * the rpc_xprt.  Any one of these resources may be used by an
 * incoming callback request.  It's up to the higher levels in the
 * stack to enforce that the maximum number of session slots is not
 * being exceeded.
 *
 * Some callback arguments can be large.  For example, a pNFS server
 * using multiple deviceids.  The list can be unbounded, but the client
 * has the ability to tell the server the maximum size of the callback
 * requests.  Each deviceID is 16 bytes, so allocate one page
 * for the arguments to have enough room to receive a number of these
 * deviceIDs (a 4096-byte page holds up to 256 of them, less XDR
 * overhead).  The NFS client indicates to the pNFS server that its
 * callback requests can be up to 4096 bytes in size.
 */
int xprt_setup_backchannel(struct rpc_xprt *xprt, unsigned int min_reqs)
{
	struct page *page_rcv = NULL, *page_snd = NULL;
	struct xdr_buf *xbufp = NULL;
	struct rpc_rqst *req, *tmp;
	struct list_head tmp_list;
	int i;

	dprintk("RPC: setup backchannel transport\n");

	/*
	 * We use a temporary list to keep track of the preallocated
	 * buffers.  Once we're done building the list we splice it
	 * into the backchannel preallocation list off of the rpc_xprt
	 * struct.  This helps minimize the amount of time the list
	 * lock is held on the rpc_xprt struct.  It also makes cleanup
	 * easier in case of memory allocation errors.
	 */
	INIT_LIST_HEAD(&tmp_list);
	for (i = 0; i < min_reqs; i++) {
		/* Pre-allocate one backchannel rpc_rqst */
		req = kzalloc(sizeof(struct rpc_rqst), GFP_KERNEL);
		if (req == NULL) {
			printk(KERN_ERR "Failed to create bc rpc_rqst\n");
			goto out_free;
		}

		/* Add the allocated buffer to the tmp list */
		dprintk("RPC: adding req= %p\n", req);
		list_add(&req->rq_bc_pa_list, &tmp_list);

		req->rq_xprt = xprt;
		INIT_LIST_HEAD(&req->rq_list);
		INIT_LIST_HEAD(&req->rq_bc_list);

		/* Preallocate one XDR receive buffer */
		page_rcv = alloc_page(GFP_KERNEL);
		if (page_rcv == NULL) {
			printk(KERN_ERR "Failed to create bc receive xbuf\n");
			goto out_free;
		}
		xbufp = &req->rq_rcv_buf;
		xbufp->head[0].iov_base = page_address(page_rcv);
		xbufp->head[0].iov_len = PAGE_SIZE;
		xbufp->tail[0].iov_base = NULL;
		xbufp->tail[0].iov_len = 0;
		xbufp->page_len = 0;
		xbufp->len = PAGE_SIZE;
		xbufp->buflen = PAGE_SIZE;

		/* Preallocate one XDR send buffer */
		page_snd = alloc_page(GFP_KERNEL);
		if (page_snd == NULL) {
			printk(KERN_ERR "Failed to create bc snd xbuf\n");
			goto out_free;
		}

		xbufp = &req->rq_snd_buf;
		xbufp->head[0].iov_base = page_address(page_snd);
		xbufp->head[0].iov_len = 0;
		xbufp->tail[0].iov_base = NULL;
		xbufp->tail[0].iov_len = 0;
		xbufp->page_len = 0;
		xbufp->len = 0;
		xbufp->buflen = PAGE_SIZE;
	}

	/*
	 * Add the temporary list to the backchannel preallocation list
	 */
	spin_lock_bh(&xprt->bc_pa_lock);
	list_splice(&tmp_list, &xprt->bc_pa_list);
	xprt_inc_alloc_count(xprt, min_reqs);
	spin_unlock_bh(&xprt->bc_pa_lock);

	dprintk("RPC: setup backchannel transport done\n");
	return 0;

out_free:
	/*
	 * Memory allocation failed, free the temporary list
	 */
	list_for_each_entry_safe(req, tmp, &tmp_list, rq_bc_pa_list)
		xprt_free_allocation(req);

	dprintk("RPC: setup backchannel transport failed\n");
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(xprt_setup_backchannel);

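/*
 * Usage sketch (editorial addition, not in the original file): how a
 * consumer such as the NFSv4.1 client might size and create the pool
 * when establishing a session.  NUM_BC_SLOTS and
 * nfs_session_setup_backchannel() are hypothetical names used only
 * for illustration.
 */
#if 0	/* illustration only, never compiled */
#define NUM_BC_SLOTS	1	/* assumed: one callback slot per session */

static int nfs_session_setup_backchannel(struct rpc_xprt *xprt)
{
	/*
	 * Preallocates NUM_BC_SLOTS rpc_rqst structures, each with one
	 * page for the receive buffer and one for the send buffer.
	 * On -ENOMEM nothing is left behind on xprt->bc_pa_list.
	 */
	return xprt_setup_backchannel(xprt, NUM_BC_SLOTS);
}
#endif
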
/**
 * xprt_destroy_backchannel - Destroys the backchannel preallocated structures.
 * @xprt:	the transport holding the preallocated structures
 * @max_reqs:	the maximum number of preallocated structures to destroy
 *
 * Since these structures may have been allocated by multiple calls
 * to xprt_setup_backchannel, we only destroy up to the maximum number
 * of reqs specified by the caller.
 */
void xprt_destroy_backchannel(struct rpc_xprt *xprt, unsigned int max_reqs)
{
	struct rpc_rqst *req = NULL, *tmp = NULL;

	dprintk("RPC: destroy backchannel transport\n");

	BUG_ON(max_reqs == 0);
	spin_lock_bh(&xprt->bc_pa_lock);
	xprt_dec_alloc_count(xprt, max_reqs);
	list_for_each_entry_safe(req, tmp, &xprt->bc_pa_list, rq_bc_pa_list) {
		dprintk("RPC: req=%p\n", req);
		xprt_free_allocation(req);
		if (--max_reqs == 0)
			break;
	}
	spin_unlock_bh(&xprt->bc_pa_lock);

	dprintk("RPC: backchannel list empty= %s\n",
		list_empty(&xprt->bc_pa_list) ? "true" : "false");
}
EXPORT_SYMBOL_GPL(xprt_destroy_backchannel);

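/*
 * Teardown sketch (editorial addition, not in the original file):
 * destruction is bounded by the count passed in, so a caller tearing
 * down one session passes the same number it used at setup time.
 * nfs_session_destroy_backchannel() is a hypothetical name, and
 * NUM_BC_SLOTS is the assumed constant from the sketch above.
 */
#if 0	/* illustration only, never compiled */
static void nfs_session_destroy_backchannel(struct rpc_xprt *xprt)
{
	/* Frees up to NUM_BC_SLOTS idle slots and drops bc_alloc_count;
	 * a slot currently in use is freed later by xprt_free_bc_request(). */
	xprt_destroy_backchannel(xprt, NUM_BC_SLOTS);
}
#endif
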
/*
 * One or more rpc_rqst structures have been preallocated during the
 * backchannel setup.  Buffer space for the send and private XDR buffers
 * has been preallocated as well.  Use xprt_alloc_bc_request to obtain
 * one of these preallocated structures; use xprt_free_bc_request to
 * return it.
 *
 * We know that we're called in soft interrupt context, so take the
 * plain spin_lock; there is no need for the bottom-half variant.
 *
 * Return an available rpc_rqst, or NULL if none is available.
 */
struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt)
{
	struct rpc_rqst *req;

	dprintk("RPC: allocate a backchannel request\n");
	spin_lock(&xprt->bc_pa_lock);
	if (!list_empty(&xprt->bc_pa_list)) {
		req = list_first_entry(&xprt->bc_pa_list, struct rpc_rqst,
				       rq_bc_pa_list);
		list_del(&req->rq_bc_pa_list);
	} else {
		req = NULL;
	}
	spin_unlock(&xprt->bc_pa_lock);

	if (req != NULL) {
		set_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
		req->rq_reply_bytes_recvd = 0;
		req->rq_bytes_sent = 0;
		memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
		       sizeof(req->rq_private_buf));
	}
	dprintk("RPC: backchannel req=%p\n", req);
	return req;
}

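/*
 * Receive-path sketch (editorial addition, not in the original file):
 * the shape of a transport's handling of an incoming callback.
 * bc_handle_callback() is a hypothetical name; the real work of
 * decoding into req->rq_rcv_buf and queueing the request for the
 * callback service is transport-specific.
 */
#if 0	/* illustration only, never compiled */
static void bc_handle_callback(struct rpc_xprt *xprt)
{
	struct rpc_rqst *req;

	req = xprt_alloc_bc_request(xprt);
	if (req == NULL)
		return;	/* no preallocated slot available: drop the call */

	/* ... decode the callback into req->rq_rcv_buf, process it,
	 * and return the slot once the reply has gone out: */
	xprt_free_bc_request(req);
}
#endif
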
/*
 * Return the preallocated rpc_rqst structure and XDR buffers
 * associated with this rpc_task.
 */
void xprt_free_bc_request(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;

	dprintk("RPC: free backchannel req=%p\n", req);

	smp_mb__before_clear_bit();
	BUG_ON(!test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state));
	clear_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
	smp_mb__after_clear_bit();

	if (!xprt_need_to_requeue(xprt)) {
		/*
		 * The last remaining session was destroyed while this
		 * entry was in use.  Free the entry and don't attempt
		 * to add it back to the list because there is no need
		 * for any more preallocated entries.
		 */
		dprintk("RPC: Last session removed req=%p\n", req);
		xprt_free_allocation(req);
		return;
	}

	/*
	 * Return it to the list of preallocations so that it
	 * may be reused by a new callback request.
	 */
	spin_lock_bh(&xprt->bc_pa_lock);
	list_add(&req->rq_bc_pa_list, &xprt->bc_pa_list);
	spin_unlock_bh(&xprt->bc_pa_lock);
}
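
/*
 * Interleaving sketch (editorial addition, not in the original file):
 * the sequence that makes the free-instead-of-requeue branch above
 * fire, assuming a pool of exactly one slot.
 */
#if 0	/* illustration only, never compiled */
	xprt_setup_backchannel(xprt, 1);	/* bc_alloc_count == 1 */
	req = xprt_alloc_bc_request(xprt);	/* slot leaves bc_pa_list */
	xprt_destroy_backchannel(xprt, 1);	/* bc_alloc_count drops to 0 */
	xprt_free_bc_request(req);		/* count == 0: req is freed */
#endif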