Linux Kernel  3.7.1
clnt.c
1 /*
2  * linux/net/sunrpc/clnt.c
3  *
4  * This file contains the high-level RPC interface.
5  * It is modeled as a finite state machine to support both synchronous
6  * and asynchronous requests.
7  *
8  * - RPC header generation and argument serialization.
9  * - Credential refresh.
10  * - TCP connect handling.
11  * - Retry of operation when it is suspected the operation failed because
12  * of uid squashing on the server, or when the credentials were stale
13  * and need to be refreshed, or when a packet was damaged in transit.
14  * This may have to be moved to the VFS layer.
15  *
16  * Copyright (C) 1992,1993 Rick Sladkey <[email protected]>
17  * Copyright (C) 1995,1996 Olaf Kirch <[email protected]>
18  */
19 
20 
21 #include <linux/module.h>
22 #include <linux/types.h>
23 #include <linux/kallsyms.h>
24 #include <linux/mm.h>
25 #include <linux/namei.h>
26 #include <linux/mount.h>
27 #include <linux/slab.h>
28 #include <linux/utsname.h>
29 #include <linux/workqueue.h>
30 #include <linux/in.h>
31 #include <linux/in6.h>
32 #include <linux/un.h>
33 #include <linux/rcupdate.h>
34 
35 #include <linux/sunrpc/clnt.h>
36 #include <linux/sunrpc/rpc_pipe_fs.h>
37 #include <linux/sunrpc/metrics.h>
38 #include <linux/sunrpc/bc_xprt.h>
39 #include <trace/events/sunrpc.h>
40 
41 #include "sunrpc.h"
42 #include "netns.h"
43 
44 #ifdef RPC_DEBUG
45 # define RPCDBG_FACILITY RPCDBG_CALL
46 #endif
47 
48 #define dprint_status(t) \
49  dprintk("RPC: %5u %s (status %d)\n", t->tk_pid, \
50  __func__, t->tk_status)
51 
52 /*
53  * All RPC clients are linked into this list
54  */
55 
56 static DECLARE_WAIT_QUEUE_HEAD(destroy_wait);
57 
58 
59 static void call_start(struct rpc_task *task);
60 static void call_reserve(struct rpc_task *task);
61 static void call_reserveresult(struct rpc_task *task);
62 static void call_allocate(struct rpc_task *task);
63 static void call_decode(struct rpc_task *task);
64 static void call_bind(struct rpc_task *task);
65 static void call_bind_status(struct rpc_task *task);
66 static void call_transmit(struct rpc_task *task);
67 #if defined(CONFIG_SUNRPC_BACKCHANNEL)
68 static void call_bc_transmit(struct rpc_task *task);
69 #endif /* CONFIG_SUNRPC_BACKCHANNEL */
70 static void call_status(struct rpc_task *task);
71 static void call_transmit_status(struct rpc_task *task);
72 static void call_refresh(struct rpc_task *task);
73 static void call_refreshresult(struct rpc_task *task);
74 static void call_timeout(struct rpc_task *task);
75 static void call_connect(struct rpc_task *task);
76 static void call_connect_status(struct rpc_task *task);
77 
78 static __be32 *rpc_encode_header(struct rpc_task *task);
79 static __be32 *rpc_verify_header(struct rpc_task *task);
80 static int rpc_ping(struct rpc_clnt *clnt);
81 
82 static void rpc_register_client(struct rpc_clnt *clnt)
83 {
84  struct net *net = rpc_net_ns(clnt);
85  struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
86 
87  spin_lock(&sn->rpc_client_lock);
88  list_add(&clnt->cl_clients, &sn->all_clients);
89  spin_unlock(&sn->rpc_client_lock);
90 }
91 
92 static void rpc_unregister_client(struct rpc_clnt *clnt)
93 {
94  struct net *net = rpc_net_ns(clnt);
95  struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
96 
97  spin_lock(&sn->rpc_client_lock);
98  list_del(&clnt->cl_clients);
99  spin_unlock(&sn->rpc_client_lock);
100 }
101 
102 static void __rpc_clnt_remove_pipedir(struct rpc_clnt *clnt)
103 {
104  if (clnt->cl_dentry) {
105  if (clnt->cl_auth && clnt->cl_auth->au_ops->pipes_destroy)
106  clnt->cl_auth->au_ops->pipes_destroy(clnt->cl_auth);
107  rpc_remove_client_dir(clnt->cl_dentry);
108  }
109  clnt->cl_dentry = NULL;
110 }
111 
112 static void rpc_clnt_remove_pipedir(struct rpc_clnt *clnt)
113 {
114  struct net *net = rpc_net_ns(clnt);
115  struct super_block *pipefs_sb;
116 
117  pipefs_sb = rpc_get_sb_net(net);
118  if (pipefs_sb) {
119  __rpc_clnt_remove_pipedir(clnt);
120  rpc_put_sb_net(net);
121  }
122 }
123 
124 static struct dentry *rpc_setup_pipedir_sb(struct super_block *sb,
125  struct rpc_clnt *clnt,
126  const char *dir_name)
127 {
128  static uint32_t clntid;
129  char name[15];
130  struct qstr q = { .name = name };
131  struct dentry *dir, *dentry;
132  int error;
133 
134  dir = rpc_d_lookup_sb(sb, dir_name);
135  if (dir == NULL)
136  return dir;
137  for (;;) {
138  q.len = snprintf(name, sizeof(name), "clnt%x", (unsigned int)clntid++);
139  name[sizeof(name) - 1] = '\0';
140  q.hash = full_name_hash(q.name, q.len);
141  dentry = rpc_create_client_dir(dir, &q, clnt);
142  if (!IS_ERR(dentry))
143  break;
144  error = PTR_ERR(dentry);
145  if (error != -EEXIST) {
146  printk(KERN_INFO "RPC: Couldn't create pipefs entry"
147  " %s/%s, error %d\n",
148  dir_name, name, error);
149  break;
150  }
151  }
152  dput(dir);
153  return dentry;
154 }
155 
156 static int
157 rpc_setup_pipedir(struct rpc_clnt *clnt, const char *dir_name)
158 {
159  struct net *net = rpc_net_ns(clnt);
160  struct super_block *pipefs_sb;
161  struct dentry *dentry;
162 
163  clnt->cl_dentry = NULL;
164  if (dir_name == NULL)
165  return 0;
166  pipefs_sb = rpc_get_sb_net(net);
167  if (!pipefs_sb)
168  return 0;
169  dentry = rpc_setup_pipedir_sb(pipefs_sb, clnt, dir_name);
170  rpc_put_sb_net(net);
171  if (IS_ERR(dentry))
172  return PTR_ERR(dentry);
173  clnt->cl_dentry = dentry;
174  return 0;
175 }
176 
177 static inline int rpc_clnt_skip_event(struct rpc_clnt *clnt, unsigned long event)
178 {
179  if (((event == RPC_PIPEFS_MOUNT) && clnt->cl_dentry) ||
180  ((event == RPC_PIPEFS_UMOUNT) && !clnt->cl_dentry))
181  return 1;
182  return 0;
183 }
184 
185 static int __rpc_clnt_handle_event(struct rpc_clnt *clnt, unsigned long event,
186  struct super_block *sb)
187 {
188  struct dentry *dentry;
189  int err = 0;
190 
191  switch (event) {
192  case RPC_PIPEFS_MOUNT:
193  dentry = rpc_setup_pipedir_sb(sb, clnt,
194  clnt->cl_program->pipe_dir_name);
195  BUG_ON(dentry == NULL);
196  if (IS_ERR(dentry))
197  return PTR_ERR(dentry);
198  clnt->cl_dentry = dentry;
199  if (clnt->cl_auth->au_ops->pipes_create) {
200  err = clnt->cl_auth->au_ops->pipes_create(clnt->cl_auth);
201  if (err)
202  __rpc_clnt_remove_pipedir(clnt);
203  }
204  break;
205  case RPC_PIPEFS_UMOUNT:
206  __rpc_clnt_remove_pipedir(clnt);
207  break;
208  default:
209  printk(KERN_ERR "%s: unknown event: %ld\n", __func__, event);
210  return -ENOTSUPP;
211  }
212  return err;
213 }
214 
215 static int __rpc_pipefs_event(struct rpc_clnt *clnt, unsigned long event,
216  struct super_block *sb)
217 {
218  int error = 0;
219 
220  for (;; clnt = clnt->cl_parent) {
221  if (!rpc_clnt_skip_event(clnt, event))
222  error = __rpc_clnt_handle_event(clnt, event, sb);
223  if (error || clnt == clnt->cl_parent)
224  break;
225  }
226  return error;
227 }
228 
229 static struct rpc_clnt *rpc_get_client_for_event(struct net *net, int event)
230 {
231  struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
232  struct rpc_clnt *clnt;
233 
234  spin_lock(&sn->rpc_client_lock);
235  list_for_each_entry(clnt, &sn->all_clients, cl_clients) {
236  if (clnt->cl_program->pipe_dir_name == NULL)
237  break;
238  if (rpc_clnt_skip_event(clnt, event))
239  continue;
240  if (atomic_inc_not_zero(&clnt->cl_count) == 0)
241  continue;
242  spin_unlock(&sn->rpc_client_lock);
243  return clnt;
244  }
245  spin_unlock(&sn->rpc_client_lock);
246  return NULL;
247 }
248 
249 static int rpc_pipefs_event(struct notifier_block *nb, unsigned long event,
250  void *ptr)
251 {
252  struct super_block *sb = ptr;
253  struct rpc_clnt *clnt;
254  int error = 0;
255 
256  while ((clnt = rpc_get_client_for_event(sb->s_fs_info, event))) {
257  error = __rpc_pipefs_event(clnt, event, sb);
258  rpc_release_client(clnt);
259  if (error)
260  break;
261  }
262  return error;
263 }
264 
265 static struct notifier_block rpc_clients_block = {
266  .notifier_call = rpc_pipefs_event,
267  .priority = SUNRPC_PIPEFS_RPC_PRIO,
268 };
269 
270 int rpc_clients_notifier_register(void)
271 {
272  return rpc_pipefs_notifier_register(&rpc_clients_block);
273 }
274 
275 void rpc_clients_notifier_unregister(void)
276 {
277  return rpc_pipefs_notifier_unregister(&rpc_clients_block);
278 }
279 
280 static void rpc_clnt_set_nodename(struct rpc_clnt *clnt, const char *nodename)
281 {
282  clnt->cl_nodelen = strlen(nodename);
283  if (clnt->cl_nodelen > UNX_MAXNODENAME)
284  clnt->cl_nodelen = UNX_MAXNODENAME;
285  memcpy(clnt->cl_nodename, nodename, clnt->cl_nodelen);
286 }
287 
288 static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, struct rpc_xprt *xprt)
289 {
290  const struct rpc_program *program = args->program;
291  const struct rpc_version *version;
292  struct rpc_clnt *clnt = NULL;
293  struct rpc_auth *auth;
294  int err;
295 
296  /* sanity check the name before trying to print it */
297  dprintk("RPC: creating %s client for %s (xprt %p)\n",
298  program->name, args->servername, xprt);
299 
300  err = rpciod_up();
301  if (err)
302  goto out_no_rpciod;
303  err = -EINVAL;
304  if (!xprt)
305  goto out_no_xprt;
306 
307  if (args->version >= program->nrvers)
308  goto out_err;
309  version = program->version[args->version];
310  if (version == NULL)
311  goto out_err;
312 
313  err = -ENOMEM;
314  clnt = kzalloc(sizeof(*clnt), GFP_KERNEL);
315  if (!clnt)
316  goto out_err;
317  clnt->cl_parent = clnt;
318 
319  rcu_assign_pointer(clnt->cl_xprt, xprt);
320  clnt->cl_procinfo = version->procs;
321  clnt->cl_maxproc = version->nrprocs;
322  clnt->cl_protname = program->name;
323  clnt->cl_prog = args->prognumber ? : program->number;
324  clnt->cl_vers = version->number;
325  clnt->cl_stats = program->stats;
326  clnt->cl_metrics = rpc_alloc_iostats(clnt);
327  err = -ENOMEM;
328  if (clnt->cl_metrics == NULL)
329  goto out_no_stats;
330  clnt->cl_program = program;
331  INIT_LIST_HEAD(&clnt->cl_tasks);
332  spin_lock_init(&clnt->cl_lock);
333 
334  if (!xprt_bound(xprt))
335  clnt->cl_autobind = 1;
336 
337  clnt->cl_timeout = xprt->timeout;
338  if (args->timeout != NULL) {
339  memcpy(&clnt->cl_timeout_default, args->timeout,
340  sizeof(clnt->cl_timeout_default));
341  clnt->cl_timeout = &clnt->cl_timeout_default;
342  }
343 
344  clnt->cl_rtt = &clnt->cl_rtt_default;
345  rpc_init_rtt(&clnt->cl_rtt_default, clnt->cl_timeout->to_initval);
346  clnt->cl_principal = NULL;
347  if (args->client_name) {
348  clnt->cl_principal = kstrdup(args->client_name, GFP_KERNEL);
349  if (!clnt->cl_principal)
350  goto out_no_principal;
351  }
352 
353  atomic_set(&clnt->cl_count, 1);
354 
355  err = rpc_setup_pipedir(clnt, program->pipe_dir_name);
356  if (err < 0)
357  goto out_no_path;
358 
359  auth = rpcauth_create(args->authflavor, clnt);
360  if (IS_ERR(auth)) {
361  printk(KERN_INFO "RPC: Couldn't create auth handle (flavor %u)\n",
362  args->authflavor);
363  err = PTR_ERR(auth);
364  goto out_no_auth;
365  }
366 
367  /* save the nodename */
368  rpc_clnt_set_nodename(clnt, utsname()->nodename);
369  rpc_register_client(clnt);
370  return clnt;
371 
372 out_no_auth:
373  rpc_clnt_remove_pipedir(clnt);
374 out_no_path:
375  kfree(clnt->cl_principal);
376 out_no_principal:
377  rpc_free_iostats(clnt->cl_metrics);
378 out_no_stats:
379  kfree(clnt);
380 out_err:
381  xprt_put(xprt);
382 out_no_xprt:
383  rpciod_down();
384 out_no_rpciod:
385  return ERR_PTR(err);
386 }
387 
398 struct rpc_clnt *rpc_create(struct rpc_create_args *args)
399 {
400  struct rpc_xprt *xprt;
401  struct rpc_clnt *clnt;
402  struct xprt_create xprtargs = {
403  .net = args->net,
404  .ident = args->protocol,
405  .srcaddr = args->saddress,
406  .dstaddr = args->address,
407  .addrlen = args->addrsize,
408  .servername = args->servername,
409  .bc_xprt = args->bc_xprt,
410  };
411  char servername[48];
412 
413  /*
414  * If the caller chooses not to specify a hostname, whip
415  * up a string representation of the passed-in address.
416  */
417  if (xprtargs.servername == NULL) {
418  struct sockaddr_un *sun =
419  (struct sockaddr_un *)args->address;
420  struct sockaddr_in *sin =
421  (struct sockaddr_in *)args->address;
422  struct sockaddr_in6 *sin6 =
423  (struct sockaddr_in6 *)args->address;
424 
425  servername[0] = '\0';
426  switch (args->address->sa_family) {
427  case AF_LOCAL:
428  snprintf(servername, sizeof(servername), "%s",
429  sun->sun_path);
430  break;
431  case AF_INET:
432  snprintf(servername, sizeof(servername), "%pI4",
433  &sin->sin_addr.s_addr);
434  break;
435  case AF_INET6:
436  snprintf(servername, sizeof(servername), "%pI6",
437  &sin6->sin6_addr);
438  break;
439  default:
440  /* caller wants default server name, but
441  * address family isn't recognized. */
442  return ERR_PTR(-EINVAL);
443  }
444  xprtargs.servername = servername;
445  }
446 
447  xprt = xprt_create_transport(&xprtargs);
448  if (IS_ERR(xprt))
449  return (struct rpc_clnt *)xprt;
450 
451  /*
452  * By default, kernel RPC client connects from a reserved port.
453  * CAP_NET_BIND_SERVICE will not be set for unprivileged requesters,
454  * but it is always enabled for rpciod, which handles the connect
455  * operation.
456  */
457  xprt->resvport = 1;
458  if (args->flags & RPC_CLNT_CREATE_NONPRIVPORT)
459  xprt->resvport = 0;
460 
461  clnt = rpc_new_client(args, xprt);
462  if (IS_ERR(clnt))
463  return clnt;
464 
465  if (!(args->flags & RPC_CLNT_CREATE_NOPING)) {
466  int err = rpc_ping(clnt);
467  if (err != 0) {
468  rpc_shutdown_client(clnt);
469  return ERR_PTR(err);
470  }
471  }
472 
473  clnt->cl_softrtry = 1;
474  if (args->flags & RPC_CLNT_CREATE_HARDRTRY)
475  clnt->cl_softrtry = 0;
476 
477  if (args->flags & RPC_CLNT_CREATE_AUTOBIND)
478  clnt->cl_autobind = 1;
479  if (args->flags & RPC_CLNT_CREATE_DISCRTRY)
480  clnt->cl_discrtry = 1;
481  if (!(args->flags & RPC_CLNT_CREATE_QUIET))
482  clnt->cl_chatty = 1;
483 
484  return clnt;
485 }
487 
488 /*
489  * This function clones the RPC client structure. It allows us to share the
490  * same transport while varying parameters such as the authentication
491  * flavour.
492  */
493 static struct rpc_clnt *__rpc_clone_client(struct rpc_create_args *args,
494  struct rpc_clnt *clnt)
495 {
496  struct rpc_xprt *xprt;
497  struct rpc_clnt *new;
498  int err;
499 
500  err = -ENOMEM;
501  rcu_read_lock();
502  xprt = xprt_get(rcu_dereference(clnt->cl_xprt));
503  rcu_read_unlock();
504  if (xprt == NULL)
505  goto out_err;
506  args->servername = xprt->servername;
507 
508  new = rpc_new_client(args, xprt);
509  if (IS_ERR(new)) {
510  err = PTR_ERR(new);
511  goto out_put;
512  }
513 
514  atomic_inc(&clnt->cl_count);
515  new->cl_parent = clnt;
516 
517  /* Turn off autobind on clones */
518  new->cl_autobind = 0;
519  new->cl_softrtry = clnt->cl_softrtry;
520  new->cl_discrtry = clnt->cl_discrtry;
521  new->cl_chatty = clnt->cl_chatty;
522  return new;
523 
524 out_put:
525  xprt_put(xprt);
526 out_err:
527  dprintk("RPC: %s: returned error %d\n", __func__, err);
528  return ERR_PTR(err);
529 }
530 
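/*
 * rpc_clone_client - clone an rpc_clnt, sharing the parent's transport
 * and keeping its authentication flavour.
 */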
538 struct rpc_clnt *rpc_clone_client(struct rpc_clnt *clnt)
539 {
540  struct rpc_create_args args = {
541  .program = clnt->cl_program,
542  .prognumber = clnt->cl_prog,
543  .version = clnt->cl_vers,
544  .authflavor = clnt->cl_auth->au_flavor,
545  .client_name = clnt->cl_principal,
546  };
547  return __rpc_clone_client(&args, clnt);
548 }
550 
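/*
 * rpc_clone_client_set_auth - clone an rpc_clnt, sharing the parent's
 * transport but requesting a different authentication flavour.
 */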
559 struct rpc_clnt *
560 rpc_clone_client_set_auth(struct rpc_clnt *clnt, rpc_authflavor_t flavor)
561 {
562  struct rpc_create_args args = {
563  .program = clnt->cl_program,
564  .prognumber = clnt->cl_prog,
565  .version = clnt->cl_vers,
566  .authflavor = flavor,
567  .client_name = clnt->cl_principal,
568  };
569  return __rpc_clone_client(&args, clnt);
570 }
572 
573 /*
574  * Kill all tasks for the given client.
575  * XXX: kill their descendants as well?
576  */
577 void rpc_killall_tasks(struct rpc_clnt *clnt)
578 {
579  struct rpc_task *rovr;
580 
581 
582  if (list_empty(&clnt->cl_tasks))
583  return;
584  dprintk("RPC: killing all tasks for client %p\n", clnt);
585  /*
586  * Spin lock all_tasks to prevent changes...
587  */
588  spin_lock(&clnt->cl_lock);
589  list_for_each_entry(rovr, &clnt->cl_tasks, tk_task) {
590  if (!RPC_IS_ACTIVATED(rovr))
591  continue;
592  if (!(rovr->tk_flags & RPC_TASK_KILLED)) {
593  rovr->tk_flags |= RPC_TASK_KILLED;
594  rpc_exit(rovr, -EIO);
595  if (RPC_IS_QUEUED(rovr))
596  rpc_wake_up_queued_task(rovr->tk_waitqueue,
597  rovr);
598  }
599  }
600  spin_unlock(&clnt->cl_lock);
601 }
603 
604 /*
605  * Properly shut down an RPC client, terminating all outstanding
606  * requests.
607  */
608 void rpc_shutdown_client(struct rpc_clnt *clnt)
609 {
610  dprintk_rcu("RPC: shutting down %s client for %s\n",
611  clnt->cl_protname,
612  rcu_dereference(clnt->cl_xprt)->servername);
613 
614  while (!list_empty(&clnt->cl_tasks)) {
615  rpc_killall_tasks(clnt);
616  wait_event_timeout(destroy_wait,
617  list_empty(&clnt->cl_tasks), 1*HZ);
618  }
619 
620  rpc_release_client(clnt);
621 }
623 
624 /*
625  * Free an RPC client
626  */
627 static void
628 rpc_free_client(struct rpc_clnt *clnt)
629 {
630  dprintk_rcu("RPC: destroying %s client for %s\n",
631  clnt->cl_protname,
632  rcu_dereference(clnt->cl_xprt)->servername);
633  if (clnt->cl_parent != clnt)
634  rpc_release_client(clnt->cl_parent);
635  rpc_unregister_client(clnt);
636  rpc_clnt_remove_pipedir(clnt);
637  rpc_free_iostats(clnt->cl_metrics);
638  kfree(clnt->cl_principal);
639  clnt->cl_metrics = NULL;
640  xprt_put(rcu_dereference_raw(clnt->cl_xprt));
641  rpciod_down();
642  kfree(clnt);
643 }
644 
645 /*
646  * Free an RPC client
647  */
648 static void
649 rpc_free_auth(struct rpc_clnt *clnt)
650 {
651  if (clnt->cl_auth == NULL) {
652  rpc_free_client(clnt);
653  return;
654  }
655 
656  /*
657  * Note: RPCSEC_GSS may need to send NULL RPC calls in order to
658  * release remaining GSS contexts. This mechanism ensures
659  * that it can do so safely.
660  */
661  atomic_inc(&clnt->cl_count);
662  rpcauth_release(clnt->cl_auth);
663  clnt->cl_auth = NULL;
664  if (atomic_dec_and_test(&clnt->cl_count))
665  rpc_free_client(clnt);
666 }
667 
668 /*
669  * Release reference to the RPC client
670  */
671 void
672 rpc_release_client(struct rpc_clnt *clnt)
673 {
674  dprintk("RPC: rpc_release_client(%p)\n", clnt);
675 
676  if (list_empty(&clnt->cl_tasks))
677  wake_up(&destroy_wait);
678  if (atomic_dec_and_test(&clnt->cl_count))
679  rpc_free_auth(clnt);
680 }
681 
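/*
 * rpc_bind_new_program - bind a new RPC program to an existing client
 *
 * Clones the old client, points the clone at a different program and
 * version on the same transport, and pings the server to confirm that
 * the program is actually served.
 */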
692 struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *old,
693  const struct rpc_program *program,
694  u32 vers)
695 {
696  struct rpc_clnt *clnt;
697  const struct rpc_version *version;
698  int err;
699 
700  BUG_ON(vers >= program->nrvers || !program->version[vers]);
701  version = program->version[vers];
702  clnt = rpc_clone_client(old);
703  if (IS_ERR(clnt))
704  goto out;
705  clnt->cl_procinfo = version->procs;
706  clnt->cl_maxproc = version->nrprocs;
707  clnt->cl_protname = program->name;
708  clnt->cl_prog = program->number;
709  clnt->cl_vers = version->number;
710  clnt->cl_stats = program->stats;
711  err = rpc_ping(clnt);
712  if (err != 0) {
713  rpc_shutdown_client(clnt);
714  clnt = ERR_PTR(err);
715  }
716 out:
717  return clnt;
718 }
720 
721 void rpc_task_release_client(struct rpc_task *task)
722 {
723  struct rpc_clnt *clnt = task->tk_client;
724 
725  if (clnt != NULL) {
726  /* Remove from client task list */
727  spin_lock(&clnt->cl_lock);
728  list_del(&task->tk_task);
729  spin_unlock(&clnt->cl_lock);
730  task->tk_client = NULL;
731 
732  rpc_release_client(clnt);
733  }
734 }
735 
736 static
737 void rpc_task_set_client(struct rpc_task *task, struct rpc_clnt *clnt)
738 {
739  if (clnt != NULL) {
740  rpc_task_release_client(task);
741  task->tk_client = clnt;
742  atomic_inc(&clnt->cl_count);
743  if (clnt->cl_softrtry)
744  task->tk_flags |= RPC_TASK_SOFT;
745  if (sk_memalloc_socks()) {
746  struct rpc_xprt *xprt;
747 
748  rcu_read_lock();
749  xprt = rcu_dereference(clnt->cl_xprt);
750  if (xprt->swapper)
751  task->tk_flags |= RPC_TASK_SWAPPER;
752  rcu_read_unlock();
753  }
754  /* Add to the client's list of all tasks */
755  spin_lock(&clnt->cl_lock);
756  list_add_tail(&task->tk_task, &clnt->cl_tasks);
757  spin_unlock(&clnt->cl_lock);
758  }
759 }
760 
761 void rpc_task_reset_client(struct rpc_task *task, struct rpc_clnt *clnt)
762 {
763  rpc_task_release_client(task);
764  rpc_task_set_client(task, clnt);
765 }
767 
768 
769 static void
770 rpc_task_set_rpc_message(struct rpc_task *task, const struct rpc_message *msg)
771 {
772  if (msg != NULL) {
773  task->tk_msg.rpc_proc = msg->rpc_proc;
774  task->tk_msg.rpc_argp = msg->rpc_argp;
775  task->tk_msg.rpc_resp = msg->rpc_resp;
776  if (msg->rpc_cred != NULL)
777  task->tk_msg.rpc_cred = get_rpccred(msg->rpc_cred);
778  }
779 }
780 
781 /*
782  * Default callback for async RPC calls
783  */
784 static void
785 rpc_default_callback(struct rpc_task *task, void *data)
786 {
787 }
788 
789 static const struct rpc_call_ops rpc_default_ops = {
790  .rpc_call_done = rpc_default_callback,
791 };
792 
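/*
 * rpc_run_task - allocate an rpc_task from the setup data, bind it to a
 * client and message, and start executing it.  The task is returned with
 * an extra reference that the caller must drop with rpc_put_task().
 */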
797 struct rpc_task *rpc_run_task(const struct rpc_task_setup *task_setup_data)
798 {
799  struct rpc_task *task;
800 
801  task = rpc_new_task(task_setup_data);
802  if (IS_ERR(task))
803  goto out;
804 
805  rpc_task_set_client(task, task_setup_data->rpc_client);
806  rpc_task_set_rpc_message(task, task_setup_data->rpc_message);
807 
808  if (task->tk_action == NULL)
809  rpc_call_start(task);
810 
811  atomic_inc(&task->tk_count);
812  rpc_execute(task);
813 out:
814  return task;
815 }
817 
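/*
 * rpc_call_sync - issue a synchronous RPC call and wait for the result.
 * The flags must not include RPC_TASK_ASYNC.
 */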
824 int rpc_call_sync(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags)
825 {
826  struct rpc_task *task;
827  struct rpc_task_setup task_setup_data = {
828  .rpc_client = clnt,
829  .rpc_message = msg,
830  .callback_ops = &rpc_default_ops,
831  .flags = flags,
832  };
833  int status;
834 
835  BUG_ON(flags & RPC_TASK_ASYNC);
836 
837  task = rpc_run_task(&task_setup_data);
838  if (IS_ERR(task))
839  return PTR_ERR(task);
840  status = task->tk_status;
841  rpc_put_task(task);
842  return status;
843 }
845 
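/*
 * rpc_call_async - issue an asynchronous RPC call; completion is
 * reported through the tk_ops callbacks, which receive @data.
 */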
854 int
855 rpc_call_async(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags,
856  const struct rpc_call_ops *tk_ops, void *data)
857 {
858  struct rpc_task *task;
859  struct rpc_task_setup task_setup_data = {
860  .rpc_client = clnt,
861  .rpc_message = msg,
862  .callback_ops = tk_ops,
863  .callback_data = data,
864  .flags = flags|RPC_TASK_ASYNC,
865  };
866 
867  task = rpc_run_task(&task_setup_data);
868  if (IS_ERR(task))
869  return PTR_ERR(task);
870  rpc_put_task(task);
871  return 0;
872 }
874 
875 #if defined(CONFIG_SUNRPC_BACKCHANNEL)
876 
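/*
 * rpc_run_bc_task - start a task that transmits a backchannel reply
 * which has already been XDR encoded in req->rq_snd_buf.
 */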
882 struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req,
883  const struct rpc_call_ops *tk_ops)
884 {
885  struct rpc_task *task;
886  struct xdr_buf *xbufp = &req->rq_snd_buf;
887  struct rpc_task_setup task_setup_data = {
888  .callback_ops = tk_ops,
889  };
890 
891  dprintk("RPC: rpc_run_bc_task req= %p\n", req);
892  /*
893  * Create an rpc_task to send the data
894  */
895  task = rpc_new_task(&task_setup_data);
896  if (IS_ERR(task)) {
897  xprt_free_bc_request(req);
898  goto out;
899  }
900  task->tk_rqstp = req;
901 
902  /*
903  * Set up the xdr_buf length.
904  * This also indicates that the buffer is XDR encoded already.
905  */
906  xbufp->len = xbufp->head[0].iov_len + xbufp->page_len +
907  xbufp->tail[0].iov_len;
908 
909  task->tk_action = call_bc_transmit;
910  atomic_inc(&task->tk_count);
911  BUG_ON(atomic_read(&task->tk_count) != 2);
912  rpc_execute(task);
913 
914 out:
915  dprintk("RPC: rpc_run_bc_task: task= %p\n", task);
916  return task;
917 }
918 #endif /* CONFIG_SUNRPC_BACKCHANNEL */
919 
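/*
 * rpc_call_start - point a task at call_start, the first state of the
 * RPC state machine.
 */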
920 void
921 rpc_call_start(struct rpc_task *task)
922 {
923  task->tk_action = call_start;
924 }
926 
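/*
 * rpc_peeraddr - copy the remote peer address into @buf, returning the
 * number of bytes copied (at most @bufsize).
 */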
935 size_t rpc_peeraddr(struct rpc_clnt *clnt, struct sockaddr *buf, size_t bufsize)
936 {
937  size_t bytes;
938  struct rpc_xprt *xprt;
939 
940  rcu_read_lock();
941  xprt = rcu_dereference(clnt->cl_xprt);
942 
943  bytes = xprt->addrlen;
944  if (bytes > bufsize)
945  bytes = bufsize;
946  memcpy(buf, &xprt->addr, bytes);
947  rcu_read_unlock();
948 
949  return bytes;
950 }
952 
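/*
 * rpc_peeraddr2str - return the remote peer address in printable form.
 * The returned string belongs to the rpc_xprt, so the caller must hold
 * the RCU read lock for as long as it uses the pointer.
 */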
962 const char *rpc_peeraddr2str(struct rpc_clnt *clnt,
963  enum rpc_display_format_t format)
964 {
965  struct rpc_xprt *xprt;
966 
967  xprt = rcu_dereference(clnt->cl_xprt);
968 
969  if (xprt->address_strings[format] != NULL)
970  return xprt->address_strings[format];
971  else
972  return "unprintable";
973 }
975 
976 static const struct sockaddr_in rpc_inaddr_loopback = {
977  .sin_family = AF_INET,
978  .sin_addr.s_addr = htonl(INADDR_ANY),
979 };
980 
981 static const struct sockaddr_in6 rpc_in6addr_loopback = {
982  .sin6_family = AF_INET6,
983  .sin6_addr = IN6ADDR_ANY_INIT,
984 };
985 
986 /*
987  * Try a getsockname() on a connected datagram socket. Using a
988  * connected datagram socket prevents leaving a socket in TIME_WAIT.
989  * This conserves the ephemeral port number space.
990  *
991  * Returns zero and fills in "buf" if successful; otherwise, a
992  * negative errno is returned.
993  */
994 static int rpc_sockname(struct net *net, struct sockaddr *sap, size_t salen,
995  struct sockaddr *buf, int buflen)
996 {
997  struct socket *sock;
998  int err;
999 
1000  err = __sock_create(net, sap->sa_family,
1001  SOCK_DGRAM, IPPROTO_UDP, &sock, 1);
1002  if (err < 0) {
1003  dprintk("RPC: can't create UDP socket (%d)\n", err);
1004  goto out;
1005  }
1006 
1007  switch (sap->sa_family) {
1008  case AF_INET:
1009  err = kernel_bind(sock,
1010  (struct sockaddr *)&rpc_inaddr_loopback,
1011  sizeof(rpc_inaddr_loopback));
1012  break;
1013  case AF_INET6:
1014  err = kernel_bind(sock,
1015  (struct sockaddr *)&rpc_in6addr_loopback,
1016  sizeof(rpc_in6addr_loopback));
1017  break;
1018  default:
1019  err = -EAFNOSUPPORT;
1020  goto out;
1021  }
1022  if (err < 0) {
1023  dprintk("RPC: can't bind UDP socket (%d)\n", err);
1024  goto out_release;
1025  }
1026 
1027  err = kernel_connect(sock, sap, salen, 0);
1028  if (err < 0) {
1029  dprintk("RPC: can't connect UDP socket (%d)\n", err);
1030  goto out_release;
1031  }
1032 
1033  err = kernel_getsockname(sock, buf, &buflen);
1034  if (err < 0) {
1035  dprintk("RPC: getsockname failed (%d)\n", err);
1036  goto out_release;
1037  }
1038 
1039  err = 0;
1040  if (buf->sa_family == AF_INET6) {
1041  struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)buf;
1042  sin6->sin6_scope_id = 0;
1043  }
1044  dprintk("RPC: %s succeeded\n", __func__);
1045 
1046 out_release:
1047  sock_release(sock);
1048 out:
1049  return err;
1050 }
1051 
1052 /*
1053  * Scraping a connected socket failed, so we don't have a usable
1054  * local address. Fallback: generate an address that will prevent
1055  * the server from calling us back.
1056  *
1057  * Returns zero and fills in "buf" if successful; otherwise, a
1058  * negative errno is returned.
1059  */
1060 static int rpc_anyaddr(int family, struct sockaddr *buf, size_t buflen)
1061 {
1062  switch (family) {
1063  case AF_INET:
1064  if (buflen < sizeof(rpc_inaddr_loopback))
1065  return -EINVAL;
1066  memcpy(buf, &rpc_inaddr_loopback,
1067  sizeof(rpc_inaddr_loopback));
1068  break;
1069  case AF_INET6:
1070  if (buflen < sizeof(rpc_in6addr_loopback))
1071  return -EINVAL;
1072  memcpy(buf, &rpc_in6addr_loopback,
1073  sizeof(rpc_in6addr_loopback));
1074  default:
1075  dprintk("RPC: %s: address family not supported\n",
1076  __func__);
1077  return -EAFNOSUPPORT;
1078  }
1079  dprintk("RPC: %s: succeeded\n", __func__);
1080  return 0;
1081 }
1082 
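/*
 * rpc_localaddr - discover the local address this client uses to reach
 * the server, by connecting a UDP socket to the server's address and
 * reading back its source address; falls back to a wildcard address
 * if that fails.
 */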
1099 int rpc_localaddr(struct rpc_clnt *clnt, struct sockaddr *buf, size_t buflen)
1100 {
1101  struct sockaddr_storage address;
1102  struct sockaddr *sap = (struct sockaddr *)&address;
1103  struct rpc_xprt *xprt;
1104  struct net *net;
1105  size_t salen;
1106  int err;
1107 
1108  rcu_read_lock();
1109  xprt = rcu_dereference(clnt->cl_xprt);
1110  salen = xprt->addrlen;
1111  memcpy(sap, &xprt->addr, salen);
1112  net = get_net(xprt->xprt_net);
1113  rcu_read_unlock();
1114 
1115  rpc_set_port(sap, 0);
1116  err = rpc_sockname(net, sap, salen, buf, buflen);
1117  put_net(net);
1118  if (err != 0)
1119  /* Couldn't discover local address, return ANYADDR */
1120  return rpc_anyaddr(sap->sa_family, buf, buflen);
1121  return 0;
1122 }
1124 
1125 void
1126 rpc_setbufsize(struct rpc_clnt *clnt, unsigned int sndsize, unsigned int rcvsize)
1127 {
1128  struct rpc_xprt *xprt;
1129 
1130  rcu_read_lock();
1131  xprt = rcu_dereference(clnt->cl_xprt);
1132  if (xprt->ops->set_buffer_size)
1133  xprt->ops->set_buffer_size(xprt, sndsize, rcvsize);
1134  rcu_read_unlock();
1135 }
1137 
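/*
 * rpc_protocol - return the transport protocol of the client
 * (e.g. IPPROTO_TCP or IPPROTO_UDP).
 */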
1143 int rpc_protocol(struct rpc_clnt *clnt)
1144 {
1145  int protocol;
1146 
1147  rcu_read_lock();
1148  protocol = rcu_dereference(clnt->cl_xprt)->prot;
1149  rcu_read_unlock();
1150  return protocol;
1151 }
1153 
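/*
 * rpc_net_ns - return the network namespace of the client's transport.
 */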
1159 struct net *rpc_net_ns(struct rpc_clnt *clnt)
1160 {
1161  struct net *ret;
1162 
1163  rcu_read_lock();
1164  ret = rcu_dereference(clnt->cl_xprt)->xprt_net;
1165  rcu_read_unlock();
1166  return ret;
1167 }
1169 
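/*
 * rpc_max_payload - return the maximum number of bytes of RPC payload
 * the transport can carry in a single request.
 */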
1179 size_t rpc_max_payload(struct rpc_clnt *clnt)
1180 {
1181  size_t ret;
1182 
1183  rcu_read_lock();
1184  ret = rcu_dereference(clnt->cl_xprt)->max_payload;
1185  rcu_read_unlock();
1186  return ret;
1187 }
1189 
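/*
 * rpc_force_rebind - mark the transport as unbound so the next call
 * re-queries rpcbind; only has an effect if the client autobinds.
 */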
1195 void rpc_force_rebind(struct rpc_clnt *clnt)
1196 {
1197  if (clnt->cl_autobind) {
1198  rcu_read_lock();
1199  xprt_clear_bound(rcu_dereference(clnt->cl_xprt));
1200  rcu_read_unlock();
1201  }
1202 }
1204 
1205 /*
1206  * Restart an (async) RPC call from the call_prepare state.
1207  * Usually called from within the exit handler.
1208  */
1209 int
1210 rpc_restart_call_prepare(struct rpc_task *task)
1211 {
1212  if (RPC_ASSASSINATED(task))
1213  return 0;
1214  task->tk_action = call_start;
1215  if (task->tk_ops->rpc_call_prepare != NULL)
1216  task->tk_action = rpc_prepare_task;
1217  return 1;
1218 }
1220 
1221 /*
1222  * Restart an (async) RPC call. Usually called from within the
1223  * exit handler.
1224  */
1225 int
1226 rpc_restart_call(struct rpc_task *task)
1227 {
1228  if (RPC_ASSASSINATED(task))
1229  return 0;
1230  task->tk_action = call_start;
1231  return 1;
1232 }
1234 
1235 #ifdef RPC_DEBUG
1236 static const char *rpc_proc_name(const struct rpc_task *task)
1237 {
1238  const struct rpc_procinfo *proc = task->tk_msg.rpc_proc;
1239 
1240  if (proc) {
1241  if (proc->p_name)
1242  return proc->p_name;
1243  else
1244  return "NULL";
1245  } else
1246  return "no proc";
1247 }
1248 #endif
1249 
1250 /*
1251  * 0. Initial state
1252  *
1253  * Other FSM states can be visited zero or more times, but
1254  * this state is visited exactly once for each RPC.
1255  */
1256 static void
1257 call_start(struct rpc_task *task)
1258 {
1259  struct rpc_clnt *clnt = task->tk_client;
1260 
1261  dprintk("RPC: %5u call_start %s%d proc %s (%s)\n", task->tk_pid,
1262  clnt->cl_protname, clnt->cl_vers,
1263  rpc_proc_name(task),
1264  (RPC_IS_ASYNC(task) ? "async" : "sync"));
1265 
1266  /* Increment call count */
1267  task->tk_msg.rpc_proc->p_count++;
1268  clnt->cl_stats->rpccnt++;
1269  task->tk_action = call_reserve;
1270 }
1271 
1272 /*
1273  * 1. Reserve an RPC call slot
1274  */
1275 static void
1276 call_reserve(struct rpc_task *task)
1277 {
1278  dprint_status(task);
1279 
1280  task->tk_status = 0;
1281  task->tk_action = call_reserveresult;
1282  xprt_reserve(task);
1283 }
1284 
1285 /*
1286  * 1b. Grok the result of xprt_reserve()
1287  */
1288 static void
1289 call_reserveresult(struct rpc_task *task)
1290 {
1291  int status = task->tk_status;
1292 
1293  dprint_status(task);
1294 
1295  /*
1296  * After a call to xprt_reserve(), we must have either
1297  * a request slot or else an error status.
1298  */
1299  task->tk_status = 0;
1300  if (status >= 0) {
1301  if (task->tk_rqstp) {
1302  task->tk_action = call_refresh;
1303  return;
1304  }
1305 
1306  printk(KERN_ERR "%s: status=%d, but no request slot, exiting\n",
1307  __func__, status);
1308  rpc_exit(task, -EIO);
1309  return;
1310  }
1311 
1312  /*
1313  * Even though there was an error, we may have acquired
1314  * a request slot somehow. Make sure not to leak it.
1315  */
1316  if (task->tk_rqstp) {
1317  printk(KERN_ERR "%s: status=%d, request allocated anyway\n",
1318  __func__, status);
1319  xprt_release(task);
1320  }
1321 
1322  switch (status) {
1323  case -ENOMEM:
1324  rpc_delay(task, HZ >> 2);
1325  case -EAGAIN: /* woken up; retry */
1326  task->tk_action = call_reserve;
1327  return;
1328  case -EIO: /* probably a shutdown */
1329  break;
1330  default:
1331  printk(KERN_ERR "%s: unrecognized error %d, exiting\n",
1332  __func__, status);
1333  break;
1334  }
1335  rpc_exit(task, status);
1336 }
1337 
1338 /*
1339  * 2. Bind and/or refresh the credentials
1340  */
1341 static void
1342 call_refresh(struct rpc_task *task)
1343 {
1344  dprint_status(task);
1345 
1346  task->tk_action = call_refreshresult;
1347  task->tk_status = 0;
1348  task->tk_client->cl_stats->rpcauthrefresh++;
1349  rpcauth_refreshcred(task);
1350 }
1351 
1352 /*
1353  * 2a. Process the results of a credential refresh
1354  */
1355 static void
1356 call_refreshresult(struct rpc_task *task)
1357 {
1358  int status = task->tk_status;
1359 
1360  dprint_status(task);
1361 
1362  task->tk_status = 0;
1363  task->tk_action = call_refresh;
1364  switch (status) {
1365  case 0:
1366  if (rpcauth_uptodatecred(task))
1367  task->tk_action = call_allocate;
1368  return;
1369  case -ETIMEDOUT:
1370  rpc_delay(task, 3*HZ);
1371  case -EAGAIN:
1372  status = -EACCES;
1373  if (!task->tk_cred_retry)
1374  break;
1375  task->tk_cred_retry--;
1376  dprintk("RPC: %5u %s: retry refresh creds\n",
1377  task->tk_pid, __func__);
1378  return;
1379  }
1380  dprintk("RPC: %5u %s: refresh creds failed with error %d\n",
1381  task->tk_pid, __func__, status);
1382  rpc_exit(task, status);
1383 }
1384 
1385 /*
1386  * 2b. Allocate the buffer. For details, see sched.c:rpc_malloc.
1387  * (Note: buffer memory is freed in xprt_release).
1388  */
1389 static void
1390 call_allocate(struct rpc_task *task)
1391 {
1392  unsigned int slack = task->tk_rqstp->rq_cred->cr_auth->au_cslack;
1393  struct rpc_rqst *req = task->tk_rqstp;
1394  struct rpc_xprt *xprt = task->tk_xprt;
1395  struct rpc_procinfo *proc = task->tk_msg.rpc_proc;
1396 
1397  dprint_status(task);
1398 
1399  task->tk_status = 0;
1400  task->tk_action = call_bind;
1401 
1402  if (req->rq_buffer)
1403  return;
1404 
1405  if (proc->p_proc != 0) {
1406  BUG_ON(proc->p_arglen == 0);
1407  if (proc->p_decode != NULL)
1408  BUG_ON(proc->p_replen == 0);
1409  }
1410 
1411  /*
1412  * Calculate the size (in quads) of the RPC call
1413  * and reply headers, and convert both values
1414  * to byte sizes.
1415  */
1416  req->rq_callsize = RPC_CALLHDRSIZE + (slack << 1) + proc->p_arglen;
1417  req->rq_callsize <<= 2;
1418  req->rq_rcvsize = RPC_REPHDRSIZE + slack + proc->p_replen;
1419  req->rq_rcvsize <<= 2;
1420 
1421  req->rq_buffer = xprt->ops->buf_alloc(task,
1422  req->rq_callsize + req->rq_rcvsize);
1423  if (req->rq_buffer != NULL)
1424  return;
1425 
1426  dprintk("RPC: %5u rpc_buffer allocation failed\n", task->tk_pid);
1427 
1428  if (RPC_IS_ASYNC(task) || !fatal_signal_pending(current)) {
1429  task->tk_action = call_allocate;
1430  rpc_delay(task, HZ>>4);
1431  return;
1432  }
1433 
1434  rpc_exit(task, -ERESTARTSYS);
1435 }
1436 
1437 static inline int
1438 rpc_task_need_encode(struct rpc_task *task)
1439 {
1440  return task->tk_rqstp->rq_snd_buf.len == 0;
1441 }
1442 
1443 static inline void
1444 rpc_task_force_reencode(struct rpc_task *task)
1445 {
1446  task->tk_rqstp->rq_snd_buf.len = 0;
1447  task->tk_rqstp->rq_bytes_sent = 0;
1448 }
1449 
1450 static inline void
1451 rpc_xdr_buf_init(struct xdr_buf *buf, void *start, size_t len)
1452 {
1453  buf->head[0].iov_base = start;
1454  buf->head[0].iov_len = len;
1455  buf->tail[0].iov_len = 0;
1456  buf->page_len = 0;
1457  buf->flags = 0;
1458  buf->len = 0;
1459  buf->buflen = len;
1460 }
1461 
1462 /*
1463  * 3. Encode arguments of an RPC call
1464  */
1465 static void
1466 rpc_xdr_encode(struct rpc_task *task)
1467 {
1468  struct rpc_rqst *req = task->tk_rqstp;
1469  kxdreproc_t encode;
1470  __be32 *p;
1471 
1472  dprint_status(task);
1473 
1474  rpc_xdr_buf_init(&req->rq_snd_buf,
1475  req->rq_buffer,
1476  req->rq_callsize);
1477  rpc_xdr_buf_init(&req->rq_rcv_buf,
1478  (char *)req->rq_buffer + req->rq_callsize,
1479  req->rq_rcvsize);
1480 
1481  p = rpc_encode_header(task);
1482  if (p == NULL) {
1483  printk(KERN_INFO "RPC: couldn't encode RPC header, exit EIO\n");
1484  rpc_exit(task, -EIO);
1485  return;
1486  }
1487 
1488  encode = task->tk_msg.rpc_proc->p_encode;
1489  if (encode == NULL)
1490  return;
1491 
1492  task->tk_status = rpcauth_wrap_req(task, encode, req, p,
1493  task->tk_msg.rpc_argp);
1494 }
1495 
1496 /*
1497  * 4. Get the server port number if not yet set
1498  */
1499 static void
1500 call_bind(struct rpc_task *task)
1501 {
1502  struct rpc_xprt *xprt = task->tk_xprt;
1503 
1504  dprint_status(task);
1505 
1506  task->tk_action = call_connect;
1507  if (!xprt_bound(xprt)) {
1508  task->tk_action = call_bind_status;
1509  task->tk_timeout = xprt->bind_timeout;
1510  xprt->ops->rpcbind(task);
1511  }
1512 }
1513 
1514 /*
1515  * 4a. Sort out bind result
1516  */
1517 static void
1518 call_bind_status(struct rpc_task *task)
1519 {
1520  int status = -EIO;
1521 
1522  if (task->tk_status >= 0) {
1523  dprint_status(task);
1524  task->tk_status = 0;
1525  task->tk_action = call_connect;
1526  return;
1527  }
1528 
1529  trace_rpc_bind_status(task);
1530  switch (task->tk_status) {
1531  case -ENOMEM:
1532  dprintk("RPC: %5u rpcbind out of memory\n", task->tk_pid);
1533  rpc_delay(task, HZ >> 2);
1534  goto retry_timeout;
1535  case -EACCES:
1536  dprintk("RPC: %5u remote rpcbind: RPC program/version "
1537  "unavailable\n", task->tk_pid);
1538  /* fail immediately if this is an RPC ping */
1539  if (task->tk_msg.rpc_proc->p_proc == 0) {
1540  status = -EOPNOTSUPP;
1541  break;
1542  }
1543  if (task->tk_rebind_retry == 0)
1544  break;
1545  task->tk_rebind_retry--;
1546  rpc_delay(task, 3*HZ);
1547  goto retry_timeout;
1548  case -ETIMEDOUT:
1549  dprintk("RPC: %5u rpcbind request timed out\n",
1550  task->tk_pid);
1551  goto retry_timeout;
1552  case -EPFNOSUPPORT:
1553  /* server doesn't support any rpcbind version we know of */
1554  dprintk("RPC: %5u unrecognized remote rpcbind service\n",
1555  task->tk_pid);
1556  break;
1557  case -EPROTONOSUPPORT:
1558  dprintk("RPC: %5u remote rpcbind version unavailable, retrying\n",
1559  task->tk_pid);
1560  task->tk_status = 0;
1561  task->tk_action = call_bind;
1562  return;
1563  case -ECONNREFUSED: /* connection problems */
1564  case -ECONNRESET:
1565  case -ENOTCONN:
1566  case -EHOSTDOWN:
1567  case -EHOSTUNREACH:
1568  case -ENETUNREACH:
1569  case -EPIPE:
1570  dprintk("RPC: %5u remote rpcbind unreachable: %d\n",
1571  task->tk_pid, task->tk_status);
1572  if (!RPC_IS_SOFTCONN(task)) {
1573  rpc_delay(task, 5*HZ);
1574  goto retry_timeout;
1575  }
1576  status = task->tk_status;
1577  break;
1578  default:
1579  dprintk("RPC: %5u unrecognized rpcbind error (%d)\n",
1580  task->tk_pid, -task->tk_status);
1581  }
1582 
1583  rpc_exit(task, status);
1584  return;
1585 
1586 retry_timeout:
1587  task->tk_action = call_timeout;
1588 }
1589 
1590 /*
1591  * 4b. Connect to the RPC server
1592  */
1593 static void
1594 call_connect(struct rpc_task *task)
1595 {
1596  struct rpc_xprt *xprt = task->tk_xprt;
1597 
1598  dprintk("RPC: %5u call_connect xprt %p %s connected\n",
1599  task->tk_pid, xprt,
1600  (xprt_connected(xprt) ? "is" : "is not"));
1601 
1602  task->tk_action = call_transmit;
1603  if (!xprt_connected(xprt)) {
1604  task->tk_action = call_connect_status;
1605  if (task->tk_status < 0)
1606  return;
1607  xprt_connect(task);
1608  }
1609 }
1610 
1611 /*
1612  * 4c. Sort out connect result
1613  */
1614 static void
1615 call_connect_status(struct rpc_task *task)
1616 {
1617  struct rpc_clnt *clnt = task->tk_client;
1618  int status = task->tk_status;
1619 
1620  dprint_status(task);
1621 
1622  task->tk_status = 0;
1623  if (status >= 0 || status == -EAGAIN) {
1624  clnt->cl_stats->netreconn++;
1625  task->tk_action = call_transmit;
1626  return;
1627  }
1628 
1629  trace_rpc_connect_status(task, status);
1630  switch (status) {
1631  /* if soft mounted, test if we've timed out */
1632  case -ETIMEDOUT:
1633  task->tk_action = call_timeout;
1634  break;
1635  default:
1636  rpc_exit(task, -EIO);
1637  }
1638 }
1639 
1640 /*
1641  * 5. Transmit the RPC request, and wait for reply
1642  */
1643 static void
1644 call_transmit(struct rpc_task *task)
1645 {
1646  dprint_status(task);
1647 
1648  task->tk_action = call_status;
1649  if (task->tk_status < 0)
1650  return;
1651  task->tk_status = xprt_prepare_transmit(task);
1652  if (task->tk_status != 0)
1653  return;
1654  task->tk_action = call_transmit_status;
1655  /* Encode here so that rpcsec_gss can use correct sequence number. */
1656  if (rpc_task_need_encode(task)) {
1657  BUG_ON(task->tk_rqstp->rq_bytes_sent != 0);
1658  rpc_xdr_encode(task);
1659  /* Did the encode result in an error condition? */
1660  if (task->tk_status != 0) {
1661  /* Was the error nonfatal? */
1662  if (task->tk_status == -EAGAIN)
1663  rpc_delay(task, HZ >> 4);
1664  else
1665  rpc_exit(task, task->tk_status);
1666  return;
1667  }
1668  }
1669  xprt_transmit(task);
1670  if (task->tk_status < 0)
1671  return;
1672  /*
1673  * On success, ensure that we call xprt_end_transmit() before sleeping
1674  * in order to allow access to the socket to other RPC requests.
1675  */
1676  call_transmit_status(task);
1677  if (rpc_reply_expected(task))
1678  return;
1679  task->tk_action = rpc_exit_task;
1680  rpc_wake_up_queued_task(&task->tk_xprt->pending, task);
1681 }
1682 
1683 /*
1684  * 5a. Handle cleanup after a transmission
1685  */
1686 static void
1687 call_transmit_status(struct rpc_task *task)
1688 {
1689  task->tk_action = call_status;
1690 
1691  /*
1692  * Common case: success. Force the compiler to put this
1693  * test first.
1694  */
1695  if (task->tk_status == 0) {
1696  xprt_end_transmit(task);
1697  rpc_task_force_reencode(task);
1698  return;
1699  }
1700 
1701  switch (task->tk_status) {
1702  case -EAGAIN:
1703  break;
1704  default:
1705  dprint_status(task);
1706  xprt_end_transmit(task);
1707  rpc_task_force_reencode(task);
1708  break;
1709  /*
1710  * Special cases: if we've been waiting on the
1711  * socket's write_space() callback, or if the
1712  * socket just returned a connection error,
1713  * then hold onto the transport lock.
1714  */
1715  case -ECONNREFUSED:
1716  case -EHOSTDOWN:
1717  case -EHOSTUNREACH:
1718  case -ENETUNREACH:
1719  if (RPC_IS_SOFTCONN(task)) {
1720  xprt_end_transmit(task);
1721  rpc_exit(task, task->tk_status);
1722  break;
1723  }
1724  case -ECONNRESET:
1725  case -ENOTCONN:
1726  case -EPIPE:
1727  rpc_task_force_reencode(task);
1728  }
1729 }
1730 
1731 #if defined(CONFIG_SUNRPC_BACKCHANNEL)
1732 /*
1733  * 5b. Send the backchannel RPC reply. On error, drop the reply. In
1734  * addition, disconnect on connectivity errors.
1735  */
1736 static void
1737 call_bc_transmit(struct rpc_task *task)
1738 {
1739  struct rpc_rqst *req = task->tk_rqstp;
1740 
1741  BUG_ON(task->tk_status != 0);
1742  task->tk_status = xprt_prepare_transmit(task);
1743  if (task->tk_status == -EAGAIN) {
1744  /*
1745  * Could not reserve the transport. Try again after the
1746  * transport is released.
1747  */
1748  task->tk_status = 0;
1749  task->tk_action = call_bc_transmit;
1750  return;
1751  }
1752 
1753  task->tk_action = rpc_exit_task;
1754  if (task->tk_status < 0) {
1755  printk(KERN_NOTICE "RPC: Could not send backchannel reply "
1756  "error: %d\n", task->tk_status);
1757  return;
1758  }
1759 
1760  xprt_transmit(task);
1761  xprt_end_transmit(task);
1762  dprint_status(task);
1763  switch (task->tk_status) {
1764  case 0:
1765  /* Success */
1766  break;
1767  case -EHOSTDOWN:
1768  case -EHOSTUNREACH:
1769  case -ENETUNREACH:
1770  case -ETIMEDOUT:
1771  /*
1772  * Problem reaching the server. Disconnect and let the
1773  * forechannel reestablish the connection. The server will
1774  * have to retransmit the backchannel request and we'll
1775  * reprocess it. Since these ops are idempotent, there's no
1776  * need to cache our reply at this time.
1777  */
1778  printk(KERN_NOTICE "RPC: Could not send backchannel reply "
1779  "error: %d\n", task->tk_status);
1780  xprt_conditional_disconnect(task->tk_xprt,
1781  req->rq_connect_cookie);
1782  break;
1783  default:
1784  /*
1785  * We were unable to reply and will have to drop the
1786  * request. The server should reconnect and retransmit.
1787  */
1788  BUG_ON(task->tk_status == -EAGAIN);
1789  printk(KERN_NOTICE "RPC: Could not send backchannel reply "
1790  "error: %d\n", task->tk_status);
1791  break;
1792  }
1793  rpc_wake_up_queued_task(&req->rq_xprt->pending, task);
1794 }
1795 #endif /* CONFIG_SUNRPC_BACKCHANNEL */
1796 
1797 /*
1798  * 6. Sort out the RPC call status
1799  */
1800 static void
1801 call_status(struct rpc_task *task)
1802 {
1803  struct rpc_clnt *clnt = task->tk_client;
1804  struct rpc_rqst *req = task->tk_rqstp;
1805  int status;
1806 
1807  if (req->rq_reply_bytes_recvd > 0 && !req->rq_bytes_sent)
1808  task->tk_status = req->rq_reply_bytes_recvd;
1809 
1810  dprint_status(task);
1811 
1812  status = task->tk_status;
1813  if (status >= 0) {
1814  task->tk_action = call_decode;
1815  return;
1816  }
1817 
1818  trace_rpc_call_status(task);
1819  task->tk_status = 0;
1820  switch(status) {
1821  case -EHOSTDOWN:
1822  case -EHOSTUNREACH:
1823  case -ENETUNREACH:
1824  /*
1825  * Delay any retries for 3 seconds, then handle as if it
1826  * were a timeout.
1827  */
1828  rpc_delay(task, 3*HZ);
1829  case -ETIMEDOUT:
1830  task->tk_action = call_timeout;
1831  if (task->tk_client->cl_discrtry)
1832  xprt_conditional_disconnect(task->tk_xprt,
1833  req->rq_connect_cookie);
1834  break;
1835  case -ECONNRESET:
1836  case -ECONNREFUSED:
1837  rpc_force_rebind(clnt);
1838  rpc_delay(task, 3*HZ);
1839  case -EPIPE:
1840  case -ENOTCONN:
1841  task->tk_action = call_bind;
1842  break;
1843  case -EAGAIN:
1844  task->tk_action = call_transmit;
1845  break;
1846  case -EIO:
1847  /* shutdown or soft timeout */
1848  rpc_exit(task, status);
1849  break;
1850  default:
1851  if (clnt->cl_chatty)
1852  printk("%s: RPC call returned error %d\n",
1853  clnt->cl_protname, -status);
1854  rpc_exit(task, status);
1855  }
1856 }
1857 
1858 /*
1859  * 6a. Handle RPC timeout
1860  * We do not release the request slot, so we keep using the
1861  * same XID for all retransmits.
1862  */
1863 static void
1864 call_timeout(struct rpc_task *task)
1865 {
1866  struct rpc_clnt *clnt = task->tk_client;
1867 
1868  if (xprt_adjust_timeout(task->tk_rqstp) == 0) {
1869  dprintk("RPC: %5u call_timeout (minor)\n", task->tk_pid);
1870  goto retry;
1871  }
1872 
1873  dprintk("RPC: %5u call_timeout (major)\n", task->tk_pid);
1874  task->tk_timeouts++;
1875 
1876  if (RPC_IS_SOFTCONN(task)) {
1877  rpc_exit(task, -ETIMEDOUT);
1878  return;
1879  }
1880  if (RPC_IS_SOFT(task)) {
1881  if (clnt->cl_chatty) {
1882  rcu_read_lock();
1883  printk(KERN_NOTICE "%s: server %s not responding, timed out\n",
1884  clnt->cl_protname,
1885  rcu_dereference(clnt->cl_xprt)->servername);
1886  rcu_read_unlock();
1887  }
1888  if (task->tk_flags & RPC_TASK_TIMEOUT)
1889  rpc_exit(task, -ETIMEDOUT);
1890  else
1891  rpc_exit(task, -EIO);
1892  return;
1893  }
1894 
1895  if (!(task->tk_flags & RPC_CALL_MAJORSEEN)) {
1896  task->tk_flags |= RPC_CALL_MAJORSEEN;
1897  if (clnt->cl_chatty) {
1898  rcu_read_lock();
1899  printk(KERN_NOTICE "%s: server %s not responding, still trying\n",
1900  clnt->cl_protname,
1901  rcu_dereference(clnt->cl_xprt)->servername);
1902  rcu_read_unlock();
1903  }
1904  }
1905  rpc_force_rebind(clnt);
1906  /*
1907  * Did our request time out due to an RPCSEC_GSS out-of-sequence
1908  * event? RFC2203 requires the server to drop all such requests.
1909  */
1910  rpcauth_invalcred(task);
1911 
1912 retry:
1913  clnt->cl_stats->rpcretrans++;
1914  task->tk_action = call_bind;
1915  task->tk_status = 0;
1916 }
1917 
1918 /*
1919  * 7. Decode the RPC reply
1920  */
1921 static void
1922 call_decode(struct rpc_task *task)
1923 {
1924  struct rpc_clnt *clnt = task->tk_client;
1925  struct rpc_rqst *req = task->tk_rqstp;
1926  kxdrdproc_t decode = task->tk_msg.rpc_proc->p_decode;
1927  __be32 *p;
1928 
1929  dprint_status(task);
1930 
1931  if (task->tk_flags & RPC_CALL_MAJORSEEN) {
1932  if (clnt->cl_chatty) {
1933  rcu_read_lock();
1934  printk(KERN_NOTICE "%s: server %s OK\n",
1935  clnt->cl_protname,
1936  rcu_dereference(clnt->cl_xprt)->servername);
1937  rcu_read_unlock();
1938  }
1939  task->tk_flags &= ~RPC_CALL_MAJORSEEN;
1940  }
1941 
1942  /*
1943  * Ensure that we see all writes made by xprt_complete_rqst()
1944  * before it changed req->rq_reply_bytes_recvd.
1945  */
1946  smp_rmb();
1947  req->rq_rcv_buf.len = req->rq_private_buf.len;
1948 
1949  /* Check that the softirq receive buffer is valid */
1950  WARN_ON(memcmp(&req->rq_rcv_buf, &req->rq_private_buf,
1951  sizeof(req->rq_rcv_buf)) != 0);
1952 
1953  if (req->rq_rcv_buf.len < 12) {
1954  if (!RPC_IS_SOFT(task)) {
1955  task->tk_action = call_bind;
1956  clnt->cl_stats->rpcretrans++;
1957  goto out_retry;
1958  }
1959  dprintk("RPC: %s: too small RPC reply size (%d bytes)\n",
1960  clnt->cl_protname, task->tk_status);
1961  task->tk_action = call_timeout;
1962  goto out_retry;
1963  }
1964 
1965  p = rpc_verify_header(task);
1966  if (IS_ERR(p)) {
1967  if (p == ERR_PTR(-EAGAIN))
1968  goto out_retry;
1969  return;
1970  }
1971 
1972  task->tk_action = rpc_exit_task;
1973 
1974  if (decode) {
1975  task->tk_status = rpcauth_unwrap_resp(task, decode, req, p,
1976  task->tk_msg.rpc_resp);
1977  }
1978  dprintk("RPC: %5u call_decode result %d\n", task->tk_pid,
1979  task->tk_status);
1980  return;
1981 out_retry:
1982  task->tk_status = 0;
1983  /* Note: rpc_verify_header() may have freed the RPC slot */
1984  if (task->tk_rqstp == req) {
1985  req->rq_reply_bytes_recvd = req->rq_rcv_buf.len = 0;
1986  if (task->tk_client->cl_discrtry)
1987  xprt_conditional_disconnect(task->tk_xprt,
1988  req->rq_connect_cookie);
1989  }
1990 }
1991 
1992 static __be32 *
1993 rpc_encode_header(struct rpc_task *task)
1994 {
1995  struct rpc_clnt *clnt = task->tk_client;
1996  struct rpc_rqst *req = task->tk_rqstp;
1997  __be32 *p = req->rq_svec[0].iov_base;
1998 
1999  /* FIXME: check buffer size? */
2000 
2001  p = xprt_skip_transport_header(task->tk_xprt, p);
2002  *p++ = req->rq_xid; /* XID */
2003  *p++ = htonl(RPC_CALL); /* CALL */
2004  *p++ = htonl(RPC_VERSION); /* RPC version */
2005  *p++ = htonl(clnt->cl_prog); /* program number */
2006  *p++ = htonl(clnt->cl_vers); /* program version */
2007  *p++ = htonl(task->tk_msg.rpc_proc->p_proc); /* procedure */
2008  p = rpcauth_marshcred(task, p);
2009  req->rq_slen = xdr_adjust_iovec(&req->rq_svec[0], p);
2010  return p;
2011 }
2012 
2013 static __be32 *
2014 rpc_verify_header(struct rpc_task *task)
2015 {
2016  struct rpc_clnt *clnt = task->tk_client;
2017  struct kvec *iov = &task->tk_rqstp->rq_rcv_buf.head[0];
2018  int len = task->tk_rqstp->rq_rcv_buf.len >> 2;
2019  __be32 *p = iov->iov_base;
2020  u32 n;
2021  int error = -EACCES;
2022 
2023  if ((task->tk_rqstp->rq_rcv_buf.len & 3) != 0) {
2024  /* RFC-1014 says that the representation of XDR data must be a
2025  * multiple of four bytes
2026  * - if it isn't pointer subtraction in the NFS client may give
2027  * undefined results
2028  */
2029  dprintk("RPC: %5u %s: XDR representation not a multiple of"
2030  " 4 bytes: 0x%x\n", task->tk_pid, __func__,
2031  task->tk_rqstp->rq_rcv_buf.len);
2032  goto out_eio;
2033  }
2034  if ((len -= 3) < 0)
2035  goto out_overflow;
2036 
2037  p += 1; /* skip XID */
2038  if ((n = ntohl(*p++)) != RPC_REPLY) {
2039  dprintk("RPC: %5u %s: not an RPC reply: %x\n",
2040  task->tk_pid, __func__, n);
2041  goto out_garbage;
2042  }
2043 
2044  if ((n = ntohl(*p++)) != RPC_MSG_ACCEPTED) {
2045  if (--len < 0)
2046  goto out_overflow;
2047  switch ((n = ntohl(*p++))) {
2048  case RPC_AUTH_ERROR:
2049  break;
2050  case RPC_MISMATCH:
2051  dprintk("RPC: %5u %s: RPC call version mismatch!\n",
2052  task->tk_pid, __func__);
2053  error = -EPROTONOSUPPORT;
2054  goto out_err;
2055  default:
2056  dprintk("RPC: %5u %s: RPC call rejected, "
2057  "unknown error: %x\n",
2058  task->tk_pid, __func__, n);
2059  goto out_eio;
2060  }
2061  if (--len < 0)
2062  goto out_overflow;
2063  switch ((n = ntohl(*p++))) {
2064  case RPC_AUTH_REJECTEDCRED:
2065  case RPC_AUTH_REJECTEDVERF:
2066  case RPCSEC_GSS_CREDPROBLEM:
2067  case RPCSEC_GSS_CTXPROBLEM:
2068  if (!task->tk_cred_retry)
2069  break;
2070  task->tk_cred_retry--;
2071  dprintk("RPC: %5u %s: retry stale creds\n",
2072  task->tk_pid, __func__);
2073  rpcauth_invalcred(task);
2074  /* Ensure we obtain a new XID! */
2075  xprt_release(task);
2076  task->tk_action = call_reserve;
2077  goto out_retry;
2078  case RPC_AUTH_BADCRED:
2079  case RPC_AUTH_BADVERF:
2080  /* possibly garbled cred/verf? */
2081  if (!task->tk_garb_retry)
2082  break;
2083  task->tk_garb_retry--;
2084  dprintk("RPC: %5u %s: retry garbled creds\n",
2085  task->tk_pid, __func__);
2086  task->tk_action = call_bind;
2087  goto out_retry;
2088  case RPC_AUTH_TOOWEAK:
2089  rcu_read_lock();
2090  printk(KERN_NOTICE "RPC: server %s requires stronger "
2091  "authentication.\n",
2092  rcu_dereference(clnt->cl_xprt)->servername);
2093  rcu_read_unlock();
2094  break;
2095  default:
2096  dprintk("RPC: %5u %s: unknown auth error: %x\n",
2097  task->tk_pid, __func__, n);
2098  error = -EIO;
2099  }
2100  dprintk("RPC: %5u %s: call rejected %d\n",
2101  task->tk_pid, __func__, n);
2102  goto out_err;
2103  }
2104  if (!(p = rpcauth_checkverf(task, p))) {
2105  dprintk("RPC: %5u %s: auth check failed\n",
2106  task->tk_pid, __func__);
2107  goto out_garbage; /* bad verifier, retry */
2108  }
2109  len = p - (__be32 *)iov->iov_base - 1;
2110  if (len < 0)
2111  goto out_overflow;
2112  switch ((n = ntohl(*p++))) {
2113  case RPC_SUCCESS:
2114  return p;
2115  case RPC_PROG_UNAVAIL:
2116  dprintk_rcu("RPC: %5u %s: program %u is unsupported "
2117  "by server %s\n", task->tk_pid, __func__,
2118  (unsigned int)clnt->cl_prog,
2119  rcu_dereference(clnt->cl_xprt)->servername);
2120  error = -EPFNOSUPPORT;
2121  goto out_err;
2122  case RPC_PROG_MISMATCH:
2123  dprintk_rcu("RPC: %5u %s: program %u, version %u unsupported "
2124  "by server %s\n", task->tk_pid, __func__,
2125  (unsigned int)clnt->cl_prog,
2126  (unsigned int)clnt->cl_vers,
2127  rcu_dereference(clnt->cl_xprt)->servername);
2128  error = -EPROTONOSUPPORT;
2129  goto out_err;
2130  case RPC_PROC_UNAVAIL:
2131  dprintk_rcu("RPC: %5u %s: proc %s unsupported by program %u, "
2132  "version %u on server %s\n",
2133  task->tk_pid, __func__,
2134  rpc_proc_name(task),
2135  clnt->cl_prog, clnt->cl_vers,
2136  rcu_dereference(clnt->cl_xprt)->servername);
2137  error = -EOPNOTSUPP;
2138  goto out_err;
2139  case RPC_GARBAGE_ARGS:
2140  dprintk("RPC: %5u %s: server saw garbage\n",
2141  task->tk_pid, __func__);
2142  break; /* retry */
2143  default:
2144  dprintk("RPC: %5u %s: server accept status: %x\n",
2145  task->tk_pid, __func__, n);
2146  /* Also retry */
2147  }
2148 
2149 out_garbage:
2150  clnt->cl_stats->rpcgarbage++;
2151  if (task->tk_garb_retry) {
2152  task->tk_garb_retry--;
2153  dprintk("RPC: %5u %s: retrying\n",
2154  task->tk_pid, __func__);
2155  task->tk_action = call_bind;
2156 out_retry:
2157  return ERR_PTR(-EAGAIN);
2158  }
2159 out_eio:
2160  error = -EIO;
2161 out_err:
2162  rpc_exit(task, error);
2163  dprintk("RPC: %5u %s: call failed with error %d\n", task->tk_pid,
2164  __func__, error);
2165  return ERR_PTR(error);
2166 out_overflow:
2167  dprintk("RPC: %5u %s: server reply was truncated.\n", task->tk_pid,
2168  __func__);
2169  goto out_garbage;
2170 }
2171 
2172 static void rpcproc_encode_null(void *rqstp, struct xdr_stream *xdr, void *obj)
2173 {
2174 }
2175 
2176 static int rpcproc_decode_null(void *rqstp, struct xdr_stream *xdr, void *obj)
2177 {
2178  return 0;
2179 }
2180 
2181 static struct rpc_procinfo rpcproc_null = {
2182  .p_encode = rpcproc_encode_null,
2183  .p_decode = rpcproc_decode_null,
2184 };
2185 
2186 static int rpc_ping(struct rpc_clnt *clnt)
2187 {
2188  struct rpc_message msg = {
2189  .rpc_proc = &rpcproc_null,
2190  };
2191  int err;
2192  msg.rpc_cred = authnull_ops.lookup_cred(NULL, NULL, 0);
2193  err = rpc_call_sync(clnt, &msg, RPC_TASK_SOFT | RPC_TASK_SOFTCONN);
2194  put_rpccred(msg.rpc_cred);
2195  return err;
2196 }
2197 
2198 struct rpc_task *rpc_call_null(struct rpc_clnt *clnt, struct rpc_cred *cred, int flags)
2199 {
2200  struct rpc_message msg = {
2201  .rpc_proc = &rpcproc_null,
2202  .rpc_cred = cred,
2203  };
2204  struct rpc_task_setup task_setup_data = {
2205  .rpc_client = clnt,
2206  .rpc_message = &msg,
2207  .callback_ops = &rpc_default_ops,
2208  .flags = flags,
2209  };
2210  return rpc_run_task(&task_setup_data);
2211 }
2213 
2214 #ifdef RPC_DEBUG
2215 static void rpc_show_header(void)
2216 {
2217  printk(KERN_INFO "-pid- flgs status -client- --rqstp- "
2218  "-timeout ---ops--\n");
2219 }
2220 
2221 static void rpc_show_task(const struct rpc_clnt *clnt,
2222  const struct rpc_task *task)
2223 {
2224  const char *rpc_waitq = "none";
2225 
2226  if (RPC_IS_QUEUED(task))
2227  rpc_waitq = rpc_qname(task->tk_waitqueue);
2228 
2229  printk(KERN_INFO "%5u %04x %6d %8p %8p %8ld %8p %sv%u %s a:%ps q:%s\n",
2230  task->tk_pid, task->tk_flags, task->tk_status,
2231  clnt, task->tk_rqstp, task->tk_timeout, task->tk_ops,
2232  clnt->cl_protname, clnt->cl_vers, rpc_proc_name(task),
2233  task->tk_action, rpc_waitq);
2234 }
2235 
2236 void rpc_show_tasks(struct net *net)
2237 {
2238  struct rpc_clnt *clnt;
2239  struct rpc_task *task;
2240  int header = 0;
2241  struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
2242 
2243  spin_lock(&sn->rpc_client_lock);
2244  list_for_each_entry(clnt, &sn->all_clients, cl_clients) {
2245  spin_lock(&clnt->cl_lock);
2246  list_for_each_entry(task, &clnt->cl_tasks, tk_task) {
2247  if (!header) {
2248  rpc_show_header();
2249  header++;
2250  }
2251  rpc_show_task(clnt, task);
2252  }
2253  spin_unlock(&clnt->cl_lock);
2254  }
2255  spin_unlock(&sn->rpc_client_lock);
2256 }
2257 #endif