Linux Kernel 3.7.1: include/linux/sunrpc/svc.h

/*
 * linux/include/linux/sunrpc/svc.h
 *
 * RPC server declarations.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#ifndef SUNRPC_SVC_H
#define SUNRPC_SVC_H

#include <linux/in.h>
#include <linux/in6.h>
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/auth.h>
#include <linux/sunrpc/svcauth.h>
#include <linux/wait.h>
#include <linux/mm.h>

/*
 * This is the RPC server thread function prototype
 */
typedef int		(*svc_thread_fn)(void *);
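
/*
 * Illustrative sketch (not part of this header): a thread function of this
 * type receives the struct svc_rqst it was started with and typically loops
 * in svc_recv() (declared in linux/sunrpc/svc_xprt.h) and svc_process()
 * (declared below) until it is told to stop.  The name "my_thread_fn" is
 * hypothetical:
 *
 *	static int my_thread_fn(void *vrqstp)
 *	{
 *		struct svc_rqst *rqstp = vrqstp;
 *		int err;
 *
 *		while (!kthread_should_stop()) {
 *			err = svc_recv(rqstp, MAX_SCHEDULE_TIMEOUT);
 *			if (err == -EAGAIN || err == -EINTR)
 *				continue;
 *			svc_process(rqstp);
 *		}
 *		svc_exit_thread(rqstp);
 *		return 0;
 *	}
 */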

/* statistics for svc_pool structures */
struct svc_pool_stats {
	unsigned long	packets;
	unsigned long	sockets_queued;
	unsigned long	threads_woken;
	unsigned long	threads_timedout;
};

/*
 *
 * RPC service thread pool.
 *
 * Pool of threads and temporary sockets.  Generally there is only
 * a single one of these per RPC service, but on NUMA machines those
 * services that can benefit from it (i.e. nfs but not lockd) will
 * have one pool per NUMA node.  This optimisation reduces cross-
 * node traffic on multi-node NUMA NFS servers.
 */
struct svc_pool {
	unsigned int		sp_id;		/* pool id; also node id on NUMA */
	spinlock_t		sp_lock;	/* protects all fields */
	struct list_head	sp_threads;	/* idle server threads */
	struct list_head	sp_sockets;	/* pending sockets */
	unsigned int		sp_nrthreads;	/* # of threads in pool */
	struct list_head	sp_all_threads;	/* all server threads */
	struct svc_pool_stats	sp_stats;	/* statistics on pool operation */
} ____cacheline_aligned_in_smp;
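
/*
 * Illustrative note (an assumption, not stated in this header): when a
 * service has sv_nrpools > 1, incoming work is steered to the pool for the
 * node of the CPU that received it.  Transport code would typically look
 * that pool up with svc_pool_for_cpu(), declared later in this file:
 *
 *	int cpu = get_cpu();
 *	struct svc_pool *pool = svc_pool_for_cpu(serv, cpu);
 *	put_cpu();
 */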

/*
 * RPC service.
 *
 * An RPC service is a ``daemon,'' possibly multithreaded, which
 * receives and processes incoming RPC messages.
 * It has one or more transport sockets associated with it, and maintains
 * a list of idle threads waiting for input.
 *
 * We currently do not support more than one RPC program per daemon.
 */
struct svc_serv {
	struct svc_program *	sv_program;	/* RPC program */
	struct svc_stat *	sv_stats;	/* RPC statistics */
	spinlock_t		sv_lock;
	unsigned int		sv_nrthreads;	/* # of server threads */
	unsigned int		sv_maxconn;	/* max connections allowed or
						 * '0' causing max to be based
						 * on number of threads. */

	unsigned int		sv_max_payload;	/* datagram payload size */
	unsigned int		sv_max_mesg;	/* max_payload + 1 page for overheads */
	unsigned int		sv_xdrsize;	/* XDR buffer size */
	struct list_head	sv_permsocks;	/* all permanent sockets */
	struct list_head	sv_tempsocks;	/* all temporary sockets */
	int			sv_tmpcnt;	/* count of temporary sockets */
	struct timer_list	sv_temptimer;	/* timer for aging temporary sockets */

	char *			sv_name;	/* service name */

	unsigned int		sv_nrpools;	/* number of thread pools */
	struct svc_pool *	sv_pools;	/* array of thread pools */

	void			(*sv_shutdown)(struct svc_serv *serv,
					       struct net *net);
						/* Callback to use when last thread
						 * exits.
						 */

	struct module *		sv_module;	/* optional module to count when
						 * adding threads */
	svc_thread_fn		sv_function;	/* main function for threads */
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	struct list_head	sv_cb_list;	/* queue for callback requests
						 * that arrive over the same
						 * connection */
	spinlock_t		sv_cb_lock;	/* protects the svc_cb_list */
	wait_queue_head_t	sv_cb_waitq;	/* sleep here if there are no
						 * entries in the svc_cb_list */
	struct svc_xprt		*sv_bc_xprt;	/* callback on fore channel */
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
};

/*
 * We use sv_nrthreads as a reference count.  svc_destroy() drops
 * this refcount, so we need to bump it up around operations that
 * change the number of threads.  Horrible, but there it is.
 * Should be called with the BKL held.
 */
static inline void svc_get(struct svc_serv *serv)
{
	serv->sv_nrthreads++;
}
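
/*
 * Illustrative sketch: a caller changing the thread count holds a reference
 * across the operation; the final svc_destroy() drops the reference taken
 * by svc_get().  The name "my_serv" and the surrounding error handling are
 * hypothetical:
 *
 *	svc_get(my_serv);
 *	error = svc_set_num_threads(my_serv, NULL, nrservs);
 *	svc_destroy(my_serv);
 */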

/*
 * Maximum payload size supported by a kernel RPC server.
 * This is used to determine the max number of pages nfsd is
 * willing to return in a single READ operation.
 *
 * These happen to all be powers of 2, which is not strictly
 * necessary but helps enforce the real limitation, which is
 * that they should be multiples of PAGE_CACHE_SIZE.
 *
 * For UDP transports, a block plus NFS, RPC, and UDP headers
 * has to fit into the IP datagram limit of 64K.  The largest
 * feasible number for all known page sizes is probably 48K,
 * but we choose 32K here.  This is the same as the historical
 * Linux limit; someone who cares more about NFS/UDP performance
 * can test a larger number.
 *
 * For TCP transports we have more freedom.  A size of 1MB is
 * chosen to match the client limit.  Other OSes are known to
 * have larger limits, but those numbers are probably beyond
 * the point of diminishing returns.
 */
#define RPCSVC_MAXPAYLOAD	(1*1024*1024u)
#define RPCSVC_MAXPAYLOAD_TCP	RPCSVC_MAXPAYLOAD
#define RPCSVC_MAXPAYLOAD_UDP	(32*1024u)

extern u32 svc_max_payload(const struct svc_rqst *rqstp);
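
/*
 * Illustrative sketch (hypothetical handler): a READ-style handler would
 * clamp the count requested by the client to the value reported for the
 * current transport:
 *
 *	u32 maxcount = svc_max_payload(rqstp);
 *
 *	if (count > maxcount)
 *		count = maxcount;
 */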

/*
 * RPC Requests and replies are stored in one or more pages.
 * We maintain an array of pages for each server thread.
 * Requests are copied into these pages as they arrive.  Remaining
 * pages are available to write the reply into.
 *
 * Pages are sent using ->sendpage so each server thread needs to
 * allocate more to replace those used in sending.  To help keep track
 * of these pages we have a receive list where all pages initially live,
 * and a send list where pages are moved to when they are to be part
 * of a reply.
 *
 * We use xdr_buf for holding responses as it fits well with NFS
 * read responses (that have a header, and some data pages, and possibly
 * a tail) and means we can share some client side routines.
 *
 * The xdr_buf.head kvec always points to the first page in the rq_*pages
 * list.  The xdr_buf.pages pointer points to the second page on that
 * list.  xdr_buf.tail points to the end of the first page.
 * This assumes that the non-page part of an rpc reply will fit
 * in a page - NFSd ensures this.  lockd also has no trouble.
 *
 * Each request/reply pair can have at most one "payload", plus two pages,
 * one for the request, and one for the reply.
 * When using ->sendfile to return read data, we might need one extra page
 * if the request is not page-aligned.  So add another '1'.
 */
#define RPCSVC_MAXPAGES		((RPCSVC_MAXPAYLOAD+PAGE_SIZE-1)/PAGE_SIZE \
				+ 2 + 1)
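
/*
 * Worked example (assuming PAGE_SIZE == 4096): RPCSVC_MAXPAYLOAD is 1MB, so
 * RPCSVC_MAXPAGES = 1048576/4096 + 2 + 1 = 259 pages per server thread.
 */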

static inline u32 svc_getnl(struct kvec *iov)
{
	__be32 val, *vp;
	vp = iov->iov_base;
	val = *vp++;
	iov->iov_base = (void *)vp;
	iov->iov_len -= sizeof(__be32);
	return ntohl(val);
}

static inline void svc_putnl(struct kvec *iov, u32 val)
{
	__be32 *vp = iov->iov_base + iov->iov_len;
	*vp = htonl(val);
	iov->iov_len += sizeof(__be32);
}

static inline __be32 svc_getu32(struct kvec *iov)
{
	__be32 val, *vp;
	vp = iov->iov_base;
	val = *vp++;
	iov->iov_base = (void *)vp;
	iov->iov_len -= sizeof(__be32);
	return val;
}

static inline void svc_ungetu32(struct kvec *iov)
{
	__be32 *vp = (__be32 *)iov->iov_base;
	iov->iov_base = (void *)(vp - 1);
	iov->iov_len += sizeof(*vp);
}

static inline void svc_putu32(struct kvec *iov, __be32 val)
{
	__be32 *vp = iov->iov_base + iov->iov_len;
	*vp = val;
	iov->iov_len += sizeof(__be32);
}
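
/*
 * Illustrative sketch: the helpers above consume or append one 32-bit XDR
 * word at a time.  A decoder might read a word in host order, push it back
 * if it turns out not to be wanted, and append a status word to the reply.
 * The kvec choices below are the usual ones but are shown as an assumption:
 *
 *	struct kvec *argv = &rqstp->rq_arg.head[0];
 *	struct kvec *resv = &rqstp->rq_res.head[0];
 *	u32 opcode = svc_getnl(argv);
 *
 *	if (!opcode)
 *		svc_ungetu32(argv);
 *	svc_putnl(resv, 0);
 */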

/*
 * The context of a single thread, including the request currently being
 * processed.
 */
struct svc_rqst {
	struct list_head	rq_list;	/* idle list */
	struct list_head	rq_all;		/* all threads list */
	struct svc_xprt *	rq_xprt;	/* transport ptr */

	struct sockaddr_storage	rq_addr;	/* peer address */
	size_t			rq_addrlen;
	struct sockaddr_storage	rq_daddr;	/* dest addr of request
						 * - reply from here */
	size_t			rq_daddrlen;

	struct svc_serv *	rq_server;	/* RPC service definition */
	struct svc_pool *	rq_pool;	/* thread pool */
	struct svc_procedure *	rq_procinfo;	/* procedure info */
	struct auth_ops *	rq_authop;	/* authentication flavour */
	struct svc_cred		rq_cred;	/* auth info */
	void *			rq_xprt_ctxt;	/* transport specific context ptr */
	struct svc_deferred_req *rq_deferred;	/* deferred request we are replaying */
	int			rq_usedeferral;	/* use deferral */

	size_t			rq_xprt_hlen;	/* xprt header len */
	struct xdr_buf		rq_arg;
	struct xdr_buf		rq_res;
	struct page *		rq_pages[RPCSVC_MAXPAGES];
	struct page *		*rq_respages;	/* points into rq_pages */
	int			rq_resused;	/* number of pages used for result */

	struct kvec		rq_vec[RPCSVC_MAXPAGES]; /* generally useful.. */

	__be32			rq_xid;		/* transmission id */
	u32			rq_prog;	/* program number */
	u32			rq_vers;	/* program version */
	u32			rq_proc;	/* procedure number */
	u32			rq_prot;	/* IP protocol */
	unsigned short
				rq_secure  : 1;	/* secure port */

	void *			rq_argp;	/* decoded arguments */
	void *			rq_resp;	/* xdr'd results */
	void *			rq_auth_data;	/* flavor-specific data */

	int			rq_reserved;	/* space on socket outq
						 * reserved for this request
						 */

	struct cache_req	rq_chandle;	/* handle passed to caches for
						 * request delaying
						 */
	bool			rq_dropme;
	/* Catering to nfsd */
	struct auth_domain *	rq_client;	/* RPC peer info */
	struct auth_domain *	rq_gssclient;	/* "gss/"-style peer info */
	int			rq_cachetype;
	struct svc_cacherep *	rq_cacherep;	/* cache info */
	int			rq_splice_ok;	/* turned off in gss privacy
						 * to prevent encrypting page
						 * cache pages */
	wait_queue_head_t	rq_wait;	/* synchronization */
	struct task_struct	*rq_task;	/* service thread */
};

#define SVC_NET(svc_rqst)	(svc_rqst->rq_xprt->xpt_net)

/*
 * Rigorous type checking on sockaddr type conversions
 */
static inline struct sockaddr_in *svc_addr_in(const struct svc_rqst *rqst)
{
	return (struct sockaddr_in *) &rqst->rq_addr;
}

static inline struct sockaddr_in6 *svc_addr_in6(const struct svc_rqst *rqst)
{
	return (struct sockaddr_in6 *) &rqst->rq_addr;
}

static inline struct sockaddr *svc_addr(const struct svc_rqst *rqst)
{
	return (struct sockaddr *) &rqst->rq_addr;
}

static inline struct sockaddr_in *svc_daddr_in(const struct svc_rqst *rqst)
{
	return (struct sockaddr_in *) &rqst->rq_daddr;
}

static inline struct sockaddr_in6 *svc_daddr_in6(const struct svc_rqst *rqst)
{
	return (struct sockaddr_in6 *) &rqst->rq_daddr;
}

static inline struct sockaddr *svc_daddr(const struct svc_rqst *rqst)
{
	return (struct sockaddr *) &rqst->rq_daddr;
}
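
/*
 * Illustrative sketch: code that needs the peer address typically switches
 * on the address family before using the typed accessors above (the printk
 * calls are hypothetical placeholders for real uses of the address):
 *
 *	struct sockaddr *sap = svc_addr(rqstp);
 *
 *	switch (sap->sa_family) {
 *	case AF_INET:
 *		printk("peer %pI4\n", &svc_addr_in(rqstp)->sin_addr);
 *		break;
 *	case AF_INET6:
 *		printk("peer %pI6\n", &svc_addr_in6(rqstp)->sin6_addr);
 *		break;
 *	}
 */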

/*
 * Check buffer bounds after decoding arguments
 */
static inline int
xdr_argsize_check(struct svc_rqst *rqstp, __be32 *p)
{
	char *cp = (char *)p;
	struct kvec *vec = &rqstp->rq_arg.head[0];
	return cp >= (char *)vec->iov_base
		&& cp <= (char *)vec->iov_base + vec->iov_len;
}

static inline int
xdr_ressize_check(struct svc_rqst *rqstp, __be32 *p)
{
	struct kvec *vec = &rqstp->rq_res.head[0];
	char *cp = (char *)p;

	vec->iov_len = cp - (char *)vec->iov_base;

	return vec->iov_len <= PAGE_SIZE;
}
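
/*
 * Illustrative sketch (hypothetical decoder): an XDR decode routine advances
 * 'p' past the arguments it consumed and returns xdr_argsize_check() so that
 * an over-long decode is rejected; an encoder similarly finishes with
 * xdr_ressize_check() after writing the reply:
 *
 *	static int my_decode_args(struct svc_rqst *rqstp, __be32 *p, void *argp)
 *	{
 *		struct my_args *args = argp;
 *
 *		args->value = ntohl(*p++);
 *		return xdr_argsize_check(rqstp, p);
 *	}
 */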

static inline void svc_free_res_pages(struct svc_rqst *rqstp)
{
	while (rqstp->rq_resused) {
		struct page **pp = (rqstp->rq_respages +
				    --rqstp->rq_resused);
		if (*pp) {
			put_page(*pp);
			*pp = NULL;
		}
	}
}

struct svc_deferred_req {
	u32			prot;	/* protocol (UDP or TCP) */
	struct svc_xprt		*xprt;
	struct sockaddr_storage	addr;	/* where reply must go */
	size_t			addrlen;
	struct sockaddr_storage	daddr;	/* where reply must come from */
	size_t			daddrlen;
	struct cache_deferred_req handle;
	size_t			xprt_hlen;
	int			argslen;
	__be32			args[0];
};

/*
 * List of RPC programs on the same transport endpoint
 */
struct svc_program {
	struct svc_program *	pg_next;	/* other programs (same xprt) */
	u32			pg_prog;	/* program number */
	unsigned int		pg_lovers;	/* lowest version */
	unsigned int		pg_hivers;	/* highest version */
	unsigned int		pg_nvers;	/* number of versions */
	struct svc_version **	pg_vers;	/* version array */
	char *			pg_name;	/* service name */
	char *			pg_class;	/* class name: services sharing authentication */
	struct svc_stat *	pg_stats;	/* rpc statistics */
	int			(*pg_authenticate)(struct svc_rqst *);
};

/*
 * RPC program version
 */
struct svc_version {
	u32			vs_vers;	/* version number */
	u32			vs_nproc;	/* number of procedures */
	struct svc_procedure *	vs_proc;	/* per-procedure info */
	u32			vs_xdrsize;	/* xdrsize needed for this version */

	unsigned int		vs_hidden : 1;	/* Don't register with portmapper.
						 * Only used for nfsacl so far. */

	/* Override dispatch function (e.g. when caching replies).
	 * A return value of 0 means drop the request.
	 * vs_dispatch == NULL means use default dispatcher.
	 */
	int			(*vs_dispatch)(struct svc_rqst *, __be32 *);
};

/*
 * RPC procedure info
 */
typedef __be32	(*svc_procfunc)(struct svc_rqst *, void *argp, void *resp);
struct svc_procedure {
	svc_procfunc		pc_func;	/* process the request */
	kxdrproc_t		pc_decode;	/* XDR decode args */
	kxdrproc_t		pc_encode;	/* XDR encode result */
	kxdrproc_t		pc_release;	/* XDR free result */
	unsigned int		pc_argsize;	/* argument struct size */
	unsigned int		pc_ressize;	/* result struct size */
	unsigned int		pc_count;	/* call count */
	unsigned int		pc_cachetype;	/* cache info (NFS) */
	unsigned int		pc_xdrressize;	/* maximum size of XDR reply */
};
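
/*
 * Illustrative sketch (hypothetical "myprog" service, not from this header):
 * a service ties procedures, a version, and a program together roughly like
 * this; the program number, version number, and MYPROG_XDRSIZE are made up:
 *
 *	static struct svc_procedure	myprog_procedures2[] = {
 *		[0] = { .pc_func = (svc_procfunc)myprog_null, .pc_xdrressize = 1, },
 *	};
 *	static struct svc_version	myprog_version2 = {
 *		.vs_vers	= 2,
 *		.vs_nproc	= ARRAY_SIZE(myprog_procedures2),
 *		.vs_proc	= myprog_procedures2,
 *		.vs_xdrsize	= MYPROG_XDRSIZE,
 *	};
 *	static struct svc_version	*myprog_versions[] = {
 *		[2] = &myprog_version2,
 *	};
 *	struct svc_program		myprog_program = {
 *		.pg_prog	= 400099,
 *		.pg_nvers	= ARRAY_SIZE(myprog_versions),
 *		.pg_vers	= myprog_versions,
 *		.pg_name	= "myprog",
 *		.pg_class	= "myprog",
 *	};
 */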

/*
 * Function prototypes.
 */
int svc_rpcb_setup(struct svc_serv *serv, struct net *net);
void svc_rpcb_cleanup(struct svc_serv *serv, struct net *net);
int svc_bind(struct svc_serv *serv, struct net *net);
struct svc_serv *svc_create(struct svc_program *, unsigned int,
			    void (*shutdown)(struct svc_serv *, struct net *net));
struct svc_rqst *svc_prepare_thread(struct svc_serv *serv,
					struct svc_pool *pool, int node);
void		   svc_exit_thread(struct svc_rqst *);
struct svc_serv *  svc_create_pooled(struct svc_program *, unsigned int,
			void (*shutdown)(struct svc_serv *, struct net *net),
			svc_thread_fn, struct module *);
int		   svc_set_num_threads(struct svc_serv *, struct svc_pool *, int);
int		   svc_pool_stats_open(struct svc_serv *serv, struct file *file);
void		   svc_destroy(struct svc_serv *);
void		   svc_shutdown_net(struct svc_serv *, struct net *);
int		   svc_process(struct svc_rqst *);
int		   bc_svc_process(struct svc_serv *, struct rpc_rqst *,
			struct svc_rqst *);
int		   svc_register(const struct svc_serv *, struct net *, const int,
			const unsigned short, const unsigned short);

void		   svc_wake_up(struct svc_serv *);
void		   svc_reserve(struct svc_rqst *rqstp, int space);
struct svc_pool *  svc_pool_for_cpu(struct svc_serv *serv, int cpu);
char *		   svc_print_addr(struct svc_rqst *, char *, size_t);

#define	RPC_MAX_ADDRBUFLEN	(63U)
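
/*
 * Illustrative sketch (hypothetical names, error handling omitted): a
 * typical service lifecycle built from the prototypes above.  The transport
 * setup step uses svc_create_xprt() from linux/sunrpc/svc_xprt.h and
 * SVC_SOCK_DEFAULTS from linux/sunrpc/svcsock.h, both assumptions here:
 *
 *	serv = svc_create_pooled(&myprog_program, 64 * 1024,
 *				 my_shutdown, my_thread_fn, THIS_MODULE);
 *	svc_bind(serv, net);
 *	svc_create_xprt(serv, "tcp", net, PF_INET, port, SVC_SOCK_DEFAULTS);
 *	svc_set_num_threads(serv, NULL, nrservs);
 *	...
 *	svc_shutdown_net(serv, net);
 *	svc_destroy(serv);
 */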

/*
 * When we want to reduce the size of the reserved space in the response
 * buffer, we need to take into account the size of any checksum data that
 * may be at the end of the packet.  This is difficult to determine exactly
 * for all cases without actually generating the checksum, so we just use a
 * static value.
 */
static inline void svc_reserve_auth(struct svc_rqst *rqstp, int space)
{
	int added_space = 0;

	if (rqstp->rq_authop->flavour)
		added_space = RPC_MAX_AUTH_SIZE;
	svc_reserve(rqstp, space + added_space);
}

#endif /* SUNRPC_SVC_H */