Commit 9c9f3f5fa62cc4959e4d4d1cf1ec74f2d6ac1197

Authored by Andy Adamson
Committed by Benny Halevy
1 parent 7652e5a09b

nfs41: sunrpc: add a struct svc_xprt pointer to struct svc_serv for backchannel use

This svc_xprt is passed on to the callback service thread, to be used later
to process incoming svc_rqst's

Signed-off-by: Benny Halevy <bhalevy@panasas.com>
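
For context, a minimal sketch of the consuming side, assuming a dedicated
callback kthread as in the surrounding nfs41 patches: the transport queues
incoming backchannel rpc_rqst's on serv->sv_cb_list and wakes
serv->sv_cb_waitq, and a thread like the one below dequeues each request and
hands it to bc_svc_process(). The function name callback_svc and the
rq_bc_list member are illustrative assumptions here, not part of this commit.

/* Hypothetical consumer loop -- a sketch, not this commit's code. */
static int callback_svc(void *vrqstp)
{
	struct svc_rqst *rqstp = vrqstp;	/* this thread's request context */
	struct svc_serv *serv = rqstp->rq_server;
	struct rpc_rqst *req;

	while (!kthread_should_stop()) {
		/* sleep until a callback request arrives over the
		 * shared fore-channel connection */
		wait_event_interruptible(serv->sv_cb_waitq,
					 !list_empty(&serv->sv_cb_list));
		spin_lock_bh(&serv->sv_cb_lock);
		if (list_empty(&serv->sv_cb_list)) {
			spin_unlock_bh(&serv->sv_cb_lock);
			continue;
		}
		req = list_first_entry(&serv->sv_cb_list,
				       struct rpc_rqst, rq_bc_list);
		list_del(&req->rq_bc_list);
		spin_unlock_bh(&serv->sv_cb_lock);

		/* run the request through the normal svc machinery */
		bc_svc_process(serv, req, rqstp);
	}
	return 0;
}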

Showing 2 changed files with 5 additions and 0 deletions

include/linux/sunrpc/svc.h
/*
 * linux/include/linux/sunrpc/svc.h
 *
 * RPC server declarations.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */


#ifndef SUNRPC_SVC_H
#define SUNRPC_SVC_H

#include <linux/in.h>
#include <linux/in6.h>
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/auth.h>
#include <linux/sunrpc/svcauth.h>
#include <linux/wait.h>
#include <linux/mm.h>

/*
 * This is the RPC server thread function prototype
 */
typedef int (*svc_thread_fn)(void *);

/* statistics for svc_pool structures */
struct svc_pool_stats {
	unsigned long		packets;
	unsigned long		sockets_queued;
	unsigned long		threads_woken;
	unsigned long		overloads_avoided;
	unsigned long		threads_timedout;
};

/*
 *
 * RPC service thread pool.
 *
 * Pool of threads and temporary sockets.  Generally there is only
 * a single one of these per RPC service, but on NUMA machines those
 * services that can benefit from it (i.e. nfs but not lockd) will
 * have one pool per NUMA node.  This optimisation reduces cross-
 * node traffic on multi-node NUMA NFS servers.
 */
struct svc_pool {
	unsigned int		sp_id;		/* pool id; also node id on NUMA */
	spinlock_t		sp_lock;	/* protects all fields */
	struct list_head	sp_threads;	/* idle server threads */
	struct list_head	sp_sockets;	/* pending sockets */
	unsigned int		sp_nrthreads;	/* # of threads in pool */
	struct list_head	sp_all_threads;	/* all server threads */
	int			sp_nwaking;	/* number of threads woken but not yet active */
	struct svc_pool_stats	sp_stats;	/* statistics on pool operation */
} ____cacheline_aligned_in_smp;

/*
 * RPC service.
 *
 * An RPC service is a ``daemon,'' possibly multithreaded, which
 * receives and processes incoming RPC messages.
 * It has one or more transport sockets associated with it, and maintains
 * a list of idle threads waiting for input.
 *
 * We currently do not support more than one RPC program per daemon.
 */
struct svc_serv {
	struct svc_program *	sv_program;	/* RPC program */
	struct svc_stat *	sv_stats;	/* RPC statistics */
	spinlock_t		sv_lock;
	unsigned int		sv_nrthreads;	/* # of server threads */
	unsigned int		sv_maxconn;	/* max connections allowed or
						 * '0' causing max to be based
						 * on number of threads. */

	unsigned int		sv_max_payload;	/* datagram payload size */
	unsigned int		sv_max_mesg;	/* max_payload + 1 page for overheads */
	unsigned int		sv_xdrsize;	/* XDR buffer size */
	struct list_head	sv_permsocks;	/* all permanent sockets */
	struct list_head	sv_tempsocks;	/* all temporary sockets */
	int			sv_tmpcnt;	/* count of temporary sockets */
	struct timer_list	sv_temptimer;	/* timer for aging temporary sockets */

	char *			sv_name;	/* service name */

	unsigned int		sv_nrpools;	/* number of thread pools */
	struct svc_pool *	sv_pools;	/* array of thread pools */

	void			(*sv_shutdown)(struct svc_serv *serv);
						/* Callback to use when last thread
						 * exits.
						 */

	struct module *		sv_module;	/* optional module to count when
						 * adding threads */
	svc_thread_fn		sv_function;	/* main function for threads */
	unsigned int		sv_drc_max_pages; /* Total pages for DRC */
	unsigned int		sv_drc_pages_used;/* DRC pages used */
#if defined(CONFIG_NFS_V4_1)
	struct list_head	sv_cb_list;	/* queue for callback requests
						 * that arrive over the same
						 * connection */
	spinlock_t		sv_cb_lock;	/* protects the svc_cb_list */
	wait_queue_head_t	sv_cb_waitq;	/* sleep here if there are no
						 * entries in the svc_cb_list */
+	struct svc_xprt		*bc_xprt;	/* NFSv4.1 backchannel transport */
#endif /* CONFIG_NFS_V4_1 */
};

/*
 * We use sv_nrthreads as a reference count.  svc_destroy() drops
 * this refcount, so we need to bump it up around operations that
 * change the number of threads.  Horrible, but there it is.
 * Should be called with the BKL held.
 */
static inline void svc_get(struct svc_serv *serv)
{
	serv->sv_nrthreads++;
}

/*
 * Maximum payload size supported by a kernel RPC server.
 * This is used to determine the max number of pages nfsd is
 * willing to return in a single READ operation.
 *
 * These happen to all be powers of 2, which is not strictly
 * necessary but helps enforce the real limitation, which is
 * that they should be multiples of PAGE_CACHE_SIZE.
 *
 * For UDP transports, a block plus NFS, RPC, and UDP headers
 * has to fit into the IP datagram limit of 64K.  The largest
 * feasible number for all known page sizes is probably 48K,
 * but we choose 32K here.  This is the same as the historical
 * Linux limit; someone who cares more about NFS/UDP performance
 * can test a larger number.
 *
 * For TCP transports we have more freedom.  A size of 1MB is
 * chosen to match the client limit.  Other OSes are known to
 * have larger limits, but those numbers are probably beyond
 * the point of diminishing returns.
 */
#define RPCSVC_MAXPAYLOAD	(1*1024*1024u)
#define RPCSVC_MAXPAYLOAD_TCP	RPCSVC_MAXPAYLOAD
#define RPCSVC_MAXPAYLOAD_UDP	(32*1024u)

extern u32 svc_max_payload(const struct svc_rqst *rqstp);

/*
 * RPC Requests and replies are stored in one or more pages.
 * We maintain an array of pages for each server thread.
 * Requests are copied into these pages as they arrive.  Remaining
 * pages are available to write the reply into.
 *
 * Pages are sent using ->sendpage so each server thread needs to
 * allocate more to replace those used in sending.  To help keep track
 * of these pages we have a receive list where all pages initially live,
 * and a send list where pages are moved to when they are to be part
 * of a reply.
 *
 * We use xdr_buf for holding responses as it fits well with NFS
 * read responses (that have a header, and some data pages, and possibly
 * a tail) and means we can share some client side routines.
 *
 * The xdr_buf.head kvec always points to the first page in the rq_*pages
 * list.  The xdr_buf.pages pointer points to the second page on that
 * list.  xdr_buf.tail points to the end of the first page.
 * This assumes that the non-page part of an rpc reply will fit
 * in a page - NFSd ensures this.  lockd also has no trouble.
 *
 * Each request/reply pair can have at most one "payload", plus two pages,
 * one for the request, and one for the reply.
 * When using ->sendfile to return read data, we might need one extra page
 * if the request is not page-aligned.  So add another '1'.
 */
#define RPCSVC_MAXPAGES		((RPCSVC_MAXPAYLOAD+PAGE_SIZE-1)/PAGE_SIZE \
				+ 2 + 1)

static inline u32 svc_getnl(struct kvec *iov)
{
	__be32 val, *vp;
	vp = iov->iov_base;
	val = *vp++;
	iov->iov_base = (void*)vp;
	iov->iov_len -= sizeof(__be32);
	return ntohl(val);
}

static inline void svc_putnl(struct kvec *iov, u32 val)
{
	__be32 *vp = iov->iov_base + iov->iov_len;
	*vp = htonl(val);
	iov->iov_len += sizeof(__be32);
}

static inline __be32 svc_getu32(struct kvec *iov)
{
	__be32 val, *vp;
	vp = iov->iov_base;
	val = *vp++;
	iov->iov_base = (void*)vp;
	iov->iov_len -= sizeof(__be32);
	return val;
}

static inline void svc_ungetu32(struct kvec *iov)
{
	__be32 *vp = (__be32 *)iov->iov_base;
	iov->iov_base = (void *)(vp - 1);
	iov->iov_len += sizeof(*vp);
}

static inline void svc_putu32(struct kvec *iov, __be32 val)
{
	__be32 *vp = iov->iov_base + iov->iov_len;
	*vp = val;
	iov->iov_len += sizeof(__be32);
}

union svc_addr_u {
	struct in_addr		addr;
	struct in6_addr		addr6;
};

/*
 * The context of a single thread, including the request currently being
 * processed.
 */
struct svc_rqst {
	struct list_head	rq_list;	/* idle list */
	struct list_head	rq_all;		/* all threads list */
	struct svc_xprt *	rq_xprt;	/* transport ptr */
	struct sockaddr_storage	rq_addr;	/* peer address */
	size_t			rq_addrlen;

	struct svc_serv *	rq_server;	/* RPC service definition */
	struct svc_pool *	rq_pool;	/* thread pool */
	struct svc_procedure *	rq_procinfo;	/* procedure info */
	struct auth_ops *	rq_authop;	/* authentication flavour */
	u32			rq_flavor;	/* pseudoflavor */
	struct svc_cred		rq_cred;	/* auth info */
	void *			rq_xprt_ctxt;	/* transport specific context ptr */
	struct svc_deferred_req *rq_deferred;	/* deferred request we are replaying */
	int			rq_usedeferral;	/* use deferral */

	size_t			rq_xprt_hlen;	/* xprt header len */
	struct xdr_buf		rq_arg;
	struct xdr_buf		rq_res;
	struct page *		rq_pages[RPCSVC_MAXPAGES];
	struct page *		*rq_respages;	/* points into rq_pages */
	int			rq_resused;	/* number of pages used for result */

	struct kvec		rq_vec[RPCSVC_MAXPAGES]; /* generally useful.. */

	__be32			rq_xid;		/* transmission id */
	u32			rq_prog;	/* program number */
	u32			rq_vers;	/* program version */
	u32			rq_proc;	/* procedure number */
	u32			rq_prot;	/* IP protocol */
	unsigned short
				rq_secure : 1;	/* secure port */

	union svc_addr_u	rq_daddr;	/* dest addr of request
						 * - reply from here */

	void *			rq_argp;	/* decoded arguments */
	void *			rq_resp;	/* xdr'd results */
	void *			rq_auth_data;	/* flavor-specific data */

	int			rq_reserved;	/* space on socket outq
						 * reserved for this request
						 */

	struct cache_req	rq_chandle;	/* handle passed to caches for
						 * request delaying
						 */
	/* Catering to nfsd */
	struct auth_domain *	rq_client;	/* RPC peer info */
	struct auth_domain *	rq_gssclient;	/* "gss/"-style peer info */
	struct svc_cacherep *	rq_cacherep;	/* cache info */
	struct knfsd_fh *	rq_reffh;	/* Reference filehandle, used to
						 * determine what device number
						 * to report (real or virtual)
						 */
	int			rq_splice_ok;	/* turned off in gss privacy
						 * to prevent encrypting page
						 * cache pages */
	wait_queue_head_t	rq_wait;	/* synchronization */
	struct task_struct	*rq_task;	/* service thread */
	int			rq_waking;	/* 1 if thread is being woken */
};

/*
 * Rigorous type checking on sockaddr type conversions
 */
static inline struct sockaddr_in *svc_addr_in(const struct svc_rqst *rqst)
{
	return (struct sockaddr_in *) &rqst->rq_addr;
}

static inline struct sockaddr_in6 *svc_addr_in6(const struct svc_rqst *rqst)
{
	return (struct sockaddr_in6 *) &rqst->rq_addr;
}

static inline struct sockaddr *svc_addr(const struct svc_rqst *rqst)
{
	return (struct sockaddr *) &rqst->rq_addr;
}

/*
 * Check buffer bounds after decoding arguments
 */
static inline int
xdr_argsize_check(struct svc_rqst *rqstp, __be32 *p)
{
	char *cp = (char *)p;
	struct kvec *vec = &rqstp->rq_arg.head[0];
	return cp >= (char*)vec->iov_base
		&& cp <= (char*)vec->iov_base + vec->iov_len;
}

static inline int
xdr_ressize_check(struct svc_rqst *rqstp, __be32 *p)
{
	struct kvec *vec = &rqstp->rq_res.head[0];
	char *cp = (char*)p;

	vec->iov_len = cp - (char*)vec->iov_base;

	return vec->iov_len <= PAGE_SIZE;
}

static inline void svc_free_res_pages(struct svc_rqst *rqstp)
{
	while (rqstp->rq_resused) {
		struct page **pp = (rqstp->rq_respages +
				    --rqstp->rq_resused);
		if (*pp) {
			put_page(*pp);
			*pp = NULL;
		}
	}
}

struct svc_deferred_req {
	u32			prot;	/* protocol (UDP or TCP) */
	struct svc_xprt		*xprt;
	struct sockaddr_storage	addr;	/* where reply must go */
	size_t			addrlen;
	union svc_addr_u	daddr;	/* where reply must come from */
	struct cache_deferred_req handle;
	size_t			xprt_hlen;
	int			argslen;
	__be32			args[0];
};

/*
 * List of RPC programs on the same transport endpoint
 */
struct svc_program {
	struct svc_program *	pg_next;	/* other programs (same xprt) */
	u32			pg_prog;	/* program number */
	unsigned int		pg_lovers;	/* lowest version */
	unsigned int		pg_hivers;	/* highest version */
	unsigned int		pg_nvers;	/* number of versions */
	struct svc_version **	pg_vers;	/* version array */
	char *			pg_name;	/* service name */
	char *			pg_class;	/* class name: services sharing authentication */
	struct svc_stat *	pg_stats;	/* rpc statistics */
	int			(*pg_authenticate)(struct svc_rqst *);
};

/*
 * RPC program version
 */
struct svc_version {
	u32			vs_vers;	/* version number */
	u32			vs_nproc;	/* number of procedures */
	struct svc_procedure *	vs_proc;	/* per-procedure info */
	u32			vs_xdrsize;	/* xdrsize needed for this version */

	unsigned int		vs_hidden : 1;	/* Don't register with portmapper.
						 * Only used for nfsacl so far. */

	/* Override dispatch function (e.g. when caching replies).
	 * A return value of 0 means drop the request.
	 * vs_dispatch == NULL means use default dispatcher.
	 */
	int			(*vs_dispatch)(struct svc_rqst *, __be32 *);
};

/*
 * RPC procedure info
 */
typedef __be32	(*svc_procfunc)(struct svc_rqst *, void *argp, void *resp);
struct svc_procedure {
	svc_procfunc		pc_func;	/* process the request */
	kxdrproc_t		pc_decode;	/* XDR decode args */
	kxdrproc_t		pc_encode;	/* XDR encode result */
	kxdrproc_t		pc_release;	/* XDR free result */
	unsigned int		pc_argsize;	/* argument struct size */
	unsigned int		pc_ressize;	/* result struct size */
	unsigned int		pc_count;	/* call count */
	unsigned int		pc_cachetype;	/* cache info (NFS) */
	unsigned int		pc_xdrressize;	/* maximum size of XDR reply */
};

/*
 * Function prototypes.
 */
struct svc_serv *svc_create(struct svc_program *, unsigned int,
			    void (*shutdown)(struct svc_serv *));
struct svc_rqst *svc_prepare_thread(struct svc_serv *serv,
					struct svc_pool *pool);
void		   svc_exit_thread(struct svc_rqst *);
struct svc_serv *  svc_create_pooled(struct svc_program *, unsigned int,
			void (*shutdown)(struct svc_serv *),
			svc_thread_fn, struct module *);
int		   svc_set_num_threads(struct svc_serv *, struct svc_pool *, int);
int		   svc_pool_stats_open(struct svc_serv *serv, struct file *file);
void		   svc_destroy(struct svc_serv *);
int		   svc_process(struct svc_rqst *);
int		   bc_svc_process(struct svc_serv *, struct rpc_rqst *,
			struct svc_rqst *);
int		   svc_register(const struct svc_serv *, const int,
			const unsigned short, const unsigned short);

void		   svc_wake_up(struct svc_serv *);
void		   svc_reserve(struct svc_rqst *rqstp, int space);
struct svc_pool *  svc_pool_for_cpu(struct svc_serv *serv, int cpu);
char *		   svc_print_addr(struct svc_rqst *, char *, size_t);

#define	RPC_MAX_ADDRBUFLEN	(63U)

/*
 * When we want to reduce the size of the reserved space in the response
 * buffer, we need to take into account the size of any checksum data that
 * may be at the end of the packet. This is difficult to determine exactly
 * for all cases without actually generating the checksum, so we just use a
 * static value.
 */
static inline void svc_reserve_auth(struct svc_rqst *rqstp, int space)
{
	int added_space = 0;

	if (rqstp->rq_authop->flavour)
		added_space = RPC_MAX_AUTH_SIZE;
	svc_reserve(rqstp, space + added_space);
}

#endif /* SUNRPC_SVC_H */

net/sunrpc/svc.c

/*
 * linux/net/sunrpc/svc.c
 *
 * High-level RPC service routines
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 *
 * Multiple thread pools and NUMAisation
 * Copyright (c) 2006 Silicon Graphics, Inc.
 *	by Greg Banks <gnb@melbourne.sgi.com>
 */

#include <linux/linkage.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kthread.h>

#include <linux/sunrpc/types.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/bc_xprt.h>

#define RPCDBG_FACILITY	RPCDBG_SVCDSP

static void svc_unregister(const struct svc_serv *serv);

#define svc_serv_is_pooled(serv)    ((serv)->sv_function)

/*
 * Mode for mapping cpus to pools.
 */
enum {
	SVC_POOL_AUTO = -1,	/* choose one of the others */
	SVC_POOL_GLOBAL,	/* no mapping, just a single global pool
				 * (legacy & UP mode) */
	SVC_POOL_PERCPU,	/* one pool per cpu */
	SVC_POOL_PERNODE	/* one pool per numa node */
};
#define SVC_POOL_DEFAULT	SVC_POOL_GLOBAL

/*
 * Structure for mapping cpus to pools and vice versa.
 * Setup once during sunrpc initialisation.
 */
static struct svc_pool_map {
	int count;			/* How many svc_servs use us */
	int mode;			/* Note: int not enum to avoid
					 * warnings about "enumeration value
					 * not handled in switch" */
	unsigned int npools;
	unsigned int *pool_to;		/* maps pool id to cpu or node */
	unsigned int *to_pool;		/* maps cpu or node to pool id */
} svc_pool_map = {
	.count = 0,
	.mode = SVC_POOL_DEFAULT
};
static DEFINE_MUTEX(svc_pool_map_mutex);/* protects svc_pool_map.count only */

static int
param_set_pool_mode(const char *val, struct kernel_param *kp)
{
	int *ip = (int *)kp->arg;
	struct svc_pool_map *m = &svc_pool_map;
	int err;

	mutex_lock(&svc_pool_map_mutex);

	err = -EBUSY;
	if (m->count)
		goto out;

	err = 0;
	if (!strncmp(val, "auto", 4))
		*ip = SVC_POOL_AUTO;
	else if (!strncmp(val, "global", 6))
		*ip = SVC_POOL_GLOBAL;
	else if (!strncmp(val, "percpu", 6))
		*ip = SVC_POOL_PERCPU;
	else if (!strncmp(val, "pernode", 7))
		*ip = SVC_POOL_PERNODE;
	else
		err = -EINVAL;

out:
	mutex_unlock(&svc_pool_map_mutex);
	return err;
}

static int
param_get_pool_mode(char *buf, struct kernel_param *kp)
{
	int *ip = (int *)kp->arg;

	switch (*ip)
	{
	case SVC_POOL_AUTO:
		return strlcpy(buf, "auto", 20);
	case SVC_POOL_GLOBAL:
		return strlcpy(buf, "global", 20);
	case SVC_POOL_PERCPU:
		return strlcpy(buf, "percpu", 20);
	case SVC_POOL_PERNODE:
		return strlcpy(buf, "pernode", 20);
	default:
		return sprintf(buf, "%d", *ip);
	}
}

module_param_call(pool_mode, param_set_pool_mode, param_get_pool_mode,
		 &svc_pool_map.mode, 0644);

/*
 * Detect best pool mapping mode heuristically,
 * according to the machine's topology.
 */
static int
svc_pool_map_choose_mode(void)
{
	unsigned int node;

	if (num_online_nodes() > 1) {
		/*
		 * Actually have multiple NUMA nodes,
		 * so split pools on NUMA node boundaries
		 */
		return SVC_POOL_PERNODE;
	}

	node = any_online_node(node_online_map);
	if (nr_cpus_node(node) > 2) {
		/*
		 * Non-trivial SMP, or CONFIG_NUMA on
		 * non-NUMA hardware, e.g. with a generic
		 * x86_64 kernel on Xeons.  In this case we
		 * want to divide the pools on cpu boundaries.
		 */
		return SVC_POOL_PERCPU;
	}

	/* default: one global pool */
	return SVC_POOL_GLOBAL;
}

/*
 * Allocate the to_pool[] and pool_to[] arrays.
 * Returns 0 on success or an errno.
 */
static int
svc_pool_map_alloc_arrays(struct svc_pool_map *m, unsigned int maxpools)
{
	m->to_pool = kcalloc(maxpools, sizeof(unsigned int), GFP_KERNEL);
	if (!m->to_pool)
		goto fail;
	m->pool_to = kcalloc(maxpools, sizeof(unsigned int), GFP_KERNEL);
	if (!m->pool_to)
		goto fail_free;

	return 0;

fail_free:
	kfree(m->to_pool);
fail:
	return -ENOMEM;
}

/*
 * Initialise the pool map for SVC_POOL_PERCPU mode.
 * Returns number of pools or <0 on error.
 */
static int
svc_pool_map_init_percpu(struct svc_pool_map *m)
{
	unsigned int maxpools = nr_cpu_ids;
	unsigned int pidx = 0;
	unsigned int cpu;
	int err;

	err = svc_pool_map_alloc_arrays(m, maxpools);
	if (err)
		return err;

	for_each_online_cpu(cpu) {
		BUG_ON(pidx > maxpools);
		m->to_pool[cpu] = pidx;
		m->pool_to[pidx] = cpu;
		pidx++;
	}
	/* cpus brought online later all get mapped to pool0, sorry */

	return pidx;
};


/*
 * Initialise the pool map for SVC_POOL_PERNODE mode.
 * Returns number of pools or <0 on error.
 */
static int
svc_pool_map_init_pernode(struct svc_pool_map *m)
{
	unsigned int maxpools = nr_node_ids;
	unsigned int pidx = 0;
	unsigned int node;
	int err;

	err = svc_pool_map_alloc_arrays(m, maxpools);
	if (err)
		return err;

	for_each_node_with_cpus(node) {
		/* some architectures (e.g. SN2) have cpuless nodes */
		BUG_ON(pidx > maxpools);
		m->to_pool[node] = pidx;
		m->pool_to[pidx] = node;
		pidx++;
	}
	/* nodes brought online later all get mapped to pool0, sorry */

	return pidx;
}


/*
 * Add a reference to the global map of cpus to pools (and
 * vice versa).  Initialise the map if we're the first user.
 * Returns the number of pools.
 */
static unsigned int
svc_pool_map_get(void)
{
	struct svc_pool_map *m = &svc_pool_map;
	int npools = -1;

	mutex_lock(&svc_pool_map_mutex);

	if (m->count++) {
		mutex_unlock(&svc_pool_map_mutex);
		return m->npools;
	}

	if (m->mode == SVC_POOL_AUTO)
		m->mode = svc_pool_map_choose_mode();

	switch (m->mode) {
	case SVC_POOL_PERCPU:
		npools = svc_pool_map_init_percpu(m);
		break;
	case SVC_POOL_PERNODE:
		npools = svc_pool_map_init_pernode(m);
		break;
	}

	if (npools < 0) {
		/* default, or memory allocation failure */
		npools = 1;
		m->mode = SVC_POOL_GLOBAL;
	}
	m->npools = npools;

	mutex_unlock(&svc_pool_map_mutex);
	return m->npools;
}


/*
 * Drop a reference to the global map of cpus to pools.
 * When the last reference is dropped, the map data is
 * freed; this allows the sysadmin to change the pool
 * mode using the pool_mode module option without
 * rebooting or re-loading sunrpc.ko.
 */
static void
svc_pool_map_put(void)
{
	struct svc_pool_map *m = &svc_pool_map;

	mutex_lock(&svc_pool_map_mutex);

	if (!--m->count) {
		m->mode = SVC_POOL_DEFAULT;
		kfree(m->to_pool);
		kfree(m->pool_to);
		m->npools = 0;
	}

	mutex_unlock(&svc_pool_map_mutex);
}


/*
 * Set the given thread's cpus_allowed mask so that it
 * will only run on cpus in the given pool.
 */
static inline void
svc_pool_map_set_cpumask(struct task_struct *task, unsigned int pidx)
{
	struct svc_pool_map *m = &svc_pool_map;
	unsigned int node = m->pool_to[pidx];

	/*
	 * The caller checks for sv_nrpools > 1, which
	 * implies that we've been initialized.
	 */
	BUG_ON(m->count == 0);

	switch (m->mode) {
	case SVC_POOL_PERCPU:
	{
		set_cpus_allowed_ptr(task, cpumask_of(node));
		break;
	}
	case SVC_POOL_PERNODE:
	{
		set_cpus_allowed_ptr(task, cpumask_of_node(node));
		break;
	}
	}
}

/*
 * Use the mapping mode to choose a pool for a given CPU.
 * Used when enqueueing an incoming RPC.  Always returns
 * a non-NULL pool pointer.
 */
struct svc_pool *
svc_pool_for_cpu(struct svc_serv *serv, int cpu)
{
	struct svc_pool_map *m = &svc_pool_map;
	unsigned int pidx = 0;

	/*
	 * An uninitialised map happens in a pure client when
	 * lockd is brought up, so silently treat it the
	 * same as SVC_POOL_GLOBAL.
	 */
	if (svc_serv_is_pooled(serv)) {
		switch (m->mode) {
		case SVC_POOL_PERCPU:
			pidx = m->to_pool[cpu];
			break;
		case SVC_POOL_PERNODE:
			pidx = m->to_pool[cpu_to_node(cpu)];
			break;
		}
	}
	return &serv->sv_pools[pidx % serv->sv_nrpools];
}


/*
 * Create an RPC service
 */
static struct svc_serv *
__svc_create(struct svc_program *prog, unsigned int bufsize, int npools,
	     void (*shutdown)(struct svc_serv *serv))
{
	struct svc_serv	*serv;
	unsigned int vers;
	unsigned int xdrsize;
	unsigned int i;

	if (!(serv = kzalloc(sizeof(*serv), GFP_KERNEL)))
		return NULL;
	serv->sv_name      = prog->pg_name;
	serv->sv_program   = prog;
	serv->sv_nrthreads = 1;
	serv->sv_stats     = prog->pg_stats;
	if (bufsize > RPCSVC_MAXPAYLOAD)
		bufsize = RPCSVC_MAXPAYLOAD;
	serv->sv_max_payload = bufsize? bufsize : 4096;
	serv->sv_max_mesg  = roundup(serv->sv_max_payload + PAGE_SIZE, PAGE_SIZE);
	serv->sv_shutdown  = shutdown;
	xdrsize = 0;
	while (prog) {
		prog->pg_lovers = prog->pg_nvers-1;
		for (vers=0; vers<prog->pg_nvers ; vers++)
			if (prog->pg_vers[vers]) {
				prog->pg_hivers = vers;
				if (prog->pg_lovers > vers)
					prog->pg_lovers = vers;
				if (prog->pg_vers[vers]->vs_xdrsize > xdrsize)
					xdrsize = prog->pg_vers[vers]->vs_xdrsize;
			}
		prog = prog->pg_next;
	}
	serv->sv_xdrsize   = xdrsize;
	INIT_LIST_HEAD(&serv->sv_tempsocks);
	INIT_LIST_HEAD(&serv->sv_permsocks);
	init_timer(&serv->sv_temptimer);
	spin_lock_init(&serv->sv_lock);

	serv->sv_nrpools = npools;
	serv->sv_pools =
		kcalloc(serv->sv_nrpools, sizeof(struct svc_pool),
			GFP_KERNEL);
	if (!serv->sv_pools) {
		kfree(serv);
		return NULL;
	}

	for (i = 0; i < serv->sv_nrpools; i++) {
		struct svc_pool *pool = &serv->sv_pools[i];

		dprintk("svc: initialising pool %u for %s\n",
				i, serv->sv_name);

		pool->sp_id = i;
		INIT_LIST_HEAD(&pool->sp_threads);
		INIT_LIST_HEAD(&pool->sp_sockets);
		INIT_LIST_HEAD(&pool->sp_all_threads);
		spin_lock_init(&pool->sp_lock);
	}

	/* Remove any stale portmap registrations */
	svc_unregister(serv);

	return serv;
}

struct svc_serv *
svc_create(struct svc_program *prog, unsigned int bufsize,
	   void (*shutdown)(struct svc_serv *serv))
{
	return __svc_create(prog, bufsize, /*npools*/1, shutdown);
}
EXPORT_SYMBOL_GPL(svc_create);

struct svc_serv *
svc_create_pooled(struct svc_program *prog, unsigned int bufsize,
		  void (*shutdown)(struct svc_serv *serv),
		  svc_thread_fn func, struct module *mod)
{
	struct svc_serv *serv;
	unsigned int npools = svc_pool_map_get();

	serv = __svc_create(prog, bufsize, npools, shutdown);

	if (serv != NULL) {
		serv->sv_function = func;
		serv->sv_module = mod;
	}

	return serv;
}
EXPORT_SYMBOL_GPL(svc_create_pooled);

/*
 * Destroy an RPC service. Should be called with appropriate locking to
 * protect the sv_nrthreads, sv_permsocks and sv_tempsocks.
 */
void
svc_destroy(struct svc_serv *serv)
{
	dprintk("svc: svc_destroy(%s, %d)\n",
		serv->sv_program->pg_name,
		serv->sv_nrthreads);

	if (serv->sv_nrthreads) {
		if (--(serv->sv_nrthreads) != 0) {
			svc_sock_update_bufs(serv);
			return;
		}
	} else
		printk("svc_destroy: no threads for serv=%p!\n", serv);

	del_timer_sync(&serv->sv_temptimer);

	svc_close_all(&serv->sv_tempsocks);

	if (serv->sv_shutdown)
		serv->sv_shutdown(serv);

	svc_close_all(&serv->sv_permsocks);

	BUG_ON(!list_empty(&serv->sv_permsocks));
	BUG_ON(!list_empty(&serv->sv_tempsocks));

	cache_clean_deferred(serv);

	if (svc_serv_is_pooled(serv))
		svc_pool_map_put();

+#if defined(CONFIG_NFS_V4_1)
+	svc_sock_destroy(serv->bc_xprt);
+#endif /* CONFIG_NFS_V4_1 */
+
490 svc_unregister(serv); 494 svc_unregister(serv);
491 kfree(serv->sv_pools); 495 kfree(serv->sv_pools);
492 kfree(serv); 496 kfree(serv);
493 } 497 }
494 EXPORT_SYMBOL_GPL(svc_destroy); 498 EXPORT_SYMBOL_GPL(svc_destroy);
495 499
496 /* 500 /*
497 * Allocate an RPC server's buffer space. 501 * Allocate an RPC server's buffer space.
498 * We allocate pages and place them in rq_argpages. 502 * We allocate pages and place them in rq_argpages.
499 */ 503 */
500 static int 504 static int
501 svc_init_buffer(struct svc_rqst *rqstp, unsigned int size) 505 svc_init_buffer(struct svc_rqst *rqstp, unsigned int size)
502 { 506 {
503 unsigned int pages, arghi; 507 unsigned int pages, arghi;
504 508
505 pages = size / PAGE_SIZE + 1; /* extra page as we hold both request and reply. 509 pages = size / PAGE_SIZE + 1; /* extra page as we hold both request and reply.
506 * We assume each is at most one page 510 * We assume each is at most one page
507 */ 511 */
508 arghi = 0; 512 arghi = 0;
509 BUG_ON(pages > RPCSVC_MAXPAGES); 513 BUG_ON(pages > RPCSVC_MAXPAGES);
510 while (pages) { 514 while (pages) {
511 struct page *p = alloc_page(GFP_KERNEL); 515 struct page *p = alloc_page(GFP_KERNEL);
512 if (!p) 516 if (!p)
513 break; 517 break;
514 rqstp->rq_pages[arghi++] = p; 518 rqstp->rq_pages[arghi++] = p;
515 pages--; 519 pages--;
516 } 520 }
517 return pages == 0; 521 return pages == 0;
518 } 522 }
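For example, with 4096-byte pages and an sv_max_mesg of 32768, this allocates 32768/4096 + 1 = 9 pages; the extra page exists because the buffer must hold both a request and a reply head, each assumed to fit in a single page.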
519 523
520 /* 524 /*
521 * Release an RPC server buffer 525 * Release an RPC server buffer
522 */ 526 */
523 static void 527 static void
524 svc_release_buffer(struct svc_rqst *rqstp) 528 svc_release_buffer(struct svc_rqst *rqstp)
525 { 529 {
526 unsigned int i; 530 unsigned int i;
527 531
528 for (i = 0; i < ARRAY_SIZE(rqstp->rq_pages); i++) 532 for (i = 0; i < ARRAY_SIZE(rqstp->rq_pages); i++)
529 if (rqstp->rq_pages[i]) 533 if (rqstp->rq_pages[i])
530 put_page(rqstp->rq_pages[i]); 534 put_page(rqstp->rq_pages[i]);
531 } 535 }
532 536
533 struct svc_rqst * 537 struct svc_rqst *
534 svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool) 538 svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool)
535 { 539 {
536 struct svc_rqst *rqstp; 540 struct svc_rqst *rqstp;
537 541
538 rqstp = kzalloc(sizeof(*rqstp), GFP_KERNEL); 542 rqstp = kzalloc(sizeof(*rqstp), GFP_KERNEL);
539 if (!rqstp) 543 if (!rqstp)
540 goto out_enomem; 544 goto out_enomem;
541 545
542 init_waitqueue_head(&rqstp->rq_wait); 546 init_waitqueue_head(&rqstp->rq_wait);
543 547
544 serv->sv_nrthreads++; 548 serv->sv_nrthreads++;
545 spin_lock_bh(&pool->sp_lock); 549 spin_lock_bh(&pool->sp_lock);
546 pool->sp_nrthreads++; 550 pool->sp_nrthreads++;
547 list_add(&rqstp->rq_all, &pool->sp_all_threads); 551 list_add(&rqstp->rq_all, &pool->sp_all_threads);
548 spin_unlock_bh(&pool->sp_lock); 552 spin_unlock_bh(&pool->sp_lock);
549 rqstp->rq_server = serv; 553 rqstp->rq_server = serv;
550 rqstp->rq_pool = pool; 554 rqstp->rq_pool = pool;
551 555
552 rqstp->rq_argp = kmalloc(serv->sv_xdrsize, GFP_KERNEL); 556 rqstp->rq_argp = kmalloc(serv->sv_xdrsize, GFP_KERNEL);
553 if (!rqstp->rq_argp) 557 if (!rqstp->rq_argp)
554 goto out_thread; 558 goto out_thread;
555 559
556 rqstp->rq_resp = kmalloc(serv->sv_xdrsize, GFP_KERNEL); 560 rqstp->rq_resp = kmalloc(serv->sv_xdrsize, GFP_KERNEL);
557 if (!rqstp->rq_resp) 561 if (!rqstp->rq_resp)
558 goto out_thread; 562 goto out_thread;
559 563
560 if (!svc_init_buffer(rqstp, serv->sv_max_mesg)) 564 if (!svc_init_buffer(rqstp, serv->sv_max_mesg))
561 goto out_thread; 565 goto out_thread;
562 566
563 return rqstp; 567 return rqstp;
564 out_thread: 568 out_thread:
565 svc_exit_thread(rqstp); 569 svc_exit_thread(rqstp);
566 out_enomem: 570 out_enomem:
567 return ERR_PTR(-ENOMEM); 571 return ERR_PTR(-ENOMEM);
568 } 572 }
569 EXPORT_SYMBOL_GPL(svc_prepare_thread); 573 EXPORT_SYMBOL_GPL(svc_prepare_thread);
570 574
571 /* 575 /*
572 * Choose a pool in which to create a new thread, for svc_set_num_threads 576 * Choose a pool in which to create a new thread, for svc_set_num_threads
573 */ 577 */
574 static inline struct svc_pool * 578 static inline struct svc_pool *
575 choose_pool(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state) 579 choose_pool(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
576 { 580 {
577 if (pool != NULL) 581 if (pool != NULL)
578 return pool; 582 return pool;
579 583
580 return &serv->sv_pools[(*state)++ % serv->sv_nrpools]; 584 return &serv->sv_pools[(*state)++ % serv->sv_nrpools];
581 } 585 }
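For example, with serv->sv_nrpools == 4 and *state starting at 5, successive calls return pools 1, 2, 3, 0, 1, ..., so newly created threads are spread evenly across the pools.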
582 586
583 /* 587 /*
584 * Choose a thread to kill, for svc_set_num_threads 588 * Choose a thread to kill, for svc_set_num_threads
585 */ 589 */
586 static inline struct task_struct * 590 static inline struct task_struct *
587 choose_victim(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state) 591 choose_victim(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
588 { 592 {
589 unsigned int i; 593 unsigned int i;
590 struct task_struct *task = NULL; 594 struct task_struct *task = NULL;
591 595
592 if (pool != NULL) { 596 if (pool != NULL) {
593 spin_lock_bh(&pool->sp_lock); 597 spin_lock_bh(&pool->sp_lock);
594 } else { 598 } else {
595 /* choose a pool in round-robin fashion */ 599 /* choose a pool in round-robin fashion */
596 for (i = 0; i < serv->sv_nrpools; i++) { 600 for (i = 0; i < serv->sv_nrpools; i++) {
597 pool = &serv->sv_pools[--(*state) % serv->sv_nrpools]; 601 pool = &serv->sv_pools[--(*state) % serv->sv_nrpools];
598 spin_lock_bh(&pool->sp_lock); 602 spin_lock_bh(&pool->sp_lock);
599 if (!list_empty(&pool->sp_all_threads)) 603 if (!list_empty(&pool->sp_all_threads))
600 goto found_pool; 604 goto found_pool;
601 spin_unlock_bh(&pool->sp_lock); 605 spin_unlock_bh(&pool->sp_lock);
602 } 606 }
603 return NULL; 607 return NULL;
604 } 608 }
605 609
606 found_pool: 610 found_pool:
607 if (!list_empty(&pool->sp_all_threads)) { 611 if (!list_empty(&pool->sp_all_threads)) {
608 struct svc_rqst *rqstp; 612 struct svc_rqst *rqstp;
609 613
610 /* 614 /*
611 * Remove from the pool->sp_all_threads list 615 * Remove from the pool->sp_all_threads list
612 * so we don't try to kill it again. 616 * so we don't try to kill it again.
613 */ 617 */
614 rqstp = list_entry(pool->sp_all_threads.next, struct svc_rqst, rq_all); 618 rqstp = list_entry(pool->sp_all_threads.next, struct svc_rqst, rq_all);
615 list_del_init(&rqstp->rq_all); 619 list_del_init(&rqstp->rq_all);
616 task = rqstp->rq_task; 620 task = rqstp->rq_task;
617 } 621 }
618 spin_unlock_bh(&pool->sp_lock); 622 spin_unlock_bh(&pool->sp_lock);
619 623
620 return task; 624 return task;
621 } 625 }
622 626
623 /* 627 /*
624 * Create or destroy enough new threads to make the number 628 * Create or destroy enough new threads to make the number
625 * of threads the given number. If `pool' is non-NULL, applies 629 * of threads the given number. If `pool' is non-NULL, applies
626 * only to threads in that pool, otherwise round-robins between 630 * only to threads in that pool, otherwise round-robins between
627 * all pools. Must be called with a svc_get() reference and 631 * all pools. Must be called with a svc_get() reference and
628 * the BKL or another lock to protect access to svc_serv fields. 632 * the BKL or another lock to protect access to svc_serv fields.
629 * 633 *
630 * Destroying threads relies on the service threads filling in 634 * Destroying threads relies on the service threads filling in
631 * rqstp->rq_task, which only the nfs ones do. Assumes the serv 635 * rqstp->rq_task, which only the nfs ones do. Assumes the serv
632 * has been created using svc_create_pooled(). 636 * has been created using svc_create_pooled().
633 * 637 *
634 * Based on code that used to be in nfsd_svc() but tweaked 638 * Based on code that used to be in nfsd_svc() but tweaked
635 * to be pool-aware. 639 * to be pool-aware.
636 */ 640 */
637 int 641 int
638 svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs) 642 svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
639 { 643 {
640 struct svc_rqst *rqstp; 644 struct svc_rqst *rqstp;
641 struct task_struct *task; 645 struct task_struct *task;
642 struct svc_pool *chosen_pool; 646 struct svc_pool *chosen_pool;
643 int error = 0; 647 int error = 0;
644 unsigned int state = serv->sv_nrthreads-1; 648 unsigned int state = serv->sv_nrthreads-1;
645 649
646 if (pool == NULL) { 650 if (pool == NULL) {
647 /* The -1 assumes caller has done a svc_get() */ 651 /* The -1 assumes caller has done a svc_get() */
648 nrservs -= (serv->sv_nrthreads-1); 652 nrservs -= (serv->sv_nrthreads-1);
649 } else { 653 } else {
650 spin_lock_bh(&pool->sp_lock); 654 spin_lock_bh(&pool->sp_lock);
651 nrservs -= pool->sp_nrthreads; 655 nrservs -= pool->sp_nrthreads;
652 spin_unlock_bh(&pool->sp_lock); 656 spin_unlock_bh(&pool->sp_lock);
653 } 657 }
654 658
655 /* create new threads */ 659 /* create new threads */
656 while (nrservs > 0) { 660 while (nrservs > 0) {
657 nrservs--; 661 nrservs--;
658 chosen_pool = choose_pool(serv, pool, &state); 662 chosen_pool = choose_pool(serv, pool, &state);
659 663
660 rqstp = svc_prepare_thread(serv, chosen_pool); 664 rqstp = svc_prepare_thread(serv, chosen_pool);
661 if (IS_ERR(rqstp)) { 665 if (IS_ERR(rqstp)) {
662 error = PTR_ERR(rqstp); 666 error = PTR_ERR(rqstp);
663 break; 667 break;
664 } 668 }
665 669
666 __module_get(serv->sv_module); 670 __module_get(serv->sv_module);
667 task = kthread_create(serv->sv_function, rqstp, serv->sv_name); 671 task = kthread_create(serv->sv_function, rqstp, serv->sv_name);
668 if (IS_ERR(task)) { 672 if (IS_ERR(task)) {
669 error = PTR_ERR(task); 673 error = PTR_ERR(task);
670 module_put(serv->sv_module); 674 module_put(serv->sv_module);
671 svc_exit_thread(rqstp); 675 svc_exit_thread(rqstp);
672 break; 676 break;
673 } 677 }
674 678
675 rqstp->rq_task = task; 679 rqstp->rq_task = task;
676 if (serv->sv_nrpools > 1) 680 if (serv->sv_nrpools > 1)
677 svc_pool_map_set_cpumask(task, chosen_pool->sp_id); 681 svc_pool_map_set_cpumask(task, chosen_pool->sp_id);
678 682
679 svc_sock_update_bufs(serv); 683 svc_sock_update_bufs(serv);
680 wake_up_process(task); 684 wake_up_process(task);
681 } 685 }
682 /* destroy old threads */ 686 /* destroy old threads */
683 while (nrservs < 0 && 687 while (nrservs < 0 &&
684 (task = choose_victim(serv, pool, &state)) != NULL) { 688 (task = choose_victim(serv, pool, &state)) != NULL) {
685 send_sig(SIGINT, task, 1); 689 send_sig(SIGINT, task, 1);
686 nrservs++; 690 nrservs++;
687 } 691 }
688 692
689 return error; 693 return error;
690 } 694 }
691 EXPORT_SYMBOL_GPL(svc_set_num_threads); 695 EXPORT_SYMBOL_GPL(svc_set_num_threads);
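A sketch of the calling convention described in the comment block above, loosely following nfsd: the caller holds a lock protecting the svc_serv fields and already owns one reference (the one taken at creation time plays the svc_get() role):

	mutex_lock(&my_service_mutex);	/* hypothetical, stands in for the BKL */
	/* the reference from svc_create_pooled() counts as our svc_get() */
	error = svc_set_num_threads(serv, NULL, nrservs);
	svc_destroy(serv);	/* drop that reference; frees serv if it was the last */
	mutex_unlock(&my_service_mutex);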
692 696
693 /* 697 /*
694 * Called from a server thread as it's exiting. Caller must hold the BKL or 698 * Called from a server thread as it's exiting. Caller must hold the BKL or
695 * the "service mutex", whichever is appropriate for the service. 699 * the "service mutex", whichever is appropriate for the service.
696 */ 700 */
697 void 701 void
698 svc_exit_thread(struct svc_rqst *rqstp) 702 svc_exit_thread(struct svc_rqst *rqstp)
699 { 703 {
700 struct svc_serv *serv = rqstp->rq_server; 704 struct svc_serv *serv = rqstp->rq_server;
701 struct svc_pool *pool = rqstp->rq_pool; 705 struct svc_pool *pool = rqstp->rq_pool;
702 706
703 svc_release_buffer(rqstp); 707 svc_release_buffer(rqstp);
704 kfree(rqstp->rq_resp); 708 kfree(rqstp->rq_resp);
705 kfree(rqstp->rq_argp); 709 kfree(rqstp->rq_argp);
706 kfree(rqstp->rq_auth_data); 710 kfree(rqstp->rq_auth_data);
707 711
708 spin_lock_bh(&pool->sp_lock); 712 spin_lock_bh(&pool->sp_lock);
709 pool->sp_nrthreads--; 713 pool->sp_nrthreads--;
710 list_del(&rqstp->rq_all); 714 list_del(&rqstp->rq_all);
711 spin_unlock_bh(&pool->sp_lock); 715 spin_unlock_bh(&pool->sp_lock);
712 716
713 kfree(rqstp); 717 kfree(rqstp);
714 718
715 /* Release the server */ 719 /* Release the server */
716 if (serv) 720 if (serv)
717 svc_destroy(serv); 721 svc_destroy(serv);
718 } 722 }
719 EXPORT_SYMBOL_GPL(svc_exit_thread); 723 EXPORT_SYMBOL_GPL(svc_exit_thread);
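The matching exit path in the thread function itself looks roughly like this, assuming the thread was started by svc_set_num_threads() above (my_thread_fn is hypothetical):

	static int my_thread_fn(void *data)
	{
		struct svc_rqst *rqstp = data;

		/* ... request loop, see svc_process() below ... */

		svc_exit_thread(rqstp);	/* may end up in svc_destroy() */
		/* balance the __module_get() done in svc_set_num_threads() */
		module_put_and_exit(0);
		return 0;
	}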
720 724
721 /* 725 /*
722 * Register an "inet" protocol family netid with the local 726 * Register an "inet" protocol family netid with the local
723 * rpcbind daemon via an rpcbind v4 SET request. 727 * rpcbind daemon via an rpcbind v4 SET request.
724 * 728 *
725 * No netconfig infrastructure is available in the kernel, so 729 * No netconfig infrastructure is available in the kernel, so
726 * we map IP_ protocol numbers to netids by hand. 730 * we map IP_ protocol numbers to netids by hand.
727 * 731 *
728 * Returns zero on success; a negative errno value is returned 732 * Returns zero on success; a negative errno value is returned
729 * if any error occurs. 733 * if any error occurs.
730 */ 734 */
731 static int __svc_rpcb_register4(const u32 program, const u32 version, 735 static int __svc_rpcb_register4(const u32 program, const u32 version,
732 const unsigned short protocol, 736 const unsigned short protocol,
733 const unsigned short port) 737 const unsigned short port)
734 { 738 {
735 const struct sockaddr_in sin = { 739 const struct sockaddr_in sin = {
736 .sin_family = AF_INET, 740 .sin_family = AF_INET,
737 .sin_addr.s_addr = htonl(INADDR_ANY), 741 .sin_addr.s_addr = htonl(INADDR_ANY),
738 .sin_port = htons(port), 742 .sin_port = htons(port),
739 }; 743 };
740 const char *netid; 744 const char *netid;
741 int error; 745 int error;
742 746
743 switch (protocol) { 747 switch (protocol) {
744 case IPPROTO_UDP: 748 case IPPROTO_UDP:
745 netid = RPCBIND_NETID_UDP; 749 netid = RPCBIND_NETID_UDP;
746 break; 750 break;
747 case IPPROTO_TCP: 751 case IPPROTO_TCP:
748 netid = RPCBIND_NETID_TCP; 752 netid = RPCBIND_NETID_TCP;
749 break; 753 break;
750 default: 754 default:
751 return -ENOPROTOOPT; 755 return -ENOPROTOOPT;
752 } 756 }
753 757
754 error = rpcb_v4_register(program, version, 758 error = rpcb_v4_register(program, version,
755 (const struct sockaddr *)&sin, netid); 759 (const struct sockaddr *)&sin, netid);
756 760
757 /* 761 /*
758 * User space didn't support rpcbind v4, so retry this 762 * User space didn't support rpcbind v4, so retry this
759 * registration request with the legacy rpcbind v2 protocol. 763 * registration request with the legacy rpcbind v2 protocol.
760 */ 764 */
761 if (error == -EPROTONOSUPPORT) 765 if (error == -EPROTONOSUPPORT)
762 error = rpcb_register(program, version, protocol, port); 766 error = rpcb_register(program, version, protocol, port);
763 767
764 return error; 768 return error;
765 } 769 }
766 770
767 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 771 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
768 /* 772 /*
769 * Register an "inet6" protocol family netid with the local 773 * Register an "inet6" protocol family netid with the local
770 * rpcbind daemon via an rpcbind v4 SET request. 774 * rpcbind daemon via an rpcbind v4 SET request.
771 * 775 *
772 * No netconfig infrastructure is available in the kernel, so 776 * No netconfig infrastructure is available in the kernel, so
773 * we map IP_ protocol numbers to netids by hand. 777 * we map IP_ protocol numbers to netids by hand.
774 * 778 *
775 * Returns zero on success; a negative errno value is returned 779 * Returns zero on success; a negative errno value is returned
776 * if any error occurs. 780 * if any error occurs.
777 */ 781 */
778 static int __svc_rpcb_register6(const u32 program, const u32 version, 782 static int __svc_rpcb_register6(const u32 program, const u32 version,
779 const unsigned short protocol, 783 const unsigned short protocol,
780 const unsigned short port) 784 const unsigned short port)
781 { 785 {
782 const struct sockaddr_in6 sin6 = { 786 const struct sockaddr_in6 sin6 = {
783 .sin6_family = AF_INET6, 787 .sin6_family = AF_INET6,
784 .sin6_addr = IN6ADDR_ANY_INIT, 788 .sin6_addr = IN6ADDR_ANY_INIT,
785 .sin6_port = htons(port), 789 .sin6_port = htons(port),
786 }; 790 };
787 const char *netid; 791 const char *netid;
788 int error; 792 int error;
789 793
790 switch (protocol) { 794 switch (protocol) {
791 case IPPROTO_UDP: 795 case IPPROTO_UDP:
792 netid = RPCBIND_NETID_UDP6; 796 netid = RPCBIND_NETID_UDP6;
793 break; 797 break;
794 case IPPROTO_TCP: 798 case IPPROTO_TCP:
795 netid = RPCBIND_NETID_TCP6; 799 netid = RPCBIND_NETID_TCP6;
796 break; 800 break;
797 default: 801 default:
798 return -ENOPROTOOPT; 802 return -ENOPROTOOPT;
799 } 803 }
800 804
801 error = rpcb_v4_register(program, version, 805 error = rpcb_v4_register(program, version,
802 (const struct sockaddr *)&sin6, netid); 806 (const struct sockaddr *)&sin6, netid);
803 807
804 /* 808 /*
805 * User space didn't support rpcbind version 4, so we won't 809 * User space didn't support rpcbind version 4, so we won't
806 * use a PF_INET6 listener. 810 * use a PF_INET6 listener.
807 */ 811 */
808 if (error == -EPROTONOSUPPORT) 812 if (error == -EPROTONOSUPPORT)
809 error = -EAFNOSUPPORT; 813 error = -EAFNOSUPPORT;
810 814
811 return error; 815 return error;
812 } 816 }
813 #endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */ 817 #endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
814 818
815 /* 819 /*
816 * Register a kernel RPC service via rpcbind version 4. 820 * Register a kernel RPC service via rpcbind version 4.
817 * 821 *
818 * Returns zero on success; a negative errno value is returned 822 * Returns zero on success; a negative errno value is returned
819 * if any error occurs. 823 * if any error occurs.
820 */ 824 */
821 static int __svc_register(const char *progname, 825 static int __svc_register(const char *progname,
822 const u32 program, const u32 version, 826 const u32 program, const u32 version,
823 const int family, 827 const int family,
824 const unsigned short protocol, 828 const unsigned short protocol,
825 const unsigned short port) 829 const unsigned short port)
826 { 830 {
827 int error = -EAFNOSUPPORT; 831 int error = -EAFNOSUPPORT;
828 832
829 switch (family) { 833 switch (family) {
830 case PF_INET: 834 case PF_INET:
831 error = __svc_rpcb_register4(program, version, 835 error = __svc_rpcb_register4(program, version,
832 protocol, port); 836 protocol, port);
833 break; 837 break;
834 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 838 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
835 case PF_INET6: 839 case PF_INET6:
836 error = __svc_rpcb_register6(program, version, 840 error = __svc_rpcb_register6(program, version,
837 protocol, port); 841 protocol, port);
838 #endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */ 842 #endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
839 } 843 }
840 844
841 if (error < 0) 845 if (error < 0)
842 printk(KERN_WARNING "svc: failed to register %sv%u RPC " 846 printk(KERN_WARNING "svc: failed to register %sv%u RPC "
843 "service (errno %d).\n", progname, version, -error); 847 "service (errno %d).\n", progname, version, -error);
844 return error; 848 return error;
845 } 849 }
846 850
847 /** 851 /**
848 * svc_register - register an RPC service with the local portmapper 852 * svc_register - register an RPC service with the local portmapper
849 * @serv: svc_serv struct for the service to register 853 * @serv: svc_serv struct for the service to register
850 * @family: protocol family of service's listener socket 854 * @family: protocol family of service's listener socket
851 * @proto: transport protocol number to advertise 855 * @proto: transport protocol number to advertise
852 * @port: port to advertise 856 * @port: port to advertise
853 * 857 *
854 * Service is registered for any address in the passed-in protocol family 858 * Service is registered for any address in the passed-in protocol family
855 */ 859 */
856 int svc_register(const struct svc_serv *serv, const int family, 860 int svc_register(const struct svc_serv *serv, const int family,
857 const unsigned short proto, const unsigned short port) 861 const unsigned short proto, const unsigned short port)
858 { 862 {
859 struct svc_program *progp; 863 struct svc_program *progp;
860 unsigned int i; 864 unsigned int i;
861 int error = 0; 865 int error = 0;
862 866
863 BUG_ON(proto == 0 && port == 0); 867 BUG_ON(proto == 0 && port == 0);
864 868
865 for (progp = serv->sv_program; progp; progp = progp->pg_next) { 869 for (progp = serv->sv_program; progp; progp = progp->pg_next) {
866 for (i = 0; i < progp->pg_nvers; i++) { 870 for (i = 0; i < progp->pg_nvers; i++) {
867 if (progp->pg_vers[i] == NULL) 871 if (progp->pg_vers[i] == NULL)
868 continue; 872 continue;
869 873
870 dprintk("svc: svc_register(%sv%d, %s, %u, %u)%s\n", 874 dprintk("svc: svc_register(%sv%d, %s, %u, %u)%s\n",
871 progp->pg_name, 875 progp->pg_name,
872 i, 876 i,
873 proto == IPPROTO_UDP? "udp" : "tcp", 877 proto == IPPROTO_UDP? "udp" : "tcp",
874 port, 878 port,
875 family, 879 family,
876 progp->pg_vers[i]->vs_hidden? 880 progp->pg_vers[i]->vs_hidden?
877 " (but not telling portmap)" : ""); 881 " (but not telling portmap)" : "");
878 882
879 if (progp->pg_vers[i]->vs_hidden) 883 if (progp->pg_vers[i]->vs_hidden)
880 continue; 884 continue;
881 885
882 error = __svc_register(progp->pg_name, progp->pg_prog, 886 error = __svc_register(progp->pg_name, progp->pg_prog,
883 i, family, proto, port); 887 i, family, proto, port);
884 if (error < 0) 888 if (error < 0)
885 break; 889 break;
886 } 890 }
887 } 891 }
888 892
889 return error; 893 return error;
890 } 894 }
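For example, an IPv4 TCP service listening on port 2049 would advertise itself like this (a sketch; real callers pass the actual port of their listener socket):

	error = svc_register(serv, PF_INET, IPPROTO_TCP, 2049);
	if (error < 0)
		printk(KERN_WARNING "svc: rpcbind registration failed (%d)\n",
		       error);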
891 895
892 /* 896 /*
893 * If user space is running rpcbind, it should take the v4 UNSET 897 * If user space is running rpcbind, it should take the v4 UNSET
894 * and clear everything for this [program, version]. If user space 898 * and clear everything for this [program, version]. If user space
895 * is running portmap, it will reject the v4 UNSET, but won't have 899 * is running portmap, it will reject the v4 UNSET, but won't have
896 * any "inet6" entries anyway. So a PMAP_UNSET should be sufficient 900 * any "inet6" entries anyway. So a PMAP_UNSET should be sufficient
897 * in this case to clear all existing entries for [program, version]. 901 * in this case to clear all existing entries for [program, version].
898 */ 902 */
899 static void __svc_unregister(const u32 program, const u32 version, 903 static void __svc_unregister(const u32 program, const u32 version,
900 const char *progname) 904 const char *progname)
901 { 905 {
902 int error; 906 int error;
903 907
904 error = rpcb_v4_register(program, version, NULL, ""); 908 error = rpcb_v4_register(program, version, NULL, "");
905 909
906 /* 910 /*
907 * User space didn't support rpcbind v4, so retry this 911 * User space didn't support rpcbind v4, so retry this
908 * request with the legacy rpcbind v2 protocol. 912 * request with the legacy rpcbind v2 protocol.
909 */ 913 */
910 if (error == -EPROTONOSUPPORT) 914 if (error == -EPROTONOSUPPORT)
911 error = rpcb_register(program, version, 0, 0); 915 error = rpcb_register(program, version, 0, 0);
912 916
913 dprintk("svc: %s(%sv%u), error %d\n", 917 dprintk("svc: %s(%sv%u), error %d\n",
914 __func__, progname, version, error); 918 __func__, progname, version, error);
915 } 919 }
916 920
917 /* 921 /*
918 * All netids, bind addresses and ports registered for [program, version] 922 * All netids, bind addresses and ports registered for [program, version]
919 * are removed from the local rpcbind database (if the service is not 923 * are removed from the local rpcbind database (if the service is not
920 * hidden) to make way for a new instance of the service. 924 * hidden) to make way for a new instance of the service.
921 * 925 *
922 * The result of unregistration is reported via dprintk for those who want 926 * The result of unregistration is reported via dprintk for those who want
923 * verification of the result, but is otherwise not important. 927 * verification of the result, but is otherwise not important.
924 */ 928 */
925 static void svc_unregister(const struct svc_serv *serv) 929 static void svc_unregister(const struct svc_serv *serv)
926 { 930 {
927 struct svc_program *progp; 931 struct svc_program *progp;
928 unsigned long flags; 932 unsigned long flags;
929 unsigned int i; 933 unsigned int i;
930 934
931 clear_thread_flag(TIF_SIGPENDING); 935 clear_thread_flag(TIF_SIGPENDING);
932 936
933 for (progp = serv->sv_program; progp; progp = progp->pg_next) { 937 for (progp = serv->sv_program; progp; progp = progp->pg_next) {
934 for (i = 0; i < progp->pg_nvers; i++) { 938 for (i = 0; i < progp->pg_nvers; i++) {
935 if (progp->pg_vers[i] == NULL) 939 if (progp->pg_vers[i] == NULL)
936 continue; 940 continue;
937 if (progp->pg_vers[i]->vs_hidden) 941 if (progp->pg_vers[i]->vs_hidden)
938 continue; 942 continue;
939 943
940 __svc_unregister(progp->pg_prog, i, progp->pg_name); 944 __svc_unregister(progp->pg_prog, i, progp->pg_name);
941 } 945 }
942 } 946 }
943 947
944 spin_lock_irqsave(&current->sighand->siglock, flags); 948 spin_lock_irqsave(&current->sighand->siglock, flags);
945 recalc_sigpending(); 949 recalc_sigpending();
946 spin_unlock_irqrestore(&current->sighand->siglock, flags); 950 spin_unlock_irqrestore(&current->sighand->siglock, flags);
947 } 951 }
948 952
949 /* 953 /*
950 * Printk the given error with the address of the client that caused it. 954 * Printk the given error with the address of the client that caused it.
951 */ 955 */
952 static int 956 static int
953 __attribute__ ((format (printf, 2, 3))) 957 __attribute__ ((format (printf, 2, 3)))
954 svc_printk(struct svc_rqst *rqstp, const char *fmt, ...) 958 svc_printk(struct svc_rqst *rqstp, const char *fmt, ...)
955 { 959 {
956 va_list args; 960 va_list args;
957 int r; 961 int r;
958 char buf[RPC_MAX_ADDRBUFLEN]; 962 char buf[RPC_MAX_ADDRBUFLEN];
959 963
960 if (!net_ratelimit()) 964 if (!net_ratelimit())
961 return 0; 965 return 0;
962 966
963 printk(KERN_WARNING "svc: %s: ", 967 printk(KERN_WARNING "svc: %s: ",
964 svc_print_addr(rqstp, buf, sizeof(buf))); 968 svc_print_addr(rqstp, buf, sizeof(buf)));
965 969
966 va_start(args, fmt); 970 va_start(args, fmt);
967 r = vprintk(fmt, args); 971 r = vprintk(fmt, args);
968 va_end(args); 972 va_end(args);
969 973
970 return r; 974 return r;
971 } 975 }
972 976
973 /* 977 /*
974 * Common routine for processing the RPC request. 978 * Common routine for processing the RPC request.
975 */ 979 */
976 static int 980 static int
977 svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv) 981 svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
978 { 982 {
979 struct svc_program *progp; 983 struct svc_program *progp;
980 struct svc_version *versp = NULL; /* compiler food */ 984 struct svc_version *versp = NULL; /* compiler food */
981 struct svc_procedure *procp = NULL; 985 struct svc_procedure *procp = NULL;
982 struct svc_serv *serv = rqstp->rq_server; 986 struct svc_serv *serv = rqstp->rq_server;
983 kxdrproc_t xdr; 987 kxdrproc_t xdr;
984 __be32 *statp; 988 __be32 *statp;
985 u32 prog, vers, proc; 989 u32 prog, vers, proc;
986 __be32 auth_stat, rpc_stat; 990 __be32 auth_stat, rpc_stat;
987 int auth_res; 991 int auth_res;
988 __be32 *reply_statp; 992 __be32 *reply_statp;
989 993
990 rpc_stat = rpc_success; 994 rpc_stat = rpc_success;
991 995
992 if (argv->iov_len < 6*4) 996 if (argv->iov_len < 6*4)
993 goto err_short_len; 997 goto err_short_len;
994 998
995 /* Will be turned off only in gss privacy case: */ 999 /* Will be turned off only in gss privacy case: */
996 rqstp->rq_splice_ok = 1; 1000 rqstp->rq_splice_ok = 1;
997 /* Will be turned off only when NFSv4 Sessions are used */ 1001 /* Will be turned off only when NFSv4 Sessions are used */
998 rqstp->rq_usedeferral = 1; 1002 rqstp->rq_usedeferral = 1;
999 1003
1000 /* Setup reply header */ 1004 /* Setup reply header */
1001 rqstp->rq_xprt->xpt_ops->xpo_prep_reply_hdr(rqstp); 1005 rqstp->rq_xprt->xpt_ops->xpo_prep_reply_hdr(rqstp);
1002 1006
1003 svc_putu32(resv, rqstp->rq_xid); 1007 svc_putu32(resv, rqstp->rq_xid);
1004 1008
1005 vers = svc_getnl(argv); 1009 vers = svc_getnl(argv);
1006 1010
1007 /* First words of reply: */ 1011 /* First words of reply: */
1008 svc_putnl(resv, 1); /* REPLY */ 1012 svc_putnl(resv, 1); /* REPLY */
1009 1013
1010 if (vers != 2) /* RPC version number */ 1014 if (vers != 2) /* RPC version number */
1011 goto err_bad_rpc; 1015 goto err_bad_rpc;
1012 1016
1013 /* Save position in case we later decide to reject: */ 1017 /* Save position in case we later decide to reject: */
1014 reply_statp = resv->iov_base + resv->iov_len; 1018 reply_statp = resv->iov_base + resv->iov_len;
1015 1019
1016 svc_putnl(resv, 0); /* ACCEPT */ 1020 svc_putnl(resv, 0); /* ACCEPT */
1017 1021
1018 rqstp->rq_prog = prog = svc_getnl(argv); /* program number */ 1022 rqstp->rq_prog = prog = svc_getnl(argv); /* program number */
1019 rqstp->rq_vers = vers = svc_getnl(argv); /* version number */ 1023 rqstp->rq_vers = vers = svc_getnl(argv); /* version number */
1020 rqstp->rq_proc = proc = svc_getnl(argv); /* procedure number */ 1024 rqstp->rq_proc = proc = svc_getnl(argv); /* procedure number */
1021 1025
1022 progp = serv->sv_program; 1026 progp = serv->sv_program;
1023 1027
1024 for (progp = serv->sv_program; progp; progp = progp->pg_next) 1028 for (progp = serv->sv_program; progp; progp = progp->pg_next)
1025 if (prog == progp->pg_prog) 1029 if (prog == progp->pg_prog)
1026 break; 1030 break;
1027 1031
1028 /* 1032 /*
1029 * Decode auth data, and add verifier to reply buffer. 1033 * Decode auth data, and add verifier to reply buffer.
1030 * We do this before anything else in order to get a decent 1034 * We do this before anything else in order to get a decent
1031 * auth verifier. 1035 * auth verifier.
1032 */ 1036 */
1033 auth_res = svc_authenticate(rqstp, &auth_stat); 1037 auth_res = svc_authenticate(rqstp, &auth_stat);
1034 /* Also give the program a chance to reject this call: */ 1038 /* Also give the program a chance to reject this call: */
1035 if (auth_res == SVC_OK && progp) { 1039 if (auth_res == SVC_OK && progp) {
1036 auth_stat = rpc_autherr_badcred; 1040 auth_stat = rpc_autherr_badcred;
1037 auth_res = progp->pg_authenticate(rqstp); 1041 auth_res = progp->pg_authenticate(rqstp);
1038 } 1042 }
1039 switch (auth_res) { 1043 switch (auth_res) {
1040 case SVC_OK: 1044 case SVC_OK:
1041 break; 1045 break;
1042 case SVC_GARBAGE: 1046 case SVC_GARBAGE:
1043 goto err_garbage; 1047 goto err_garbage;
1044 case SVC_SYSERR: 1048 case SVC_SYSERR:
1045 rpc_stat = rpc_system_err; 1049 rpc_stat = rpc_system_err;
1046 goto err_bad; 1050 goto err_bad;
1047 case SVC_DENIED: 1051 case SVC_DENIED:
1048 goto err_bad_auth; 1052 goto err_bad_auth;
1049 case SVC_DROP: 1053 case SVC_DROP:
1050 goto dropit; 1054 goto dropit;
1051 case SVC_COMPLETE: 1055 case SVC_COMPLETE:
1052 goto sendit; 1056 goto sendit;
1053 } 1057 }
1054 1058
1055 if (progp == NULL) 1059 if (progp == NULL)
1056 goto err_bad_prog; 1060 goto err_bad_prog;
1057 1061
1058 if (vers >= progp->pg_nvers || 1062 if (vers >= progp->pg_nvers ||
1059 !(versp = progp->pg_vers[vers])) 1063 !(versp = progp->pg_vers[vers]))
1060 goto err_bad_vers; 1064 goto err_bad_vers;
1061 1065
1062 procp = versp->vs_proc + proc; 1066 procp = versp->vs_proc + proc;
1063 if (proc >= versp->vs_nproc || !procp->pc_func) 1067 if (proc >= versp->vs_nproc || !procp->pc_func)
1064 goto err_bad_proc; 1068 goto err_bad_proc;
1065 rqstp->rq_procinfo = procp; 1069 rqstp->rq_procinfo = procp;
1066 1070
1067 /* Syntactic check complete */ 1071 /* Syntactic check complete */
1068 serv->sv_stats->rpccnt++; 1072 serv->sv_stats->rpccnt++;
1069 1073
1070 /* Build the reply header. */ 1074 /* Build the reply header. */
1071 statp = resv->iov_base +resv->iov_len; 1075 statp = resv->iov_base +resv->iov_len;
1072 svc_putnl(resv, RPC_SUCCESS); 1076 svc_putnl(resv, RPC_SUCCESS);
1073 1077
1074 /* Bump per-procedure stats counter */ 1078 /* Bump per-procedure stats counter */
1075 procp->pc_count++; 1079 procp->pc_count++;
1076 1080
1077 /* Initialize storage for argp and resp */ 1081 /* Initialize storage for argp and resp */
1078 memset(rqstp->rq_argp, 0, procp->pc_argsize); 1082 memset(rqstp->rq_argp, 0, procp->pc_argsize);
1079 memset(rqstp->rq_resp, 0, procp->pc_ressize); 1083 memset(rqstp->rq_resp, 0, procp->pc_ressize);
1080 1084
1081 /* un-reserve some of the out-queue now that we have a 1085 /* un-reserve some of the out-queue now that we have a
1082 * better idea of reply size 1086 * better idea of reply size
1083 */ 1087 */
1084 if (procp->pc_xdrressize) 1088 if (procp->pc_xdrressize)
1085 svc_reserve_auth(rqstp, procp->pc_xdrressize<<2); 1089 svc_reserve_auth(rqstp, procp->pc_xdrressize<<2);
1086 1090
1087 /* Call the function that processes the request. */ 1091 /* Call the function that processes the request. */
1088 if (!versp->vs_dispatch) { 1092 if (!versp->vs_dispatch) {
1089 /* Decode arguments */ 1093 /* Decode arguments */
1090 xdr = procp->pc_decode; 1094 xdr = procp->pc_decode;
1091 if (xdr && !xdr(rqstp, argv->iov_base, rqstp->rq_argp)) 1095 if (xdr && !xdr(rqstp, argv->iov_base, rqstp->rq_argp))
1092 goto err_garbage; 1096 goto err_garbage;
1093 1097
1094 *statp = procp->pc_func(rqstp, rqstp->rq_argp, rqstp->rq_resp); 1098 *statp = procp->pc_func(rqstp, rqstp->rq_argp, rqstp->rq_resp);
1095 1099
1096 /* Encode reply */ 1100 /* Encode reply */
1097 if (*statp == rpc_drop_reply) { 1101 if (*statp == rpc_drop_reply) {
1098 if (procp->pc_release) 1102 if (procp->pc_release)
1099 procp->pc_release(rqstp, NULL, rqstp->rq_resp); 1103 procp->pc_release(rqstp, NULL, rqstp->rq_resp);
1100 goto dropit; 1104 goto dropit;
1101 } 1105 }
1102 if (*statp == rpc_success && (xdr = procp->pc_encode) 1106 if (*statp == rpc_success && (xdr = procp->pc_encode)
1103 && !xdr(rqstp, resv->iov_base+resv->iov_len, rqstp->rq_resp)) { 1107 && !xdr(rqstp, resv->iov_base+resv->iov_len, rqstp->rq_resp)) {
1104 dprintk("svc: failed to encode reply\n"); 1108 dprintk("svc: failed to encode reply\n");
1105 /* serv->sv_stats->rpcsystemerr++; */ 1109 /* serv->sv_stats->rpcsystemerr++; */
1106 *statp = rpc_system_err; 1110 *statp = rpc_system_err;
1107 } 1111 }
1108 } else { 1112 } else {
1109 dprintk("svc: calling dispatcher\n"); 1113 dprintk("svc: calling dispatcher\n");
1110 if (!versp->vs_dispatch(rqstp, statp)) { 1114 if (!versp->vs_dispatch(rqstp, statp)) {
1111 /* Release reply info */ 1115 /* Release reply info */
1112 if (procp->pc_release) 1116 if (procp->pc_release)
1113 procp->pc_release(rqstp, NULL, rqstp->rq_resp); 1117 procp->pc_release(rqstp, NULL, rqstp->rq_resp);
1114 goto dropit; 1118 goto dropit;
1115 } 1119 }
1116 } 1120 }
1117 1121
1118 /* Check RPC status result */ 1122 /* Check RPC status result */
1119 if (*statp != rpc_success) 1123 if (*statp != rpc_success)
1120 resv->iov_len = ((void*)statp) - resv->iov_base + 4; 1124 resv->iov_len = ((void*)statp) - resv->iov_base + 4;
1121 1125
1122 /* Release reply info */ 1126 /* Release reply info */
1123 if (procp->pc_release) 1127 if (procp->pc_release)
1124 procp->pc_release(rqstp, NULL, rqstp->rq_resp); 1128 procp->pc_release(rqstp, NULL, rqstp->rq_resp);
1125 1129
1126 if (procp->pc_encode == NULL) 1130 if (procp->pc_encode == NULL)
1127 goto dropit; 1131 goto dropit;
1128 1132
1129 sendit: 1133 sendit:
1130 if (svc_authorise(rqstp)) 1134 if (svc_authorise(rqstp))
1131 goto dropit; 1135 goto dropit;
1132 return 1; /* Caller can now send it */ 1136 return 1; /* Caller can now send it */
1133 1137
1134 dropit: 1138 dropit:
1135 svc_authorise(rqstp); /* doesn't hurt to call this twice */ 1139 svc_authorise(rqstp); /* doesn't hurt to call this twice */
1136 dprintk("svc: svc_process dropit\n"); 1140 dprintk("svc: svc_process dropit\n");
1137 svc_drop(rqstp); 1141 svc_drop(rqstp);
1138 return 0; 1142 return 0;
1139 1143
1140 err_short_len: 1144 err_short_len:
1141 svc_printk(rqstp, "short len %Zd, dropping request\n", 1145 svc_printk(rqstp, "short len %Zd, dropping request\n",
1142 argv->iov_len); 1146 argv->iov_len);
1143 1147
1144 goto dropit; /* drop request */ 1148 goto dropit; /* drop request */
1145 1149
1146 err_bad_rpc: 1150 err_bad_rpc:
1147 serv->sv_stats->rpcbadfmt++; 1151 serv->sv_stats->rpcbadfmt++;
1148 svc_putnl(resv, 1); /* REJECT */ 1152 svc_putnl(resv, 1); /* REJECT */
1149 svc_putnl(resv, 0); /* RPC_MISMATCH */ 1153 svc_putnl(resv, 0); /* RPC_MISMATCH */
1150 svc_putnl(resv, 2); /* Only RPCv2 supported */ 1154 svc_putnl(resv, 2); /* Only RPCv2 supported */
1151 svc_putnl(resv, 2); 1155 svc_putnl(resv, 2);
1152 goto sendit; 1156 goto sendit;
1153 1157
1154 err_bad_auth: 1158 err_bad_auth:
1155 dprintk("svc: authentication failed (%d)\n", ntohl(auth_stat)); 1159 dprintk("svc: authentication failed (%d)\n", ntohl(auth_stat));
1156 serv->sv_stats->rpcbadauth++; 1160 serv->sv_stats->rpcbadauth++;
1157 /* Restore write pointer to location of accept status: */ 1161 /* Restore write pointer to location of accept status: */
1158 xdr_ressize_check(rqstp, reply_statp); 1162 xdr_ressize_check(rqstp, reply_statp);
1159 svc_putnl(resv, 1); /* REJECT */ 1163 svc_putnl(resv, 1); /* REJECT */
1160 svc_putnl(resv, 1); /* AUTH_ERROR */ 1164 svc_putnl(resv, 1); /* AUTH_ERROR */
1161 svc_putnl(resv, ntohl(auth_stat)); /* status */ 1165 svc_putnl(resv, ntohl(auth_stat)); /* status */
1162 goto sendit; 1166 goto sendit;
1163 1167
1164 err_bad_prog: 1168 err_bad_prog:
1165 dprintk("svc: unknown program %d\n", prog); 1169 dprintk("svc: unknown program %d\n", prog);
1166 serv->sv_stats->rpcbadfmt++; 1170 serv->sv_stats->rpcbadfmt++;
1167 svc_putnl(resv, RPC_PROG_UNAVAIL); 1171 svc_putnl(resv, RPC_PROG_UNAVAIL);
1168 goto sendit; 1172 goto sendit;
1169 1173
1170 err_bad_vers: 1174 err_bad_vers:
1171 svc_printk(rqstp, "unknown version (%d for prog %d, %s)\n", 1175 svc_printk(rqstp, "unknown version (%d for prog %d, %s)\n",
1172 vers, prog, progp->pg_name); 1176 vers, prog, progp->pg_name);
1173 1177
1174 serv->sv_stats->rpcbadfmt++; 1178 serv->sv_stats->rpcbadfmt++;
1175 svc_putnl(resv, RPC_PROG_MISMATCH); 1179 svc_putnl(resv, RPC_PROG_MISMATCH);
1176 svc_putnl(resv, progp->pg_lovers); 1180 svc_putnl(resv, progp->pg_lovers);
1177 svc_putnl(resv, progp->pg_hivers); 1181 svc_putnl(resv, progp->pg_hivers);
1178 goto sendit; 1182 goto sendit;
1179 1183
1180 err_bad_proc: 1184 err_bad_proc:
1181 svc_printk(rqstp, "unknown procedure (%d)\n", proc); 1185 svc_printk(rqstp, "unknown procedure (%d)\n", proc);
1182 1186
1183 serv->sv_stats->rpcbadfmt++; 1187 serv->sv_stats->rpcbadfmt++;
1184 svc_putnl(resv, RPC_PROC_UNAVAIL); 1188 svc_putnl(resv, RPC_PROC_UNAVAIL);
1185 goto sendit; 1189 goto sendit;
1186 1190
1187 err_garbage: 1191 err_garbage:
1188 svc_printk(rqstp, "failed to decode args\n"); 1192 svc_printk(rqstp, "failed to decode args\n");
1189 1193
1190 rpc_stat = rpc_garbage_args; 1194 rpc_stat = rpc_garbage_args;
1191 err_bad: 1195 err_bad:
1192 serv->sv_stats->rpcbadfmt++; 1196 serv->sv_stats->rpcbadfmt++;
1193 svc_putnl(resv, ntohl(rpc_stat)); 1197 svc_putnl(resv, ntohl(rpc_stat));
1194 goto sendit; 1198 goto sendit;
1195 } 1199 }
1196 EXPORT_SYMBOL_GPL(svc_process); 1200 EXPORT_SYMBOL_GPL(svc_process);
1197 1201
1198 /* 1202 /*
1199 * Process the RPC request. 1203 * Process the RPC request.
1200 */ 1204 */
1201 int 1205 int
1202 svc_process(struct svc_rqst *rqstp) 1206 svc_process(struct svc_rqst *rqstp)
1203 { 1207 {
1204 struct kvec *argv = &rqstp->rq_arg.head[0]; 1208 struct kvec *argv = &rqstp->rq_arg.head[0];
1205 struct kvec *resv = &rqstp->rq_res.head[0]; 1209 struct kvec *resv = &rqstp->rq_res.head[0];
1206 struct svc_serv *serv = rqstp->rq_server; 1210 struct svc_serv *serv = rqstp->rq_server;
1207 u32 dir; 1211 u32 dir;
1208 int error; 1212 int error;
1209 1213
1210 /* 1214 /*
1211 * Setup response xdr_buf. 1215 * Setup response xdr_buf.
1212 * Initially it has just one page 1216 * Initially it has just one page
1213 */ 1217 */
1214 rqstp->rq_resused = 1; 1218 rqstp->rq_resused = 1;
1215 resv->iov_base = page_address(rqstp->rq_respages[0]); 1219 resv->iov_base = page_address(rqstp->rq_respages[0]);
1216 resv->iov_len = 0; 1220 resv->iov_len = 0;
1217 rqstp->rq_res.pages = rqstp->rq_respages + 1; 1221 rqstp->rq_res.pages = rqstp->rq_respages + 1;
1218 rqstp->rq_res.len = 0; 1222 rqstp->rq_res.len = 0;
1219 rqstp->rq_res.page_base = 0; 1223 rqstp->rq_res.page_base = 0;
1220 rqstp->rq_res.page_len = 0; 1224 rqstp->rq_res.page_len = 0;
1221 rqstp->rq_res.buflen = PAGE_SIZE; 1225 rqstp->rq_res.buflen = PAGE_SIZE;
1222 rqstp->rq_res.tail[0].iov_base = NULL; 1226 rqstp->rq_res.tail[0].iov_base = NULL;
1223 rqstp->rq_res.tail[0].iov_len = 0; 1227 rqstp->rq_res.tail[0].iov_len = 0;
1224 1228
1225 rqstp->rq_xid = svc_getu32(argv); 1229 rqstp->rq_xid = svc_getu32(argv);
1226 1230
1227 dir = svc_getnl(argv); 1231 dir = svc_getnl(argv);
1228 if (dir != 0) { 1232 if (dir != 0) {
1229 /* direction != CALL */ 1233 /* direction != CALL */
1230 svc_printk(rqstp, "bad direction %d, dropping request\n", dir); 1234 svc_printk(rqstp, "bad direction %d, dropping request\n", dir);
1231 serv->sv_stats->rpcbadfmt++; 1235 serv->sv_stats->rpcbadfmt++;
1232 svc_drop(rqstp); 1236 svc_drop(rqstp);
1233 return 0; 1237 return 0;
1234 } 1238 }
1235 1239
1236 error = svc_process_common(rqstp, argv, resv); 1240 error = svc_process_common(rqstp, argv, resv);
1237 if (error <= 0) 1241 if (error <= 0)
1238 return error; 1242 return error;
1239 1243
1240 return svc_send(rqstp); 1244 return svc_send(rqstp);
1241 } 1245 }
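A sketch of the per-thread receive loop that feeds svc_process(), assuming the usual svc_recv()-based pattern; the timeout and error handling mirror what nfsd-style threads do, but the details vary per service:

	for (;;) {
		long err;

		/* wait up to an hour for a request to arrive */
		err = svc_recv(rqstp, 60 * 60 * HZ);
		if (err == -EAGAIN)
			continue;
		if (err == -EINTR)
			break;	/* signalled, e.g. SIGINT from svc_set_num_threads() */
		svc_process(rqstp);	/* dispatch the call and send the reply */
	}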
1242 1246
1243 #if defined(CONFIG_NFS_V4_1) 1247 #if defined(CONFIG_NFS_V4_1)
1244 /* 1248 /*
1245 * Process a backchannel RPC request that arrived over an existing 1249 * Process a backchannel RPC request that arrived over an existing
1246 * outbound connection 1250 * outbound connection
1247 */ 1251 */
1248 int 1252 int
1249 bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req, 1253 bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req,
1250 struct svc_rqst *rqstp) 1254 struct svc_rqst *rqstp)
1251 { 1255 {
1252 struct kvec *argv = &rqstp->rq_arg.head[0]; 1256 struct kvec *argv = &rqstp->rq_arg.head[0];
1253 struct kvec *resv = &rqstp->rq_res.head[0]; 1257 struct kvec *resv = &rqstp->rq_res.head[0];
1254 int error; 1258 int error;
1255 1259
1256 /* Build the svc_rqst used by the common processing routine */ 1260 /* Build the svc_rqst used by the common processing routine */
1257 rqstp->rq_xid = req->rq_xid; 1261 rqstp->rq_xid = req->rq_xid;
1258 rqstp->rq_prot = req->rq_xprt->prot; 1262 rqstp->rq_prot = req->rq_xprt->prot;
1259 rqstp->rq_server = serv; 1263 rqstp->rq_server = serv;
1260 1264
1261 rqstp->rq_addrlen = sizeof(req->rq_xprt->addr); 1265 rqstp->rq_addrlen = sizeof(req->rq_xprt->addr);
1262 memcpy(&rqstp->rq_addr, &req->rq_xprt->addr, rqstp->rq_addrlen); 1266 memcpy(&rqstp->rq_addr, &req->rq_xprt->addr, rqstp->rq_addrlen);
1263 memcpy(&rqstp->rq_arg, &req->rq_rcv_buf, sizeof(rqstp->rq_arg)); 1267 memcpy(&rqstp->rq_arg, &req->rq_rcv_buf, sizeof(rqstp->rq_arg));
1264 memcpy(&rqstp->rq_res, &req->rq_snd_buf, sizeof(rqstp->rq_res)); 1268 memcpy(&rqstp->rq_res, &req->rq_snd_buf, sizeof(rqstp->rq_res));
1265 1269
1266 /* reset result send buffer "put" position */ 1270 /* reset result send buffer "put" position */
1267 resv->iov_len = 0; 1271 resv->iov_len = 0;
1268 1272
1269 if (rqstp->rq_prot != IPPROTO_TCP) { 1273 if (rqstp->rq_prot != IPPROTO_TCP) {
1270 printk(KERN_ERR "No support for Non-TCP transports!\n"); 1274 printk(KERN_ERR "No support for Non-TCP transports!\n");
1271 BUG(); 1275 BUG();
1272 } 1276 }
1273 1277
1274 /* 1278 /*
1275 * Skip the next two words because they've already been 1279 * Skip the next two words because they've already been
1276 * processed in the transport 1280 * processed in the transport
1277 */ 1281 */
1278 svc_getu32(argv); /* XID */ 1282 svc_getu32(argv); /* XID */
1279 svc_getnl(argv); /* CALLDIR */ 1283 svc_getnl(argv); /* CALLDIR */
1280 1284
1281 error = svc_process_common(rqstp, argv, resv); 1285 error = svc_process_common(rqstp, argv, resv);
1282 if (error <= 0) 1286 if (error <= 0)
1283 return error; 1287 return error;
1284 1288
1285 memcpy(&req->rq_snd_buf, &rqstp->rq_res, sizeof(req->rq_snd_buf)); 1289 memcpy(&req->rq_snd_buf, &rqstp->rq_res, sizeof(req->rq_snd_buf));
1286 return bc_send(req); 1290 return bc_send(req);
1287 } 1291 }
1288 EXPORT_SYMBOL(bc_svc_process); 1292 EXPORT_SYMBOL(bc_svc_process);
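A sketch of the intended caller, loosely following the nfs41 callback service thread: the transport queues backchannel rpc_rqst's on the serv, and the callback thread dequeues them and hands each to bc_svc_process(). The field names (sv_cb_lock, sv_cb_list, rq_bc_list) are those used by the nfs41 patch series; treat this as illustrative:

	struct rpc_rqst *req;
	int error;

	spin_lock_bh(&serv->sv_cb_lock);
	if (!list_empty(&serv->sv_cb_list)) {
		req = list_first_entry(&serv->sv_cb_list,
				       struct rpc_rqst, rq_bc_list);
		list_del(&req->rq_bc_list);
		spin_unlock_bh(&serv->sv_cb_lock);
		error = bc_svc_process(serv, req, rqstp);
	} else
		spin_unlock_bh(&serv->sv_cb_lock);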
1289 #endif /* CONFIG_NFS_V4_1 */ 1293 #endif /* CONFIG_NFS_V4_1 */
1290 1294
1291 /* 1295 /*
1292 * Return (transport-specific) limit on the rpc payload. 1296 * Return (transport-specific) limit on the rpc payload.
1293 */ 1297 */
1294 u32 svc_max_payload(const struct svc_rqst *rqstp) 1298 u32 svc_max_payload(const struct svc_rqst *rqstp)
1295 { 1299 {
1296 u32 max = rqstp->rq_xprt->xpt_class->xcl_max_payload; 1300 u32 max = rqstp->rq_xprt->xpt_class->xcl_max_payload;
1297 1301
1298 if (rqstp->rq_server->sv_max_payload < max) 1302 if (rqstp->rq_server->sv_max_payload < max)
1299 max = rqstp->rq_server->sv_max_payload; 1303 max = rqstp->rq_server->sv_max_payload;
1300 return max; 1304 return max;
1301 } 1305 }
1302 EXPORT_SYMBOL_GPL(svc_max_payload); 1306 EXPORT_SYMBOL_GPL(svc_max_payload);
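Typical use is clamping a client-supplied count to what the transport can carry, e.g. in a READ handler; rd_length here is a hypothetical request field:

	u32 maxcount = svc_max_payload(rqstp);

	if (read->rd_length > maxcount)
		read->rd_length = maxcount;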
1303 1307