Commit 58f9612c6ea858f532021a0ce42ec53cb0a493b3

Authored by Trond Myklebust
1 parent d9b6cd9460

SUNRPC: Move remaining RPC client related task initialisation into clnt.c

Now that rpc_run_task() is the sole entry point for RPC calls, we can move
the remaining rpc_client-related initialisation of struct rpc_task from
sched.c into clnt.c.

Also move rpc_killall_tasks() into the same file, since it too is
related to the rpc_clnt.

Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>

Showing 3 changed files with 89 additions and 73 deletions (inline diff view)

include/linux/sunrpc/clnt.h
1 /* 1 /*
2 * linux/include/linux/sunrpc/clnt.h 2 * linux/include/linux/sunrpc/clnt.h
3 * 3 *
4 * Declarations for the high-level RPC client interface 4 * Declarations for the high-level RPC client interface
5 * 5 *
6 * Copyright (C) 1995, 1996, Olaf Kirch <okir@monad.swb.de> 6 * Copyright (C) 1995, 1996, Olaf Kirch <okir@monad.swb.de>
7 */ 7 */
8 8
9 #ifndef _LINUX_SUNRPC_CLNT_H 9 #ifndef _LINUX_SUNRPC_CLNT_H
10 #define _LINUX_SUNRPC_CLNT_H 10 #define _LINUX_SUNRPC_CLNT_H
11 11
12 #include <linux/socket.h> 12 #include <linux/socket.h>
13 #include <linux/in.h> 13 #include <linux/in.h>
14 #include <linux/in6.h> 14 #include <linux/in6.h>
15 15
16 #include <linux/sunrpc/msg_prot.h> 16 #include <linux/sunrpc/msg_prot.h>
17 #include <linux/sunrpc/sched.h> 17 #include <linux/sunrpc/sched.h>
18 #include <linux/sunrpc/xprt.h> 18 #include <linux/sunrpc/xprt.h>
19 #include <linux/sunrpc/auth.h> 19 #include <linux/sunrpc/auth.h>
20 #include <linux/sunrpc/stats.h> 20 #include <linux/sunrpc/stats.h>
21 #include <linux/sunrpc/xdr.h> 21 #include <linux/sunrpc/xdr.h>
22 #include <linux/sunrpc/timer.h> 22 #include <linux/sunrpc/timer.h>
23 #include <asm/signal.h> 23 #include <asm/signal.h>
24 #include <linux/path.h> 24 #include <linux/path.h>
25 #include <net/ipv6.h> 25 #include <net/ipv6.h>
26 26
27 struct rpc_inode; 27 struct rpc_inode;
28 28
/*
 * The high-level client handle
 *
 * One rpc_clnt exists per (program, version, transport) binding; clones
 * share the parent's transport via cl_parent.  cl_lock protects the
 * cl_tasks list of tasks currently using this client.
 */
struct rpc_clnt {
	struct kref		cl_kref;	/* Number of references */
	struct list_head	cl_clients;	/* Global list of clients */
	struct list_head	cl_tasks;	/* List of tasks */
	spinlock_t		cl_lock;	/* spinlock */
	struct rpc_xprt *	cl_xprt;	/* transport */
	struct rpc_procinfo *	cl_procinfo;	/* procedure info */
	u32			cl_prog,	/* RPC program number */
				cl_vers,	/* RPC version number */
				cl_maxproc;	/* max procedure number */

	char *			cl_server;	/* server machine name */
	char *			cl_protname;	/* protocol name */
	struct rpc_auth *	cl_auth;	/* authenticator */
	struct rpc_stat *	cl_stats;	/* per-program statistics */
	struct rpc_iostats *	cl_metrics;	/* per-client statistics */

	unsigned int		cl_softrtry : 1,/* soft timeouts */
				cl_discrtry : 1,/* disconnect before retry */
				cl_autobind : 1,/* use getport() */
				cl_chatty   : 1;/* be verbose */

	struct rpc_rtt *	cl_rtt;		/* RTO estimator data */
	const struct rpc_timeout *cl_timeout;	/* Timeout strategy */

	int			cl_nodelen;	/* nodename length */
	char			cl_nodename[UNX_MAXNODENAME];
	struct path		cl_path;	/* rpc_pipefs directory */
	struct rpc_clnt *	cl_parent;	/* Points to parent of clones */
	struct rpc_rtt		cl_rtt_default;
	struct rpc_timeout	cl_timeout_default;
	struct rpc_program *	cl_program;
	char			cl_inline_name[32];	/* short-name storage for cl_server */
	char			*cl_principal;	/* target to authenticate to */
};
67 67
/*
 * General RPC program info
 *
 * Static description of an RPC program: its version table, statistics
 * block, and the rpc_pipefs directory its clients are created under.
 */
#define RPC_MAXVERSION		4
struct rpc_program {
	char *			name;		/* protocol name */
	u32			number;		/* program number */
	unsigned int		nrvers;		/* number of versions */
	struct rpc_version **	version;	/* version array */
	struct rpc_stat *	stats;		/* statistics */
	char *			pipe_dir_name;	/* path to rpc_pipefs dir */
};
80 80
/* One supported version of an RPC program and its procedure table. */
struct rpc_version {
	u32			number;		/* version number */
	unsigned int		nrprocs;	/* number of procs */
	struct rpc_procinfo *	procs;		/* procedure array */
};
86 86
/*
 * Procedure information
 *
 * One entry per RPC procedure: the XDR codec callbacks, header-size
 * hints (counted in 32-bit words), and bookkeeping fields for call
 * statistics and RTT estimation.
 */
struct rpc_procinfo {
	u32			p_proc;		/* RPC procedure number */
	kxdrproc_t		p_encode;	/* XDR encode function */
	kxdrproc_t		p_decode;	/* XDR decode function */
	unsigned int		p_arglen;	/* argument hdr length (u32) */
	unsigned int		p_replen;	/* reply hdr length (u32) */
	unsigned int		p_count;	/* call count */
	unsigned int		p_timer;	/* Which RTT timer to use */
	u32			p_statidx;	/* Which procedure to account */
	char *			p_name;		/* name of procedure */
};
101 101
102 #ifdef __KERNEL__ 102 #ifdef __KERNEL__
103 103
/*
 * Arguments for rpc_create(): transport protocol and server address,
 * program/version selection, authentication flavour, and behaviour
 * flags (RPC_CLNT_CREATE_*).
 */
struct rpc_create_args {
	int			protocol;
	struct sockaddr		*address;	/* server address */
	size_t			addrsize;	/* size of *address */
	struct sockaddr		*saddress;	/* optional source address */
	const struct rpc_timeout *timeout;	/* NULL => transport default */
	char			*servername;
	struct rpc_program	*program;
	u32			prognumber;	/* overrides program->number */
	u32			version;
	rpc_authflavor_t	authflavor;
	unsigned long		flags;
	char			*client_name;
	struct svc_xprt		*bc_xprt;	/* NFSv4.1 backchannel */
};
119 119
/* Values for "flags" field */
#define RPC_CLNT_CREATE_HARDRTRY	(1UL << 0)
#define RPC_CLNT_CREATE_AUTOBIND	(1UL << 2)
#define RPC_CLNT_CREATE_NONPRIVPORT	(1UL << 3)
#define RPC_CLNT_CREATE_NOPING		(1UL << 4)
#define RPC_CLNT_CREATE_DISCRTRY	(1UL << 5)
#define RPC_CLNT_CREATE_QUIET		(1UL << 6)

/* Client lifecycle */
struct rpc_clnt *rpc_create(struct rpc_create_args *args);
struct rpc_clnt	*rpc_bind_new_program(struct rpc_clnt *,
				struct rpc_program *, u32);
struct rpc_clnt *rpc_clone_client(struct rpc_clnt *);
void		rpc_shutdown_client(struct rpc_clnt *);
void		rpc_release_client(struct rpc_clnt *);
void		rpc_task_release_client(struct rpc_task *);

/* rpcbind/portmapper registration and lookup */
int		rpcb_register(u32, u32, int, unsigned short);
int		rpcb_v4_register(const u32 program, const u32 version,
				 const struct sockaddr *address,
				 const char *netid);
int		rpcb_getport_sync(struct sockaddr_in *, u32, u32, int);
void		rpcb_getport_async(struct rpc_task *);

/* Issuing and controlling RPC calls */
void		rpc_call_start(struct rpc_task *);
int		rpc_call_async(struct rpc_clnt *clnt,
			       const struct rpc_message *msg, int flags,
			       const struct rpc_call_ops *tk_ops,
			       void *calldata);
int		rpc_call_sync(struct rpc_clnt *clnt,
			      const struct rpc_message *msg, int flags);
struct rpc_task *rpc_call_null(struct rpc_clnt *clnt, struct rpc_cred *cred,
			       int flags);
int		rpc_restart_call_prepare(struct rpc_task *);
int		rpc_restart_call(struct rpc_task *);
void		rpc_setbufsize(struct rpc_clnt *, unsigned int, unsigned int);
size_t		rpc_max_payload(struct rpc_clnt *);
void		rpc_force_rebind(struct rpc_clnt *);
size_t		rpc_peeraddr(struct rpc_clnt *, struct sockaddr *, size_t);
const char	*rpc_peeraddr2str(struct rpc_clnt *, enum rpc_display_format_t);

/* Address presentation/parsing helpers */
size_t		rpc_ntop(const struct sockaddr *, char *, const size_t);
size_t		rpc_pton(const char *, const size_t,
			 struct sockaddr *, const size_t);
char *		rpc_sockaddr2uaddr(const struct sockaddr *);
size_t		rpc_uaddr2sockaddr(const char *, const size_t,
				   struct sockaddr *, const size_t);
165 166
/**
 * rpc_get_port - extract the port number from an IPv4/IPv6 sockaddr
 * @sap: sockaddr to read
 *
 * Returns the port in host byte order, or 0 for any other address family.
 */
static inline unsigned short rpc_get_port(const struct sockaddr *sap)
{
	if (sap->sa_family == AF_INET)
		return ntohs(((struct sockaddr_in *)sap)->sin_port);
	if (sap->sa_family == AF_INET6)
		return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
	return 0;
}
176 177
/**
 * rpc_set_port - store a port number into an IPv4/IPv6 sockaddr
 * @sap: sockaddr to modify
 * @port: port number in host byte order
 *
 * Address families other than AF_INET/AF_INET6 are silently ignored.
 */
static inline void rpc_set_port(struct sockaddr *sap,
				const unsigned short port)
{
	if (sap->sa_family == AF_INET)
		((struct sockaddr_in *)sap)->sin_port = htons(port);
	else if (sap->sa_family == AF_INET6)
		((struct sockaddr_in6 *)sap)->sin6_port = htons(port);
}
189 190
/* Scope-id suffix used in IPv6 universal addresses, e.g. "fe80::1%2". */
#define IPV6_SCOPE_DELIMITER		'%'
#define IPV6_SCOPE_ID_LEN		sizeof("%nnnnnnnnnn")
192 193
/* Compare only the IPv4 address portion of two sockaddrs. */
static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
				   const struct sockaddr *sap2)
{
	const struct sockaddr_in *a = (const struct sockaddr_in *)sap1;
	const struct sockaddr_in *b = (const struct sockaddr_in *)sap2;

	return a->sin_addr.s_addr == b->sin_addr.s_addr;
}
201 202
/* Copy the IPv4 family and address (not the port) from @src to @dst. */
static inline bool __rpc_copy_addr4(struct sockaddr *dst,
				    const struct sockaddr *src)
{
	const struct sockaddr_in *from = (const struct sockaddr_in *)src;
	struct sockaddr_in *to = (struct sockaddr_in *)dst;

	to->sin_family = from->sin_family;
	to->sin_addr.s_addr = from->sin_addr.s_addr;
	return true;
}
212 213
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
/* Compare only the IPv6 address portion of two sockaddrs. */
static inline bool __rpc_cmp_addr6(const struct sockaddr *sap1,
				   const struct sockaddr *sap2)
{
	const struct sockaddr_in6 *a = (const struct sockaddr_in6 *)sap1;
	const struct sockaddr_in6 *b = (const struct sockaddr_in6 *)sap2;

	return ipv6_addr_equal(&a->sin6_addr, &b->sin6_addr);
}

/* Copy the IPv6 family and address (not port or scope) from @src to @dst. */
static inline bool __rpc_copy_addr6(struct sockaddr *dst,
				    const struct sockaddr *src)
{
	const struct sockaddr_in6 *from = (const struct sockaddr_in6 *)src;
	struct sockaddr_in6 *to = (struct sockaddr_in6 *)dst;

	to->sin6_family = from->sin6_family;
	ipv6_addr_copy(&to->sin6_addr, &from->sin6_addr);
	return true;
}
#else	/* !(CONFIG_IPV6 || CONFIG_IPV6_MODULE) */
/* IPv6 disabled: comparisons never match and copies always fail. */
static inline bool __rpc_cmp_addr6(const struct sockaddr *sap1,
				   const struct sockaddr *sap2)
{
	return false;
}

static inline bool __rpc_copy_addr6(struct sockaddr *dst,
				    const struct sockaddr *src)
{
	return false;
}
#endif	/* !(CONFIG_IPV6 || CONFIG_IPV6_MODULE) */
245 246
/**
 * rpc_cmp_addr - compare the address portion of two sockaddrs.
 * @sap1: first sockaddr
 * @sap2: second sockaddr
 *
 * Just compares the family and address portion. Ignores port, scope, etc.
 * Returns true if the addrs are equal, false if they aren't.
 */
static inline bool rpc_cmp_addr(const struct sockaddr *sap1,
				const struct sockaddr *sap2)
{
	/* Different families can never match. */
	if (sap1->sa_family != sap2->sa_family)
		return false;

	switch (sap1->sa_family) {
	case AF_INET:
		return __rpc_cmp_addr4(sap1, sap2);
	case AF_INET6:
		return __rpc_cmp_addr6(sap1, sap2);
	default:
		return false;
	}
}
267 268
/**
 * rpc_copy_addr - copy the address portion of one sockaddr to another
 * @dst: destination sockaddr
 * @src: source sockaddr
 *
 * Just copies the address portion and family. Ignores port, scope, etc.
 * Caller is responsible for making certain that dst is large enough to hold
 * the address in src. Returns true if address family is supported. Returns
 * false otherwise.
 */
static inline bool rpc_copy_addr(struct sockaddr *dst,
				 const struct sockaddr *src)
{
	if (src->sa_family == AF_INET)
		return __rpc_copy_addr4(dst, src);
	if (src->sa_family == AF_INET6)
		return __rpc_copy_addr6(dst, src);
	return false;
}
289 290
290 /** 291 /**
291 * rpc_get_scope_id - return scopeid for a given sockaddr 292 * rpc_get_scope_id - return scopeid for a given sockaddr
292 * @sa: sockaddr to get scopeid from 293 * @sa: sockaddr to get scopeid from
293 * 294 *
294 * Returns the value of the sin6_scope_id for AF_INET6 addrs, or 0 if 295 * Returns the value of the sin6_scope_id for AF_INET6 addrs, or 0 if
295 * not an AF_INET6 address. 296 * not an AF_INET6 address.
296 */ 297 */
297 static inline u32 rpc_get_scope_id(const struct sockaddr *sa) 298 static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
298 { 299 {
299 if (sa->sa_family != AF_INET6) 300 if (sa->sa_family != AF_INET6)
300 return 0; 301 return 0;
301 302
302 return ((struct sockaddr_in6 *) sa)->sin6_scope_id; 303 return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
303 } 304 }
304 305
305 #endif /* __KERNEL__ */ 306 #endif /* __KERNEL__ */
306 #endif /* _LINUX_SUNRPC_CLNT_H */ 307 #endif /* _LINUX_SUNRPC_CLNT_H */
307 308
1 /* 1 /*
2 * linux/net/sunrpc/clnt.c 2 * linux/net/sunrpc/clnt.c
3 * 3 *
4 * This file contains the high-level RPC interface. 4 * This file contains the high-level RPC interface.
5 * It is modeled as a finite state machine to support both synchronous 5 * It is modeled as a finite state machine to support both synchronous
6 * and asynchronous requests. 6 * and asynchronous requests.
7 * 7 *
8 * - RPC header generation and argument serialization. 8 * - RPC header generation and argument serialization.
9 * - Credential refresh. 9 * - Credential refresh.
10 * - TCP connect handling. 10 * - TCP connect handling.
11 * - Retry of operation when it is suspected the operation failed because 11 * - Retry of operation when it is suspected the operation failed because
12 * of uid squashing on the server, or when the credentials were stale 12 * of uid squashing on the server, or when the credentials were stale
13 * and need to be refreshed, or when a packet was damaged in transit. 13 * and need to be refreshed, or when a packet was damaged in transit.
14 * This may have to be moved to the VFS layer. 14 * This may have to be moved to the VFS layer.
15 * 15 *
16 * NB: BSD uses a more intelligent approach to guessing when a request 16 * NB: BSD uses a more intelligent approach to guessing when a request
17 * or reply has been lost by keeping the RTO estimate for each procedure. 17 * or reply has been lost by keeping the RTO estimate for each procedure.
18 * We currently make do with a constant timeout value. 18 * We currently make do with a constant timeout value.
19 * 19 *
20 * Copyright (C) 1992,1993 Rick Sladkey <jrs@world.std.com> 20 * Copyright (C) 1992,1993 Rick Sladkey <jrs@world.std.com>
21 * Copyright (C) 1995,1996 Olaf Kirch <okir@monad.swb.de> 21 * Copyright (C) 1995,1996 Olaf Kirch <okir@monad.swb.de>
22 */ 22 */
23 23
24 #include <asm/system.h> 24 #include <asm/system.h>
25 25
26 #include <linux/module.h> 26 #include <linux/module.h>
27 #include <linux/types.h> 27 #include <linux/types.h>
28 #include <linux/kallsyms.h> 28 #include <linux/kallsyms.h>
29 #include <linux/mm.h> 29 #include <linux/mm.h>
30 #include <linux/namei.h> 30 #include <linux/namei.h>
31 #include <linux/mount.h> 31 #include <linux/mount.h>
32 #include <linux/slab.h> 32 #include <linux/slab.h>
33 #include <linux/utsname.h> 33 #include <linux/utsname.h>
34 #include <linux/workqueue.h> 34 #include <linux/workqueue.h>
35 #include <linux/in6.h> 35 #include <linux/in6.h>
36 36
37 #include <linux/sunrpc/clnt.h> 37 #include <linux/sunrpc/clnt.h>
38 #include <linux/sunrpc/rpc_pipe_fs.h> 38 #include <linux/sunrpc/rpc_pipe_fs.h>
39 #include <linux/sunrpc/metrics.h> 39 #include <linux/sunrpc/metrics.h>
40 #include <linux/sunrpc/bc_xprt.h> 40 #include <linux/sunrpc/bc_xprt.h>
41 41
42 #include "sunrpc.h" 42 #include "sunrpc.h"
43 43
#ifdef RPC_DEBUG
# define RPCDBG_FACILITY	RPCDBG_CALL
#endif

/* Trace a task's current state-machine step and its tk_status. */
#define dprint_status(t)					\
	dprintk("RPC: %5u %s (status %d)\n", t->tk_pid,		\
			__func__, t->tk_status)

/*
 * All RPC clients are linked into this list
 */
static LIST_HEAD(all_clients);
static DEFINE_SPINLOCK(rpc_client_lock);	/* protects all_clients */

static DECLARE_WAIT_QUEUE_HEAD(destroy_wait);


/*
 * Forward declarations for the RPC call state machine; the handler
 * bodies are defined later in this file.
 */
static void	call_start(struct rpc_task *task);
static void	call_reserve(struct rpc_task *task);
static void	call_reserveresult(struct rpc_task *task);
static void	call_allocate(struct rpc_task *task);
static void	call_decode(struct rpc_task *task);
static void	call_bind(struct rpc_task *task);
static void	call_bind_status(struct rpc_task *task);
static void	call_transmit(struct rpc_task *task);
#if defined(CONFIG_NFS_V4_1)
static void	call_bc_transmit(struct rpc_task *task);
#endif /* CONFIG_NFS_V4_1 */
static void	call_status(struct rpc_task *task);
static void	call_transmit_status(struct rpc_task *task);
static void	call_refresh(struct rpc_task *task);
static void	call_refreshresult(struct rpc_task *task);
static void	call_timeout(struct rpc_task *task);
static void	call_connect(struct rpc_task *task);
static void	call_connect_status(struct rpc_task *task);

static __be32	*rpc_encode_header(struct rpc_task *task);
static __be32	*rpc_verify_header(struct rpc_task *task);
static int	rpc_ping(struct rpc_clnt *clnt);
83 83
/* Add @clnt to the global all_clients list, under rpc_client_lock. */
static void rpc_register_client(struct rpc_clnt *clnt)
{
	spin_lock(&rpc_client_lock);
	list_add(&clnt->cl_clients, &all_clients);
	spin_unlock(&rpc_client_lock);
}
90 90
/* Remove @clnt from the global all_clients list, under rpc_client_lock. */
static void rpc_unregister_client(struct rpc_clnt *clnt)
{
	spin_lock(&rpc_client_lock);
	list_del(&clnt->cl_clients);
	spin_unlock(&rpc_client_lock);
}
97 97
/*
 * Create this client's directory in the rpc_pipefs tree.
 *
 * Looks up @dir_name relative to the root of the rpc_pipefs mount and
 * creates a uniquely-named "clnt%x" subdirectory for @clnt there,
 * storing the resulting mount/dentry pair in clnt->cl_path.  A NULL
 * @dir_name means the client wants no pipefs entry.
 *
 * Returns 0 on success or a negative errno; on failure cl_path is left
 * holding ERR_PTR(-ENOENT) markers.
 */
static int
rpc_setup_pipedir(struct rpc_clnt *clnt, char *dir_name)
{
	static uint32_t clntid;		/* source of unique directory names */
	struct nameidata nd;
	struct path path;
	char name[15];
	struct qstr q = {
		.name = name,
	};
	int error;

	/* Mark the path invalid until a directory is actually created. */
	clnt->cl_path.mnt = ERR_PTR(-ENOENT);
	clnt->cl_path.dentry = ERR_PTR(-ENOENT);
	if (dir_name == NULL)
		return 0;

	path.mnt = rpc_get_mount();
	if (IS_ERR(path.mnt))
		return PTR_ERR(path.mnt);
	error = vfs_path_lookup(path.mnt->mnt_root, path.mnt, dir_name, 0, &nd);
	if (error)
		goto err;

	/* Retry with a fresh clntid until an unused name is found. */
	for (;;) {
		q.len = snprintf(name, sizeof(name), "clnt%x", (unsigned int)clntid++);
		name[sizeof(name) - 1] = '\0';
		q.hash = full_name_hash(q.name, q.len);
		path.dentry = rpc_create_client_dir(nd.path.dentry, &q, clnt);
		if (!IS_ERR(path.dentry))
			break;
		error = PTR_ERR(path.dentry);
		if (error != -EEXIST) {
			printk(KERN_INFO "RPC: Couldn't create pipefs entry"
					" %s/%s, error %d\n",
					dir_name, name, error);
			goto err_path_put;
		}
	}
	path_put(&nd.path);
	clnt->cl_path = path;
	return 0;
err_path_put:
	path_put(&nd.path);
err:
	rpc_put_mount();
	return error;
}
146 146
147 static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, struct rpc_xprt *xprt) 147 static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, struct rpc_xprt *xprt)
148 { 148 {
149 struct rpc_program *program = args->program; 149 struct rpc_program *program = args->program;
150 struct rpc_version *version; 150 struct rpc_version *version;
151 struct rpc_clnt *clnt = NULL; 151 struct rpc_clnt *clnt = NULL;
152 struct rpc_auth *auth; 152 struct rpc_auth *auth;
153 int err; 153 int err;
154 size_t len; 154 size_t len;
155 155
156 /* sanity check the name before trying to print it */ 156 /* sanity check the name before trying to print it */
157 err = -EINVAL; 157 err = -EINVAL;
158 len = strlen(args->servername); 158 len = strlen(args->servername);
159 if (len > RPC_MAXNETNAMELEN) 159 if (len > RPC_MAXNETNAMELEN)
160 goto out_no_rpciod; 160 goto out_no_rpciod;
161 len++; 161 len++;
162 162
163 dprintk("RPC: creating %s client for %s (xprt %p)\n", 163 dprintk("RPC: creating %s client for %s (xprt %p)\n",
164 program->name, args->servername, xprt); 164 program->name, args->servername, xprt);
165 165
166 err = rpciod_up(); 166 err = rpciod_up();
167 if (err) 167 if (err)
168 goto out_no_rpciod; 168 goto out_no_rpciod;
169 err = -EINVAL; 169 err = -EINVAL;
170 if (!xprt) 170 if (!xprt)
171 goto out_no_xprt; 171 goto out_no_xprt;
172 172
173 if (args->version >= program->nrvers) 173 if (args->version >= program->nrvers)
174 goto out_err; 174 goto out_err;
175 version = program->version[args->version]; 175 version = program->version[args->version];
176 if (version == NULL) 176 if (version == NULL)
177 goto out_err; 177 goto out_err;
178 178
179 err = -ENOMEM; 179 err = -ENOMEM;
180 clnt = kzalloc(sizeof(*clnt), GFP_KERNEL); 180 clnt = kzalloc(sizeof(*clnt), GFP_KERNEL);
181 if (!clnt) 181 if (!clnt)
182 goto out_err; 182 goto out_err;
183 clnt->cl_parent = clnt; 183 clnt->cl_parent = clnt;
184 184
185 clnt->cl_server = clnt->cl_inline_name; 185 clnt->cl_server = clnt->cl_inline_name;
186 if (len > sizeof(clnt->cl_inline_name)) { 186 if (len > sizeof(clnt->cl_inline_name)) {
187 char *buf = kmalloc(len, GFP_KERNEL); 187 char *buf = kmalloc(len, GFP_KERNEL);
188 if (buf != NULL) 188 if (buf != NULL)
189 clnt->cl_server = buf; 189 clnt->cl_server = buf;
190 else 190 else
191 len = sizeof(clnt->cl_inline_name); 191 len = sizeof(clnt->cl_inline_name);
192 } 192 }
193 strlcpy(clnt->cl_server, args->servername, len); 193 strlcpy(clnt->cl_server, args->servername, len);
194 194
195 clnt->cl_xprt = xprt; 195 clnt->cl_xprt = xprt;
196 clnt->cl_procinfo = version->procs; 196 clnt->cl_procinfo = version->procs;
197 clnt->cl_maxproc = version->nrprocs; 197 clnt->cl_maxproc = version->nrprocs;
198 clnt->cl_protname = program->name; 198 clnt->cl_protname = program->name;
199 clnt->cl_prog = args->prognumber ? : program->number; 199 clnt->cl_prog = args->prognumber ? : program->number;
200 clnt->cl_vers = version->number; 200 clnt->cl_vers = version->number;
201 clnt->cl_stats = program->stats; 201 clnt->cl_stats = program->stats;
202 clnt->cl_metrics = rpc_alloc_iostats(clnt); 202 clnt->cl_metrics = rpc_alloc_iostats(clnt);
203 err = -ENOMEM; 203 err = -ENOMEM;
204 if (clnt->cl_metrics == NULL) 204 if (clnt->cl_metrics == NULL)
205 goto out_no_stats; 205 goto out_no_stats;
206 clnt->cl_program = program; 206 clnt->cl_program = program;
207 INIT_LIST_HEAD(&clnt->cl_tasks); 207 INIT_LIST_HEAD(&clnt->cl_tasks);
208 spin_lock_init(&clnt->cl_lock); 208 spin_lock_init(&clnt->cl_lock);
209 209
210 if (!xprt_bound(clnt->cl_xprt)) 210 if (!xprt_bound(clnt->cl_xprt))
211 clnt->cl_autobind = 1; 211 clnt->cl_autobind = 1;
212 212
213 clnt->cl_timeout = xprt->timeout; 213 clnt->cl_timeout = xprt->timeout;
214 if (args->timeout != NULL) { 214 if (args->timeout != NULL) {
215 memcpy(&clnt->cl_timeout_default, args->timeout, 215 memcpy(&clnt->cl_timeout_default, args->timeout,
216 sizeof(clnt->cl_timeout_default)); 216 sizeof(clnt->cl_timeout_default));
217 clnt->cl_timeout = &clnt->cl_timeout_default; 217 clnt->cl_timeout = &clnt->cl_timeout_default;
218 } 218 }
219 219
220 clnt->cl_rtt = &clnt->cl_rtt_default; 220 clnt->cl_rtt = &clnt->cl_rtt_default;
221 rpc_init_rtt(&clnt->cl_rtt_default, clnt->cl_timeout->to_initval); 221 rpc_init_rtt(&clnt->cl_rtt_default, clnt->cl_timeout->to_initval);
222 clnt->cl_principal = NULL; 222 clnt->cl_principal = NULL;
223 if (args->client_name) { 223 if (args->client_name) {
224 clnt->cl_principal = kstrdup(args->client_name, GFP_KERNEL); 224 clnt->cl_principal = kstrdup(args->client_name, GFP_KERNEL);
225 if (!clnt->cl_principal) 225 if (!clnt->cl_principal)
226 goto out_no_principal; 226 goto out_no_principal;
227 } 227 }
228 228
229 kref_init(&clnt->cl_kref); 229 kref_init(&clnt->cl_kref);
230 230
231 err = rpc_setup_pipedir(clnt, program->pipe_dir_name); 231 err = rpc_setup_pipedir(clnt, program->pipe_dir_name);
232 if (err < 0) 232 if (err < 0)
233 goto out_no_path; 233 goto out_no_path;
234 234
235 auth = rpcauth_create(args->authflavor, clnt); 235 auth = rpcauth_create(args->authflavor, clnt);
236 if (IS_ERR(auth)) { 236 if (IS_ERR(auth)) {
237 printk(KERN_INFO "RPC: Couldn't create auth handle (flavor %u)\n", 237 printk(KERN_INFO "RPC: Couldn't create auth handle (flavor %u)\n",
238 args->authflavor); 238 args->authflavor);
239 err = PTR_ERR(auth); 239 err = PTR_ERR(auth);
240 goto out_no_auth; 240 goto out_no_auth;
241 } 241 }
242 242
243 /* save the nodename */ 243 /* save the nodename */
244 clnt->cl_nodelen = strlen(init_utsname()->nodename); 244 clnt->cl_nodelen = strlen(init_utsname()->nodename);
245 if (clnt->cl_nodelen > UNX_MAXNODENAME) 245 if (clnt->cl_nodelen > UNX_MAXNODENAME)
246 clnt->cl_nodelen = UNX_MAXNODENAME; 246 clnt->cl_nodelen = UNX_MAXNODENAME;
247 memcpy(clnt->cl_nodename, init_utsname()->nodename, clnt->cl_nodelen); 247 memcpy(clnt->cl_nodename, init_utsname()->nodename, clnt->cl_nodelen);
248 rpc_register_client(clnt); 248 rpc_register_client(clnt);
249 return clnt; 249 return clnt;
250 250
251 out_no_auth: 251 out_no_auth:
252 if (!IS_ERR(clnt->cl_path.dentry)) { 252 if (!IS_ERR(clnt->cl_path.dentry)) {
253 rpc_remove_client_dir(clnt->cl_path.dentry); 253 rpc_remove_client_dir(clnt->cl_path.dentry);
254 rpc_put_mount(); 254 rpc_put_mount();
255 } 255 }
256 out_no_path: 256 out_no_path:
257 kfree(clnt->cl_principal); 257 kfree(clnt->cl_principal);
258 out_no_principal: 258 out_no_principal:
259 rpc_free_iostats(clnt->cl_metrics); 259 rpc_free_iostats(clnt->cl_metrics);
260 out_no_stats: 260 out_no_stats:
261 if (clnt->cl_server != clnt->cl_inline_name) 261 if (clnt->cl_server != clnt->cl_inline_name)
262 kfree(clnt->cl_server); 262 kfree(clnt->cl_server);
263 kfree(clnt); 263 kfree(clnt);
264 out_err: 264 out_err:
265 xprt_put(xprt); 265 xprt_put(xprt);
266 out_no_xprt: 266 out_no_xprt:
267 rpciod_down(); 267 rpciod_down();
268 out_no_rpciod: 268 out_no_rpciod:
269 return ERR_PTR(err); 269 return ERR_PTR(err);
270 } 270 }
271 271
/**
 * rpc_create - create an RPC client and transport with one call
 * @args: rpc_clnt create argument structure
 *
 * Creates and initializes an RPC transport and an RPC client.
 *
 * It can ping the server in order to determine if it is up, and to see if
 * it supports this program and version. RPC_CLNT_CREATE_NOPING disables
 * this behavior so asynchronous tasks can also use rpc_create.
 */
struct rpc_clnt *rpc_create(struct rpc_create_args *args)
{
	struct rpc_xprt *xprt;
	struct rpc_clnt *clnt;
	struct xprt_create xprtargs = {
		.ident = args->protocol,
		.srcaddr = args->saddress,
		.dstaddr = args->address,
		.addrlen = args->addrsize,
		.bc_xprt = args->bc_xprt,
	};
	char servername[48];

	/*
	 * If the caller chooses not to specify a hostname, whip
	 * up a string representation of the passed-in address.
	 * Pointing args->servername at this on-stack buffer is safe
	 * because rpc_new_client() copies the string (strlcpy).
	 */
	if (args->servername == NULL) {
		servername[0] = '\0';
		switch (args->address->sa_family) {
		case AF_INET: {
			struct sockaddr_in *sin =
				(struct sockaddr_in *)args->address;
			snprintf(servername, sizeof(servername), "%pI4",
				 &sin->sin_addr.s_addr);
			break;
		}
		case AF_INET6: {
			struct sockaddr_in6 *sin =
				(struct sockaddr_in6 *)args->address;
			snprintf(servername, sizeof(servername), "%pI6",
				 &sin->sin6_addr);
			break;
		}
		default:
			/* caller wants default server name, but
			 * address family isn't recognized. */
			return ERR_PTR(-EINVAL);
		}
		args->servername = servername;
	}

	xprt = xprt_create_transport(&xprtargs);
	if (IS_ERR(xprt))
		return (struct rpc_clnt *)xprt;

	/*
	 * By default, kernel RPC client connects from a reserved port.
	 * CAP_NET_BIND_SERVICE will not be set for unprivileged requesters,
	 * but it is always enabled for rpciod, which handles the connect
	 * operation.
	 */
	xprt->resvport = 1;
	if (args->flags & RPC_CLNT_CREATE_NONPRIVPORT)
		xprt->resvport = 0;

	/* rpc_new_client() takes ownership of xprt on both success and error */
	clnt = rpc_new_client(args, xprt);
	if (IS_ERR(clnt))
		return clnt;

	if (!(args->flags & RPC_CLNT_CREATE_NOPING)) {
		int err = rpc_ping(clnt);
		if (err != 0) {
			rpc_shutdown_client(clnt);
			return ERR_PTR(err);
		}
	}

	/* Soft retries are the default unless the caller asked for hard */
	clnt->cl_softrtry = 1;
	if (args->flags & RPC_CLNT_CREATE_HARDRTRY)
		clnt->cl_softrtry = 0;

	if (args->flags & RPC_CLNT_CREATE_AUTOBIND)
		clnt->cl_autobind = 1;
	if (args->flags & RPC_CLNT_CREATE_DISCRTRY)
		clnt->cl_discrtry = 1;
	if (!(args->flags & RPC_CLNT_CREATE_QUIET))
		clnt->cl_chatty = 1;

	return clnt;
}
EXPORT_SYMBOL_GPL(rpc_create);
364 364
/*
 * This function clones the RPC client structure. It allows us to share the
 * same transport while varying parameters such as the authentication
 * flavour.
 *
 * The clone starts from a kmemdup() of the original, so every field that
 * must not be shared (task list, lock, metrics, principal, pipefs entry)
 * is explicitly re-initialised below; everything else is inherited.
 */
struct rpc_clnt *
rpc_clone_client(struct rpc_clnt *clnt)
{
	struct rpc_clnt *new;
	int err = -ENOMEM;

	new = kmemdup(clnt, sizeof(*new), GFP_KERNEL);
	if (!new)
		goto out_no_clnt;
	new->cl_parent = clnt;
	/* Turn off autobind on clones */
	new->cl_autobind = 0;
	INIT_LIST_HEAD(&new->cl_tasks);
	spin_lock_init(&new->cl_lock);
	rpc_init_rtt(&new->cl_rtt_default, clnt->cl_timeout->to_initval);
	/* Per-clone iostats; must not share the parent's counters */
	new->cl_metrics = rpc_alloc_iostats(clnt);
	if (new->cl_metrics == NULL)
		goto out_no_stats;
	if (clnt->cl_principal) {
		/* Each client owns its own copy of the principal string */
		new->cl_principal = kstrdup(clnt->cl_principal, GFP_KERNEL);
		if (new->cl_principal == NULL)
			goto out_no_principal;
	}
	kref_init(&new->cl_kref);
	err = rpc_setup_pipedir(new, clnt->cl_program->pipe_dir_name);
	if (err != 0)
		goto out_no_path;
	/* Auth handle, transport and parent are shared: take references */
	if (new->cl_auth)
		atomic_inc(&new->cl_auth->au_count);
	xprt_get(clnt->cl_xprt);
	kref_get(&clnt->cl_kref);
	rpc_register_client(new);
	rpciod_up();
	return new;
out_no_path:
	kfree(new->cl_principal);
out_no_principal:
	rpc_free_iostats(new->cl_metrics);
out_no_stats:
	kfree(new);
out_no_clnt:
	dprintk("RPC: %s: returned error %d\n", __func__, err);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(rpc_clone_client);
415 415
/*
 * Kill all tasks for the given client.
 * XXX: kill their descendants as well?
 */
void rpc_killall_tasks(struct rpc_clnt *clnt)
{
	struct rpc_task *rovr;

	/* Lockless peek: worst case we skip a task that is racing onto
	 * the list, or take the lock below for a just-emptied list. */
	if (list_empty(&clnt->cl_tasks))
		return;
	dprintk("RPC: killing all tasks for client %p\n", clnt);
	/*
	 * Spin lock all_tasks to prevent changes...
	 */
	spin_lock(&clnt->cl_lock);
	list_for_each_entry(rovr, &clnt->cl_tasks, tk_task) {
		if (!RPC_IS_ACTIVATED(rovr))
			continue;
		/* Set RPC_TASK_KILLED first so each task is killed once */
		if (!(rovr->tk_flags & RPC_TASK_KILLED)) {
			rovr->tk_flags |= RPC_TASK_KILLED;
			rpc_exit(rovr, -EIO);
			rpc_wake_up_queued_task(rovr->tk_waitqueue, rovr);
		}
	}
	spin_unlock(&clnt->cl_lock);
}
EXPORT_SYMBOL_GPL(rpc_killall_tasks);
444
/*
 * Properly shut down an RPC client, terminating all outstanding
 * requests.
 */
void rpc_shutdown_client(struct rpc_clnt *clnt)
{
	dprintk("RPC: shutting down %s client for %s\n",
			clnt->cl_protname, clnt->cl_server);

	/* Keep re-issuing the kill until the task list drains; the 1 s
	 * timeout re-checks the condition in case wake-ups are missed. */
	while (!list_empty(&clnt->cl_tasks)) {
		rpc_killall_tasks(clnt);
		wait_event_timeout(destroy_wait,
			list_empty(&clnt->cl_tasks), 1*HZ);
	}

	/* Drop the caller's reference; may free the client */
	rpc_release_client(clnt);
}
EXPORT_SYMBOL_GPL(rpc_shutdown_client);
434 463
/*
 * Free an RPC client (kref release callback; see rpc_free_auth below).
 */
static void
rpc_free_client(struct kref *kref)
{
	struct rpc_clnt *clnt = container_of(kref, struct rpc_clnt, cl_kref);

	dprintk("RPC: destroying %s client for %s\n",
			clnt->cl_protname, clnt->cl_server);
	/* Tear down the rpc_pipefs directory, if one was created */
	if (!IS_ERR(clnt->cl_path.dentry)) {
		rpc_remove_client_dir(clnt->cl_path.dentry);
		rpc_put_mount();
	}
	if (clnt->cl_parent != clnt) {
		/* A clone: drop our reference on the parent, and do NOT
		 * free cl_server (shared with the parent via kmemdup in
		 * rpc_clone_client) */
		rpc_release_client(clnt->cl_parent);
		goto out_free;
	}
	if (clnt->cl_server != clnt->cl_inline_name)
		kfree(clnt->cl_server);
out_free:
	rpc_unregister_client(clnt);
	rpc_free_iostats(clnt->cl_metrics);
	kfree(clnt->cl_principal);
	clnt->cl_metrics = NULL;
	xprt_put(clnt->cl_xprt);
	rpciod_down();
	kfree(clnt);
}
464 493
/*
 * Free an RPC client: release the auth handle first, then the client
 * itself (kref release callback for cl_kref).
 */
static void
rpc_free_auth(struct kref *kref)
{
	struct rpc_clnt *clnt = container_of(kref, struct rpc_clnt, cl_kref);

	if (clnt->cl_auth == NULL) {
		rpc_free_client(kref);
		return;
	}

	/*
	 * Note: RPCSEC_GSS may need to send NULL RPC calls in order to
	 * release remaining GSS contexts. This mechanism ensures
	 * that it can do so safely.
	 */
	kref_init(kref);	/* temporarily resurrect the refcount to 1 */
	rpcauth_release(clnt->cl_auth);
	clnt->cl_auth = NULL;
	kref_put(kref, rpc_free_client);	/* drop it again; frees if still 0 */
}
488 517
/*
 * Release reference to the RPC client
 */
void
rpc_release_client(struct rpc_clnt *clnt)
{
	dprintk("RPC: rpc_release_client(%p)\n", clnt);

	/* Wake rpc_shutdown_client() waiters once the task list is empty */
	if (list_empty(&clnt->cl_tasks))
		wake_up(&destroy_wait);
	kref_put(&clnt->cl_kref, rpc_free_auth);
}
501 530
/**
 * rpc_bind_new_program - bind a new RPC program to an existing client
 * @old: old rpc_client
 * @program: rpc program to set
 * @vers: rpc program version
 *
 * Clones the rpc client and sets up a new RPC program. This is mainly
 * of use for enabling different RPC programs to share the same transport.
 * The Sun NFSv2/v3 ACL protocol can do this.
 */
struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *old,
				      struct rpc_program *program,
				      u32 vers)
{
	struct rpc_clnt *clnt;
	struct rpc_version *version;
	int err;

	BUG_ON(vers >= program->nrvers || !program->version[vers]);
	version = program->version[vers];
	clnt = rpc_clone_client(old);
	if (IS_ERR(clnt))
		goto out;
	/* Rebind the clone to the new program/version */
	clnt->cl_procinfo = version->procs;
	clnt->cl_maxproc = version->nrprocs;
	clnt->cl_protname = program->name;
	clnt->cl_prog = program->number;
	clnt->cl_vers = version->number;
	clnt->cl_stats = program->stats;
	/* Verify the server actually speaks this program/version */
	err = rpc_ping(clnt);
	if (err != 0) {
		rpc_shutdown_client(clnt);
		clnt = ERR_PTR(err);
	}
out:
	return clnt;
}
EXPORT_SYMBOL_GPL(rpc_bind_new_program);
540 569
570 void rpc_task_release_client(struct rpc_task *task)
571 {
572 struct rpc_clnt *clnt = task->tk_client;
573
574 if (clnt != NULL) {
575 /* Remove from client task list */
576 spin_lock(&clnt->cl_lock);
577 list_del(&task->tk_task);
578 spin_unlock(&clnt->cl_lock);
579 task->tk_client = NULL;
580
581 rpc_release_client(clnt);
582 }
583 }
584
585 static
586 void rpc_task_set_client(struct rpc_task *task, struct rpc_clnt *clnt)
587 {
588 if (clnt != NULL) {
589 rpc_task_release_client(task);
590 task->tk_client = clnt;
591 kref_get(&clnt->cl_kref);
592 if (clnt->cl_softrtry)
593 task->tk_flags |= RPC_TASK_SOFT;
594 /* Add to the client's list of all tasks */
595 spin_lock(&clnt->cl_lock);
596 list_add_tail(&task->tk_task, &clnt->cl_tasks);
597 spin_unlock(&clnt->cl_lock);
598 }
599 }
600
601 static void
602 rpc_task_set_rpc_message(struct rpc_task *task, const struct rpc_message *msg)
603 {
604 if (msg != NULL) {
605 task->tk_msg.rpc_proc = msg->rpc_proc;
606 task->tk_msg.rpc_argp = msg->rpc_argp;
607 task->tk_msg.rpc_resp = msg->rpc_resp;
608 /* Bind the user cred */
609 rpcauth_bindcred(task, msg->rpc_cred, task->tk_flags);
610 }
611 }
612
/*
 * Default callback for async RPC calls: intentionally does nothing.
 */
static void
rpc_default_callback(struct rpc_task *task, void *data)
{
}

/* Used by rpc_call_sync() when the caller supplies no rpc_call_ops */
static const struct rpc_call_ops rpc_default_ops = {
	.rpc_call_done = rpc_default_callback,
};
552 624
/**
 * rpc_run_task - Allocate a new RPC task, then run rpc_execute against it
 * @task_setup_data: pointer to task initialisation data
 *
 * Returns the (referenced) task, or an ERR_PTR on failure; callers
 * release their reference with rpc_put_task().
 */
struct rpc_task *rpc_run_task(const struct rpc_task_setup *task_setup_data)
{
	struct rpc_task *task;

	task = rpc_new_task(task_setup_data);
	if (IS_ERR(task))
		goto out;

	/* Bind the task to its rpc_clnt and rpc_message, if any */
	rpc_task_set_client(task, task_setup_data->rpc_client);
	rpc_task_set_rpc_message(task, task_setup_data->rpc_message);

	/* Setup may already have failed the task (e.g. cred binding) */
	if (task->tk_status != 0) {
		int ret = task->tk_status;
		rpc_put_task(task);
		return ERR_PTR(ret);
	}

	/* Default to the standard RPC call state machine entry point */
	if (task->tk_action == NULL)
		rpc_call_start(task);

	/* Extra reference so the returned task stays valid for the caller */
	atomic_inc(&task->tk_count);
	rpc_execute(task);
out:
	return task;
}
EXPORT_SYMBOL_GPL(rpc_run_task);
571 655
/**
 * rpc_call_sync - Perform a synchronous RPC call
 * @clnt: pointer to RPC client
 * @msg: RPC call parameters
 * @flags: RPC call flags
 *
 * Returns the call's final status (task->tk_status), or a negative
 * errno if the task could not be set up.
 */
int rpc_call_sync(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags)
{
	struct rpc_task *task;
	struct rpc_task_setup task_setup_data = {
		.rpc_client = clnt,
		.rpc_message = msg,
		.callback_ops = &rpc_default_ops,
		.flags = flags,
	};
	int status;

	/* A synchronous call must not request async execution */
	BUG_ON(flags & RPC_TASK_ASYNC);

	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	status = task->tk_status;
	rpc_put_task(task);	/* drop the reference rpc_run_task() took */
	return status;
}
EXPORT_SYMBOL_GPL(rpc_call_sync);
599 683
/**
 * rpc_call_async - Perform an asynchronous RPC call
 * @clnt: pointer to RPC client
 * @msg: RPC call parameters
 * @flags: RPC call flags
 * @tk_ops: RPC call ops
 * @data: user call data
 *
 * Returns 0 if the task was started, or a negative errno otherwise.
 * Completion is reported through @tk_ops.
 */
int
rpc_call_async(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags,
	       const struct rpc_call_ops *tk_ops, void *data)
{
	struct rpc_task *task;
	struct rpc_task_setup task_setup_data = {
		.rpc_client = clnt,
		.rpc_message = msg,
		.callback_ops = tk_ops,
		.callback_data = data,
		.flags = flags|RPC_TASK_ASYNC,
	};

	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	rpc_put_task(task);	/* the task runs on; drop the caller reference */
	return 0;
}
EXPORT_SYMBOL_GPL(rpc_call_async);
628 712
#if defined(CONFIG_NFS_V4_1)
/**
 * rpc_run_bc_task - Allocate a new RPC task for backchannel use, then run
 * rpc_execute against it
 * @req: RPC request
 * @tk_ops: RPC call ops
 */
struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req,
				const struct rpc_call_ops *tk_ops)
{
	struct rpc_task *task;
	struct xdr_buf *xbufp = &req->rq_snd_buf;
	struct rpc_task_setup task_setup_data = {
		.callback_ops = tk_ops,
	};

	dprintk("RPC: rpc_run_bc_task req= %p\n", req);
	/*
	 * Create an rpc_task to send the data
	 */
	task = rpc_new_task(&task_setup_data);
	if (IS_ERR(task)) {
		/* On failure, give the preallocated request back */
		xprt_free_bc_request(req);
		goto out;
	}
	task->tk_rqstp = req;

	/*
	 * Set up the xdr_buf length.
	 * This also indicates that the buffer is XDR encoded already.
	 */
	xbufp->len = xbufp->head[0].iov_len + xbufp->page_len +
			xbufp->tail[0].iov_len;

	/* Backchannel replies skip call_start and transmit directly */
	task->tk_action = call_bc_transmit;
	/* Second reference for the caller; BUG_ON sanity-checks both */
	atomic_inc(&task->tk_count);
	BUG_ON(atomic_read(&task->tk_count) != 2);
	rpc_execute(task);

out:
	dprintk("RPC: rpc_run_bc_task: task= %p\n", task);
	return task;
}
#endif /* CONFIG_NFS_V4_1 */
673 757
/* Point the task at the start of the standard RPC call state machine */
void
rpc_call_start(struct rpc_task *task)
{
	task->tk_action = call_start;
}
EXPORT_SYMBOL_GPL(rpc_call_start);
680 764
681 /** 765 /**
682 * rpc_peeraddr - extract remote peer address from clnt's xprt 766 * rpc_peeraddr - extract remote peer address from clnt's xprt
683 * @clnt: RPC client structure 767 * @clnt: RPC client structure
684 * @buf: target buffer 768 * @buf: target buffer
685 * @bufsize: length of target buffer 769 * @bufsize: length of target buffer
686 * 770 *
687 * Returns the number of bytes that are actually in the stored address. 771 * Returns the number of bytes that are actually in the stored address.
688 */ 772 */
689 size_t rpc_peeraddr(struct rpc_clnt *clnt, struct sockaddr *buf, size_t bufsize) 773 size_t rpc_peeraddr(struct rpc_clnt *clnt, struct sockaddr *buf, size_t bufsize)
690 { 774 {
691 size_t bytes; 775 size_t bytes;
692 struct rpc_xprt *xprt = clnt->cl_xprt; 776 struct rpc_xprt *xprt = clnt->cl_xprt;
693 777
694 bytes = sizeof(xprt->addr); 778 bytes = sizeof(xprt->addr);
695 if (bytes > bufsize) 779 if (bytes > bufsize)
696 bytes = bufsize; 780 bytes = bufsize;
697 memcpy(buf, &clnt->cl_xprt->addr, bytes); 781 memcpy(buf, &clnt->cl_xprt->addr, bytes);
698 return xprt->addrlen; 782 return xprt->addrlen;
699 } 783 }
700 EXPORT_SYMBOL_GPL(rpc_peeraddr); 784 EXPORT_SYMBOL_GPL(rpc_peeraddr);
701 785
702 /** 786 /**
703 * rpc_peeraddr2str - return remote peer address in printable format 787 * rpc_peeraddr2str - return remote peer address in printable format
704 * @clnt: RPC client structure 788 * @clnt: RPC client structure
705 * @format: address format 789 * @format: address format
706 * 790 *
707 */ 791 */
708 const char *rpc_peeraddr2str(struct rpc_clnt *clnt, 792 const char *rpc_peeraddr2str(struct rpc_clnt *clnt,
709 enum rpc_display_format_t format) 793 enum rpc_display_format_t format)
710 { 794 {
711 struct rpc_xprt *xprt = clnt->cl_xprt; 795 struct rpc_xprt *xprt = clnt->cl_xprt;
712 796
713 if (xprt->address_strings[format] != NULL) 797 if (xprt->address_strings[format] != NULL)
714 return xprt->address_strings[format]; 798 return xprt->address_strings[format];
715 else 799 else
716 return "unprintable"; 800 return "unprintable";
717 } 801 }
718 EXPORT_SYMBOL_GPL(rpc_peeraddr2str); 802 EXPORT_SYMBOL_GPL(rpc_peeraddr2str);
719 803
720 void 804 void
721 rpc_setbufsize(struct rpc_clnt *clnt, unsigned int sndsize, unsigned int rcvsize) 805 rpc_setbufsize(struct rpc_clnt *clnt, unsigned int sndsize, unsigned int rcvsize)
722 { 806 {
723 struct rpc_xprt *xprt = clnt->cl_xprt; 807 struct rpc_xprt *xprt = clnt->cl_xprt;
724 if (xprt->ops->set_buffer_size) 808 if (xprt->ops->set_buffer_size)
725 xprt->ops->set_buffer_size(xprt, sndsize, rcvsize); 809 xprt->ops->set_buffer_size(xprt, sndsize, rcvsize);
726 } 810 }
727 EXPORT_SYMBOL_GPL(rpc_setbufsize); 811 EXPORT_SYMBOL_GPL(rpc_setbufsize);
728 812
/*
 * Return size of largest payload RPC client can support, in bytes
 *
 * For stream transports, this is one RPC record fragment (see RFC
 * 1831), as we don't support multi-record requests yet. For datagram
 * transports, this is the size of an IP packet minus the IP, UDP, and
 * RPC header sizes.
 */
size_t rpc_max_payload(struct rpc_clnt *clnt)
{
	return clnt->cl_xprt->max_payload;
}
EXPORT_SYMBOL_GPL(rpc_max_payload);
742 826
743 /** 827 /**
744 * rpc_force_rebind - force transport to check that remote port is unchanged 828 * rpc_force_rebind - force transport to check that remote port is unchanged
745 * @clnt: client to rebind 829 * @clnt: client to rebind
746 * 830 *
747 */ 831 */
748 void rpc_force_rebind(struct rpc_clnt *clnt) 832 void rpc_force_rebind(struct rpc_clnt *clnt)
749 { 833 {
750 if (clnt->cl_autobind) 834 if (clnt->cl_autobind)
751 xprt_clear_bound(clnt->cl_xprt); 835 xprt_clear_bound(clnt->cl_xprt);
752 } 836 }
753 EXPORT_SYMBOL_GPL(rpc_force_rebind); 837 EXPORT_SYMBOL_GPL(rpc_force_rebind);
754 838
755 /* 839 /*
756 * Restart an (async) RPC call from the call_prepare state. 840 * Restart an (async) RPC call from the call_prepare state.
757 * Usually called from within the exit handler. 841 * Usually called from within the exit handler.
758 */ 842 */
759 int 843 int
760 rpc_restart_call_prepare(struct rpc_task *task) 844 rpc_restart_call_prepare(struct rpc_task *task)
761 { 845 {
762 if (RPC_ASSASSINATED(task)) 846 if (RPC_ASSASSINATED(task))
763 return 0; 847 return 0;
764 task->tk_action = rpc_prepare_task; 848 task->tk_action = rpc_prepare_task;
765 return 1; 849 return 1;
766 } 850 }
767 EXPORT_SYMBOL_GPL(rpc_restart_call_prepare); 851 EXPORT_SYMBOL_GPL(rpc_restart_call_prepare);
768 852
769 /* 853 /*
770 * Restart an (async) RPC call. Usually called from within the 854 * Restart an (async) RPC call. Usually called from within the
771 * exit handler. 855 * exit handler.
772 */ 856 */
773 int 857 int
774 rpc_restart_call(struct rpc_task *task) 858 rpc_restart_call(struct rpc_task *task)
775 { 859 {
776 if (RPC_ASSASSINATED(task)) 860 if (RPC_ASSASSINATED(task))
777 return 0; 861 return 0;
778 task->tk_action = call_start; 862 task->tk_action = call_start;
779 return 1; 863 return 1;
780 } 864 }
781 EXPORT_SYMBOL_GPL(rpc_restart_call); 865 EXPORT_SYMBOL_GPL(rpc_restart_call);
782 866
#ifdef RPC_DEBUG
/*
 * Human-readable procedure name for debug output; never returns NULL.
 */
static const char *rpc_proc_name(const struct rpc_task *task)
{
	const struct rpc_procinfo *proc = task->tk_msg.rpc_proc;

	if (proc == NULL)
		return "no proc";
	if (proc->p_name == NULL)
		return "NULL";
	return proc->p_name;
}
#endif
797 881
/*
 * 0. Initial state
 *
 * Other FSM states can be visited zero or more times, but
 * this state is visited exactly once for each RPC.
 */
static void
call_start(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;

	dprintk("RPC: %5u call_start %s%d proc %s (%s)\n", task->tk_pid,
			clnt->cl_protname, clnt->cl_vers,
			rpc_proc_name(task),
			(RPC_IS_ASYNC(task) ? "async" : "sync"));

	/* Increment call count: per-procedure and per-client statistics */
	task->tk_msg.rpc_proc->p_count++;
	clnt->cl_stats->rpccnt++;
	/* Next state: reserve a transport request slot */
	task->tk_action = call_reserve;
}
819 903
/*
 * 1. Reserve an RPC call slot
 */
static void
call_reserve(struct rpc_task *task)
{
	dprint_status(task);

	/* Refresh stale credentials before committing to a slot */
	if (!rpcauth_uptodatecred(task)) {
		task->tk_action = call_refresh;
		return;
	}

	task->tk_status = 0;
	task->tk_action = call_reserveresult;
	/* May complete immediately or queue the task on the backlog */
	xprt_reserve(task);
}
837 921
/*
 * 1b. Grok the result of xprt_reserve()
 */
static void
call_reserveresult(struct rpc_task *task)
{
	int status = task->tk_status;

	dprint_status(task);

	/*
	 * After a call to xprt_reserve(), we must have either
	 * a request slot or else an error status.
	 */
	task->tk_status = 0;
	if (status >= 0) {
		if (task->tk_rqstp) {
			task->tk_action = call_allocate;
			return;
		}

		/* Success status but no slot: internal inconsistency */
		printk(KERN_ERR "%s: status=%d, but no request slot, exiting\n",
				__func__, status);
		rpc_exit(task, -EIO);
		return;
	}

	/*
	 * Even though there was an error, we may have acquired
	 * a request slot somehow.  Make sure not to leak it.
	 */
	if (task->tk_rqstp) {
		printk(KERN_ERR "%s: status=%d, request allocated anyway\n",
				__func__, status);
		xprt_release(task);
	}

	switch (status) {
	case -EAGAIN:	/* woken up; retry */
		task->tk_action = call_reserve;
		return;
	case -EIO:	/* probably a shutdown */
		break;
	default:
		printk(KERN_ERR "%s: unrecognized error %d, exiting\n",
				__func__, status);
		break;
	}
	/* Fatal: terminate the task with the reservation error */
	rpc_exit(task, status);
}
888 972
/*
 * 2.	Allocate the buffer. For details, see sched.c:rpc_malloc.
 *	(Note: buffer memory is freed in xprt_release).
 */
static void
call_allocate(struct rpc_task *task)
{
	/* au_cslack: per-auth-flavor slack added to both header estimates */
	unsigned int slack = task->tk_msg.rpc_cred->cr_auth->au_cslack;
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = task->tk_xprt;
	struct rpc_procinfo *proc = task->tk_msg.rpc_proc;

	dprint_status(task);

	task->tk_status = 0;
	task->tk_action = call_bind;

	/* Buffer already allocated (e.g. on a retransmit): nothing to do */
	if (req->rq_buffer)
		return;

	if (proc->p_proc != 0) {
		/* Non-NULL procedures must declare their argument size, and
		 * a reply size whenever they expect to decode a reply */
		BUG_ON(proc->p_arglen == 0);
		if (proc->p_decode != NULL)
			BUG_ON(proc->p_replen == 0);
	}

	/*
	 * Calculate the size (in quads) of the RPC call
	 * and reply headers, and convert both values
	 * to byte sizes.
	 */
	req->rq_callsize = RPC_CALLHDRSIZE + (slack << 1) + proc->p_arglen;
	req->rq_callsize <<= 2;
	req->rq_rcvsize = RPC_REPHDRSIZE + slack + proc->p_replen;
	req->rq_rcvsize <<= 2;

	/* One allocation holds both send and receive buffers back-to-back */
	req->rq_buffer = xprt->ops->buf_alloc(task,
					req->rq_callsize + req->rq_rcvsize);
	if (req->rq_buffer != NULL)
		return;

	dprintk("RPC: %5u rpc_buffer allocation failed\n", task->tk_pid);

	/* Allocation failed: retry after a short delay unless a synchronous
	 * caller has been signalled */
	if (RPC_IS_ASYNC(task) || !signalled()) {
		task->tk_action = call_allocate;
		rpc_delay(task, HZ>>4);
		return;
	}

	rpc_exit(task, -ERESTARTSYS);
}
940 1024
941 static inline int 1025 static inline int
942 rpc_task_need_encode(struct rpc_task *task) 1026 rpc_task_need_encode(struct rpc_task *task)
943 { 1027 {
944 return task->tk_rqstp->rq_snd_buf.len == 0; 1028 return task->tk_rqstp->rq_snd_buf.len == 0;
945 } 1029 }
946 1030
947 static inline void 1031 static inline void
948 rpc_task_force_reencode(struct rpc_task *task) 1032 rpc_task_force_reencode(struct rpc_task *task)
949 { 1033 {
950 task->tk_rqstp->rq_snd_buf.len = 0; 1034 task->tk_rqstp->rq_snd_buf.len = 0;
951 task->tk_rqstp->rq_bytes_sent = 0; 1035 task->tk_rqstp->rq_bytes_sent = 0;
952 } 1036 }
953 1037
954 static inline void 1038 static inline void
955 rpc_xdr_buf_init(struct xdr_buf *buf, void *start, size_t len) 1039 rpc_xdr_buf_init(struct xdr_buf *buf, void *start, size_t len)
956 { 1040 {
957 buf->head[0].iov_base = start; 1041 buf->head[0].iov_base = start;
958 buf->head[0].iov_len = len; 1042 buf->head[0].iov_len = len;
959 buf->tail[0].iov_len = 0; 1043 buf->tail[0].iov_len = 0;
960 buf->page_len = 0; 1044 buf->page_len = 0;
961 buf->flags = 0; 1045 buf->flags = 0;
962 buf->len = 0; 1046 buf->len = 0;
963 buf->buflen = len; 1047 buf->buflen = len;
964 } 1048 }
965 1049
/*
 * 3.	Encode arguments of an RPC call
 */
static void
rpc_xdr_encode(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	kxdrproc_t	encode;
	__be32		*p;

	dprint_status(task);

	/* The call buffer and the reply buffer share one allocation:
	 * rq_buffer holds rq_callsize bytes for the call, immediately
	 * followed by rq_rcvsize bytes for the reply (see call_allocate) */
	rpc_xdr_buf_init(&req->rq_snd_buf,
			 req->rq_buffer,
			 req->rq_callsize);
	rpc_xdr_buf_init(&req->rq_rcv_buf,
			 (char *)req->rq_buffer + req->rq_callsize,
			 req->rq_rcvsize);

	p = rpc_encode_header(task);
	if (p == NULL) {
		printk(KERN_INFO "RPC: couldn't encode RPC header, exit EIO\n");
		rpc_exit(task, -EIO);
		return;
	}

	/* Procedures with no arguments have no encode callback */
	encode = task->tk_msg.rpc_proc->p_encode;
	if (encode == NULL)
		return;

	/* Encode the arguments through the auth layer, which may wrap
	 * them (e.g. for integrity/privacy); result lands in tk_status */
	task->tk_status = rpcauth_wrap_req(task, encode, req, p,
			task->tk_msg.rpc_argp);
}
999 1083
1000 /* 1084 /*
1001 * 4. Get the server port number if not yet set 1085 * 4. Get the server port number if not yet set
1002 */ 1086 */
1003 static void 1087 static void
1004 call_bind(struct rpc_task *task) 1088 call_bind(struct rpc_task *task)
1005 { 1089 {
1006 struct rpc_xprt *xprt = task->tk_xprt; 1090 struct rpc_xprt *xprt = task->tk_xprt;
1007 1091
1008 dprint_status(task); 1092 dprint_status(task);
1009 1093
1010 task->tk_action = call_connect; 1094 task->tk_action = call_connect;
1011 if (!xprt_bound(xprt)) { 1095 if (!xprt_bound(xprt)) {
1012 task->tk_action = call_bind_status; 1096 task->tk_action = call_bind_status;
1013 task->tk_timeout = xprt->bind_timeout; 1097 task->tk_timeout = xprt->bind_timeout;
1014 xprt->ops->rpcbind(task); 1098 xprt->ops->rpcbind(task);
1015 } 1099 }
1016 } 1100 }
1017 1101
/*
 * 4a.	Sort out bind result
 */
static void
call_bind_status(struct rpc_task *task)
{
	int status = -EIO;

	/* Bind succeeded: proceed to connect */
	if (task->tk_status >= 0) {
		dprint_status(task);
		task->tk_status = 0;
		task->tk_action = call_connect;
		return;
	}

	switch (task->tk_status) {
	case -ENOMEM:
		/* Transient local failure: back off briefly and retry */
		dprintk("RPC: %5u rpcbind out of memory\n", task->tk_pid);
		rpc_delay(task, HZ >> 2);
		goto retry_timeout;
	case -EACCES:
		dprintk("RPC: %5u remote rpcbind: RPC program/version "
				"unavailable\n", task->tk_pid);
		/* fail immediately if this is an RPC ping */
		if (task->tk_msg.rpc_proc->p_proc == 0) {
			status = -EOPNOTSUPP;
			break;
		}
		/* The program may get registered later: retry after 3s */
		rpc_delay(task, 3*HZ);
		goto retry_timeout;
	case -ETIMEDOUT:
		dprintk("RPC: %5u rpcbind request timed out\n",
				task->tk_pid);
		goto retry_timeout;
	case -EPFNOSUPPORT:
		/* server doesn't support any rpcbind version we know of */
		dprintk("RPC: %5u unrecognized remote rpcbind service\n",
				task->tk_pid);
		break;
	case -EPROTONOSUPPORT:
		/* Negotiate a different rpcbind version by rebinding */
		dprintk("RPC: %5u remote rpcbind version unavailable, retrying\n",
				task->tk_pid);
		task->tk_status = 0;
		task->tk_action = call_bind;
		return;
	case -ECONNREFUSED:		/* connection problems */
	case -ECONNRESET:
	case -ENOTCONN:
	case -EHOSTDOWN:
	case -EHOSTUNREACH:
	case -ENETUNREACH:
	case -EPIPE:
		dprintk("RPC: %5u remote rpcbind unreachable: %d\n",
				task->tk_pid, task->tk_status);
		/* Soft-connect tasks fail immediately; others retry */
		if (!RPC_IS_SOFTCONN(task)) {
			rpc_delay(task, 5*HZ);
			goto retry_timeout;
		}
		status = task->tk_status;
		break;
	default:
		dprintk("RPC: %5u unrecognized rpcbind error (%d)\n",
				task->tk_pid, -task->tk_status);
	}

	rpc_exit(task, status);
	return;

retry_timeout:
	/* Let call_timeout decide whether to give up or rebind */
	task->tk_action = call_timeout;
}
1089 1173
1090 /* 1174 /*
1091 * 4b. Connect to the RPC server 1175 * 4b. Connect to the RPC server
1092 */ 1176 */
1093 static void 1177 static void
1094 call_connect(struct rpc_task *task) 1178 call_connect(struct rpc_task *task)
1095 { 1179 {
1096 struct rpc_xprt *xprt = task->tk_xprt; 1180 struct rpc_xprt *xprt = task->tk_xprt;
1097 1181
1098 dprintk("RPC: %5u call_connect xprt %p %s connected\n", 1182 dprintk("RPC: %5u call_connect xprt %p %s connected\n",
1099 task->tk_pid, xprt, 1183 task->tk_pid, xprt,
1100 (xprt_connected(xprt) ? "is" : "is not")); 1184 (xprt_connected(xprt) ? "is" : "is not"));
1101 1185
1102 task->tk_action = call_transmit; 1186 task->tk_action = call_transmit;
1103 if (!xprt_connected(xprt)) { 1187 if (!xprt_connected(xprt)) {
1104 task->tk_action = call_connect_status; 1188 task->tk_action = call_connect_status;
1105 if (task->tk_status < 0) 1189 if (task->tk_status < 0)
1106 return; 1190 return;
1107 xprt_connect(task); 1191 xprt_connect(task);
1108 } 1192 }
1109 } 1193 }
1110 1194
/*
 * 4c.	Sort out connect result
 */
static void
call_connect_status(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;
	int status = task->tk_status;

	dprint_status(task);

	task->tk_status = 0;
	/* Success, or -EAGAIN (connect raced with another task): move on
	 * to transmit, which will re-check connectivity */
	if (status >= 0 || status == -EAGAIN) {
		clnt->cl_stats->netreconn++;
		task->tk_action = call_transmit;
		return;
	}

	switch (status) {
		/* if soft mounted, test if we've timed out */
	case -ETIMEDOUT:
		task->tk_action = call_timeout;
		break;
	default:
		/* Any other connect failure is fatal for this call */
		rpc_exit(task, -EIO);
	}
}
1138 1222
/*
 * 5.	Transmit the RPC request, and wait for reply
 */
static void
call_transmit(struct rpc_task *task)
{
	dprint_status(task);

	task->tk_action = call_status;
	if (task->tk_status < 0)
		return;
	/* Take the transport write lock; -EAGAIN here means we sleep
	 * until the transport is free and call_status retries */
	task->tk_status = xprt_prepare_transmit(task);
	if (task->tk_status != 0)
		return;
	task->tk_action = call_transmit_status;
	/* Encode here so that rpcsec_gss can use correct sequence number. */
	if (rpc_task_need_encode(task)) {
		BUG_ON(task->tk_rqstp->rq_bytes_sent != 0);
		rpc_xdr_encode(task);
		/* Did the encode result in an error condition? */
		if (task->tk_status != 0) {
			/* Was the error nonfatal? */
			if (task->tk_status == -EAGAIN)
				rpc_delay(task, HZ >> 4);
			else
				rpc_exit(task, task->tk_status);
			return;
		}
	}
	xprt_transmit(task);
	if (task->tk_status < 0)
		return;
	/*
	 * On success, ensure that we call xprt_end_transmit() before sleeping
	 * in order to allow access to the socket to other RPC requests.
	 */
	call_transmit_status(task);
	/* If no reply is expected (e.g. a one-way message), the call is
	 * complete as soon as the transmit finishes */
	if (rpc_reply_expected(task))
		return;
	task->tk_action = rpc_exit_task;
	rpc_wake_up_queued_task(&task->tk_xprt->pending, task);
}
1181 1265
/*
 * 5a.	Handle cleanup after a transmission
 */
static void
call_transmit_status(struct rpc_task *task)
{
	task->tk_action = call_status;

	/*
	 * Common case: success.  Force the compiler to put this
	 * test first.
	 */
	if (task->tk_status == 0) {
		xprt_end_transmit(task);
		rpc_task_force_reencode(task);
		return;
	}

	switch (task->tk_status) {
	case -EAGAIN:
		/* Keep holding the transport lock; we will retry */
		break;
	default:
		dprint_status(task);
		xprt_end_transmit(task);
		rpc_task_force_reencode(task);
		break;
		/*
		 * Special cases: if we've been waiting on the
		 * socket's write_space() callback, or if the
		 * socket just returned a connection error,
		 * then hold onto the transport lock.
		 */
	case -ECONNREFUSED:
	case -EHOSTDOWN:
	case -EHOSTUNREACH:
	case -ENETUNREACH:
		/* Soft-connect tasks give up on connectivity errors */
		if (RPC_IS_SOFTCONN(task)) {
			xprt_end_transmit(task);
			rpc_exit(task, task->tk_status);
			break;
		}
		/* fall through */
	case -ECONNRESET:
	case -ENOTCONN:
	case -EPIPE:
		rpc_task_force_reencode(task);
	}
}
1229 1313
#if defined(CONFIG_NFS_V4_1)
/*
 * 5b.	Send the backchannel RPC reply.  On error, drop the reply.  In
 *	addition, disconnect on connectivity errors.
 */
static void
call_bc_transmit(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	BUG_ON(task->tk_status != 0);
	task->tk_status = xprt_prepare_transmit(task);
	if (task->tk_status == -EAGAIN) {
		/*
		 * Could not reserve the transport. Try again after the
		 * transport is released.
		 */
		task->tk_status = 0;
		task->tk_action = call_bc_transmit;
		return;
	}

	/* Backchannel replies are one-shot: whatever happens next, the
	 * task exits after this state */
	task->tk_action = rpc_exit_task;
	if (task->tk_status < 0) {
		printk(KERN_NOTICE "RPC: Could not send backchannel reply "
			"error: %d\n", task->tk_status);
		return;
	}

	xprt_transmit(task);
	xprt_end_transmit(task);
	dprint_status(task);
	switch (task->tk_status) {
	case 0:
		/* Success */
		break;
	case -EHOSTDOWN:
	case -EHOSTUNREACH:
	case -ENETUNREACH:
	case -ETIMEDOUT:
		/*
		 * Problem reaching the server.  Disconnect and let the
		 * forechannel reestablish the connection.  The server will
		 * have to retransmit the backchannel request and we'll
		 * reprocess it.  Since these ops are idempotent, there's no
		 * need to cache our reply at this time.
		 */
		printk(KERN_NOTICE "RPC: Could not send backchannel reply "
			"error: %d\n", task->tk_status);
		xprt_conditional_disconnect(task->tk_xprt,
			req->rq_connect_cookie);
		break;
	default:
		/*
		 * We were unable to reply and will have to drop the
		 * request.  The server should reconnect and retransmit.
		 */
		BUG_ON(task->tk_status == -EAGAIN);
		printk(KERN_NOTICE "RPC: Could not send backchannel reply "
			"error: %d\n", task->tk_status);
		break;
	}
	rpc_wake_up_queued_task(&req->rq_xprt->pending, task);
}
#endif /* CONFIG_NFS_V4_1 */
1295 1379
/*
 * 6.	Sort out the RPC call status
 */
static void
call_status(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rqst *req = task->tk_rqstp;
	int		status;

	/* A reply arrived before the whole request went out: treat the
	 * reply length as the (positive) status */
	if (req->rq_reply_bytes_recvd > 0 && !req->rq_bytes_sent)
		task->tk_status = req->rq_reply_bytes_recvd;

	dprint_status(task);

	status = task->tk_status;
	if (status >= 0) {
		task->tk_action = call_decode;
		return;
	}

	task->tk_status = 0;
	switch(status) {
	case -EHOSTDOWN:
	case -EHOSTUNREACH:
	case -ENETUNREACH:
		/*
		 * Delay any retries for 3 seconds, then handle as if it
		 * were a timeout.
		 */
		rpc_delay(task, 3*HZ);
		/* fall through */
	case -ETIMEDOUT:
		task->tk_action = call_timeout;
		/* Optionally disconnect to force a fresh connection for
		 * the retransmit (cl_discrtry) */
		if (task->tk_client->cl_discrtry)
			xprt_conditional_disconnect(task->tk_xprt,
					req->rq_connect_cookie);
		break;
	case -ECONNRESET:
	case -ECONNREFUSED:
		/* Server may have moved ports: force a rebind, back off,
		 * then re-run the bind/connect sequence */
		rpc_force_rebind(clnt);
		rpc_delay(task, 3*HZ);
		/* fall through */
	case -EPIPE:
	case -ENOTCONN:
		task->tk_action = call_bind;
		break;
	case -EAGAIN:
		task->tk_action = call_transmit;
		break;
	case -EIO:
		/* shutdown or soft timeout */
		rpc_exit(task, status);
		break;
	default:
		if (clnt->cl_chatty)
			printk("%s: RPC call returned error %d\n",
			       clnt->cl_protname, -status);
		rpc_exit(task, status);
	}
}
1355 1439
1356 /* 1440 /*
1357 * 6a. Handle RPC timeout 1441 * 6a. Handle RPC timeout
1358 * We do not release the request slot, so we keep using the 1442 * We do not release the request slot, so we keep using the
1359 * same XID for all retransmits. 1443 * same XID for all retransmits.
1360 */ 1444 */
static void
call_timeout(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;

	/* A "minor" timeout: the adjusted timeout has not yet reached the
	 * major-timeout threshold, so just retransmit silently. */
	if (xprt_adjust_timeout(task->tk_rqstp) == 0) {
		dprintk("RPC: %5u call_timeout (minor)\n", task->tk_pid);
		goto retry;
	}

	dprintk("RPC: %5u call_timeout (major)\n", task->tk_pid);
	task->tk_timeouts++;

	/* Soft-connect semantics: a connection timeout is fatal. */
	if (RPC_IS_SOFTCONN(task)) {
		rpc_exit(task, -ETIMEDOUT);
		return;
	}
	/* Soft task: give up after a major timeout instead of retrying
	 * forever. */
	if (RPC_IS_SOFT(task)) {
		if (clnt->cl_chatty)
			printk(KERN_NOTICE "%s: server %s not responding, timed out\n",
				clnt->cl_protname, clnt->cl_server);
		rpc_exit(task, -EIO);
		return;
	}

	/* Hard task: warn once per major-timeout episode, then keep trying. */
	if (!(task->tk_flags & RPC_CALL_MAJORSEEN)) {
		task->tk_flags |= RPC_CALL_MAJORSEEN;
		if (clnt->cl_chatty)
			printk(KERN_NOTICE "%s: server %s not responding, still trying\n",
			clnt->cl_protname, clnt->cl_server);
	}
	rpc_force_rebind(clnt);
	/*
	 * Did our request time out due to an RPCSEC_GSS out-of-sequence
	 * event? RFC2203 requires the server to drop all such requests.
	 */
	rpcauth_invalcred(task);

retry:
	clnt->cl_stats->rpcretrans++;
	/* Restart from the bind step; the request slot (and XID) is kept. */
	task->tk_action = call_bind;
	task->tk_status = 0;
}
1404 1488
1405 /* 1489 /*
1406 * 7. Decode the RPC reply 1490 * 7. Decode the RPC reply
1407 */ 1491 */
static void
call_decode(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rqst *req = task->tk_rqstp;
	kxdrproc_t	decode = task->tk_msg.rpc_proc->p_decode;
	__be32		*p;

	dprintk("RPC: %5u call_decode (status %d)\n",
			task->tk_pid, task->tk_status);

	/* If we previously warned that the server stopped responding,
	 * announce recovery and clear the flag. */
	if (task->tk_flags & RPC_CALL_MAJORSEEN) {
		if (clnt->cl_chatty)
			printk(KERN_NOTICE "%s: server %s OK\n",
				clnt->cl_protname, clnt->cl_server);
		task->tk_flags &= ~RPC_CALL_MAJORSEEN;
	}

	/*
	 * Ensure that we see all writes made by xprt_complete_rqst()
	 * before it changed req->rq_reply_bytes_recvd.
	 */
	smp_rmb();
	req->rq_rcv_buf.len = req->rq_private_buf.len;

	/* Check that the softirq receive buffer is valid */
	WARN_ON(memcmp(&req->rq_rcv_buf, &req->rq_private_buf,
				sizeof(req->rq_rcv_buf)) != 0);

	/* The smallest parseable reply header (XID, msg type, accept
	 * status) is three 32-bit words = 12 bytes. */
	if (req->rq_rcv_buf.len < 12) {
		if (!RPC_IS_SOFT(task)) {
			task->tk_action = call_bind;
			clnt->cl_stats->rpcretrans++;
			goto out_retry;
		}
		dprintk("RPC: %s: too small RPC reply size (%d bytes)\n",
				clnt->cl_protname, task->tk_status);
		task->tk_action = call_timeout;
		goto out_retry;
	}

	p = rpc_verify_header(task);
	if (IS_ERR(p)) {
		if (p == ERR_PTR(-EAGAIN))
			goto out_retry;
		return;
	}

	task->tk_action = rpc_exit_task;

	if (decode) {
		task->tk_status = rpcauth_unwrap_resp(task, decode, req, p,
						      task->tk_msg.rpc_resp);
	}
	dprintk("RPC: %5u call_decode result %d\n", task->tk_pid,
			task->tk_status);
	return;
out_retry:
	task->tk_status = 0;
	/* Note: rpc_verify_header() may have freed the RPC slot */
	if (task->tk_rqstp == req) {
		req->rq_reply_bytes_recvd = req->rq_rcv_buf.len = 0;
		if (task->tk_client->cl_discrtry)
			xprt_conditional_disconnect(task->tk_xprt,
					req->rq_connect_cookie);
	}
}
1475 1559
1476 /* 1560 /*
1477 * 8. Refresh the credentials if rejected by the server 1561 * 8. Refresh the credentials if rejected by the server
1478 */ 1562 */
1479 static void 1563 static void
1480 call_refresh(struct rpc_task *task) 1564 call_refresh(struct rpc_task *task)
1481 { 1565 {
1482 dprint_status(task); 1566 dprint_status(task);
1483 1567
1484 task->tk_action = call_refreshresult; 1568 task->tk_action = call_refreshresult;
1485 task->tk_status = 0; 1569 task->tk_status = 0;
1486 task->tk_client->cl_stats->rpcauthrefresh++; 1570 task->tk_client->cl_stats->rpcauthrefresh++;
1487 rpcauth_refreshcred(task); 1571 rpcauth_refreshcred(task);
1488 } 1572 }
1489 1573
1490 /* 1574 /*
1491 * 8a. Process the results of a credential refresh 1575 * 8a. Process the results of a credential refresh
1492 */ 1576 */
1493 static void 1577 static void
1494 call_refreshresult(struct rpc_task *task) 1578 call_refreshresult(struct rpc_task *task)
1495 { 1579 {
1496 int status = task->tk_status; 1580 int status = task->tk_status;
1497 1581
1498 dprint_status(task); 1582 dprint_status(task);
1499 1583
1500 task->tk_status = 0; 1584 task->tk_status = 0;
1501 task->tk_action = call_reserve; 1585 task->tk_action = call_reserve;
1502 if (status >= 0 && rpcauth_uptodatecred(task)) 1586 if (status >= 0 && rpcauth_uptodatecred(task))
1503 return; 1587 return;
1504 if (status == -EACCES) { 1588 if (status == -EACCES) {
1505 rpc_exit(task, -EACCES); 1589 rpc_exit(task, -EACCES);
1506 return; 1590 return;
1507 } 1591 }
1508 task->tk_action = call_refresh; 1592 task->tk_action = call_refresh;
1509 if (status != -ETIMEDOUT) 1593 if (status != -ETIMEDOUT)
1510 rpc_delay(task, 3*HZ); 1594 rpc_delay(task, 3*HZ);
1511 } 1595 }
1512 1596
/*
 * Marshal the RPC call header (RFC 1831 call body) into the send buffer.
 * Returns the position immediately after the marshalled credentials,
 * i.e. where the procedure arguments are to be encoded.
 */
static __be32 *
rpc_encode_header(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rqst	*req = task->tk_rqstp;
	__be32		*p = req->rq_svec[0].iov_base;

	/* FIXME: check buffer size? */

	p = xprt_skip_transport_header(task->tk_xprt, p);
	*p++ = req->rq_xid;		/* XID */
	*p++ = htonl(RPC_CALL);		/* CALL */
	*p++ = htonl(RPC_VERSION);	/* RPC version */
	*p++ = htonl(clnt->cl_prog);	/* program number */
	*p++ = htonl(clnt->cl_vers);	/* program version */
	*p++ = htonl(task->tk_msg.rpc_proc->p_proc);	/* procedure */
	p = rpcauth_marshcred(task, p);
	/* Record the header length in the first send iovec. */
	req->rq_slen = xdr_adjust_iovec(&req->rq_svec[0], p);
	return p;
}
1533 1617
/*
 * Parse and validate the RPC reply header.  On success returns a pointer
 * to the start of the reply payload; on failure returns an ERR_PTR
 * (ERR_PTR(-EAGAIN) means "retry", anything else terminates the task).
 * NOTE: on a credential retry this calls xprt_release(), which frees the
 * RPC slot — callers must re-check task->tk_rqstp afterwards.
 */
static __be32 *
rpc_verify_header(struct rpc_task *task)
{
	struct kvec *iov = &task->tk_rqstp->rq_rcv_buf.head[0];
	int len = task->tk_rqstp->rq_rcv_buf.len >> 2;
	__be32	*p = iov->iov_base;
	u32 n;
	int error = -EACCES;

	if ((task->tk_rqstp->rq_rcv_buf.len & 3) != 0) {
		/* RFC-1014 says that the representation of XDR data must be a
		 * multiple of four bytes
		 * - if it isn't pointer subtraction in the NFS client may give
		 *   undefined results
		 */
		dprintk("RPC: %5u %s: XDR representation not a multiple of"
		       " 4 bytes: 0x%x\n", task->tk_pid, __func__,
		       task->tk_rqstp->rq_rcv_buf.len);
		goto out_eio;
	}
	/* Need at least XID + msg type + reply status (3 words). */
	if ((len -= 3) < 0)
		goto out_overflow;

	p += 1; /* skip XID */
	if ((n = ntohl(*p++)) != RPC_REPLY) {
		dprintk("RPC: %5u %s: not an RPC reply: %x\n",
			task->tk_pid, __func__, n);
		goto out_garbage;
	}

	/* Rejected reply: decode the rejection reason. */
	if ((n = ntohl(*p++)) != RPC_MSG_ACCEPTED) {
		if (--len < 0)
			goto out_overflow;
		switch ((n = ntohl(*p++))) {
		case RPC_AUTH_ERROR:
			break;
		case RPC_MISMATCH:
			dprintk("RPC: %5u %s: RPC call version "
				"mismatch!\n",
				task->tk_pid, __func__);
			error = -EPROTONOSUPPORT;
			goto out_err;
		default:
			dprintk("RPC: %5u %s: RPC call rejected, "
				"unknown error: %x\n",
				task->tk_pid, __func__, n);
			goto out_eio;
		}
		if (--len < 0)
			goto out_overflow;
		/* Auth error: decide whether to retry with fresh creds. */
		switch ((n = ntohl(*p++))) {
		case RPC_AUTH_REJECTEDCRED:
		case RPC_AUTH_REJECTEDVERF:
		case RPCSEC_GSS_CREDPROBLEM:
		case RPCSEC_GSS_CTXPROBLEM:
			if (!task->tk_cred_retry)
				break;
			task->tk_cred_retry--;
			dprintk("RPC: %5u %s: retry stale creds\n",
					task->tk_pid, __func__);
			rpcauth_invalcred(task);
			/* Ensure we obtain a new XID! */
			xprt_release(task);
			task->tk_action = call_refresh;
			goto out_retry;
		case RPC_AUTH_BADCRED:
		case RPC_AUTH_BADVERF:
			/* possibly garbled cred/verf? */
			if (!task->tk_garb_retry)
				break;
			task->tk_garb_retry--;
			dprintk("RPC: %5u %s: retry garbled creds\n",
					task->tk_pid, __func__);
			task->tk_action = call_bind;
			goto out_retry;
		case RPC_AUTH_TOOWEAK:
			printk(KERN_NOTICE "RPC: server %s requires stronger "
			       "authentication.\n", task->tk_client->cl_server);
			break;
		default:
			dprintk("RPC: %5u %s: unknown auth error: %x\n",
					task->tk_pid, __func__, n);
			error = -EIO;
		}
		dprintk("RPC: %5u %s: call rejected %d\n",
				task->tk_pid, __func__, n);
		goto out_err;
	}
	/* Verify the server's response verifier. */
	if (!(p = rpcauth_checkverf(task, p))) {
		dprintk("RPC: %5u %s: auth check failed\n",
				task->tk_pid, __func__);
		goto out_garbage;		/* bad verifier, retry */
	}
	len = p - (__be32 *)iov->iov_base - 1;
	if (len < 0)
		goto out_overflow;
	/* Finally, the accept status of the call itself. */
	switch ((n = ntohl(*p++))) {
	case RPC_SUCCESS:
		return p;
	case RPC_PROG_UNAVAIL:
		dprintk("RPC: %5u %s: program %u is unsupported by server %s\n",
				task->tk_pid, __func__,
				(unsigned int)task->tk_client->cl_prog,
				task->tk_client->cl_server);
		error = -EPFNOSUPPORT;
		goto out_err;
	case RPC_PROG_MISMATCH:
		dprintk("RPC: %5u %s: program %u, version %u unsupported by "
				"server %s\n", task->tk_pid, __func__,
				(unsigned int)task->tk_client->cl_prog,
				(unsigned int)task->tk_client->cl_vers,
				task->tk_client->cl_server);
		error = -EPROTONOSUPPORT;
		goto out_err;
	case RPC_PROC_UNAVAIL:
		dprintk("RPC: %5u %s: proc %s unsupported by program %u, "
				"version %u on server %s\n",
				task->tk_pid, __func__,
				rpc_proc_name(task),
				task->tk_client->cl_prog,
				task->tk_client->cl_vers,
				task->tk_client->cl_server);
		error = -EOPNOTSUPP;
		goto out_err;
	case RPC_GARBAGE_ARGS:
		dprintk("RPC: %5u %s: server saw garbage\n",
				task->tk_pid, __func__);
		break;			/* retry */
	default:
		dprintk("RPC: %5u %s: server accept status: %x\n",
				task->tk_pid, __func__, n);
		/* Also retry */
	}

out_garbage:
	task->tk_client->cl_stats->rpcgarbage++;
	if (task->tk_garb_retry) {
		task->tk_garb_retry--;
		dprintk("RPC: %5u %s: retrying\n",
				task->tk_pid, __func__);
		task->tk_action = call_bind;
out_retry:
		return ERR_PTR(-EAGAIN);
	}
out_eio:
	error = -EIO;
out_err:
	rpc_exit(task, error);
	dprintk("RPC: %5u %s: call failed with error %d\n", task->tk_pid,
			__func__, error);
	return ERR_PTR(error);
out_overflow:
	dprintk("RPC: %5u %s: server reply was truncated.\n", task->tk_pid,
			__func__);
	goto out_garbage;
}
1690 1774
/* XDR encoder for the NULL procedure: there are no arguments to marshal. */
static int rpcproc_encode_null(void *rqstp, __be32 *data, void *obj)
{
	return 0;
}
1695 1779
/* XDR decoder for the NULL procedure: there are no results to unmarshal. */
static int rpcproc_decode_null(void *rqstp, __be32 *data, void *obj)
{
	return 0;
}
1700 1784
/* The NULL procedure: no arguments, no results — used by rpc_ping()
 * and rpc_call_null() below. */
static struct rpc_procinfo rpcproc_null = {
	.p_encode = rpcproc_encode_null,
	.p_decode = rpcproc_decode_null,
};
1705 1789
1706 static int rpc_ping(struct rpc_clnt *clnt) 1790 static int rpc_ping(struct rpc_clnt *clnt)
1707 { 1791 {
1708 struct rpc_message msg = { 1792 struct rpc_message msg = {
1709 .rpc_proc = &rpcproc_null, 1793 .rpc_proc = &rpcproc_null,
1710 }; 1794 };
1711 int err; 1795 int err;
1712 msg.rpc_cred = authnull_ops.lookup_cred(NULL, NULL, 0); 1796 msg.rpc_cred = authnull_ops.lookup_cred(NULL, NULL, 0);
1713 err = rpc_call_sync(clnt, &msg, RPC_TASK_SOFT | RPC_TASK_SOFTCONN); 1797 err = rpc_call_sync(clnt, &msg, RPC_TASK_SOFT | RPC_TASK_SOFTCONN);
1714 put_rpccred(msg.rpc_cred); 1798 put_rpccred(msg.rpc_cred);
1715 return err; 1799 return err;
1716 } 1800 }
1717 1801
1718 struct rpc_task *rpc_call_null(struct rpc_clnt *clnt, struct rpc_cred *cred, int flags) 1802 struct rpc_task *rpc_call_null(struct rpc_clnt *clnt, struct rpc_cred *cred, int flags)
1719 { 1803 {
1720 struct rpc_message msg = { 1804 struct rpc_message msg = {
1721 .rpc_proc = &rpcproc_null, 1805 .rpc_proc = &rpcproc_null,
1722 .rpc_cred = cred, 1806 .rpc_cred = cred,
1723 }; 1807 };
1724 struct rpc_task_setup task_setup_data = { 1808 struct rpc_task_setup task_setup_data = {
1725 .rpc_client = clnt, 1809 .rpc_client = clnt,
1726 .rpc_message = &msg, 1810 .rpc_message = &msg,
1727 .callback_ops = &rpc_default_ops, 1811 .callback_ops = &rpc_default_ops,
1728 .flags = flags, 1812 .flags = flags,
1729 }; 1813 };
1730 return rpc_run_task(&task_setup_data); 1814 return rpc_run_task(&task_setup_data);
1731 } 1815 }
1732 EXPORT_SYMBOL_GPL(rpc_call_null); 1816 EXPORT_SYMBOL_GPL(rpc_call_null);
1733 1817
1734 #ifdef RPC_DEBUG 1818 #ifdef RPC_DEBUG
/* Print the column legend for the rpc_show_tasks() dump. */
static void rpc_show_header(void)
{
	printk(KERN_INFO "-pid- flgs status -client- --rqstp- "
		"-timeout ---ops--\n");
}
1740 1824
1741 static void rpc_show_task(const struct rpc_clnt *clnt, 1825 static void rpc_show_task(const struct rpc_clnt *clnt,
1742 const struct rpc_task *task) 1826 const struct rpc_task *task)
1743 { 1827 {
1744 const char *rpc_waitq = "none"; 1828 const char *rpc_waitq = "none";
1745 char *p, action[KSYM_SYMBOL_LEN]; 1829 char *p, action[KSYM_SYMBOL_LEN];
1746 1830
1747 if (RPC_IS_QUEUED(task)) 1831 if (RPC_IS_QUEUED(task))
1748 rpc_waitq = rpc_qname(task->tk_waitqueue); 1832 rpc_waitq = rpc_qname(task->tk_waitqueue);
1749 1833
1750 /* map tk_action pointer to a function name; then trim off 1834 /* map tk_action pointer to a function name; then trim off
1751 * the "+0x0 [sunrpc]" */ 1835 * the "+0x0 [sunrpc]" */
1752 sprint_symbol(action, (unsigned long)task->tk_action); 1836 sprint_symbol(action, (unsigned long)task->tk_action);
1753 p = strchr(action, '+'); 1837 p = strchr(action, '+');
1754 if (p) 1838 if (p)
1755 *p = '\0'; 1839 *p = '\0';
1756 1840
1757 printk(KERN_INFO "%5u %04x %6d %8p %8p %8ld %8p %sv%u %s a:%s q:%s\n", 1841 printk(KERN_INFO "%5u %04x %6d %8p %8p %8ld %8p %sv%u %s a:%s q:%s\n",
1758 task->tk_pid, task->tk_flags, task->tk_status, 1842 task->tk_pid, task->tk_flags, task->tk_status,
1759 clnt, task->tk_rqstp, task->tk_timeout, task->tk_ops, 1843 clnt, task->tk_rqstp, task->tk_timeout, task->tk_ops,
1760 clnt->cl_protname, clnt->cl_vers, rpc_proc_name(task), 1844 clnt->cl_protname, clnt->cl_vers, rpc_proc_name(task),
1761 action, rpc_waitq); 1845 action, rpc_waitq);
1762 } 1846 }
1763 1847
1764 void rpc_show_tasks(void) 1848 void rpc_show_tasks(void)
1765 { 1849 {
1766 struct rpc_clnt *clnt; 1850 struct rpc_clnt *clnt;
1767 struct rpc_task *task; 1851 struct rpc_task *task;
1768 int header = 0; 1852 int header = 0;
1769 1853
1770 spin_lock(&rpc_client_lock); 1854 spin_lock(&rpc_client_lock);
1771 list_for_each_entry(clnt, &all_clients, cl_clients) { 1855 list_for_each_entry(clnt, &all_clients, cl_clients) {
1772 spin_lock(&clnt->cl_lock); 1856 spin_lock(&clnt->cl_lock);
1773 list_for_each_entry(task, &clnt->cl_tasks, tk_task) { 1857 list_for_each_entry(task, &clnt->cl_tasks, tk_task) {
1774 if (!header) { 1858 if (!header) {
1775 rpc_show_header(); 1859 rpc_show_header();
1776 header++; 1860 header++;
1777 } 1861 }
1778 rpc_show_task(clnt, task); 1862 rpc_show_task(clnt, task);
1779 } 1863 }
1780 spin_unlock(&clnt->cl_lock); 1864 spin_unlock(&clnt->cl_lock);
1781 } 1865 }
1782 spin_unlock(&rpc_client_lock); 1866 spin_unlock(&rpc_client_lock);
1783 } 1867 }
1784 #endif 1868 #endif
1785 1869
1 /* 1 /*
2 * linux/net/sunrpc/sched.c 2 * linux/net/sunrpc/sched.c
3 * 3 *
4 * Scheduling for synchronous and asynchronous RPC requests. 4 * Scheduling for synchronous and asynchronous RPC requests.
5 * 5 *
6 * Copyright (C) 1996 Olaf Kirch, <okir@monad.swb.de> 6 * Copyright (C) 1996 Olaf Kirch, <okir@monad.swb.de>
7 * 7 *
8 * TCP NFS related read + write fixes 8 * TCP NFS related read + write fixes
9 * (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie> 9 * (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
10 */ 10 */
11 11
12 #include <linux/module.h> 12 #include <linux/module.h>
13 13
14 #include <linux/sched.h> 14 #include <linux/sched.h>
15 #include <linux/interrupt.h> 15 #include <linux/interrupt.h>
16 #include <linux/slab.h> 16 #include <linux/slab.h>
17 #include <linux/mempool.h> 17 #include <linux/mempool.h>
18 #include <linux/smp.h> 18 #include <linux/smp.h>
19 #include <linux/spinlock.h> 19 #include <linux/spinlock.h>
20 #include <linux/mutex.h> 20 #include <linux/mutex.h>
21 21
22 #include <linux/sunrpc/clnt.h> 22 #include <linux/sunrpc/clnt.h>
23 23
24 #include "sunrpc.h" 24 #include "sunrpc.h"
25 25
26 #ifdef RPC_DEBUG 26 #ifdef RPC_DEBUG
27 #define RPCDBG_FACILITY RPCDBG_SCHED 27 #define RPCDBG_FACILITY RPCDBG_SCHED
28 #endif 28 #endif
29 29
30 /* 30 /*
31 * RPC slabs and memory pools 31 * RPC slabs and memory pools
32 */ 32 */
33 #define RPC_BUFFER_MAXSIZE (2048) 33 #define RPC_BUFFER_MAXSIZE (2048)
34 #define RPC_BUFFER_POOLSIZE (8) 34 #define RPC_BUFFER_POOLSIZE (8)
35 #define RPC_TASK_POOLSIZE (8) 35 #define RPC_TASK_POOLSIZE (8)
36 static struct kmem_cache *rpc_task_slabp __read_mostly; 36 static struct kmem_cache *rpc_task_slabp __read_mostly;
37 static struct kmem_cache *rpc_buffer_slabp __read_mostly; 37 static struct kmem_cache *rpc_buffer_slabp __read_mostly;
38 static mempool_t *rpc_task_mempool __read_mostly; 38 static mempool_t *rpc_task_mempool __read_mostly;
39 static mempool_t *rpc_buffer_mempool __read_mostly; 39 static mempool_t *rpc_buffer_mempool __read_mostly;
40 40
41 static void rpc_async_schedule(struct work_struct *); 41 static void rpc_async_schedule(struct work_struct *);
42 static void rpc_release_task(struct rpc_task *task); 42 static void rpc_release_task(struct rpc_task *task);
43 static void __rpc_queue_timer_fn(unsigned long ptr); 43 static void __rpc_queue_timer_fn(unsigned long ptr);
44 44
45 /* 45 /*
46 * RPC tasks sit here while waiting for conditions to improve. 46 * RPC tasks sit here while waiting for conditions to improve.
47 */ 47 */
48 static struct rpc_wait_queue delay_queue; 48 static struct rpc_wait_queue delay_queue;
49 49
50 /* 50 /*
51 * rpciod-related stuff 51 * rpciod-related stuff
52 */ 52 */
53 struct workqueue_struct *rpciod_workqueue; 53 struct workqueue_struct *rpciod_workqueue;
54 54
55 /* 55 /*
56 * Disable the timer for a given RPC task. Should be called with 56 * Disable the timer for a given RPC task. Should be called with
57 * queue->lock and bh_disabled in order to avoid races within 57 * queue->lock and bh_disabled in order to avoid races within
58 * rpc_run_timer(). 58 * rpc_run_timer().
59 */ 59 */
static void
__rpc_disable_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	/* tk_timeout == 0 means no timer was armed for this task. */
	if (task->tk_timeout == 0)
		return;
	dprintk("RPC: %5u disabling timer\n", task->tk_pid);
	task->tk_timeout = 0;
	list_del(&task->u.tk_wait.timer_list);
	/* Cancel the queue's timer once the last timed task is gone. */
	if (list_empty(&queue->timer_list.list))
		del_timer(&queue->timer_list.timer);
}
71 71
/* Record the queue's next expiry and (re)arm its timer accordingly. */
static void
rpc_set_queue_timer(struct rpc_wait_queue *queue, unsigned long expires)
{
	queue->timer_list.expires = expires;
	mod_timer(&queue->timer_list.timer, expires);
}
78 78
79 /* 79 /*
80 * Set up a timer for the current task. 80 * Set up a timer for the current task.
81 */ 81 */
static void
__rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	/* tk_timeout == 0: the task does not want a timeout. */
	if (!task->tk_timeout)
		return;

	dprintk("RPC: %5u setting alarm for %lu ms\n",
			task->tk_pid, task->tk_timeout * 1000 / HZ);

	task->u.tk_wait.expires = jiffies + task->tk_timeout;
	/* Re-arm the queue timer if this task expires earlier than the
	 * currently programmed expiry (or nothing is armed yet). */
	if (list_empty(&queue->timer_list.list) || time_before(task->u.tk_wait.expires, queue->timer_list.expires))
		rpc_set_queue_timer(queue, task->u.tk_wait.expires);
	list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list);
}
96 96
97 /* 97 /*
98 * Add new request to a priority queue. 98 * Add new request to a priority queue.
99 */ 99 */
static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	struct list_head *q;
	struct rpc_task *t;

	INIT_LIST_HEAD(&task->u.tk_wait.links);
	q = &queue->tasks[task->tk_priority];
	/* Clamp out-of-range priorities to the queue's maximum level. */
	if (unlikely(task->tk_priority > queue->maxpriority))
		q = &queue->tasks[queue->maxpriority];
	/* If a task with the same owner is already queued at this level,
	 * chain behind it (on its .links list) instead of at the tail. */
	list_for_each_entry(t, q, u.tk_wait.list) {
		if (t->tk_owner == task->tk_owner) {
			list_add_tail(&task->u.tk_wait.list, &t->u.tk_wait.links);
			return;
		}
	}
	list_add_tail(&task->u.tk_wait.list, q);
}
117 117
118 /* 118 /*
119 * Add new request to wait queue. 119 * Add new request to wait queue.
120 * 120 *
121 * Swapper tasks always get inserted at the head of the queue. 121 * Swapper tasks always get inserted at the head of the queue.
122 * This should avoid many nasty memory deadlocks and hopefully 122 * This should avoid many nasty memory deadlocks and hopefully
123 * improve overall performance. 123 * improve overall performance.
124 * Everyone else gets appended to the queue to ensure proper FIFO behavior. 124 * Everyone else gets appended to the queue to ensure proper FIFO behavior.
125 */ 125 */
static void __rpc_add_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	/* A task must never be queued twice. */
	BUG_ON (RPC_IS_QUEUED(task));

	if (RPC_IS_PRIORITY(queue))
		__rpc_add_wait_queue_priority(queue, task);
	else if (RPC_IS_SWAPPER(task))
		/* Swapper tasks go to the head — see comment above. */
		list_add(&task->u.tk_wait.list, &queue->tasks[0]);
	else
		list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);
	task->tk_waitqueue = queue;
	queue->qlen++;
	rpc_set_queued(task);

	dprintk("RPC: %5u added to queue %p \"%s\"\n",
			task->tk_pid, queue, rpc_qname(queue));
}
143 143
144 /* 144 /*
145 * Remove request from a priority queue. 145 * Remove request from a priority queue.
146 */ 146 */
/*
 * Remove request from a priority queue.
 *
 * If other tasks from the same owner are chained off this task via
 * u.tk_wait.links, promote the first of them to take this task's place
 * on the queue proper and hand over the remainder of the chain, so the
 * owner's batch is preserved.
 */
static void __rpc_remove_wait_queue_priority(struct rpc_task *task)
{
	struct rpc_task *t;

	if (!list_empty(&task->u.tk_wait.links)) {
		/* First task chained behind us becomes the new queue entry. */
		t = list_entry(task->u.tk_wait.links.next, struct rpc_task, u.tk_wait.list);
		list_move(&t->u.tk_wait.list, &task->u.tk_wait.list);
		/* It inherits whatever remains of our owner chain. */
		list_splice_init(&task->u.tk_wait.links, &t->u.tk_wait.links);
	}
}
157 157
158 /* 158 /*
159 * Remove request from queue. 159 * Remove request from queue.
160 * Note: must be called with spin lock held. 160 * Note: must be called with spin lock held.
161 */ 161 */
/*
 * Remove request from queue.
 * Note: must be called with spin lock held.
 *
 * Also cancels any pending per-task timer for this queue before
 * unlinking the task.
 */
static void __rpc_remove_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	__rpc_disable_timer(queue, task);
	if (RPC_IS_PRIORITY(queue))
		/* Keep the per-owner chain consistent before unlinking. */
		__rpc_remove_wait_queue_priority(task);
	list_del(&task->u.tk_wait.list);
	queue->qlen--;
	dprintk("RPC: %5u removed from queue %p \"%s\"\n",
			task->tk_pid, queue, rpc_qname(queue));
}
172 172
173 static inline void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority) 173 static inline void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority)
174 { 174 {
175 queue->priority = priority; 175 queue->priority = priority;
176 queue->count = 1 << (priority * 2); 176 queue->count = 1 << (priority * 2);
177 } 177 }
178 178
179 static inline void rpc_set_waitqueue_owner(struct rpc_wait_queue *queue, pid_t pid) 179 static inline void rpc_set_waitqueue_owner(struct rpc_wait_queue *queue, pid_t pid)
180 { 180 {
181 queue->owner = pid; 181 queue->owner = pid;
182 queue->nr = RPC_BATCH_COUNT; 182 queue->nr = RPC_BATCH_COUNT;
183 } 183 }
184 184
/*
 * Reset the queue's scheduling state: restart at the highest priority
 * level and clear the batch owner (pid 0 = no owner).
 */
static inline void rpc_reset_waitqueue_priority(struct rpc_wait_queue *queue)
{
	rpc_set_waitqueue_priority(queue, queue->maxpriority);
	rpc_set_waitqueue_owner(queue, 0);
}
190 190
/*
 * Initialise a wait queue with @nr_queues priority levels.
 *
 * Sets up the per-level task lists, the queue timer used to time out
 * sleeping tasks, and (under RPC_DEBUG) the queue name used in dprintk
 * output.
 */
static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname, unsigned char nr_queues)
{
	int i;

	spin_lock_init(&queue->lock);
	for (i = 0; i < ARRAY_SIZE(queue->tasks); i++)
		INIT_LIST_HEAD(&queue->tasks[i]);
	/* Levels are 0..nr_queues-1; tasks above maxpriority get clamped. */
	queue->maxpriority = nr_queues - 1;
	rpc_reset_waitqueue_priority(queue);
	queue->qlen = 0;
	setup_timer(&queue->timer_list.timer, __rpc_queue_timer_fn, (unsigned long)queue);
	INIT_LIST_HEAD(&queue->timer_list.list);
#ifdef RPC_DEBUG
	queue->name = qname;
#endif
}
207 207
/* Initialise a wait queue with the full set of RPC priority levels. */
void rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
	__rpc_init_priority_wait_queue(queue, qname, RPC_NR_PRIORITY);
}
EXPORT_SYMBOL_GPL(rpc_init_priority_wait_queue);
213 213
/* Initialise a plain (single priority level, i.e. FIFO) wait queue. */
void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
	__rpc_init_priority_wait_queue(queue, qname, 1);
}
EXPORT_SYMBOL_GPL(rpc_init_wait_queue);
219 219
/*
 * Tear down a wait queue: synchronously cancel its timeout timer.
 * Caller is responsible for ensuring the queue is already empty.
 */
void rpc_destroy_wait_queue(struct rpc_wait_queue *queue)
{
	del_timer_sync(&queue->timer_list.timer);
}
EXPORT_SYMBOL_GPL(rpc_destroy_wait_queue);
225 225
226 static int rpc_wait_bit_killable(void *word) 226 static int rpc_wait_bit_killable(void *word)
227 { 227 {
228 if (fatal_signal_pending(current)) 228 if (fatal_signal_pending(current))
229 return -ERESTARTSYS; 229 return -ERESTARTSYS;
230 schedule(); 230 schedule();
231 return 0; 231 return 0;
232 } 232 }
233 233
#ifdef RPC_DEBUG
/* Assign the task a unique id for debug output (wraps with atomic_t). */
static void rpc_task_set_debuginfo(struct rpc_task *task)
{
	static atomic_t rpc_pid;

	task->tk_pid = atomic_inc_return(&rpc_pid);
}
#else
/* No-op when RPC debugging is compiled out. */
static inline void rpc_task_set_debuginfo(struct rpc_task *task)
{
}
#endif
246 246
/*
 * Mark the task as active: assign its debug id and set RPC_TASK_ACTIVE
 * in tk_runstate.  The bit is cleared again by rpc_mark_complete_task(),
 * which is what __rpc_wait_for_completion_task() waits on.
 */
static void rpc_set_active(struct rpc_task *task)
{
	rpc_task_set_debuginfo(task);
	set_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
}
261 252
262 /* 253 /*
263 * Mark an RPC call as having completed by clearing the 'active' bit 254 * Mark an RPC call as having completed by clearing the 'active' bit
264 */ 255 */
/*
 * Mark an RPC call as having completed by clearing the 'active' bit,
 * then wake anyone sleeping on that bit.  The barriers around the
 * clear_bit() pair with the wait_on_bit() in
 * __rpc_wait_for_completion_task().
 */
static void rpc_mark_complete_task(struct rpc_task *task)
{
	smp_mb__before_clear_bit();
	clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
	smp_mb__after_clear_bit();
	wake_up_bit(&task->tk_runstate, RPC_TASK_ACTIVE);
}
272 263
273 /* 264 /*
274 * Allow callers to wait for completion of an RPC call 265 * Allow callers to wait for completion of an RPC call
275 */ 266 */
/*
 * Allow callers to wait for completion of an RPC call.
 *
 * @action: optional bit-wait action; defaults to rpc_wait_bit_killable,
 *	    so the wait can be interrupted by a fatal signal.
 * Returns 0 on completion or the negative error from @action.
 */
int __rpc_wait_for_completion_task(struct rpc_task *task, int (*action)(void *))
{
	if (action == NULL)
		action = rpc_wait_bit_killable;
	return wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
			action, TASK_KILLABLE);
}
EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task);
284 275
285 /* 276 /*
286 * Make an RPC task runnable. 277 * Make an RPC task runnable.
287 * 278 *
288 * Note: If the task is ASYNC, this must be called with 279 * Note: If the task is ASYNC, this must be called with
289 * the spinlock held to protect the wait queue operation. 280 * the spinlock held to protect the wait queue operation.
290 */ 281 */
/*
 * Make an RPC task runnable.
 *
 * Note: If the task is ASYNC, this must be called with
 * the spinlock held to protect the wait queue operation.
 *
 * Async tasks are handed to rpciod's workqueue; sync tasks are woken
 * via the RPC_TASK_QUEUED bit that __rpc_execute() sleeps on.
 */
static void rpc_make_runnable(struct rpc_task *task)
{
	rpc_clear_queued(task);
	/* Already running (or being scheduled)?  Nothing to do. */
	if (rpc_test_and_set_running(task))
		return;
	if (RPC_IS_ASYNC(task)) {
		int status;

		INIT_WORK(&task->u.tk_work, rpc_async_schedule);
		status = queue_work(rpciod_workqueue, &task->u.tk_work);
		if (status < 0) {
			printk(KERN_WARNING "RPC: failed to add task to queue: error: %d!\n", status);
			task->tk_status = status;
			return;
		}
	} else
		wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED);
}
309 300
310 /* 301 /*
311 * Prepare for sleeping on a wait queue. 302 * Prepare for sleeping on a wait queue.
312 * By always appending tasks to the list we ensure FIFO behavior. 303 * By always appending tasks to the list we ensure FIFO behavior.
313 * NB: An RPC task will only receive interrupt-driven events as long 304 * NB: An RPC task will only receive interrupt-driven events as long
314 * as it's on a wait queue. 305 * as it's on a wait queue.
315 */ 306 */
/*
 * Prepare for sleeping on a wait queue.
 * By always appending tasks to the list we ensure FIFO behavior.
 * NB: An RPC task will only receive interrupt-driven events as long
 * as it's on a wait queue.
 *
 * Caller must hold q->lock.  @action, if any, is run as the task's
 * callback when it is next woken; a timeout timer is armed from
 * task->tk_timeout.
 */
static void __rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
			rpc_action action)
{
	dprintk("RPC: %5u sleep_on(queue \"%s\" time %lu)\n",
			task->tk_pid, rpc_qname(q), jiffies);

	__rpc_add_wait_queue(q, task);

	/* Only one pending callback per task at a time. */
	BUG_ON(task->tk_callback != NULL);
	task->tk_callback = action;
	__rpc_add_timer(q, task);
}
333 319
/*
 * Put @task to sleep on wait queue @q, taking the queue lock around
 * the insertion.  @action becomes the task's wake-up callback.
 */
void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
				rpc_action action)
{
	/* We shouldn't ever put an inactive task to sleep */
	BUG_ON(!RPC_IS_ACTIVATED(task));

	/*
	 * Protect the queue operations.
	 */
	spin_lock_bh(&q->lock);
	__rpc_sleep_on(q, task, action);
	spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(rpc_sleep_on);
348 334
349 /** 335 /**
350 * __rpc_do_wake_up_task - wake up a single rpc_task 336 * __rpc_do_wake_up_task - wake up a single rpc_task
351 * @queue: wait queue 337 * @queue: wait queue
352 * @task: task to be woken up 338 * @task: task to be woken up
353 * 339 *
354 * Caller must hold queue->lock, and have cleared the task queued flag. 340 * Caller must hold queue->lock, and have cleared the task queued flag.
355 */ 341 */
/**
 * __rpc_do_wake_up_task - wake up a single rpc_task
 * @queue: wait queue
 * @task: task to be woken up
 *
 * Caller must hold queue->lock, and have cleared the task queued flag.
 * Removes the task from @queue and makes it runnable.
 */
static void __rpc_do_wake_up_task(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	dprintk("RPC: %5u __rpc_wake_up_task (now %lu)\n",
			task->tk_pid, jiffies);

	/* Has the task been executed yet? If not, we cannot wake it up! */
	if (!RPC_IS_ACTIVATED(task)) {
		printk(KERN_ERR "RPC: Inactive task (%p) being woken up!\n", task);
		return;
	}

	__rpc_remove_wait_queue(queue, task);

	rpc_make_runnable(task);

	dprintk("RPC:       __rpc_wake_up_task done\n");
}
373 359
374 /* 360 /*
375 * Wake up a queued task while the queue lock is being held 361 * Wake up a queued task while the queue lock is being held
376 */ 362 */
/*
 * Wake up a queued task while the queue lock is being held.
 * The tk_waitqueue check guards against the task having been woken and
 * requeued on a different queue since the caller sampled it.
 */
static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	if (RPC_IS_QUEUED(task) && task->tk_waitqueue == queue)
		__rpc_do_wake_up_task(queue, task);
}
382 368
383 /* 369 /*
384 * Tests whether rpc queue is empty 370 * Tests whether rpc queue is empty
385 */ 371 */
386 int rpc_queue_empty(struct rpc_wait_queue *queue) 372 int rpc_queue_empty(struct rpc_wait_queue *queue)
387 { 373 {
388 int res; 374 int res;
389 375
390 spin_lock_bh(&queue->lock); 376 spin_lock_bh(&queue->lock);
391 res = queue->qlen; 377 res = queue->qlen;
392 spin_unlock_bh(&queue->lock); 378 spin_unlock_bh(&queue->lock);
393 return (res == 0); 379 return (res == 0);
394 } 380 }
395 EXPORT_SYMBOL_GPL(rpc_queue_empty); 381 EXPORT_SYMBOL_GPL(rpc_queue_empty);
396 382
397 /* 383 /*
398 * Wake up a task on a specific queue 384 * Wake up a task on a specific queue
399 */ 385 */
/*
 * Wake up a task on a specific queue, taking the queue lock.
 */
void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	spin_lock_bh(&queue->lock);
	rpc_wake_up_task_queue_locked(queue, task);
	spin_unlock_bh(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_queued_task);
407 393
408 /* 394 /*
409 * Wake up the next task on a priority queue. 395 * Wake up the next task on a priority queue.
410 */ 396 */
/*
 * Wake up the next task on a priority queue.
 *
 * Scheduling policy, visible in the code below:
 *  - keep servicing tasks from the current owner until its batch
 *    allowance (queue->nr) runs out;
 *  - then keep servicing the current priority level until its quota
 *    (queue->count) runs out;
 *  - then fall to the next lower level, wrapping from level 0 back to
 *    maxpriority; if a full sweep finds nothing, reset the queue state.
 *
 * Caller must hold queue->lock.  Returns the woken task or NULL.
 */
static struct rpc_task * __rpc_wake_up_next_priority(struct rpc_wait_queue *queue)
{
	struct list_head *q;
	struct rpc_task *task;

	/*
	 * Service a batch of tasks from a single owner.
	 */
	q = &queue->tasks[queue->priority];
	if (!list_empty(q)) {
		task = list_entry(q->next, struct rpc_task, u.tk_wait.list);
		if (queue->owner == task->tk_owner) {
			if (--queue->nr)
				goto out;
			/* Batch exhausted: rotate this owner to the back. */
			list_move_tail(&task->u.tk_wait.list, q);
		}
		/*
		 * Check if we need to switch queues.
		 */
		if (--queue->count)
			goto new_owner;
	}

	/*
	 * Service the next queue.
	 */
	do {
		if (q == &queue->tasks[0])
			q = &queue->tasks[queue->maxpriority];
		else
			q = q - 1;
		if (!list_empty(q)) {
			task = list_entry(q->next, struct rpc_task, u.tk_wait.list);
			goto new_queue;
		}
	} while (q != &queue->tasks[queue->priority]);

	/* Nothing queued anywhere: reset priority/owner state. */
	rpc_reset_waitqueue_priority(queue);
	return NULL;

new_queue:
	rpc_set_waitqueue_priority(queue, (unsigned int)(q - &queue->tasks[0]));
new_owner:
	rpc_set_waitqueue_owner(queue, task->tk_owner);
out:
	rpc_wake_up_task_queue_locked(queue, task);
	return task;
}
459 445
460 /* 446 /*
461 * Wake up the next task on the wait queue. 447 * Wake up the next task on the wait queue.
462 */ 448 */
/*
 * Wake up the next task on the wait queue.
 * For priority queues this applies the batching policy in
 * __rpc_wake_up_next_priority(); plain queues are strict FIFO.
 * Returns the woken task, or NULL if the queue was empty.
 */
struct rpc_task * rpc_wake_up_next(struct rpc_wait_queue *queue)
{
	struct rpc_task	*task = NULL;

	dprintk("RPC:       wake_up_next(%p \"%s\")\n",
			queue, rpc_qname(queue));
	spin_lock_bh(&queue->lock);
	if (RPC_IS_PRIORITY(queue))
		task = __rpc_wake_up_next_priority(queue);
	else {
		task_for_first(task, &queue->tasks[0])
			rpc_wake_up_task_queue_locked(queue, task);
	}
	spin_unlock_bh(&queue->lock);

	return task;
}
EXPORT_SYMBOL_GPL(rpc_wake_up_next);
481 467
482 /** 468 /**
483 * rpc_wake_up - wake up all rpc_tasks 469 * rpc_wake_up - wake up all rpc_tasks
484 * @queue: rpc_wait_queue on which the tasks are sleeping 470 * @queue: rpc_wait_queue on which the tasks are sleeping
485 * 471 *
486 * Grabs queue->lock 472 * Grabs queue->lock
487 */ 473 */
488 void rpc_wake_up(struct rpc_wait_queue *queue) 474 void rpc_wake_up(struct rpc_wait_queue *queue)
489 { 475 {
490 struct rpc_task *task, *next; 476 struct rpc_task *task, *next;
491 struct list_head *head; 477 struct list_head *head;
492 478
493 spin_lock_bh(&queue->lock); 479 spin_lock_bh(&queue->lock);
494 head = &queue->tasks[queue->maxpriority]; 480 head = &queue->tasks[queue->maxpriority];
495 for (;;) { 481 for (;;) {
496 list_for_each_entry_safe(task, next, head, u.tk_wait.list) 482 list_for_each_entry_safe(task, next, head, u.tk_wait.list)
497 rpc_wake_up_task_queue_locked(queue, task); 483 rpc_wake_up_task_queue_locked(queue, task);
498 if (head == &queue->tasks[0]) 484 if (head == &queue->tasks[0])
499 break; 485 break;
500 head--; 486 head--;
501 } 487 }
502 spin_unlock_bh(&queue->lock); 488 spin_unlock_bh(&queue->lock);
503 } 489 }
504 EXPORT_SYMBOL_GPL(rpc_wake_up); 490 EXPORT_SYMBOL_GPL(rpc_wake_up);
505 491
506 /** 492 /**
507 * rpc_wake_up_status - wake up all rpc_tasks and set their status value. 493 * rpc_wake_up_status - wake up all rpc_tasks and set their status value.
508 * @queue: rpc_wait_queue on which the tasks are sleeping 494 * @queue: rpc_wait_queue on which the tasks are sleeping
509 * @status: status value to set 495 * @status: status value to set
510 * 496 *
511 * Grabs queue->lock 497 * Grabs queue->lock
512 */ 498 */
513 void rpc_wake_up_status(struct rpc_wait_queue *queue, int status) 499 void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
514 { 500 {
515 struct rpc_task *task, *next; 501 struct rpc_task *task, *next;
516 struct list_head *head; 502 struct list_head *head;
517 503
518 spin_lock_bh(&queue->lock); 504 spin_lock_bh(&queue->lock);
519 head = &queue->tasks[queue->maxpriority]; 505 head = &queue->tasks[queue->maxpriority];
520 for (;;) { 506 for (;;) {
521 list_for_each_entry_safe(task, next, head, u.tk_wait.list) { 507 list_for_each_entry_safe(task, next, head, u.tk_wait.list) {
522 task->tk_status = status; 508 task->tk_status = status;
523 rpc_wake_up_task_queue_locked(queue, task); 509 rpc_wake_up_task_queue_locked(queue, task);
524 } 510 }
525 if (head == &queue->tasks[0]) 511 if (head == &queue->tasks[0])
526 break; 512 break;
527 head--; 513 head--;
528 } 514 }
529 spin_unlock_bh(&queue->lock); 515 spin_unlock_bh(&queue->lock);
530 } 516 }
531 EXPORT_SYMBOL_GPL(rpc_wake_up_status); 517 EXPORT_SYMBOL_GPL(rpc_wake_up_status);
532 518
/*
 * Per-queue timer callback: time out every task whose deadline has
 * passed (waking it with -ETIMEDOUT), then re-arm the timer for the
 * earliest remaining deadline, if any.
 */
static void __rpc_queue_timer_fn(unsigned long ptr)
{
	struct rpc_wait_queue *queue = (struct rpc_wait_queue *)ptr;
	struct rpc_task *task, *n;
	unsigned long expires, now, timeo;

	spin_lock(&queue->lock);
	expires = now = jiffies;
	list_for_each_entry_safe(task, n, &queue->timer_list.list, u.tk_wait.timer_list) {
		timeo = task->u.tk_wait.expires;
		if (time_after_eq(now, timeo)) {
			dprintk("RPC: %5u timeout\n", task->tk_pid);
			task->tk_status = -ETIMEDOUT;
			rpc_wake_up_task_queue_locked(queue, task);
			continue;
		}
		/* Track the nearest future deadline for re-arming. */
		if (expires == now || time_after(expires, timeo))
			expires = timeo;
	}
	if (!list_empty(&queue->timer_list.list))
		rpc_set_queue_timer(queue, expires);
	spin_unlock(&queue->lock);
}
556 542
/* Wake-up callback for rpc_delay(): clear the -ETIMEDOUT the queue
 * timer set, since expiry is the expected outcome of a delay. */
static void __rpc_atrun(struct rpc_task *task)
{
	task->tk_status = 0;
}
561 547
562 /* 548 /*
563 * Run a task at a later time 549 * Run a task at a later time
564 */ 550 */
/*
 * Run a task at a later time: sleep on the global delay queue with a
 * timeout of @delay jiffies; __rpc_atrun() resets tk_status on wake-up.
 */
void rpc_delay(struct rpc_task *task, unsigned long delay)
{
	task->tk_timeout = delay;
	rpc_sleep_on(&delay_queue, task, __rpc_atrun);
}
EXPORT_SYMBOL_GPL(rpc_delay);
571 557
572 /* 558 /*
573 * Helper to call task->tk_ops->rpc_call_prepare 559 * Helper to call task->tk_ops->rpc_call_prepare
574 */ 560 */
/*
 * Helper to call task->tk_ops->rpc_call_prepare.
 * NOTE(review): unlike rpc_call_done, rpc_call_prepare is invoked
 * unconditionally here — callers presumably only install this action
 * when the op is non-NULL; verify at the call sites.
 */
void rpc_prepare_task(struct rpc_task *task)
{
	task->tk_ops->rpc_call_prepare(task, task->tk_calldata);
}
579 565
580 /* 566 /*
581 * Helper that calls task->tk_ops->rpc_call_done if it exists 567 * Helper that calls task->tk_ops->rpc_call_done if it exists
582 */ 568 */
/*
 * Helper that calls task->tk_ops->rpc_call_done if it exists.
 * If the done callback restarted the call (set tk_action again), the
 * transport slot and buffers are released so the retry starts clean.
 */
void rpc_exit_task(struct rpc_task *task)
{
	task->tk_action = NULL;
	if (task->tk_ops->rpc_call_done != NULL) {
		task->tk_ops->rpc_call_done(task, task->tk_calldata);
		if (task->tk_action != NULL) {
			/* An assassinated task must not be restarted. */
			WARN_ON(RPC_ASSASSINATED(task));
			/* Always release the RPC slot and buffer memory */
			xprt_release(task);
		}
	}
}
595 581
/*
 * Force @task to terminate with @status: route it to rpc_exit_task as
 * its next action, waking it first if it is currently asleep.
 */
void rpc_exit(struct rpc_task *task, int status)
{
	task->tk_status = status;
	task->tk_action = rpc_exit_task;
	if (RPC_IS_QUEUED(task))
		rpc_wake_up_queued_task(task->tk_waitqueue, task);
}
EXPORT_SYMBOL_GPL(rpc_exit);
604 590
/* Invoke the caller's rpc_release hook on @calldata, if one was given. */
void rpc_release_calldata(const struct rpc_call_ops *ops, void *calldata)
{
	if (ops->rpc_release != NULL)
		ops->rpc_release(calldata);
}
610 596
611 /* 597 /*
612 * This is the RPC `scheduler' (or rather, the finite state machine). 598 * This is the RPC `scheduler' (or rather, the finite state machine).
613 */ 599 */
/*
 * This is the RPC `scheduler' (or rather, the finite state machine).
 *
 * Repeatedly runs the task's pending callback and next tk_action until
 * the task either completes (tk_action == NULL) or goes to sleep on a
 * wait queue.  Async tasks return to rpciod when they sleep; sync
 * tasks block here on the RPC_TASK_QUEUED bit.  On exit, the task's
 * resources are released — the task pointer must not be used after
 * this returns.
 */
static void __rpc_execute(struct rpc_task *task)
{
	struct rpc_wait_queue *queue;
	int task_is_async = RPC_IS_ASYNC(task);
	int status = 0;

	dprintk("RPC: %5u __rpc_execute flags=0x%x\n",
			task->tk_pid, task->tk_flags);

	BUG_ON(RPC_IS_QUEUED(task));

	for (;;) {

		/*
		 * Execute any pending callback.
		 */
		if (task->tk_callback) {
			void (*save_callback)(struct rpc_task *);

			/*
			 * We set tk_callback to NULL before calling it,
			 * in case it sets the tk_callback field itself:
			 */
			save_callback = task->tk_callback;
			task->tk_callback = NULL;
			save_callback(task);
		}

		/*
		 * Perform the next FSM step.
		 * tk_action may be NULL when the task has been killed
		 * by someone else.
		 */
		if (!RPC_IS_QUEUED(task)) {
			if (task->tk_action == NULL)
				break;
			task->tk_action(task);
		}

		/*
		 * Lockless check for whether task is sleeping or not.
		 */
		if (!RPC_IS_QUEUED(task))
			continue;
		/*
		 * The queue->lock protects against races with
		 * rpc_make_runnable().
		 *
		 * Note that once we clear RPC_TASK_RUNNING on an asynchronous
		 * rpc_task, rpc_make_runnable() can assign it to a
		 * different workqueue. We therefore cannot assume that the
		 * rpc_task pointer may still be dereferenced.
		 */
		queue = task->tk_waitqueue;
		spin_lock_bh(&queue->lock);
		if (!RPC_IS_QUEUED(task)) {
			/* Woken between the check and the lock: keep going. */
			spin_unlock_bh(&queue->lock);
			continue;
		}
		rpc_clear_running(task);
		spin_unlock_bh(&queue->lock);
		if (task_is_async)
			return;

		/* sync task: sleep here */
		dprintk("RPC: %5u sync task going to sleep\n", task->tk_pid);
		status = out_of_line_wait_on_bit(&task->tk_runstate,
				RPC_TASK_QUEUED, rpc_wait_bit_killable,
				TASK_KILLABLE);
		if (status == -ERESTARTSYS) {
			/*
			 * When a sync task receives a signal, it exits with
			 * -ERESTARTSYS. In order to catch any callbacks that
			 * clean up after sleeping on some queue, we don't
			 * break the loop here, but go around once more.
			 */
			dprintk("RPC: %5u got signal\n", task->tk_pid);
			task->tk_flags |= RPC_TASK_KILLED;
			rpc_exit(task, -ERESTARTSYS);
		}
		rpc_set_running(task);
		dprintk("RPC: %5u sync task resuming\n", task->tk_pid);
	}

	dprintk("RPC: %5u return %d, status %d\n", task->tk_pid, status,
			task->tk_status);
	/* Release all resources associated with the task */
	rpc_release_task(task);
}
703 689
704 /* 690 /*
705 * User-visible entry point to the scheduler. 691 * User-visible entry point to the scheduler.
706 * 692 *
707 * This may be called recursively if e.g. an async NFS task updates 693 * This may be called recursively if e.g. an async NFS task updates
708 * the attributes and finds that dirty pages must be flushed. 694 * the attributes and finds that dirty pages must be flushed.
709 * NOTE: Upon exit of this function the task is guaranteed to be 695 * NOTE: Upon exit of this function the task is guaranteed to be
710 * released. In particular note that tk_release() will have 696 * released. In particular note that tk_release() will have
711 * been called, so your task memory may have been freed. 697 * been called, so your task memory may have been freed.
712 */ 698 */
713 void rpc_execute(struct rpc_task *task) 699 void rpc_execute(struct rpc_task *task)
714 { 700 {
715 rpc_set_active(task); 701 rpc_set_active(task);
716 rpc_set_running(task); 702 rpc_set_running(task);
717 __rpc_execute(task); 703 __rpc_execute(task);
718 } 704 }
719 705
720 static void rpc_async_schedule(struct work_struct *work) 706 static void rpc_async_schedule(struct work_struct *work)
721 { 707 {
722 __rpc_execute(container_of(work, struct rpc_task, u.tk_work)); 708 __rpc_execute(container_of(work, struct rpc_task, u.tk_work));
723 } 709 }
724 710
725 /** 711 /**
726 * rpc_malloc - allocate an RPC buffer 712 * rpc_malloc - allocate an RPC buffer
727 * @task: RPC task that will use this buffer 713 * @task: RPC task that will use this buffer
728 * @size: requested byte size 714 * @size: requested byte size
729 * 715 *
730 * To prevent rpciod from hanging, this allocator never sleeps, 716 * To prevent rpciod from hanging, this allocator never sleeps,
731 * returning NULL if the request cannot be serviced immediately. 717 * returning NULL if the request cannot be serviced immediately.
732 * The caller can arrange to sleep in a way that is safe for rpciod. 718 * The caller can arrange to sleep in a way that is safe for rpciod.
733 * 719 *
734 * Most requests are 'small' (under 2KiB) and can be serviced from a 720 * Most requests are 'small' (under 2KiB) and can be serviced from a
735 * mempool, ensuring that NFS reads and writes can always proceed, 721 * mempool, ensuring that NFS reads and writes can always proceed,
736 * and that there is good locality of reference for these buffers. 722 * and that there is good locality of reference for these buffers.
737 * 723 *
738 * In order to avoid memory starvation triggering more writebacks of 724 * In order to avoid memory starvation triggering more writebacks of
739 * NFS requests, we avoid using GFP_KERNEL. 725 * NFS requests, we avoid using GFP_KERNEL.
740 */ 726 */
741 void *rpc_malloc(struct rpc_task *task, size_t size) 727 void *rpc_malloc(struct rpc_task *task, size_t size)
742 { 728 {
743 struct rpc_buffer *buf; 729 struct rpc_buffer *buf;
744 gfp_t gfp = RPC_IS_SWAPPER(task) ? GFP_ATOMIC : GFP_NOWAIT; 730 gfp_t gfp = RPC_IS_SWAPPER(task) ? GFP_ATOMIC : GFP_NOWAIT;
745 731
746 size += sizeof(struct rpc_buffer); 732 size += sizeof(struct rpc_buffer);
747 if (size <= RPC_BUFFER_MAXSIZE) 733 if (size <= RPC_BUFFER_MAXSIZE)
748 buf = mempool_alloc(rpc_buffer_mempool, gfp); 734 buf = mempool_alloc(rpc_buffer_mempool, gfp);
749 else 735 else
750 buf = kmalloc(size, gfp); 736 buf = kmalloc(size, gfp);
751 737
752 if (!buf) 738 if (!buf)
753 return NULL; 739 return NULL;
754 740
755 buf->len = size; 741 buf->len = size;
756 dprintk("RPC: %5u allocated buffer of size %zu at %p\n", 742 dprintk("RPC: %5u allocated buffer of size %zu at %p\n",
757 task->tk_pid, size, buf); 743 task->tk_pid, size, buf);
758 return &buf->data; 744 return &buf->data;
759 } 745 }
760 EXPORT_SYMBOL_GPL(rpc_malloc); 746 EXPORT_SYMBOL_GPL(rpc_malloc);
761 747
762 /** 748 /**
763 * rpc_free - free buffer allocated via rpc_malloc 749 * rpc_free - free buffer allocated via rpc_malloc
764 * @buffer: buffer to free 750 * @buffer: buffer to free
765 * 751 *
766 */ 752 */
767 void rpc_free(void *buffer) 753 void rpc_free(void *buffer)
768 { 754 {
769 size_t size; 755 size_t size;
770 struct rpc_buffer *buf; 756 struct rpc_buffer *buf;
771 757
772 if (!buffer) 758 if (!buffer)
773 return; 759 return;
774 760
775 buf = container_of(buffer, struct rpc_buffer, data); 761 buf = container_of(buffer, struct rpc_buffer, data);
776 size = buf->len; 762 size = buf->len;
777 763
778 dprintk("RPC: freeing buffer of size %zu at %p\n", 764 dprintk("RPC: freeing buffer of size %zu at %p\n",
779 size, buf); 765 size, buf);
780 766
781 if (size <= RPC_BUFFER_MAXSIZE) 767 if (size <= RPC_BUFFER_MAXSIZE)
782 mempool_free(buf, rpc_buffer_mempool); 768 mempool_free(buf, rpc_buffer_mempool);
783 else 769 else
784 kfree(buf); 770 kfree(buf);
785 } 771 }
786 EXPORT_SYMBOL_GPL(rpc_free); 772 EXPORT_SYMBOL_GPL(rpc_free);
787 773
788 /* 774 /*
789 * Creation and deletion of RPC task structures 775 * Creation and deletion of RPC task structures
790 */ 776 */
791 static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *task_setup_data) 777 static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *task_setup_data)
792 { 778 {
793 memset(task, 0, sizeof(*task)); 779 memset(task, 0, sizeof(*task));
794 atomic_set(&task->tk_count, 1); 780 atomic_set(&task->tk_count, 1);
795 task->tk_flags = task_setup_data->flags; 781 task->tk_flags = task_setup_data->flags;
796 task->tk_ops = task_setup_data->callback_ops; 782 task->tk_ops = task_setup_data->callback_ops;
797 task->tk_calldata = task_setup_data->callback_data; 783 task->tk_calldata = task_setup_data->callback_data;
798 INIT_LIST_HEAD(&task->tk_task); 784 INIT_LIST_HEAD(&task->tk_task);
799 785
800 /* Initialize retry counters */ 786 /* Initialize retry counters */
801 task->tk_garb_retry = 2; 787 task->tk_garb_retry = 2;
802 task->tk_cred_retry = 2; 788 task->tk_cred_retry = 2;
803 789
804 task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW; 790 task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW;
805 task->tk_owner = current->tgid; 791 task->tk_owner = current->tgid;
806 792
807 /* Initialize workqueue for async tasks */ 793 /* Initialize workqueue for async tasks */
808 task->tk_workqueue = task_setup_data->workqueue; 794 task->tk_workqueue = task_setup_data->workqueue;
809 795
810 task->tk_client = task_setup_data->rpc_client;
811 if (task->tk_client != NULL) {
812 kref_get(&task->tk_client->cl_kref);
813 if (task->tk_client->cl_softrtry)
814 task->tk_flags |= RPC_TASK_SOFT;
815 }
816
817 if (task->tk_ops->rpc_call_prepare != NULL) 796 if (task->tk_ops->rpc_call_prepare != NULL)
818 task->tk_action = rpc_prepare_task; 797 task->tk_action = rpc_prepare_task;
819 798
820 if (task_setup_data->rpc_message != NULL) {
821 task->tk_msg.rpc_proc = task_setup_data->rpc_message->rpc_proc;
822 task->tk_msg.rpc_argp = task_setup_data->rpc_message->rpc_argp;
823 task->tk_msg.rpc_resp = task_setup_data->rpc_message->rpc_resp;
824 /* Bind the user cred */
825 rpcauth_bindcred(task, task_setup_data->rpc_message->rpc_cred, task_setup_data->flags);
826 if (task->tk_action == NULL)
827 rpc_call_start(task);
828 }
829
830 /* starting timestamp */ 799 /* starting timestamp */
831 task->tk_start = ktime_get(); 800 task->tk_start = ktime_get();
832 801
833 dprintk("RPC: new task initialized, procpid %u\n", 802 dprintk("RPC: new task initialized, procpid %u\n",
834 task_pid_nr(current)); 803 task_pid_nr(current));
835 } 804 }
836 805
837 static struct rpc_task * 806 static struct rpc_task *
838 rpc_alloc_task(void) 807 rpc_alloc_task(void)
839 { 808 {
840 return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOFS); 809 return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOFS);
841 } 810 }
842 811
843 /* 812 /*
844 * Create a new task for the specified client. 813 * Create a new task for the specified client.
845 */ 814 */
846 struct rpc_task *rpc_new_task(const struct rpc_task_setup *setup_data) 815 struct rpc_task *rpc_new_task(const struct rpc_task_setup *setup_data)
847 { 816 {
848 struct rpc_task *task = setup_data->task; 817 struct rpc_task *task = setup_data->task;
849 unsigned short flags = 0; 818 unsigned short flags = 0;
850 819
851 if (task == NULL) { 820 if (task == NULL) {
852 task = rpc_alloc_task(); 821 task = rpc_alloc_task();
853 if (task == NULL) { 822 if (task == NULL) {
854 rpc_release_calldata(setup_data->callback_ops, 823 rpc_release_calldata(setup_data->callback_ops,
855 setup_data->callback_data); 824 setup_data->callback_data);
856 return ERR_PTR(-ENOMEM); 825 return ERR_PTR(-ENOMEM);
857 } 826 }
858 flags = RPC_TASK_DYNAMIC; 827 flags = RPC_TASK_DYNAMIC;
859 } 828 }
860 829
861 rpc_init_task(task, setup_data); 830 rpc_init_task(task, setup_data);
862 if (task->tk_status < 0) { 831 if (task->tk_status < 0) {
863 int err = task->tk_status; 832 int err = task->tk_status;
864 rpc_put_task(task); 833 rpc_put_task(task);
865 return ERR_PTR(err); 834 return ERR_PTR(err);
866 } 835 }
867 836
868 task->tk_flags |= flags; 837 task->tk_flags |= flags;
869 dprintk("RPC: allocated task %p\n", task); 838 dprintk("RPC: allocated task %p\n", task);
870 return task; 839 return task;
871 } 840 }
872 841
873 static void rpc_free_task(struct rpc_task *task) 842 static void rpc_free_task(struct rpc_task *task)
874 { 843 {
875 const struct rpc_call_ops *tk_ops = task->tk_ops; 844 const struct rpc_call_ops *tk_ops = task->tk_ops;
876 void *calldata = task->tk_calldata; 845 void *calldata = task->tk_calldata;
877 846
878 if (task->tk_flags & RPC_TASK_DYNAMIC) { 847 if (task->tk_flags & RPC_TASK_DYNAMIC) {
879 dprintk("RPC: %5u freeing task\n", task->tk_pid); 848 dprintk("RPC: %5u freeing task\n", task->tk_pid);
880 mempool_free(task, rpc_task_mempool); 849 mempool_free(task, rpc_task_mempool);
881 } 850 }
882 rpc_release_calldata(tk_ops, calldata); 851 rpc_release_calldata(tk_ops, calldata);
883 } 852 }
884 853
885 static void rpc_async_release(struct work_struct *work) 854 static void rpc_async_release(struct work_struct *work)
886 { 855 {
887 rpc_free_task(container_of(work, struct rpc_task, u.tk_work)); 856 rpc_free_task(container_of(work, struct rpc_task, u.tk_work));
888 } 857 }
889 858
890 void rpc_put_task(struct rpc_task *task) 859 void rpc_put_task(struct rpc_task *task)
891 { 860 {
892 if (!atomic_dec_and_test(&task->tk_count)) 861 if (!atomic_dec_and_test(&task->tk_count))
893 return; 862 return;
894 /* Release resources */ 863 /* Release resources */
895 if (task->tk_rqstp) 864 if (task->tk_rqstp)
896 xprt_release(task); 865 xprt_release(task);
897 if (task->tk_msg.rpc_cred) 866 if (task->tk_msg.rpc_cred)
898 rpcauth_unbindcred(task); 867 rpcauth_unbindcred(task);
899 if (task->tk_client) { 868 rpc_task_release_client(task);
900 rpc_release_client(task->tk_client);
901 task->tk_client = NULL;
902 }
903 if (task->tk_workqueue != NULL) { 869 if (task->tk_workqueue != NULL) {
904 INIT_WORK(&task->u.tk_work, rpc_async_release); 870 INIT_WORK(&task->u.tk_work, rpc_async_release);
905 queue_work(task->tk_workqueue, &task->u.tk_work); 871 queue_work(task->tk_workqueue, &task->u.tk_work);
906 } else 872 } else
907 rpc_free_task(task); 873 rpc_free_task(task);
908 } 874 }
909 EXPORT_SYMBOL_GPL(rpc_put_task); 875 EXPORT_SYMBOL_GPL(rpc_put_task);
910 876
911 static void rpc_release_task(struct rpc_task *task) 877 static void rpc_release_task(struct rpc_task *task)
912 { 878 {
913 dprintk("RPC: %5u release task\n", task->tk_pid); 879 dprintk("RPC: %5u release task\n", task->tk_pid);
914 880
915 if (!list_empty(&task->tk_task)) {
916 struct rpc_clnt *clnt = task->tk_client;
917 /* Remove from client task list */
918 spin_lock(&clnt->cl_lock);
919 list_del(&task->tk_task);
920 spin_unlock(&clnt->cl_lock);
921 }
922 BUG_ON (RPC_IS_QUEUED(task)); 881 BUG_ON (RPC_IS_QUEUED(task));
923 882
924 /* Wake up anyone who is waiting for task completion */ 883 /* Wake up anyone who is waiting for task completion */
925 rpc_mark_complete_task(task); 884 rpc_mark_complete_task(task);
926 885
927 rpc_put_task(task); 886 rpc_put_task(task);
928 } 887 }
929
930 /*
931 * Kill all tasks for the given client.
932 * XXX: kill their descendants as well?
933 */
934 void rpc_killall_tasks(struct rpc_clnt *clnt)
935 {
936 struct rpc_task *rovr;
937
938
939 if (list_empty(&clnt->cl_tasks))
940 return;
941 dprintk("RPC: killing all tasks for client %p\n", clnt);
942 /*
943 * Spin lock all_tasks to prevent changes...
944 */
945 spin_lock(&clnt->cl_lock);
946 list_for_each_entry(rovr, &clnt->cl_tasks, tk_task) {
947 if (! RPC_IS_ACTIVATED(rovr))
948 continue;
949 if (!(rovr->tk_flags & RPC_TASK_KILLED)) {
950 rovr->tk_flags |= RPC_TASK_KILLED;
951 rpc_exit(rovr, -EIO);
952 }
953 }
954 spin_unlock(&clnt->cl_lock);
955 }
956 EXPORT_SYMBOL_GPL(rpc_killall_tasks);
957 888
958 int rpciod_up(void) 889 int rpciod_up(void)
959 { 890 {
960 return try_module_get(THIS_MODULE) ? 0 : -EINVAL; 891 return try_module_get(THIS_MODULE) ? 0 : -EINVAL;
961 } 892 }
962 893
963 void rpciod_down(void) 894 void rpciod_down(void)
964 { 895 {
965 module_put(THIS_MODULE); 896 module_put(THIS_MODULE);
966 } 897 }
967 898
968 /* 899 /*
969 * Start up the rpciod workqueue. 900 * Start up the rpciod workqueue.
970 */ 901 */
971 static int rpciod_start(void) 902 static int rpciod_start(void)
972 { 903 {
973 struct workqueue_struct *wq; 904 struct workqueue_struct *wq;
974 905
975 /* 906 /*
976 * Create the rpciod thread and wait for it to start. 907 * Create the rpciod thread and wait for it to start.
977 */ 908 */
978 dprintk("RPC: creating workqueue rpciod\n"); 909 dprintk("RPC: creating workqueue rpciod\n");
979 wq = create_workqueue("rpciod"); 910 wq = create_workqueue("rpciod");
980 rpciod_workqueue = wq; 911 rpciod_workqueue = wq;
981 return rpciod_workqueue != NULL; 912 return rpciod_workqueue != NULL;
982 } 913 }
983 914
984 static void rpciod_stop(void) 915 static void rpciod_stop(void)
985 { 916 {
986 struct workqueue_struct *wq = NULL; 917 struct workqueue_struct *wq = NULL;
987 918
988 if (rpciod_workqueue == NULL) 919 if (rpciod_workqueue == NULL)
989 return; 920 return;
990 dprintk("RPC: destroying workqueue rpciod\n"); 921 dprintk("RPC: destroying workqueue rpciod\n");
991 922
992 wq = rpciod_workqueue; 923 wq = rpciod_workqueue;
993 rpciod_workqueue = NULL; 924 rpciod_workqueue = NULL;
994 destroy_workqueue(wq); 925 destroy_workqueue(wq);
995 } 926 }
996 927
997 void 928 void
998 rpc_destroy_mempool(void) 929 rpc_destroy_mempool(void)
999 { 930 {
1000 rpciod_stop(); 931 rpciod_stop();
1001 if (rpc_buffer_mempool) 932 if (rpc_buffer_mempool)
1002 mempool_destroy(rpc_buffer_mempool); 933 mempool_destroy(rpc_buffer_mempool);
1003 if (rpc_task_mempool) 934 if (rpc_task_mempool)
1004 mempool_destroy(rpc_task_mempool); 935 mempool_destroy(rpc_task_mempool);
1005 if (rpc_task_slabp) 936 if (rpc_task_slabp)
1006 kmem_cache_destroy(rpc_task_slabp); 937 kmem_cache_destroy(rpc_task_slabp);
1007 if (rpc_buffer_slabp) 938 if (rpc_buffer_slabp)
1008 kmem_cache_destroy(rpc_buffer_slabp); 939 kmem_cache_destroy(rpc_buffer_slabp);
1009 rpc_destroy_wait_queue(&delay_queue); 940 rpc_destroy_wait_queue(&delay_queue);
1010 } 941 }
1011 942
1012 int 943 int
1013 rpc_init_mempool(void) 944 rpc_init_mempool(void)
1014 { 945 {
1015 /* 946 /*
1016 * The following is not strictly a mempool initialisation, 947 * The following is not strictly a mempool initialisation,
1017 * but there is no harm in doing it here 948 * but there is no harm in doing it here
1018 */ 949 */
1019 rpc_init_wait_queue(&delay_queue, "delayq"); 950 rpc_init_wait_queue(&delay_queue, "delayq");
1020 if (!rpciod_start()) 951 if (!rpciod_start())
1021 goto err_nomem; 952 goto err_nomem;
1022 953
1023 rpc_task_slabp = kmem_cache_create("rpc_tasks", 954 rpc_task_slabp = kmem_cache_create("rpc_tasks",
1024 sizeof(struct rpc_task), 955 sizeof(struct rpc_task),
1025 0, SLAB_HWCACHE_ALIGN, 956 0, SLAB_HWCACHE_ALIGN,
1026 NULL); 957 NULL);
1027 if (!rpc_task_slabp) 958 if (!rpc_task_slabp)
1028 goto err_nomem; 959 goto err_nomem;
1029 rpc_buffer_slabp = kmem_cache_create("rpc_buffers", 960 rpc_buffer_slabp = kmem_cache_create("rpc_buffers",
1030 RPC_BUFFER_MAXSIZE, 961 RPC_BUFFER_MAXSIZE,
1031 0, SLAB_HWCACHE_ALIGN, 962 0, SLAB_HWCACHE_ALIGN,
1032 NULL); 963 NULL);
1033 if (!rpc_buffer_slabp) 964 if (!rpc_buffer_slabp)
1034 goto err_nomem; 965 goto err_nomem;
1035 rpc_task_mempool = mempool_create_slab_pool(RPC_TASK_POOLSIZE, 966 rpc_task_mempool = mempool_create_slab_pool(RPC_TASK_POOLSIZE,
1036 rpc_task_slabp); 967 rpc_task_slabp);
1037 if (!rpc_task_mempool) 968 if (!rpc_task_mempool)
1038 goto err_nomem; 969 goto err_nomem;
1039 rpc_buffer_mempool = mempool_create_slab_pool(RPC_BUFFER_POOLSIZE, 970 rpc_buffer_mempool = mempool_create_slab_pool(RPC_BUFFER_POOLSIZE,
1040 rpc_buffer_slabp); 971 rpc_buffer_slabp);
1041 if (!rpc_buffer_mempool) 972 if (!rpc_buffer_mempool)
1042 goto err_nomem; 973 goto err_nomem;
1043 return 0; 974 return 0;
1044 err_nomem: 975 err_nomem:
1045 rpc_destroy_mempool(); 976 rpc_destroy_mempool();
1046 return -ENOMEM; 977 return -ENOMEM;
1047 } 978 }
1048 979