Commit 43ac3f2961b8616da26114ec6dc76ac2a61f76ad
1 parent
c42de9dd67
Exists in
master
and in
7 other branches
SUNRPC: Fix memory barriers for req->rq_received
We need to ensure that all writes to the XDR buffers are done before req->rq_received is visible to other processors. Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
Showing 2 changed files with 7 additions and 1 deletion Inline Diff
net/sunrpc/clnt.c
1 | /* | 1 | /* |
2 | * linux/net/sunrpc/clnt.c | 2 | * linux/net/sunrpc/clnt.c |
3 | * | 3 | * |
4 | * This file contains the high-level RPC interface. | 4 | * This file contains the high-level RPC interface. |
5 | * It is modeled as a finite state machine to support both synchronous | 5 | * It is modeled as a finite state machine to support both synchronous |
6 | * and asynchronous requests. | 6 | * and asynchronous requests. |
7 | * | 7 | * |
8 | * - RPC header generation and argument serialization. | 8 | * - RPC header generation and argument serialization. |
9 | * - Credential refresh. | 9 | * - Credential refresh. |
10 | * - TCP connect handling. | 10 | * - TCP connect handling. |
11 | * - Retry of operation when it is suspected the operation failed because | 11 | * - Retry of operation when it is suspected the operation failed because |
12 | * of uid squashing on the server, or when the credentials were stale | 12 | * of uid squashing on the server, or when the credentials were stale |
13 | * and need to be refreshed, or when a packet was damaged in transit. | 13 | * and need to be refreshed, or when a packet was damaged in transit. |
14 | * This may be have to be moved to the VFS layer. | 14 | * This may be have to be moved to the VFS layer. |
15 | * | 15 | * |
16 | * NB: BSD uses a more intelligent approach to guessing when a request | 16 | * NB: BSD uses a more intelligent approach to guessing when a request |
17 | * or reply has been lost by keeping the RTO estimate for each procedure. | 17 | * or reply has been lost by keeping the RTO estimate for each procedure. |
18 | * We currently make do with a constant timeout value. | 18 | * We currently make do with a constant timeout value. |
19 | * | 19 | * |
20 | * Copyright (C) 1992,1993 Rick Sladkey <jrs@world.std.com> | 20 | * Copyright (C) 1992,1993 Rick Sladkey <jrs@world.std.com> |
21 | * Copyright (C) 1995,1996 Olaf Kirch <okir@monad.swb.de> | 21 | * Copyright (C) 1995,1996 Olaf Kirch <okir@monad.swb.de> |
22 | */ | 22 | */ |
23 | 23 | ||
24 | #include <asm/system.h> | 24 | #include <asm/system.h> |
25 | 25 | ||
26 | #include <linux/module.h> | 26 | #include <linux/module.h> |
27 | #include <linux/types.h> | 27 | #include <linux/types.h> |
28 | #include <linux/mm.h> | 28 | #include <linux/mm.h> |
29 | #include <linux/slab.h> | 29 | #include <linux/slab.h> |
30 | #include <linux/utsname.h> | 30 | #include <linux/utsname.h> |
31 | #include <linux/workqueue.h> | 31 | #include <linux/workqueue.h> |
32 | 32 | ||
33 | #include <linux/sunrpc/clnt.h> | 33 | #include <linux/sunrpc/clnt.h> |
34 | #include <linux/sunrpc/rpc_pipe_fs.h> | 34 | #include <linux/sunrpc/rpc_pipe_fs.h> |
35 | #include <linux/sunrpc/metrics.h> | 35 | #include <linux/sunrpc/metrics.h> |
36 | 36 | ||
37 | 37 | ||
38 | #define RPC_SLACK_SPACE (1024) /* total overkill */ | 38 | #define RPC_SLACK_SPACE (1024) /* total overkill */ |
39 | 39 | ||
40 | #ifdef RPC_DEBUG | 40 | #ifdef RPC_DEBUG |
41 | # define RPCDBG_FACILITY RPCDBG_CALL | 41 | # define RPCDBG_FACILITY RPCDBG_CALL |
42 | #endif | 42 | #endif |
43 | 43 | ||
44 | static DECLARE_WAIT_QUEUE_HEAD(destroy_wait); | 44 | static DECLARE_WAIT_QUEUE_HEAD(destroy_wait); |
45 | 45 | ||
46 | 46 | ||
47 | static void call_start(struct rpc_task *task); | 47 | static void call_start(struct rpc_task *task); |
48 | static void call_reserve(struct rpc_task *task); | 48 | static void call_reserve(struct rpc_task *task); |
49 | static void call_reserveresult(struct rpc_task *task); | 49 | static void call_reserveresult(struct rpc_task *task); |
50 | static void call_allocate(struct rpc_task *task); | 50 | static void call_allocate(struct rpc_task *task); |
51 | static void call_encode(struct rpc_task *task); | 51 | static void call_encode(struct rpc_task *task); |
52 | static void call_decode(struct rpc_task *task); | 52 | static void call_decode(struct rpc_task *task); |
53 | static void call_bind(struct rpc_task *task); | 53 | static void call_bind(struct rpc_task *task); |
54 | static void call_bind_status(struct rpc_task *task); | 54 | static void call_bind_status(struct rpc_task *task); |
55 | static void call_transmit(struct rpc_task *task); | 55 | static void call_transmit(struct rpc_task *task); |
56 | static void call_status(struct rpc_task *task); | 56 | static void call_status(struct rpc_task *task); |
57 | static void call_transmit_status(struct rpc_task *task); | 57 | static void call_transmit_status(struct rpc_task *task); |
58 | static void call_refresh(struct rpc_task *task); | 58 | static void call_refresh(struct rpc_task *task); |
59 | static void call_refreshresult(struct rpc_task *task); | 59 | static void call_refreshresult(struct rpc_task *task); |
60 | static void call_timeout(struct rpc_task *task); | 60 | static void call_timeout(struct rpc_task *task); |
61 | static void call_connect(struct rpc_task *task); | 61 | static void call_connect(struct rpc_task *task); |
62 | static void call_connect_status(struct rpc_task *task); | 62 | static void call_connect_status(struct rpc_task *task); |
63 | static u32 * call_header(struct rpc_task *task); | 63 | static u32 * call_header(struct rpc_task *task); |
64 | static u32 * call_verify(struct rpc_task *task); | 64 | static u32 * call_verify(struct rpc_task *task); |
65 | 65 | ||
66 | 66 | ||
/*
 * Create a per-client directory under the rpc_pipefs mount so that
 * authentication upcalls (e.g. RPCSEC_GSS) have a place for their pipes.
 *
 * On success the client holds a reference to the pipefs mount and
 * clnt->cl_dentry points at the new directory.  If dir_name is NULL the
 * client simply does not get a pipefs directory (not an error).
 */
static int
rpc_setup_pipedir(struct rpc_clnt *clnt, char *dir_name)
{
	/* monotonically increasing suffix; makes each client dir unique */
	static uint32_t clntid;
	int error;

	/* Pre-mark both as "absent" so error/teardown paths can test IS_ERR */
	clnt->cl_vfsmnt = ERR_PTR(-ENOENT);
	clnt->cl_dentry = ERR_PTR(-ENOENT);
	if (dir_name == NULL)
		return 0;

	/* Pin the rpc_pipefs mount for the lifetime of this client */
	clnt->cl_vfsmnt = rpc_get_mount();
	if (IS_ERR(clnt->cl_vfsmnt))
		return PTR_ERR(clnt->cl_vfsmnt);

	for (;;) {
		snprintf(clnt->cl_pathname, sizeof(clnt->cl_pathname),
				"%s/clnt%x", dir_name,
				(unsigned int)clntid++);
		/* snprintf NUL-terminates, but be explicit in case of truncation */
		clnt->cl_pathname[sizeof(clnt->cl_pathname) - 1] = '\0';
		clnt->cl_dentry = rpc_mkdir(clnt->cl_pathname, clnt);
		if (!IS_ERR(clnt->cl_dentry))
			return 0;
		error = PTR_ERR(clnt->cl_dentry);
		/* -EEXIST means the clntid collided: retry with the next id */
		if (error != -EEXIST) {
			printk(KERN_INFO "RPC: Couldn't create pipefs entry %s, error %d\n",
					clnt->cl_pathname, error);
			/* drop the mount reference taken above */
			rpc_put_mount();
			return error;
		}
	}
}
99 | 99 | ||
100 | /* | 100 | /* |
101 | * Create an RPC client | 101 | * Create an RPC client |
102 | * FIXME: This should also take a flags argument (as in task->tk_flags). | 102 | * FIXME: This should also take a flags argument (as in task->tk_flags). |
103 | * It's called (among others) from pmap_create_client, which may in | 103 | * It's called (among others) from pmap_create_client, which may in |
104 | * turn be called by an async task. In this case, rpciod should not be | 104 | * turn be called by an async task. In this case, rpciod should not be |
105 | * made to sleep too long. | 105 | * made to sleep too long. |
106 | */ | 106 | */ |
107 | struct rpc_clnt * | 107 | struct rpc_clnt * |
108 | rpc_new_client(struct rpc_xprt *xprt, char *servname, | 108 | rpc_new_client(struct rpc_xprt *xprt, char *servname, |
109 | struct rpc_program *program, u32 vers, | 109 | struct rpc_program *program, u32 vers, |
110 | rpc_authflavor_t flavor) | 110 | rpc_authflavor_t flavor) |
111 | { | 111 | { |
112 | struct rpc_version *version; | 112 | struct rpc_version *version; |
113 | struct rpc_clnt *clnt = NULL; | 113 | struct rpc_clnt *clnt = NULL; |
114 | struct rpc_auth *auth; | 114 | struct rpc_auth *auth; |
115 | int err; | 115 | int err; |
116 | int len; | 116 | int len; |
117 | 117 | ||
118 | dprintk("RPC: creating %s client for %s (xprt %p)\n", | 118 | dprintk("RPC: creating %s client for %s (xprt %p)\n", |
119 | program->name, servname, xprt); | 119 | program->name, servname, xprt); |
120 | 120 | ||
121 | err = -EINVAL; | 121 | err = -EINVAL; |
122 | if (!xprt) | 122 | if (!xprt) |
123 | goto out_no_xprt; | 123 | goto out_no_xprt; |
124 | if (vers >= program->nrvers || !(version = program->version[vers])) | 124 | if (vers >= program->nrvers || !(version = program->version[vers])) |
125 | goto out_err; | 125 | goto out_err; |
126 | 126 | ||
127 | err = -ENOMEM; | 127 | err = -ENOMEM; |
128 | clnt = kmalloc(sizeof(*clnt), GFP_KERNEL); | 128 | clnt = kmalloc(sizeof(*clnt), GFP_KERNEL); |
129 | if (!clnt) | 129 | if (!clnt) |
130 | goto out_err; | 130 | goto out_err; |
131 | memset(clnt, 0, sizeof(*clnt)); | 131 | memset(clnt, 0, sizeof(*clnt)); |
132 | atomic_set(&clnt->cl_users, 0); | 132 | atomic_set(&clnt->cl_users, 0); |
133 | atomic_set(&clnt->cl_count, 1); | 133 | atomic_set(&clnt->cl_count, 1); |
134 | clnt->cl_parent = clnt; | 134 | clnt->cl_parent = clnt; |
135 | 135 | ||
136 | clnt->cl_server = clnt->cl_inline_name; | 136 | clnt->cl_server = clnt->cl_inline_name; |
137 | len = strlen(servname) + 1; | 137 | len = strlen(servname) + 1; |
138 | if (len > sizeof(clnt->cl_inline_name)) { | 138 | if (len > sizeof(clnt->cl_inline_name)) { |
139 | char *buf = kmalloc(len, GFP_KERNEL); | 139 | char *buf = kmalloc(len, GFP_KERNEL); |
140 | if (buf != 0) | 140 | if (buf != 0) |
141 | clnt->cl_server = buf; | 141 | clnt->cl_server = buf; |
142 | else | 142 | else |
143 | len = sizeof(clnt->cl_inline_name); | 143 | len = sizeof(clnt->cl_inline_name); |
144 | } | 144 | } |
145 | strlcpy(clnt->cl_server, servname, len); | 145 | strlcpy(clnt->cl_server, servname, len); |
146 | 146 | ||
147 | clnt->cl_xprt = xprt; | 147 | clnt->cl_xprt = xprt; |
148 | clnt->cl_procinfo = version->procs; | 148 | clnt->cl_procinfo = version->procs; |
149 | clnt->cl_maxproc = version->nrprocs; | 149 | clnt->cl_maxproc = version->nrprocs; |
150 | clnt->cl_protname = program->name; | 150 | clnt->cl_protname = program->name; |
151 | clnt->cl_pmap = &clnt->cl_pmap_default; | 151 | clnt->cl_pmap = &clnt->cl_pmap_default; |
152 | clnt->cl_port = xprt->addr.sin_port; | 152 | clnt->cl_port = xprt->addr.sin_port; |
153 | clnt->cl_prog = program->number; | 153 | clnt->cl_prog = program->number; |
154 | clnt->cl_vers = version->number; | 154 | clnt->cl_vers = version->number; |
155 | clnt->cl_prot = xprt->prot; | 155 | clnt->cl_prot = xprt->prot; |
156 | clnt->cl_stats = program->stats; | 156 | clnt->cl_stats = program->stats; |
157 | clnt->cl_metrics = rpc_alloc_iostats(clnt); | 157 | clnt->cl_metrics = rpc_alloc_iostats(clnt); |
158 | rpc_init_wait_queue(&clnt->cl_pmap_default.pm_bindwait, "bindwait"); | 158 | rpc_init_wait_queue(&clnt->cl_pmap_default.pm_bindwait, "bindwait"); |
159 | 159 | ||
160 | if (!clnt->cl_port) | 160 | if (!clnt->cl_port) |
161 | clnt->cl_autobind = 1; | 161 | clnt->cl_autobind = 1; |
162 | 162 | ||
163 | clnt->cl_rtt = &clnt->cl_rtt_default; | 163 | clnt->cl_rtt = &clnt->cl_rtt_default; |
164 | rpc_init_rtt(&clnt->cl_rtt_default, xprt->timeout.to_initval); | 164 | rpc_init_rtt(&clnt->cl_rtt_default, xprt->timeout.to_initval); |
165 | 165 | ||
166 | err = rpc_setup_pipedir(clnt, program->pipe_dir_name); | 166 | err = rpc_setup_pipedir(clnt, program->pipe_dir_name); |
167 | if (err < 0) | 167 | if (err < 0) |
168 | goto out_no_path; | 168 | goto out_no_path; |
169 | 169 | ||
170 | auth = rpcauth_create(flavor, clnt); | 170 | auth = rpcauth_create(flavor, clnt); |
171 | if (IS_ERR(auth)) { | 171 | if (IS_ERR(auth)) { |
172 | printk(KERN_INFO "RPC: Couldn't create auth handle (flavor %u)\n", | 172 | printk(KERN_INFO "RPC: Couldn't create auth handle (flavor %u)\n", |
173 | flavor); | 173 | flavor); |
174 | err = PTR_ERR(auth); | 174 | err = PTR_ERR(auth); |
175 | goto out_no_auth; | 175 | goto out_no_auth; |
176 | } | 176 | } |
177 | 177 | ||
178 | /* save the nodename */ | 178 | /* save the nodename */ |
179 | clnt->cl_nodelen = strlen(system_utsname.nodename); | 179 | clnt->cl_nodelen = strlen(system_utsname.nodename); |
180 | if (clnt->cl_nodelen > UNX_MAXNODENAME) | 180 | if (clnt->cl_nodelen > UNX_MAXNODENAME) |
181 | clnt->cl_nodelen = UNX_MAXNODENAME; | 181 | clnt->cl_nodelen = UNX_MAXNODENAME; |
182 | memcpy(clnt->cl_nodename, system_utsname.nodename, clnt->cl_nodelen); | 182 | memcpy(clnt->cl_nodename, system_utsname.nodename, clnt->cl_nodelen); |
183 | return clnt; | 183 | return clnt; |
184 | 184 | ||
185 | out_no_auth: | 185 | out_no_auth: |
186 | if (!IS_ERR(clnt->cl_dentry)) { | 186 | if (!IS_ERR(clnt->cl_dentry)) { |
187 | rpc_rmdir(clnt->cl_pathname); | 187 | rpc_rmdir(clnt->cl_pathname); |
188 | dput(clnt->cl_dentry); | 188 | dput(clnt->cl_dentry); |
189 | rpc_put_mount(); | 189 | rpc_put_mount(); |
190 | } | 190 | } |
191 | out_no_path: | 191 | out_no_path: |
192 | if (clnt->cl_server != clnt->cl_inline_name) | 192 | if (clnt->cl_server != clnt->cl_inline_name) |
193 | kfree(clnt->cl_server); | 193 | kfree(clnt->cl_server); |
194 | kfree(clnt); | 194 | kfree(clnt); |
195 | out_err: | 195 | out_err: |
196 | xprt_destroy(xprt); | 196 | xprt_destroy(xprt); |
197 | out_no_xprt: | 197 | out_no_xprt: |
198 | return ERR_PTR(err); | 198 | return ERR_PTR(err); |
199 | } | 199 | } |
200 | 200 | ||
/**
 * rpc_create_client - create an RPC client and ping the server
 * @xprt: pointer to xprt struct
 * @servname: name of server
 * @info: rpc_program
 * @version: rpc_program version
 * @authflavor: rpc_auth flavour to use
 *
 * Creates an RPC client structure, then pings the server in order to
 * determine if it is up, and if it supports this program and version.
 *
 * This function should never be called by asynchronous tasks such as
 * the portmapper, because the ping is a synchronous (soft) RPC call.
 * On ping failure the freshly created client is shut down again and an
 * ERR_PTR is returned.
 */
struct rpc_clnt *rpc_create_client(struct rpc_xprt *xprt, char *servname,
	struct rpc_program *info, u32 version, rpc_authflavor_t authflavor)
{
	struct rpc_clnt *clnt;
	int err;

	clnt = rpc_new_client(xprt, servname, info, version, authflavor);
	if (IS_ERR(clnt))
		return clnt;
	/* Probe the server: soft (give up on major timeout), uninterruptible */
	err = rpc_ping(clnt, RPC_TASK_SOFT|RPC_TASK_NOINTR);
	if (err == 0)
		return clnt;
	rpc_shutdown_client(clnt);
	return ERR_PTR(err);
}
230 | 230 | ||
/*
 * This function clones the RPC client structure. It allows us to share the
 * same transport while varying parameters such as the authentication
 * flavour.
 *
 * The clone starts with cl_count == 1 and no users; it takes references
 * on the parent client, the auth handle, the pipefs dentry/mount, and
 * gets its own portmapper state, RTT estimator and iostats.
 */
struct rpc_clnt *
rpc_clone_client(struct rpc_clnt *clnt)
{
	struct rpc_clnt *new;

	new = kmalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		goto out_no_clnt;
	/* Start from a bitwise copy, then fix up everything that must not
	 * be shared or must be re-referenced. */
	memcpy(new, clnt, sizeof(*new));
	atomic_set(&new->cl_count, 1);
	atomic_set(&new->cl_users, 0);
	new->cl_parent = clnt;
	/* the clone pins its parent until rpc_destroy_client drops it */
	atomic_inc(&clnt->cl_count);
	/* Duplicate portmapper */
	rpc_init_wait_queue(&new->cl_pmap_default.pm_bindwait, "bindwait");
	/* Turn off autobind on clones */
	new->cl_autobind = 0;
	new->cl_oneshot = 0;
	new->cl_dead = 0;
	/* share the parent's pipefs directory, with our own references */
	if (!IS_ERR(new->cl_dentry)) {
		dget(new->cl_dentry);
		rpc_get_mount();
	}
	rpc_init_rtt(&new->cl_rtt_default, clnt->cl_xprt->timeout.to_initval);
	if (new->cl_auth)
		atomic_inc(&new->cl_auth->au_count);
	new->cl_pmap = &new->cl_pmap_default;
	/* NOTE(review): rpc_alloc_iostats failure is not checked here —
	 * presumably a NULL cl_metrics is tolerated downstream; confirm */
	new->cl_metrics = rpc_alloc_iostats(clnt);
	return new;
out_no_clnt:
	printk(KERN_INFO "RPC: out of memory in %s\n", __FUNCTION__);
	return ERR_PTR(-ENOMEM);
}
269 | 269 | ||
/*
 * Properly shut down an RPC client, terminating all outstanding
 * requests. Note that we must be certain that cl_oneshot and
 * cl_dead are cleared, or else the client would be destroyed
 * when the last task releases it.
 *
 * Loops killing tasks and waiting (1s at a time) until cl_users drains
 * to zero, then drops our reference via rpc_destroy_client().
 */
int
rpc_shutdown_client(struct rpc_clnt *clnt)
{
	dprintk("RPC: shutting down %s client for %s, tasks=%d\n",
			clnt->cl_protname, clnt->cl_server,
			atomic_read(&clnt->cl_users));

	while (atomic_read(&clnt->cl_users) > 0) {
		/* Don't let rpc_release_client destroy us */
		clnt->cl_oneshot = 0;
		clnt->cl_dead = 0;
		rpc_killall_tasks(clnt);
		/* re-check each second; killed tasks wake destroy_wait as
		 * they release their client reference */
		wait_event_timeout(destroy_wait,
			!atomic_read(&clnt->cl_users), 1*HZ);
	}

	/* A negative user count means refcounting is broken somewhere */
	if (atomic_read(&clnt->cl_users) < 0) {
		printk(KERN_ERR "RPC: rpc_shutdown_client clnt %p tasks=%d\n",
			clnt, atomic_read(&clnt->cl_users));
#ifdef RPC_DEBUG
		rpc_show_tasks();
#endif
		BUG();
	}

	return rpc_destroy_client(clnt);
}
303 | 303 | ||
/*
 * Delete an RPC client.
 *
 * Drops one reference from cl_count; returns 1 while references remain,
 * 0 once the client has actually been freed.  A clone releases its
 * parent (recursively) instead of tearing down the shared transport
 * and server-name storage, which belong to the parent.
 */
int
rpc_destroy_client(struct rpc_clnt *clnt)
{
	if (!atomic_dec_and_test(&clnt->cl_count))
		return 1;
	/* no tasks may still be using a client being destroyed */
	BUG_ON(atomic_read(&clnt->cl_users) != 0);

	dprintk("RPC: destroying %s client for %s\n",
			clnt->cl_protname, clnt->cl_server);
	if (clnt->cl_auth) {
		rpcauth_destroy(clnt->cl_auth);
		clnt->cl_auth = NULL;
	}
	if (clnt->cl_parent != clnt) {
		/* clone: drop the parent reference taken in rpc_clone_client;
		 * transport and server name are the parent's to free */
		rpc_destroy_client(clnt->cl_parent);
		goto out_free;
	}
	if (clnt->cl_pathname[0])
		rpc_rmdir(clnt->cl_pathname);
	if (clnt->cl_xprt) {
		xprt_destroy(clnt->cl_xprt);
		clnt->cl_xprt = NULL;
	}
	if (clnt->cl_server != clnt->cl_inline_name)
		kfree(clnt->cl_server);
out_free:
	rpc_free_iostats(clnt->cl_metrics);
	clnt->cl_metrics = NULL;
	/* release the pipefs dentry/mount references, if we ever held them */
	if (!IS_ERR(clnt->cl_dentry)) {
		dput(clnt->cl_dentry);
		rpc_put_mount();
	}
	kfree(clnt);
	return 0;
}
342 | 342 | ||
/*
 * Release an RPC client (drop one cl_users reference).
 *
 * When the last user goes away, wake anyone blocked in
 * rpc_shutdown_client() on destroy_wait, and destroy the client
 * outright if it was marked one-shot or dead.
 */
void
rpc_release_client(struct rpc_clnt *clnt)
{
	dprintk("RPC: rpc_release_client(%p, %d)\n",
			clnt, atomic_read(&clnt->cl_users));

	if (!atomic_dec_and_test(&clnt->cl_users))
		return;
	wake_up(&destroy_wait);
	if (clnt->cl_oneshot || clnt->cl_dead)
		rpc_destroy_client(clnt);
}
358 | 358 | ||
/**
 * rpc_bind_new_program - bind a new RPC program to an existing client
 * @old: old rpc_client
 * @program: rpc program to set
 * @vers: rpc program version
 *
 * Clones the rpc client and sets up a new RPC program. This is mainly
 * of use for enabling different RPC programs to share the same transport.
 * The Sun NFSv2/v3 ACL protocol can do this.
 *
 * The new program/version is pinged; on failure the clone is shut down
 * and an ERR_PTR is returned.
 */
struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *old,
				      struct rpc_program *program,
				      int vers)
{
	struct rpc_clnt *clnt;
	struct rpc_version *version;
	int err;

	/* callers must pass a version the program actually provides */
	BUG_ON(vers >= program->nrvers || !program->version[vers]);
	version = program->version[vers];
	clnt = rpc_clone_client(old);
	if (IS_ERR(clnt))
		goto out;
	clnt->cl_procinfo = version->procs;
	clnt->cl_maxproc  = version->nrprocs;
	clnt->cl_protname = program->name;
	clnt->cl_prog     = program->number;
	clnt->cl_vers     = version->number;
	clnt->cl_stats    = program->stats;
	err = rpc_ping(clnt, RPC_TASK_SOFT|RPC_TASK_NOINTR);
	if (err != 0) {
		rpc_shutdown_client(clnt);
		clnt = ERR_PTR(err);
	}
out:
	return clnt;
}
396 | 396 | ||
/*
 * Default callback for async RPC calls: intentionally does nothing.
 * Used when the caller of rpc_call_sync()/rpc_new_task() supplies no
 * completion handler of its own.
 */
static void
rpc_default_callback(struct rpc_task *task, void *data)
{
}
404 | 404 | ||
/* Minimal rpc_call_ops: only a (no-op) completion hook is provided */
static const struct rpc_call_ops rpc_default_ops = {
	.rpc_call_done = rpc_default_callback,
};
408 | 408 | ||
409 | /* | 409 | /* |
410 | * Export the signal mask handling for synchronous code that | 410 | * Export the signal mask handling for synchronous code that |
411 | * sleeps on RPC calls | 411 | * sleeps on RPC calls |
412 | */ | 412 | */ |
413 | #define RPC_INTR_SIGNALS (sigmask(SIGHUP) | sigmask(SIGINT) | sigmask(SIGQUIT) | sigmask(SIGTERM)) | 413 | #define RPC_INTR_SIGNALS (sigmask(SIGHUP) | sigmask(SIGINT) | sigmask(SIGQUIT) | sigmask(SIGTERM)) |
414 | 414 | ||
/*
 * Block signals for the duration of a synchronous RPC call, saving the
 * caller's mask in *oldset.  SIGKILL always remains deliverable; when
 * @intr is set, the usual "interrupt an NFS call" signals
 * (RPC_INTR_SIGNALS) are allowed through as well.
 */
static void rpc_save_sigmask(sigset_t *oldset, int intr)
{
	unsigned long sigallow = sigmask(SIGKILL);
	sigset_t sigmask;

	/* Block all signals except those listed in sigallow */
	if (intr)
		sigallow |= RPC_INTR_SIGNALS;
	/* build the inverse set: everything NOT in sigallow gets blocked */
	siginitsetinv(&sigmask, sigallow);
	sigprocmask(SIG_BLOCK, &sigmask, oldset);
}
426 | 426 | ||
/* Save/adjust the signal mask based on whether @task is interruptible */
static inline void rpc_task_sigmask(struct rpc_task *task, sigset_t *oldset)
{
	rpc_save_sigmask(oldset, !RPC_TASK_UNINTERRUPTIBLE(task));
}
431 | 431 | ||
/* Restore the signal mask previously saved by rpc_save_sigmask() */
static inline void rpc_restore_sigmask(sigset_t *oldset)
{
	sigprocmask(SIG_SETMASK, oldset, NULL);
}
436 | 436 | ||
/* Exported helper: block RPC signals per the client's cl_intr setting */
void rpc_clnt_sigmask(struct rpc_clnt *clnt, sigset_t *oldset)
{
	rpc_save_sigmask(oldset, clnt->cl_intr);
}
441 | 441 | ||
/* Exported helper: undo rpc_clnt_sigmask() (clnt unused, kept for API symmetry) */
void rpc_clnt_sigunmask(struct rpc_clnt *clnt, sigset_t *oldset)
{
	rpc_restore_sigmask(oldset);
}
446 | 446 | ||
/*
 * New rpc_call implementation: perform a synchronous RPC call.
 *
 * Creates a task for @msg on @clnt, blocks signals for the duration,
 * runs the task to completion with rpc_execute(), and returns the
 * task's final status (or -EIO for a dead client, -ENOMEM on task
 * allocation failure).  Must not be called with RPC_TASK_ASYNC.
 */
int rpc_call_sync(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
{
	struct rpc_task	*task;
	sigset_t	oldset;
	int		status;

	/* If this client is slain all further I/O fails */
	if (clnt->cl_dead)
		return -EIO;

	BUG_ON(flags & RPC_TASK_ASYNC);

	status = -ENOMEM;
	task = rpc_new_task(clnt, flags, &rpc_default_ops, NULL);
	if (task == NULL)
		goto out;

	/* Mask signals on RPC calls _and_ GSS_AUTH upcalls */
	rpc_task_sigmask(task, &oldset);

	rpc_call_setup(task, msg, 0);

	/* Set up the call info struct and execute the task */
	status = task->tk_status;
	if (status == 0) {
		/* take an extra task reference so we can still read
		 * tk_status after rpc_execute() completes it */
		atomic_inc(&task->tk_count);
		status = rpc_execute(task);
		if (status == 0)
			status = task->tk_status;
	}
	rpc_restore_sigmask(&oldset);
	rpc_release_task(task);
out:
	return status;
}
485 | 485 | ||
/*
 * rpc_call_async - perform an asynchronous RPC call
 * @clnt: RPC client to use
 * @msg: procedure, arguments and credentials for the call
 * @flags: RPC task flags (RPC_TASK_ASYNC is forced on)
 * @tk_ops: completion callbacks
 * @data: opaque cookie passed to @tk_ops
 *
 * Creates a task for @msg and hands it to the RPC scheduler.  The
 * return value reflects only task setup; completion is reported
 * through @tk_ops.
 */
int
rpc_call_async(struct rpc_clnt *clnt, struct rpc_message *msg, int flags,
	       const struct rpc_call_ops *tk_ops, void *data)
{
	struct rpc_task *task;
	sigset_t oldset;
	int status;

	/* If this client is slain all further I/O fails */
	if (clnt->cl_dead)
		return -EIO;

	flags |= RPC_TASK_ASYNC;

	/* Create/initialize a new RPC task */
	status = -ENOMEM;
	if (!(task = rpc_new_task(clnt, flags, tk_ops, data)))
		goto out;

	/* Mask signals on GSS_AUTH upcalls */
	rpc_task_sigmask(task, &oldset);

	rpc_call_setup(task, msg, 0);

	/* Set up the call info struct and execute the task */
	status = task->tk_status;
	if (status == 0)
		rpc_execute(task);
	else
		/* Setup failed: release the task ourselves */
		rpc_release_task(task);

	rpc_restore_sigmask(&oldset);
out:
	return status;
}
524 | 524 | ||
525 | 525 | ||
526 | void | 526 | void |
527 | rpc_call_setup(struct rpc_task *task, struct rpc_message *msg, int flags) | 527 | rpc_call_setup(struct rpc_task *task, struct rpc_message *msg, int flags) |
528 | { | 528 | { |
529 | task->tk_msg = *msg; | 529 | task->tk_msg = *msg; |
530 | task->tk_flags |= flags; | 530 | task->tk_flags |= flags; |
531 | /* Bind the user cred */ | 531 | /* Bind the user cred */ |
532 | if (task->tk_msg.rpc_cred != NULL) | 532 | if (task->tk_msg.rpc_cred != NULL) |
533 | rpcauth_holdcred(task); | 533 | rpcauth_holdcred(task); |
534 | else | 534 | else |
535 | rpcauth_bindcred(task); | 535 | rpcauth_bindcred(task); |
536 | 536 | ||
537 | if (task->tk_status == 0) | 537 | if (task->tk_status == 0) |
538 | task->tk_action = call_start; | 538 | task->tk_action = call_start; |
539 | else | 539 | else |
540 | task->tk_action = rpc_exit_task; | 540 | task->tk_action = rpc_exit_task; |
541 | } | 541 | } |
542 | 542 | ||
543 | void | 543 | void |
544 | rpc_setbufsize(struct rpc_clnt *clnt, unsigned int sndsize, unsigned int rcvsize) | 544 | rpc_setbufsize(struct rpc_clnt *clnt, unsigned int sndsize, unsigned int rcvsize) |
545 | { | 545 | { |
546 | struct rpc_xprt *xprt = clnt->cl_xprt; | 546 | struct rpc_xprt *xprt = clnt->cl_xprt; |
547 | if (xprt->ops->set_buffer_size) | 547 | if (xprt->ops->set_buffer_size) |
548 | xprt->ops->set_buffer_size(xprt, sndsize, rcvsize); | 548 | xprt->ops->set_buffer_size(xprt, sndsize, rcvsize); |
549 | } | 549 | } |
550 | 550 | ||
/*
 * rpc_max_payload - return size of largest payload RPC client can
 * support, in bytes
 * @clnt: RPC client to query
 *
 * For stream transports, this is one RPC record fragment (see RFC
 * 1831), as we don't support multi-record requests yet.  For datagram
 * transports, this is the size of an IP packet minus the IP, UDP, and
 * RPC header sizes.
 */
size_t rpc_max_payload(struct rpc_clnt *clnt)
{
	return clnt->cl_xprt->max_payload;
}
EXPORT_SYMBOL(rpc_max_payload);
564 | 564 | ||
/**
 * rpc_force_rebind - force transport to check that remote port is unchanged
 * @clnt: client to rebind
 *
 * Clearing cl_port makes the next call_bind() re-query the remote
 * rpcbind service for the port.  Only autobound clients are affected.
 */
void rpc_force_rebind(struct rpc_clnt *clnt)
{
	if (clnt->cl_autobind)
		clnt->cl_port = 0;
}
EXPORT_SYMBOL(rpc_force_rebind);
576 | 576 | ||
577 | /* | 577 | /* |
578 | * Restart an (async) RPC call. Usually called from within the | 578 | * Restart an (async) RPC call. Usually called from within the |
579 | * exit handler. | 579 | * exit handler. |
580 | */ | 580 | */ |
581 | void | 581 | void |
582 | rpc_restart_call(struct rpc_task *task) | 582 | rpc_restart_call(struct rpc_task *task) |
583 | { | 583 | { |
584 | if (RPC_ASSASSINATED(task)) | 584 | if (RPC_ASSASSINATED(task)) |
585 | return; | 585 | return; |
586 | 586 | ||
587 | task->tk_action = call_start; | 587 | task->tk_action = call_start; |
588 | } | 588 | } |
589 | 589 | ||
/*
 * 0. Initial state
 *
 * Other FSM states can be visited zero or more times, but
 * this state is visited exactly once for each RPC.
 */
static void
call_start(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;

	dprintk("RPC: %4d call_start %s%d proc %d (%s)\n", task->tk_pid,
		clnt->cl_protname, clnt->cl_vers, task->tk_msg.rpc_proc->p_proc,
		(RPC_IS_ASYNC(task) ? "async" : "sync"));

	/* Increment call count: per-procedure and per-client statistics */
	task->tk_msg.rpc_proc->p_count++;
	clnt->cl_stats->rpccnt++;
	task->tk_action = call_reserve;
}
610 | 610 | ||
/*
 * 1. Reserve an RPC call slot
 */
static void
call_reserve(struct rpc_task *task)
{
	dprintk("RPC: %4d call_reserve\n", task->tk_pid);

	/* Refresh stale credentials before taking a transport slot */
	if (!rpcauth_uptodatecred(task)) {
		task->tk_action = call_refresh;
		return;
	}

	/* Clear status before xprt_reserve() reports into it */
	task->tk_status = 0;
	task->tk_action = call_reserveresult;
	xprt_reserve(task);
}
628 | 628 | ||
/*
 * 1b. Grok the result of xprt_reserve()
 */
static void
call_reserveresult(struct rpc_task *task)
{
	int status = task->tk_status;

	dprintk("RPC: %4d call_reserveresult (status %d)\n",
				task->tk_pid, task->tk_status);

	/*
	 * After a call to xprt_reserve(), we must have either
	 * a request slot or else an error status.
	 */
	task->tk_status = 0;
	if (status >= 0) {
		if (task->tk_rqstp) {
			task->tk_action = call_allocate;
			return;
		}

		/* Success status but no slot: internal inconsistency */
		printk(KERN_ERR "%s: status=%d, but no request slot, exiting\n",
				__FUNCTION__, status);
		rpc_exit(task, -EIO);
		return;
	}

	/*
	 * Even though there was an error, we may have acquired
	 * a request slot somehow.  Make sure not to leak it.
	 */
	if (task->tk_rqstp) {
		printk(KERN_ERR "%s: status=%d, request allocated anyway\n",
				__FUNCTION__, status);
		xprt_release(task);
	}

	switch (status) {
	case -EAGAIN:	/* woken up; retry */
		task->tk_action = call_reserve;
		return;
	case -EIO:	/* probably a shutdown */
		break;
	default:
		printk(KERN_ERR "%s: unrecognized error %d, exiting\n",
				__FUNCTION__, status);
		break;
	}
	rpc_exit(task, status);
}
680 | 680 | ||
/*
 * 2. Allocate the buffer.  For details, see sched.c:rpc_malloc.
 * (Note: buffer memory is freed in xprt_release).
 */
static void
call_allocate(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = task->tk_xprt;
	unsigned int bufsiz;

	dprintk("RPC: %4d call_allocate (status %d)\n",
			task->tk_pid, task->tk_status);
	task->tk_action = call_bind;
	/* Buffer may already exist from an earlier pass through here */
	if (req->rq_buffer)
		return;

	/* FIXME: compute buffer requirements more exactly using
	 * auth->au_wslack */
	bufsiz = task->tk_msg.rpc_proc->p_bufsiz + RPC_SLACK_SPACE;

	/* Doubled: call_encode() splits the buffer into send and
	 * receive halves */
	if (xprt->ops->buf_alloc(task, bufsiz << 1) != NULL)
		return;
	printk(KERN_INFO "RPC: buffer allocation failed for task %p\n", task);

	/* On transient failure, drop the slot and retry after a delay;
	 * a signalled synchronous caller gives up instead */
	if (RPC_IS_ASYNC(task) || !signalled()) {
		xprt_release(task);
		task->tk_action = call_reserve;
		rpc_delay(task, HZ>>4);
		return;
	}

	rpc_exit(task, -ERESTARTSYS);
}
715 | 715 | ||
716 | static inline int | 716 | static inline int |
717 | rpc_task_need_encode(struct rpc_task *task) | 717 | rpc_task_need_encode(struct rpc_task *task) |
718 | { | 718 | { |
719 | return task->tk_rqstp->rq_snd_buf.len == 0; | 719 | return task->tk_rqstp->rq_snd_buf.len == 0; |
720 | } | 720 | } |
721 | 721 | ||
/* Zap the send buffer length so the next transmit re-encodes the call
 * (see rpc_task_need_encode()). */
static inline void
rpc_task_force_reencode(struct rpc_task *task)
{
	task->tk_rqstp->rq_snd_buf.len = 0;
}
727 | 727 | ||
/*
 * 3. Encode arguments of an RPC call
 */
static void
call_encode(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct xdr_buf *sndbuf = &req->rq_snd_buf;
	struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
	unsigned int bufsiz;
	kxdrproc_t encode;
	u32 *p;

	dprintk("RPC: %4d call_encode (status %d)\n",
			task->tk_pid, task->tk_status);

	/* Default buffer setup: split rq_buffer in half, first half for
	 * the send buffer, second half for the receive buffer */
	bufsiz = req->rq_bufsize >> 1;
	sndbuf->head[0].iov_base = (void *)req->rq_buffer;
	sndbuf->head[0].iov_len  = bufsiz;
	sndbuf->tail[0].iov_len  = 0;
	sndbuf->page_len	 = 0;
	sndbuf->len		 = 0;
	sndbuf->buflen		 = bufsiz;
	rcvbuf->head[0].iov_base = (void *)((char *)req->rq_buffer + bufsiz);
	rcvbuf->head[0].iov_len  = bufsiz;
	rcvbuf->tail[0].iov_len  = 0;
	rcvbuf->page_len	 = 0;
	rcvbuf->len		 = 0;
	rcvbuf->buflen		 = bufsiz;

	/* Encode header and provided arguments */
	encode = task->tk_msg.rpc_proc->p_encode;
	if (!(p = call_header(task))) {
		printk(KERN_INFO "RPC: call_header failed, exit EIO\n");
		rpc_exit(task, -EIO);
		return;
	}
	/* Header-only procedures (no arguments) are done here */
	if (encode == NULL)
		return;

	task->tk_status = rpcauth_wrap_req(task, encode, req, p,
			task->tk_msg.rpc_argp);
	if (task->tk_status == -ENOMEM) {
		/* XXX: Is this sane? */
		rpc_delay(task, 3*HZ);
		task->tk_status = -EAGAIN;
	}
}
777 | 777 | ||
778 | /* | 778 | /* |
779 | * 4. Get the server port number if not yet set | 779 | * 4. Get the server port number if not yet set |
780 | */ | 780 | */ |
781 | static void | 781 | static void |
782 | call_bind(struct rpc_task *task) | 782 | call_bind(struct rpc_task *task) |
783 | { | 783 | { |
784 | struct rpc_clnt *clnt = task->tk_client; | 784 | struct rpc_clnt *clnt = task->tk_client; |
785 | 785 | ||
786 | dprintk("RPC: %4d call_bind (status %d)\n", | 786 | dprintk("RPC: %4d call_bind (status %d)\n", |
787 | task->tk_pid, task->tk_status); | 787 | task->tk_pid, task->tk_status); |
788 | 788 | ||
789 | task->tk_action = call_connect; | 789 | task->tk_action = call_connect; |
790 | if (!clnt->cl_port) { | 790 | if (!clnt->cl_port) { |
791 | task->tk_action = call_bind_status; | 791 | task->tk_action = call_bind_status; |
792 | task->tk_timeout = task->tk_xprt->bind_timeout; | 792 | task->tk_timeout = task->tk_xprt->bind_timeout; |
793 | rpc_getport(task, clnt); | 793 | rpc_getport(task, clnt); |
794 | } | 794 | } |
795 | } | 795 | } |
796 | 796 | ||
/*
 * 4a. Sort out bind result
 */
static void
call_bind_status(struct rpc_task *task)
{
	int status = -EACCES;

	if (task->tk_status >= 0) {
		dprintk("RPC: %4d call_bind_status (status %d)\n",
					task->tk_pid, task->tk_status);
		task->tk_status = 0;
		task->tk_action = call_connect;
		return;
	}

	switch (task->tk_status) {
	case -EACCES:
		/* Program/version not registered yet: back off and retry */
		dprintk("RPC: %4d remote rpcbind: RPC program/version unavailable\n",
				task->tk_pid);
		rpc_delay(task, 3*HZ);
		goto retry_bind;
	case -ETIMEDOUT:
		/* Soft tasks fail; hard tasks retry the bind */
		dprintk("RPC: %4d rpcbind request timed out\n",
				task->tk_pid);
		if (RPC_IS_SOFT(task)) {
			status = -EIO;
			break;
		}
		goto retry_bind;
	case -EPFNOSUPPORT:
		dprintk("RPC: %4d remote rpcbind service unavailable\n",
				task->tk_pid);
		break;
	case -EPROTONOSUPPORT:
		dprintk("RPC: %4d remote rpcbind version 2 unavailable\n",
				task->tk_pid);
		break;
	default:
		dprintk("RPC: %4d unrecognized rpcbind error (%d)\n",
				task->tk_pid, -task->tk_status);
		status = -EIO;
		break;
	}

	rpc_exit(task, status);
	return;

retry_bind:
	task->tk_status = 0;
	task->tk_action = call_bind;
	return;
}
850 | 850 | ||
/*
 * 4b. Connect to the RPC server
 */
static void
call_connect(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	dprintk("RPC: %4d call_connect xprt %p %s connected\n",
			task->tk_pid, xprt,
			(xprt_connected(xprt) ? "is" : "is not"));

	/* Already connected: skip straight to transmit */
	task->tk_action = call_transmit;
	if (!xprt_connected(xprt)) {
		task->tk_action = call_connect_status;
		/* A pre-existing error short-circuits the connect */
		if (task->tk_status < 0)
			return;
		xprt_connect(task);
	}
}
871 | 871 | ||
/*
 * 4c. Sort out connect result
 */
static void
call_connect_status(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;
	int status = task->tk_status;

	dprintk("RPC: %5u call_connect_status (status %d)\n",
				task->tk_pid, task->tk_status);

	task->tk_status = 0;
	if (status >= 0) {
		clnt->cl_stats->netreconn++;
		task->tk_action = call_transmit;
		return;
	}

	/* Something failed: remote service port may have changed */
	rpc_force_rebind(clnt);

	switch (status) {
	case -ENOTCONN:
	case -ETIMEDOUT:
	case -EAGAIN:
		/* Transient: rebind and retry the connect */
		task->tk_action = call_bind;
		break;
	default:
		rpc_exit(task, -EIO);
		break;
	}
}
905 | 905 | ||
/*
 * 5. Transmit the RPC request, and wait for reply
 */
static void
call_transmit(struct rpc_task *task)
{
	dprintk("RPC: %4d call_transmit (status %d)\n",
			task->tk_pid, task->tk_status);

	task->tk_action = call_status;
	if (task->tk_status < 0)
		return;
	/* Take the transport write lock; may queue us and return -EAGAIN */
	task->tk_status = xprt_prepare_transmit(task);
	if (task->tk_status != 0)
		return;
	/* Encode here so that rpcsec_gss can use correct sequence number. */
	if (rpc_task_need_encode(task)) {
		task->tk_rqstp->rq_bytes_sent = 0;
		call_encode(task);
		/* Did the encode result in an error condition? */
		if (task->tk_status != 0)
			goto out_nosend;
	}
	task->tk_action = call_transmit_status;
	xprt_transmit(task);
	if (task->tk_status < 0)
		return;
	/* One-way calls (no decode proc) complete right here */
	if (!task->tk_msg.rpc_proc->p_decode) {
		task->tk_action = rpc_exit_task;
		rpc_wake_up_task(task);
	}
	return;
out_nosend:
	/* release socket write lock before attempting to handle error */
	xprt_abort_transmit(task);
	rpc_task_force_reencode(task);
}
943 | 943 | ||
/*
 * 6. Sort out the RPC call status
 */
static void
call_status(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rqst *req = task->tk_rqstp;
	int status;

	/* A reply arrived and the request is fully sent: use the reply
	 * length/status recorded by the transport.
	 * NOTE(review): rq_received is written lock-free by the transport
	 * after it fills the XDR receive buffers; this read relies on a
	 * paired smp_wmb()/smp_rmb() in the transport/decode paths --
	 * confirm the barrier pairing. */
	if (req->rq_received > 0 && !req->rq_bytes_sent)
		task->tk_status = req->rq_received;

	dprintk("RPC: %4d call_status (status %d)\n",
			task->tk_pid, task->tk_status);

	status = task->tk_status;
	if (status >= 0) {
		task->tk_action = call_decode;
		return;
	}

	task->tk_status = 0;
	switch(status) {
	case -ETIMEDOUT:
		task->tk_action = call_timeout;
		break;
	case -ECONNREFUSED:
	case -ENOTCONN:
		/* Server port may have moved: rebind then retry */
		rpc_force_rebind(clnt);
		task->tk_action = call_bind;
		break;
	case -EAGAIN:
		task->tk_action = call_transmit;
		break;
	case -EIO:
		/* shutdown or soft timeout */
		rpc_exit(task, status);
		break;
	default:
		printk("%s: RPC call returned error %d\n",
				clnt->cl_protname, -status);
		rpc_exit(task, status);
		break;
	}
}
990 | 990 | ||
991 | /* | 991 | /* |
992 | * 6a. Handle transmission errors. | 992 | * 6a. Handle transmission errors. |
993 | */ | 993 | */ |
994 | static void | 994 | static void |
995 | call_transmit_status(struct rpc_task *task) | 995 | call_transmit_status(struct rpc_task *task) |
996 | { | 996 | { |
997 | if (task->tk_status != -EAGAIN) | 997 | if (task->tk_status != -EAGAIN) |
998 | rpc_task_force_reencode(task); | 998 | rpc_task_force_reencode(task); |
999 | call_status(task); | 999 | call_status(task); |
1000 | } | 1000 | } |
1001 | 1001 | ||
/*
 * 6b. Handle RPC timeout
 * We do not release the request slot, so we keep using the
 * same XID for all retransmits.
 */
static void
call_timeout(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;

	/* Minor timeout: the per-request timeout was merely bumped */
	if (xprt_adjust_timeout(task->tk_rqstp) == 0) {
		dprintk("RPC: %4d call_timeout (minor)\n", task->tk_pid);
		goto retry;
	}

	dprintk("RPC: %4d call_timeout (major)\n", task->tk_pid);
	task->tk_timeouts++;

	/* Soft tasks give up on a major timeout */
	if (RPC_IS_SOFT(task)) {
		printk(KERN_NOTICE "%s: server %s not responding, timed out\n",
				clnt->cl_protname, clnt->cl_server);
		rpc_exit(task, -EIO);
		return;
	}

	/* Warn once per major-timeout episode (flag cleared on reply) */
	if (!(task->tk_flags & RPC_CALL_MAJORSEEN)) {
		task->tk_flags |= RPC_CALL_MAJORSEEN;
		printk(KERN_NOTICE "%s: server %s not responding, still trying\n",
			clnt->cl_protname, clnt->cl_server);
	}
	rpc_force_rebind(clnt);

retry:
	clnt->cl_stats->rpcretrans++;
	task->tk_action = call_bind;
	task->tk_status = 0;
}
1039 | 1039 | ||
1040 | /* | 1040 | /* |
1041 | * 7. Decode the RPC reply | 1041 | * 7. Decode the RPC reply |
1042 | */ | 1042 | */ |
1043 | static void | 1043 | static void |
1044 | call_decode(struct rpc_task *task) | 1044 | call_decode(struct rpc_task *task) |
1045 | { | 1045 | { |
1046 | struct rpc_clnt *clnt = task->tk_client; | 1046 | struct rpc_clnt *clnt = task->tk_client; |
1047 | struct rpc_rqst *req = task->tk_rqstp; | 1047 | struct rpc_rqst *req = task->tk_rqstp; |
1048 | kxdrproc_t decode = task->tk_msg.rpc_proc->p_decode; | 1048 | kxdrproc_t decode = task->tk_msg.rpc_proc->p_decode; |
1049 | u32 *p; | 1049 | u32 *p; |
1050 | 1050 | ||
1051 | dprintk("RPC: %4d call_decode (status %d)\n", | 1051 | dprintk("RPC: %4d call_decode (status %d)\n", |
1052 | task->tk_pid, task->tk_status); | 1052 | task->tk_pid, task->tk_status); |
1053 | 1053 | ||
1054 | if (task->tk_flags & RPC_CALL_MAJORSEEN) { | 1054 | if (task->tk_flags & RPC_CALL_MAJORSEEN) { |
1055 | printk(KERN_NOTICE "%s: server %s OK\n", | 1055 | printk(KERN_NOTICE "%s: server %s OK\n", |
1056 | clnt->cl_protname, clnt->cl_server); | 1056 | clnt->cl_protname, clnt->cl_server); |
1057 | task->tk_flags &= ~RPC_CALL_MAJORSEEN; | 1057 | task->tk_flags &= ~RPC_CALL_MAJORSEEN; |
1058 | } | 1058 | } |
1059 | 1059 | ||
1060 | if (task->tk_status < 12) { | 1060 | if (task->tk_status < 12) { |
1061 | if (!RPC_IS_SOFT(task)) { | 1061 | if (!RPC_IS_SOFT(task)) { |
1062 | task->tk_action = call_bind; | 1062 | task->tk_action = call_bind; |
1063 | clnt->cl_stats->rpcretrans++; | 1063 | clnt->cl_stats->rpcretrans++; |
1064 | goto out_retry; | 1064 | goto out_retry; |
1065 | } | 1065 | } |
1066 | printk(KERN_WARNING "%s: too small RPC reply size (%d bytes)\n", | 1066 | printk(KERN_WARNING "%s: too small RPC reply size (%d bytes)\n", |
1067 | clnt->cl_protname, task->tk_status); | 1067 | clnt->cl_protname, task->tk_status); |
1068 | rpc_exit(task, -EIO); | 1068 | rpc_exit(task, -EIO); |
1069 | return; | 1069 | return; |
1070 | } | 1070 | } |
1071 | 1071 | ||
1072 | /* | ||
1073 | * Ensure that we see all writes made by xprt_complete_rqst() | ||
1074 | * before it changed req->rq_received. | ||
1075 | */ | ||
1076 | smp_rmb(); | ||
1072 | req->rq_rcv_buf.len = req->rq_private_buf.len; | 1077 | req->rq_rcv_buf.len = req->rq_private_buf.len; |
1073 | 1078 | ||
1074 | /* Check that the softirq receive buffer is valid */ | 1079 | /* Check that the softirq receive buffer is valid */ |
1075 | WARN_ON(memcmp(&req->rq_rcv_buf, &req->rq_private_buf, | 1080 | WARN_ON(memcmp(&req->rq_rcv_buf, &req->rq_private_buf, |
1076 | sizeof(req->rq_rcv_buf)) != 0); | 1081 | sizeof(req->rq_rcv_buf)) != 0); |
1077 | 1082 | ||
1078 | /* Verify the RPC header */ | 1083 | /* Verify the RPC header */ |
1079 | p = call_verify(task); | 1084 | p = call_verify(task); |
1080 | if (IS_ERR(p)) { | 1085 | if (IS_ERR(p)) { |
1081 | if (p == ERR_PTR(-EAGAIN)) | 1086 | if (p == ERR_PTR(-EAGAIN)) |
1082 | goto out_retry; | 1087 | goto out_retry; |
1083 | return; | 1088 | return; |
1084 | } | 1089 | } |
1085 | 1090 | ||
1086 | task->tk_action = rpc_exit_task; | 1091 | task->tk_action = rpc_exit_task; |
1087 | 1092 | ||
1088 | if (decode) | 1093 | if (decode) |
1089 | task->tk_status = rpcauth_unwrap_resp(task, decode, req, p, | 1094 | task->tk_status = rpcauth_unwrap_resp(task, decode, req, p, |
1090 | task->tk_msg.rpc_resp); | 1095 | task->tk_msg.rpc_resp); |
1091 | dprintk("RPC: %4d call_decode result %d\n", task->tk_pid, | 1096 | dprintk("RPC: %4d call_decode result %d\n", task->tk_pid, |
1092 | task->tk_status); | 1097 | task->tk_status); |
1093 | return; | 1098 | return; |
1094 | out_retry: | 1099 | out_retry: |
1095 | req->rq_received = req->rq_private_buf.len = 0; | 1100 | req->rq_received = req->rq_private_buf.len = 0; |
1096 | task->tk_status = 0; | 1101 | task->tk_status = 0; |
1097 | } | 1102 | } |
1098 | 1103 | ||
1099 | /* | 1104 | /* |
1100 | * 8. Refresh the credentials if rejected by the server | 1105 | * 8. Refresh the credentials if rejected by the server |
1101 | */ | 1106 | */ |
1102 | static void | 1107 | static void |
1103 | call_refresh(struct rpc_task *task) | 1108 | call_refresh(struct rpc_task *task) |
1104 | { | 1109 | { |
1105 | dprintk("RPC: %4d call_refresh\n", task->tk_pid); | 1110 | dprintk("RPC: %4d call_refresh\n", task->tk_pid); |
1106 | 1111 | ||
1107 | xprt_release(task); /* Must do to obtain new XID */ | 1112 | xprt_release(task); /* Must do to obtain new XID */ |
1108 | task->tk_action = call_refreshresult; | 1113 | task->tk_action = call_refreshresult; |
1109 | task->tk_status = 0; | 1114 | task->tk_status = 0; |
1110 | task->tk_client->cl_stats->rpcauthrefresh++; | 1115 | task->tk_client->cl_stats->rpcauthrefresh++; |
1111 | rpcauth_refreshcred(task); | 1116 | rpcauth_refreshcred(task); |
1112 | } | 1117 | } |
1113 | 1118 | ||
1114 | /* | 1119 | /* |
1115 | * 8a. Process the results of a credential refresh | 1120 | * 8a. Process the results of a credential refresh |
1116 | */ | 1121 | */ |
1117 | static void | 1122 | static void |
1118 | call_refreshresult(struct rpc_task *task) | 1123 | call_refreshresult(struct rpc_task *task) |
1119 | { | 1124 | { |
1120 | int status = task->tk_status; | 1125 | int status = task->tk_status; |
1121 | dprintk("RPC: %4d call_refreshresult (status %d)\n", | 1126 | dprintk("RPC: %4d call_refreshresult (status %d)\n", |
1122 | task->tk_pid, task->tk_status); | 1127 | task->tk_pid, task->tk_status); |
1123 | 1128 | ||
1124 | task->tk_status = 0; | 1129 | task->tk_status = 0; |
1125 | task->tk_action = call_reserve; | 1130 | task->tk_action = call_reserve; |
1126 | if (status >= 0 && rpcauth_uptodatecred(task)) | 1131 | if (status >= 0 && rpcauth_uptodatecred(task)) |
1127 | return; | 1132 | return; |
1128 | if (status == -EACCES) { | 1133 | if (status == -EACCES) { |
1129 | rpc_exit(task, -EACCES); | 1134 | rpc_exit(task, -EACCES); |
1130 | return; | 1135 | return; |
1131 | } | 1136 | } |
1132 | task->tk_action = call_refresh; | 1137 | task->tk_action = call_refresh; |
1133 | if (status != -ETIMEDOUT) | 1138 | if (status != -ETIMEDOUT) |
1134 | rpc_delay(task, 3*HZ); | 1139 | rpc_delay(task, 3*HZ); |
1135 | return; | 1140 | return; |
1136 | } | 1141 | } |
1137 | 1142 | ||
1138 | /* | 1143 | /* |
1139 | * Call header serialization | 1144 | * Call header serialization |
1140 | */ | 1145 | */ |
1141 | static u32 * | 1146 | static u32 * |
1142 | call_header(struct rpc_task *task) | 1147 | call_header(struct rpc_task *task) |
1143 | { | 1148 | { |
1144 | struct rpc_clnt *clnt = task->tk_client; | 1149 | struct rpc_clnt *clnt = task->tk_client; |
1145 | struct rpc_rqst *req = task->tk_rqstp; | 1150 | struct rpc_rqst *req = task->tk_rqstp; |
1146 | u32 *p = req->rq_svec[0].iov_base; | 1151 | u32 *p = req->rq_svec[0].iov_base; |
1147 | 1152 | ||
1148 | /* FIXME: check buffer size? */ | 1153 | /* FIXME: check buffer size? */ |
1149 | 1154 | ||
1150 | p = xprt_skip_transport_header(task->tk_xprt, p); | 1155 | p = xprt_skip_transport_header(task->tk_xprt, p); |
1151 | *p++ = req->rq_xid; /* XID */ | 1156 | *p++ = req->rq_xid; /* XID */ |
1152 | *p++ = htonl(RPC_CALL); /* CALL */ | 1157 | *p++ = htonl(RPC_CALL); /* CALL */ |
1153 | *p++ = htonl(RPC_VERSION); /* RPC version */ | 1158 | *p++ = htonl(RPC_VERSION); /* RPC version */ |
1154 | *p++ = htonl(clnt->cl_prog); /* program number */ | 1159 | *p++ = htonl(clnt->cl_prog); /* program number */ |
1155 | *p++ = htonl(clnt->cl_vers); /* program version */ | 1160 | *p++ = htonl(clnt->cl_vers); /* program version */ |
1156 | *p++ = htonl(task->tk_msg.rpc_proc->p_proc); /* procedure */ | 1161 | *p++ = htonl(task->tk_msg.rpc_proc->p_proc); /* procedure */ |
1157 | p = rpcauth_marshcred(task, p); | 1162 | p = rpcauth_marshcred(task, p); |
1158 | req->rq_slen = xdr_adjust_iovec(&req->rq_svec[0], p); | 1163 | req->rq_slen = xdr_adjust_iovec(&req->rq_svec[0], p); |
1159 | return p; | 1164 | return p; |
1160 | } | 1165 | } |
1161 | 1166 | ||
1162 | /* | 1167 | /* |
1163 | * Reply header verification | 1168 | * Reply header verification |
1164 | */ | 1169 | */ |
1165 | static u32 * | 1170 | static u32 * |
1166 | call_verify(struct rpc_task *task) | 1171 | call_verify(struct rpc_task *task) |
1167 | { | 1172 | { |
1168 | struct kvec *iov = &task->tk_rqstp->rq_rcv_buf.head[0]; | 1173 | struct kvec *iov = &task->tk_rqstp->rq_rcv_buf.head[0]; |
1169 | int len = task->tk_rqstp->rq_rcv_buf.len >> 2; | 1174 | int len = task->tk_rqstp->rq_rcv_buf.len >> 2; |
1170 | u32 *p = iov->iov_base, n; | 1175 | u32 *p = iov->iov_base, n; |
1171 | int error = -EACCES; | 1176 | int error = -EACCES; |
1172 | 1177 | ||
1173 | if ((len -= 3) < 0) | 1178 | if ((len -= 3) < 0) |
1174 | goto out_overflow; | 1179 | goto out_overflow; |
1175 | p += 1; /* skip XID */ | 1180 | p += 1; /* skip XID */ |
1176 | 1181 | ||
1177 | if ((n = ntohl(*p++)) != RPC_REPLY) { | 1182 | if ((n = ntohl(*p++)) != RPC_REPLY) { |
1178 | printk(KERN_WARNING "call_verify: not an RPC reply: %x\n", n); | 1183 | printk(KERN_WARNING "call_verify: not an RPC reply: %x\n", n); |
1179 | goto out_garbage; | 1184 | goto out_garbage; |
1180 | } | 1185 | } |
1181 | if ((n = ntohl(*p++)) != RPC_MSG_ACCEPTED) { | 1186 | if ((n = ntohl(*p++)) != RPC_MSG_ACCEPTED) { |
1182 | if (--len < 0) | 1187 | if (--len < 0) |
1183 | goto out_overflow; | 1188 | goto out_overflow; |
1184 | switch ((n = ntohl(*p++))) { | 1189 | switch ((n = ntohl(*p++))) { |
1185 | case RPC_AUTH_ERROR: | 1190 | case RPC_AUTH_ERROR: |
1186 | break; | 1191 | break; |
1187 | case RPC_MISMATCH: | 1192 | case RPC_MISMATCH: |
1188 | dprintk("%s: RPC call version mismatch!\n", __FUNCTION__); | 1193 | dprintk("%s: RPC call version mismatch!\n", __FUNCTION__); |
1189 | error = -EPROTONOSUPPORT; | 1194 | error = -EPROTONOSUPPORT; |
1190 | goto out_err; | 1195 | goto out_err; |
1191 | default: | 1196 | default: |
1192 | dprintk("%s: RPC call rejected, unknown error: %x\n", __FUNCTION__, n); | 1197 | dprintk("%s: RPC call rejected, unknown error: %x\n", __FUNCTION__, n); |
1193 | goto out_eio; | 1198 | goto out_eio; |
1194 | } | 1199 | } |
1195 | if (--len < 0) | 1200 | if (--len < 0) |
1196 | goto out_overflow; | 1201 | goto out_overflow; |
1197 | switch ((n = ntohl(*p++))) { | 1202 | switch ((n = ntohl(*p++))) { |
1198 | case RPC_AUTH_REJECTEDCRED: | 1203 | case RPC_AUTH_REJECTEDCRED: |
1199 | case RPC_AUTH_REJECTEDVERF: | 1204 | case RPC_AUTH_REJECTEDVERF: |
1200 | case RPCSEC_GSS_CREDPROBLEM: | 1205 | case RPCSEC_GSS_CREDPROBLEM: |
1201 | case RPCSEC_GSS_CTXPROBLEM: | 1206 | case RPCSEC_GSS_CTXPROBLEM: |
1202 | if (!task->tk_cred_retry) | 1207 | if (!task->tk_cred_retry) |
1203 | break; | 1208 | break; |
1204 | task->tk_cred_retry--; | 1209 | task->tk_cred_retry--; |
1205 | dprintk("RPC: %4d call_verify: retry stale creds\n", | 1210 | dprintk("RPC: %4d call_verify: retry stale creds\n", |
1206 | task->tk_pid); | 1211 | task->tk_pid); |
1207 | rpcauth_invalcred(task); | 1212 | rpcauth_invalcred(task); |
1208 | task->tk_action = call_refresh; | 1213 | task->tk_action = call_refresh; |
1209 | goto out_retry; | 1214 | goto out_retry; |
1210 | case RPC_AUTH_BADCRED: | 1215 | case RPC_AUTH_BADCRED: |
1211 | case RPC_AUTH_BADVERF: | 1216 | case RPC_AUTH_BADVERF: |
1212 | /* possibly garbled cred/verf? */ | 1217 | /* possibly garbled cred/verf? */ |
1213 | if (!task->tk_garb_retry) | 1218 | if (!task->tk_garb_retry) |
1214 | break; | 1219 | break; |
1215 | task->tk_garb_retry--; | 1220 | task->tk_garb_retry--; |
1216 | dprintk("RPC: %4d call_verify: retry garbled creds\n", | 1221 | dprintk("RPC: %4d call_verify: retry garbled creds\n", |
1217 | task->tk_pid); | 1222 | task->tk_pid); |
1218 | task->tk_action = call_bind; | 1223 | task->tk_action = call_bind; |
1219 | goto out_retry; | 1224 | goto out_retry; |
1220 | case RPC_AUTH_TOOWEAK: | 1225 | case RPC_AUTH_TOOWEAK: |
1221 | printk(KERN_NOTICE "call_verify: server %s requires stronger " | 1226 | printk(KERN_NOTICE "call_verify: server %s requires stronger " |
1222 | "authentication.\n", task->tk_client->cl_server); | 1227 | "authentication.\n", task->tk_client->cl_server); |
1223 | break; | 1228 | break; |
1224 | default: | 1229 | default: |
1225 | printk(KERN_WARNING "call_verify: unknown auth error: %x\n", n); | 1230 | printk(KERN_WARNING "call_verify: unknown auth error: %x\n", n); |
1226 | error = -EIO; | 1231 | error = -EIO; |
1227 | } | 1232 | } |
1228 | dprintk("RPC: %4d call_verify: call rejected %d\n", | 1233 | dprintk("RPC: %4d call_verify: call rejected %d\n", |
1229 | task->tk_pid, n); | 1234 | task->tk_pid, n); |
1230 | goto out_err; | 1235 | goto out_err; |
1231 | } | 1236 | } |
1232 | if (!(p = rpcauth_checkverf(task, p))) { | 1237 | if (!(p = rpcauth_checkverf(task, p))) { |
1233 | printk(KERN_WARNING "call_verify: auth check failed\n"); | 1238 | printk(KERN_WARNING "call_verify: auth check failed\n"); |
1234 | goto out_garbage; /* bad verifier, retry */ | 1239 | goto out_garbage; /* bad verifier, retry */ |
1235 | } | 1240 | } |
1236 | len = p - (u32 *)iov->iov_base - 1; | 1241 | len = p - (u32 *)iov->iov_base - 1; |
1237 | if (len < 0) | 1242 | if (len < 0) |
1238 | goto out_overflow; | 1243 | goto out_overflow; |
1239 | switch ((n = ntohl(*p++))) { | 1244 | switch ((n = ntohl(*p++))) { |
1240 | case RPC_SUCCESS: | 1245 | case RPC_SUCCESS: |
1241 | return p; | 1246 | return p; |
1242 | case RPC_PROG_UNAVAIL: | 1247 | case RPC_PROG_UNAVAIL: |
1243 | dprintk("RPC: call_verify: program %u is unsupported by server %s\n", | 1248 | dprintk("RPC: call_verify: program %u is unsupported by server %s\n", |
1244 | (unsigned int)task->tk_client->cl_prog, | 1249 | (unsigned int)task->tk_client->cl_prog, |
1245 | task->tk_client->cl_server); | 1250 | task->tk_client->cl_server); |
1246 | error = -EPFNOSUPPORT; | 1251 | error = -EPFNOSUPPORT; |
1247 | goto out_err; | 1252 | goto out_err; |
1248 | case RPC_PROG_MISMATCH: | 1253 | case RPC_PROG_MISMATCH: |
1249 | dprintk("RPC: call_verify: program %u, version %u unsupported by server %s\n", | 1254 | dprintk("RPC: call_verify: program %u, version %u unsupported by server %s\n", |
1250 | (unsigned int)task->tk_client->cl_prog, | 1255 | (unsigned int)task->tk_client->cl_prog, |
1251 | (unsigned int)task->tk_client->cl_vers, | 1256 | (unsigned int)task->tk_client->cl_vers, |
1252 | task->tk_client->cl_server); | 1257 | task->tk_client->cl_server); |
1253 | error = -EPROTONOSUPPORT; | 1258 | error = -EPROTONOSUPPORT; |
1254 | goto out_err; | 1259 | goto out_err; |
1255 | case RPC_PROC_UNAVAIL: | 1260 | case RPC_PROC_UNAVAIL: |
1256 | dprintk("RPC: call_verify: proc %p unsupported by program %u, version %u on server %s\n", | 1261 | dprintk("RPC: call_verify: proc %p unsupported by program %u, version %u on server %s\n", |
1257 | task->tk_msg.rpc_proc, | 1262 | task->tk_msg.rpc_proc, |
1258 | task->tk_client->cl_prog, | 1263 | task->tk_client->cl_prog, |
1259 | task->tk_client->cl_vers, | 1264 | task->tk_client->cl_vers, |
1260 | task->tk_client->cl_server); | 1265 | task->tk_client->cl_server); |
1261 | error = -EOPNOTSUPP; | 1266 | error = -EOPNOTSUPP; |
1262 | goto out_err; | 1267 | goto out_err; |
1263 | case RPC_GARBAGE_ARGS: | 1268 | case RPC_GARBAGE_ARGS: |
1264 | dprintk("RPC: %4d %s: server saw garbage\n", task->tk_pid, __FUNCTION__); | 1269 | dprintk("RPC: %4d %s: server saw garbage\n", task->tk_pid, __FUNCTION__); |
1265 | break; /* retry */ | 1270 | break; /* retry */ |
1266 | default: | 1271 | default: |
1267 | printk(KERN_WARNING "call_verify: server accept status: %x\n", n); | 1272 | printk(KERN_WARNING "call_verify: server accept status: %x\n", n); |
1268 | /* Also retry */ | 1273 | /* Also retry */ |
1269 | } | 1274 | } |
1270 | 1275 | ||
1271 | out_garbage: | 1276 | out_garbage: |
1272 | task->tk_client->cl_stats->rpcgarbage++; | 1277 | task->tk_client->cl_stats->rpcgarbage++; |
1273 | if (task->tk_garb_retry) { | 1278 | if (task->tk_garb_retry) { |
1274 | task->tk_garb_retry--; | 1279 | task->tk_garb_retry--; |
1275 | dprintk("RPC %s: retrying %4d\n", __FUNCTION__, task->tk_pid); | 1280 | dprintk("RPC %s: retrying %4d\n", __FUNCTION__, task->tk_pid); |
1276 | task->tk_action = call_bind; | 1281 | task->tk_action = call_bind; |
1277 | out_retry: | 1282 | out_retry: |
1278 | return ERR_PTR(-EAGAIN); | 1283 | return ERR_PTR(-EAGAIN); |
1279 | } | 1284 | } |
1280 | printk(KERN_WARNING "RPC %s: retry failed, exit EIO\n", __FUNCTION__); | 1285 | printk(KERN_WARNING "RPC %s: retry failed, exit EIO\n", __FUNCTION__); |
1281 | out_eio: | 1286 | out_eio: |
1282 | error = -EIO; | 1287 | error = -EIO; |
1283 | out_err: | 1288 | out_err: |
1284 | rpc_exit(task, error); | 1289 | rpc_exit(task, error); |
1285 | return ERR_PTR(error); | 1290 | return ERR_PTR(error); |
1286 | out_overflow: | 1291 | out_overflow: |
1287 | printk(KERN_WARNING "RPC %s: server reply was truncated.\n", __FUNCTION__); | 1292 | printk(KERN_WARNING "RPC %s: server reply was truncated.\n", __FUNCTION__); |
1288 | goto out_garbage; | 1293 | goto out_garbage; |
1289 | } | 1294 | } |
1290 | 1295 | ||
1291 | static int rpcproc_encode_null(void *rqstp, u32 *data, void *obj) | 1296 | static int rpcproc_encode_null(void *rqstp, u32 *data, void *obj) |
1292 | { | 1297 | { |
1293 | return 0; | 1298 | return 0; |
1294 | } | 1299 | } |
1295 | 1300 | ||
1296 | static int rpcproc_decode_null(void *rqstp, u32 *data, void *obj) | 1301 | static int rpcproc_decode_null(void *rqstp, u32 *data, void *obj) |
1297 | { | 1302 | { |
1298 | return 0; | 1303 | return 0; |
1299 | } | 1304 | } |
1300 | 1305 | ||
1301 | static struct rpc_procinfo rpcproc_null = { | 1306 | static struct rpc_procinfo rpcproc_null = { |
1302 | .p_encode = rpcproc_encode_null, | 1307 | .p_encode = rpcproc_encode_null, |
1303 | .p_decode = rpcproc_decode_null, | 1308 | .p_decode = rpcproc_decode_null, |
1304 | }; | 1309 | }; |
1305 | 1310 | ||
1306 | int rpc_ping(struct rpc_clnt *clnt, int flags) | 1311 | int rpc_ping(struct rpc_clnt *clnt, int flags) |
1307 | { | 1312 | { |
1308 | struct rpc_message msg = { | 1313 | struct rpc_message msg = { |
1309 | .rpc_proc = &rpcproc_null, | 1314 | .rpc_proc = &rpcproc_null, |
1310 | }; | 1315 | }; |
1311 | int err; | 1316 | int err; |
1312 | msg.rpc_cred = authnull_ops.lookup_cred(NULL, NULL, 0); | 1317 | msg.rpc_cred = authnull_ops.lookup_cred(NULL, NULL, 0); |
1313 | err = rpc_call_sync(clnt, &msg, flags); | 1318 | err = rpc_call_sync(clnt, &msg, flags); |
1314 | put_rpccred(msg.rpc_cred); | 1319 | put_rpccred(msg.rpc_cred); |
1315 | return err; | 1320 | return err; |
1316 | } | 1321 | } |
1317 | 1322 |
net/sunrpc/xprt.c
1 | /* | 1 | /* |
2 | * linux/net/sunrpc/xprt.c | 2 | * linux/net/sunrpc/xprt.c |
3 | * | 3 | * |
4 | * This is a generic RPC call interface supporting congestion avoidance, | 4 | * This is a generic RPC call interface supporting congestion avoidance, |
5 | * and asynchronous calls. | 5 | * and asynchronous calls. |
6 | * | 6 | * |
7 | * The interface works like this: | 7 | * The interface works like this: |
8 | * | 8 | * |
9 | * - When a process places a call, it allocates a request slot if | 9 | * - When a process places a call, it allocates a request slot if |
10 | * one is available. Otherwise, it sleeps on the backlog queue | 10 | * one is available. Otherwise, it sleeps on the backlog queue |
11 | * (xprt_reserve). | 11 | * (xprt_reserve). |
12 | * - Next, the caller puts together the RPC message, stuffs it into | 12 | * - Next, the caller puts together the RPC message, stuffs it into |
13 | * the request struct, and calls xprt_transmit(). | 13 | * the request struct, and calls xprt_transmit(). |
14 | * - xprt_transmit sends the message and installs the caller on the | 14 | * - xprt_transmit sends the message and installs the caller on the |
15 | * transport's wait list. At the same time, it installs a timer that | 15 | * transport's wait list. At the same time, it installs a timer that |
16 | * is run after the packet's timeout has expired. | 16 | * is run after the packet's timeout has expired. |
17 | * - When a packet arrives, the data_ready handler walks the list of | 17 | * - When a packet arrives, the data_ready handler walks the list of |
18 | * pending requests for that transport. If a matching XID is found, the | 18 | * pending requests for that transport. If a matching XID is found, the |
19 | * caller is woken up, and the timer removed. | 19 | * caller is woken up, and the timer removed. |
20 | * - When no reply arrives within the timeout interval, the timer is | 20 | * - When no reply arrives within the timeout interval, the timer is |
21 | * fired by the kernel and runs xprt_timer(). It either adjusts the | 21 | * fired by the kernel and runs xprt_timer(). It either adjusts the |
22 | * timeout values (minor timeout) or wakes up the caller with a status | 22 | * timeout values (minor timeout) or wakes up the caller with a status |
23 | * of -ETIMEDOUT. | 23 | * of -ETIMEDOUT. |
24 | * - When the caller receives a notification from RPC that a reply arrived, | 24 | * - When the caller receives a notification from RPC that a reply arrived, |
25 | * it should release the RPC slot, and process the reply. | 25 | * it should release the RPC slot, and process the reply. |
26 | * If the call timed out, it may choose to retry the operation by | 26 | * If the call timed out, it may choose to retry the operation by |
27 | * adjusting the initial timeout value, and simply calling rpc_call | 27 | * adjusting the initial timeout value, and simply calling rpc_call |
28 | * again. | 28 | * again. |
29 | * | 29 | * |
30 | * Support for async RPC is done through a set of RPC-specific scheduling | 30 | * Support for async RPC is done through a set of RPC-specific scheduling |
31 | * primitives that `transparently' work for processes as well as async | 31 | * primitives that `transparently' work for processes as well as async |
32 | * tasks that rely on callbacks. | 32 | * tasks that rely on callbacks. |
33 | * | 33 | * |
34 | * Copyright (C) 1995-1997, Olaf Kirch <okir@monad.swb.de> | 34 | * Copyright (C) 1995-1997, Olaf Kirch <okir@monad.swb.de> |
35 | * | 35 | * |
36 | * Transport switch API copyright (C) 2005, Chuck Lever <cel@netapp.com> | 36 | * Transport switch API copyright (C) 2005, Chuck Lever <cel@netapp.com> |
37 | */ | 37 | */ |
38 | 38 | ||
39 | #include <linux/module.h> | 39 | #include <linux/module.h> |
40 | 40 | ||
41 | #include <linux/types.h> | 41 | #include <linux/types.h> |
42 | #include <linux/interrupt.h> | 42 | #include <linux/interrupt.h> |
43 | #include <linux/workqueue.h> | 43 | #include <linux/workqueue.h> |
44 | #include <linux/random.h> | 44 | #include <linux/random.h> |
45 | 45 | ||
46 | #include <linux/sunrpc/clnt.h> | 46 | #include <linux/sunrpc/clnt.h> |
47 | #include <linux/sunrpc/metrics.h> | 47 | #include <linux/sunrpc/metrics.h> |
48 | 48 | ||
49 | /* | 49 | /* |
50 | * Local variables | 50 | * Local variables |
51 | */ | 51 | */ |
52 | 52 | ||
53 | #ifdef RPC_DEBUG | 53 | #ifdef RPC_DEBUG |
54 | # define RPCDBG_FACILITY RPCDBG_XPRT | 54 | # define RPCDBG_FACILITY RPCDBG_XPRT |
55 | #endif | 55 | #endif |
56 | 56 | ||
57 | /* | 57 | /* |
58 | * Local functions | 58 | * Local functions |
59 | */ | 59 | */ |
60 | static void xprt_request_init(struct rpc_task *, struct rpc_xprt *); | 60 | static void xprt_request_init(struct rpc_task *, struct rpc_xprt *); |
61 | static inline void do_xprt_reserve(struct rpc_task *); | 61 | static inline void do_xprt_reserve(struct rpc_task *); |
62 | static void xprt_connect_status(struct rpc_task *task); | 62 | static void xprt_connect_status(struct rpc_task *task); |
63 | static int __xprt_get_cong(struct rpc_xprt *, struct rpc_task *); | 63 | static int __xprt_get_cong(struct rpc_xprt *, struct rpc_task *); |
64 | 64 | ||
65 | /* | 65 | /* |
66 | * The transport code maintains an estimate on the maximum number of out- | 66 | * The transport code maintains an estimate on the maximum number of out- |
67 | * standing RPC requests, using a smoothed version of the congestion | 67 | * standing RPC requests, using a smoothed version of the congestion |
68 | * avoidance implemented in 44BSD. This is basically the Van Jacobson | 68 | * avoidance implemented in 44BSD. This is basically the Van Jacobson |
69 | * congestion algorithm: If a retransmit occurs, the congestion window is | 69 | * congestion algorithm: If a retransmit occurs, the congestion window is |
70 | * halved; otherwise, it is incremented by 1/cwnd when | 70 | * halved; otherwise, it is incremented by 1/cwnd when |
71 | * | 71 | * |
72 | * - a reply is received and | 72 | * - a reply is received and |
73 | * - a full number of requests are outstanding and | 73 | * - a full number of requests are outstanding and |
74 | * - the congestion window hasn't been updated recently. | 74 | * - the congestion window hasn't been updated recently. |
75 | */ | 75 | */ |
76 | #define RPC_CWNDSHIFT (8U) | 76 | #define RPC_CWNDSHIFT (8U) |
77 | #define RPC_CWNDSCALE (1U << RPC_CWNDSHIFT) | 77 | #define RPC_CWNDSCALE (1U << RPC_CWNDSHIFT) |
78 | #define RPC_INITCWND RPC_CWNDSCALE | 78 | #define RPC_INITCWND RPC_CWNDSCALE |
79 | #define RPC_MAXCWND(xprt) ((xprt)->max_reqs << RPC_CWNDSHIFT) | 79 | #define RPC_MAXCWND(xprt) ((xprt)->max_reqs << RPC_CWNDSHIFT) |
80 | 80 | ||
81 | #define RPCXPRT_CONGESTED(xprt) ((xprt)->cong >= (xprt)->cwnd) | 81 | #define RPCXPRT_CONGESTED(xprt) ((xprt)->cong >= (xprt)->cwnd) |
82 | 82 | ||
83 | /** | 83 | /** |
84 | * xprt_reserve_xprt - serialize write access to transports | 84 | * xprt_reserve_xprt - serialize write access to transports |
85 | * @task: task that is requesting access to the transport | 85 | * @task: task that is requesting access to the transport |
86 | * | 86 | * |
87 | * This prevents mixing the payload of separate requests, and prevents | 87 | * This prevents mixing the payload of separate requests, and prevents |
88 | * transport connects from colliding with writes. No congestion control | 88 | * transport connects from colliding with writes. No congestion control |
89 | * is provided. | 89 | * is provided. |
90 | */ | 90 | */ |
91 | int xprt_reserve_xprt(struct rpc_task *task) | 91 | int xprt_reserve_xprt(struct rpc_task *task) |
92 | { | 92 | { |
93 | struct rpc_xprt *xprt = task->tk_xprt; | 93 | struct rpc_xprt *xprt = task->tk_xprt; |
94 | struct rpc_rqst *req = task->tk_rqstp; | 94 | struct rpc_rqst *req = task->tk_rqstp; |
95 | 95 | ||
96 | if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) { | 96 | if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) { |
97 | if (task == xprt->snd_task) | 97 | if (task == xprt->snd_task) |
98 | return 1; | 98 | return 1; |
99 | if (task == NULL) | 99 | if (task == NULL) |
100 | return 0; | 100 | return 0; |
101 | goto out_sleep; | 101 | goto out_sleep; |
102 | } | 102 | } |
103 | xprt->snd_task = task; | 103 | xprt->snd_task = task; |
104 | if (req) { | 104 | if (req) { |
105 | req->rq_bytes_sent = 0; | 105 | req->rq_bytes_sent = 0; |
106 | req->rq_ntrans++; | 106 | req->rq_ntrans++; |
107 | } | 107 | } |
108 | return 1; | 108 | return 1; |
109 | 109 | ||
110 | out_sleep: | 110 | out_sleep: |
111 | dprintk("RPC: %4d failed to lock transport %p\n", | 111 | dprintk("RPC: %4d failed to lock transport %p\n", |
112 | task->tk_pid, xprt); | 112 | task->tk_pid, xprt); |
113 | task->tk_timeout = 0; | 113 | task->tk_timeout = 0; |
114 | task->tk_status = -EAGAIN; | 114 | task->tk_status = -EAGAIN; |
115 | if (req && req->rq_ntrans) | 115 | if (req && req->rq_ntrans) |
116 | rpc_sleep_on(&xprt->resend, task, NULL, NULL); | 116 | rpc_sleep_on(&xprt->resend, task, NULL, NULL); |
117 | else | 117 | else |
118 | rpc_sleep_on(&xprt->sending, task, NULL, NULL); | 118 | rpc_sleep_on(&xprt->sending, task, NULL, NULL); |
119 | return 0; | 119 | return 0; |
120 | } | 120 | } |
121 | 121 | ||
122 | static void xprt_clear_locked(struct rpc_xprt *xprt) | 122 | static void xprt_clear_locked(struct rpc_xprt *xprt) |
123 | { | 123 | { |
124 | xprt->snd_task = NULL; | 124 | xprt->snd_task = NULL; |
125 | if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state) || xprt->shutdown) { | 125 | if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state) || xprt->shutdown) { |
126 | smp_mb__before_clear_bit(); | 126 | smp_mb__before_clear_bit(); |
127 | clear_bit(XPRT_LOCKED, &xprt->state); | 127 | clear_bit(XPRT_LOCKED, &xprt->state); |
128 | smp_mb__after_clear_bit(); | 128 | smp_mb__after_clear_bit(); |
129 | } else | 129 | } else |
130 | schedule_work(&xprt->task_cleanup); | 130 | schedule_work(&xprt->task_cleanup); |
131 | } | 131 | } |
132 | 132 | ||
133 | /* | 133 | /* |
134 | * xprt_reserve_xprt_cong - serialize write access to transports | 134 | * xprt_reserve_xprt_cong - serialize write access to transports |
135 | * @task: task that is requesting access to the transport | 135 | * @task: task that is requesting access to the transport |
136 | * | 136 | * |
137 | * Same as xprt_reserve_xprt, but Van Jacobson congestion control is | 137 | * Same as xprt_reserve_xprt, but Van Jacobson congestion control is |
138 | * integrated into the decision of whether a request is allowed to be | 138 | * integrated into the decision of whether a request is allowed to be |
139 | * woken up and given access to the transport. | 139 | * woken up and given access to the transport. |
140 | */ | 140 | */ |
141 | int xprt_reserve_xprt_cong(struct rpc_task *task) | 141 | int xprt_reserve_xprt_cong(struct rpc_task *task) |
142 | { | 142 | { |
143 | struct rpc_xprt *xprt = task->tk_xprt; | 143 | struct rpc_xprt *xprt = task->tk_xprt; |
144 | struct rpc_rqst *req = task->tk_rqstp; | 144 | struct rpc_rqst *req = task->tk_rqstp; |
145 | 145 | ||
146 | if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) { | 146 | if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) { |
147 | if (task == xprt->snd_task) | 147 | if (task == xprt->snd_task) |
148 | return 1; | 148 | return 1; |
149 | goto out_sleep; | 149 | goto out_sleep; |
150 | } | 150 | } |
151 | if (__xprt_get_cong(xprt, task)) { | 151 | if (__xprt_get_cong(xprt, task)) { |
152 | xprt->snd_task = task; | 152 | xprt->snd_task = task; |
153 | if (req) { | 153 | if (req) { |
154 | req->rq_bytes_sent = 0; | 154 | req->rq_bytes_sent = 0; |
155 | req->rq_ntrans++; | 155 | req->rq_ntrans++; |
156 | } | 156 | } |
157 | return 1; | 157 | return 1; |
158 | } | 158 | } |
159 | xprt_clear_locked(xprt); | 159 | xprt_clear_locked(xprt); |
160 | out_sleep: | 160 | out_sleep: |
161 | dprintk("RPC: %4d failed to lock transport %p\n", task->tk_pid, xprt); | 161 | dprintk("RPC: %4d failed to lock transport %p\n", task->tk_pid, xprt); |
162 | task->tk_timeout = 0; | 162 | task->tk_timeout = 0; |
163 | task->tk_status = -EAGAIN; | 163 | task->tk_status = -EAGAIN; |
164 | if (req && req->rq_ntrans) | 164 | if (req && req->rq_ntrans) |
165 | rpc_sleep_on(&xprt->resend, task, NULL, NULL); | 165 | rpc_sleep_on(&xprt->resend, task, NULL, NULL); |
166 | else | 166 | else |
167 | rpc_sleep_on(&xprt->sending, task, NULL, NULL); | 167 | rpc_sleep_on(&xprt->sending, task, NULL, NULL); |
168 | return 0; | 168 | return 0; |
169 | } | 169 | } |
170 | 170 | ||
171 | static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task) | 171 | static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task) |
172 | { | 172 | { |
173 | int retval; | 173 | int retval; |
174 | 174 | ||
175 | spin_lock_bh(&xprt->transport_lock); | 175 | spin_lock_bh(&xprt->transport_lock); |
176 | retval = xprt->ops->reserve_xprt(task); | 176 | retval = xprt->ops->reserve_xprt(task); |
177 | spin_unlock_bh(&xprt->transport_lock); | 177 | spin_unlock_bh(&xprt->transport_lock); |
178 | return retval; | 178 | return retval; |
179 | } | 179 | } |
180 | 180 | ||
/*
 * Hand the transport write lock to the next waiting task.
 *
 * Grabs XPRT_LOCKED (bailing out if somebody already holds it) and
 * assigns the lock to the next task sleeping on the resend queue,
 * falling back to the sending queue. If nobody is waiting, the lock
 * is dropped again.
 */
static void __xprt_lock_write_next(struct rpc_xprt *xprt)
{
	struct rpc_task *task;
	struct rpc_rqst *req;

	/* Another task already holds the write lock: nothing to do. */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;

	/* Retransmissions take priority over fresh sends. */
	task = rpc_wake_up_next(&xprt->resend);
	if (!task) {
		task = rpc_wake_up_next(&xprt->sending);
		if (!task)
			goto out_unlock;
	}

	req = task->tk_rqstp;
	xprt->snd_task = task;
	if (req) {
		/* (Re)start the transmission from the first byte. */
		req->rq_bytes_sent = 0;
		req->rq_ntrans++;
	}
	return;

out_unlock:
	/* Nobody was waiting: release the lock we just took. */
	xprt_clear_locked(xprt);
}
207 | 207 | ||
/*
 * Hand the transport write lock to the next waiting task, but only if
 * the congestion window still has room for another request in flight.
 */
static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
{
	struct rpc_task *task;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	/* No point waking anyone while the congestion window is full. */
	if (RPCXPRT_CONGESTED(xprt))
		goto out_unlock;
	/* Retransmissions take priority over fresh sends. */
	task = rpc_wake_up_next(&xprt->resend);
	if (!task) {
		task = rpc_wake_up_next(&xprt->sending);
		if (!task)
			goto out_unlock;
	}
	/* Only grant the lock if the task can get a congestion slot. */
	if (__xprt_get_cong(xprt, task)) {
		struct rpc_rqst *req = task->tk_rqstp;
		xprt->snd_task = task;
		if (req) {
			/* (Re)start the transmission from the first byte. */
			req->rq_bytes_sent = 0;
			req->rq_ntrans++;
		}
		return;
	}
out_unlock:
	xprt_clear_locked(xprt);
}
234 | 234 | ||
/**
 * xprt_release_xprt - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL. No congestion control is provided.
 */
void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		/* Drop the write lock, then hand it to the next waiter. */
		xprt_clear_locked(xprt);
		__xprt_lock_write_next(xprt);
	}
}
249 | 249 | ||
/**
 * xprt_release_xprt_cong - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL. Another task is awoken to use the
 * transport if the transport's congestion window allows it.
 */
void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		/* Drop the write lock, then hand it to the next waiter
		 * subject to the congestion window. */
		xprt_clear_locked(xprt);
		__xprt_lock_write_next_cong(xprt);
	}
}
265 | 265 | ||
/*
 * Release the transport write lock via the transport's release_xprt
 * operation, serialized against soft-IRQ users by transport_lock.
 */
static inline void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	spin_lock_bh(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	spin_unlock_bh(&xprt->transport_lock);
}
272 | 272 | ||
/*
 * Van Jacobson congestion avoidance. Check if the congestion window
 * overflowed. (The caller puts the task to sleep if this is the case;
 * this function itself never sleeps.)
 *
 * Returns 1 if the request may be transmitted — either it already owns
 * a congestion slot, or a new one was just granted — and 0 if the
 * window is full.
 */
static int
__xprt_get_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	/* The request already holds a slot in the congestion window. */
	if (req->rq_cong)
		return 1;
	dprintk("RPC: %4d xprt_cwnd_limited cong = %ld cwnd = %ld\n",
			task->tk_pid, xprt->cong, xprt->cwnd);
	if (RPCXPRT_CONGESTED(xprt))
		return 0;
	/* Grant the request a slot and charge it to the window. */
	req->rq_cong = 1;
	xprt->cong += RPC_CWNDSCALE;
	return 1;
}
292 | 292 | ||
/*
 * Adjust the congestion window, and wake up the next task
 * that has been sleeping due to congestion.
 */
static void
__xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	/* Nothing to release if the request never held a slot. */
	if (!req->rq_cong)
		return;
	/* Return the slot to the window... */
	req->rq_cong = 0;
	xprt->cong -= RPC_CWNDSCALE;
	/* ...and let the next congestion-blocked task try for the lock. */
	__xprt_lock_write_next_cong(xprt);
}
306 | 306 | ||
/**
 * xprt_release_rqst_cong - housekeeping when request is complete
 * @task: RPC request that recently completed
 *
 * Useful for transports that require congestion control.
 */
void xprt_release_rqst_cong(struct rpc_task *task)
{
	/* Give the request's congestion slot back to the transport. */
	__xprt_put_cong(task->tk_xprt, task->tk_rqstp);
}
317 | 317 | ||
/**
 * xprt_adjust_cwnd - adjust transport congestion window
 * @task: recently completed RPC request used to adjust window
 * @result: result code of completed RPC request
 *
 * We use a time-smoothed congestion estimator to avoid heavy oscillation.
 * On success while the window is saturated, cwnd grows additively; on a
 * timeout it is halved, never dropping below one slot (RPC_CWNDSCALE).
 */
void xprt_adjust_cwnd(struct rpc_task *task, int result)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = task->tk_xprt;
	unsigned long cwnd = xprt->cwnd;

	if (result >= 0 && cwnd <= xprt->cong) {
		/* The (cwnd >> 1) term makes sure
		 * the result gets rounded properly. */
		cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
		if (cwnd > RPC_MAXCWND(xprt))
			cwnd = RPC_MAXCWND(xprt);
		/* The window grew: give another waiter a chance to send. */
		__xprt_lock_write_next_cong(xprt);
	} else if (result == -ETIMEDOUT) {
		/* Timeout: multiplicative decrease, floor of one slot. */
		cwnd >>= 1;
		if (cwnd < RPC_CWNDSCALE)
			cwnd = RPC_CWNDSCALE;
	}
	dprintk("RPC: cong %ld, cwnd was %ld, now %ld\n",
			xprt->cong, xprt->cwnd, cwnd);
	xprt->cwnd = cwnd;
	/* Release this request's congestion slot under the new window. */
	__xprt_put_cong(xprt, req);
}
348 | 348 | ||
349 | /** | 349 | /** |
350 | * xprt_wake_pending_tasks - wake all tasks on a transport's pending queue | 350 | * xprt_wake_pending_tasks - wake all tasks on a transport's pending queue |
351 | * @xprt: transport with waiting tasks | 351 | * @xprt: transport with waiting tasks |
352 | * @status: result code to plant in each task before waking it | 352 | * @status: result code to plant in each task before waking it |
353 | * | 353 | * |
354 | */ | 354 | */ |
355 | void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status) | 355 | void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status) |
356 | { | 356 | { |
357 | if (status < 0) | 357 | if (status < 0) |
358 | rpc_wake_up_status(&xprt->pending, status); | 358 | rpc_wake_up_status(&xprt->pending, status); |
359 | else | 359 | else |
360 | rpc_wake_up(&xprt->pending); | 360 | rpc_wake_up(&xprt->pending); |
361 | } | 361 | } |
362 | 362 | ||
363 | /** | 363 | /** |
364 | * xprt_wait_for_buffer_space - wait for transport output buffer to clear | 364 | * xprt_wait_for_buffer_space - wait for transport output buffer to clear |
365 | * @task: task to be put to sleep | 365 | * @task: task to be put to sleep |
366 | * | 366 | * |
367 | */ | 367 | */ |
368 | void xprt_wait_for_buffer_space(struct rpc_task *task) | 368 | void xprt_wait_for_buffer_space(struct rpc_task *task) |
369 | { | 369 | { |
370 | struct rpc_rqst *req = task->tk_rqstp; | 370 | struct rpc_rqst *req = task->tk_rqstp; |
371 | struct rpc_xprt *xprt = req->rq_xprt; | 371 | struct rpc_xprt *xprt = req->rq_xprt; |
372 | 372 | ||
373 | task->tk_timeout = req->rq_timeout; | 373 | task->tk_timeout = req->rq_timeout; |
374 | rpc_sleep_on(&xprt->pending, task, NULL, NULL); | 374 | rpc_sleep_on(&xprt->pending, task, NULL, NULL); |
375 | } | 375 | } |
376 | 376 | ||
/**
 * xprt_write_space - wake the task waiting for transport output buffer space
 * @xprt: transport with waiting tasks
 *
 * Can be called in a soft IRQ context, so xprt_write_space never sleeps.
 */
void xprt_write_space(struct rpc_xprt *xprt)
{
	/* Nothing to do if the transport is being torn down. */
	if (unlikely(xprt->shutdown))
		return;

	spin_lock_bh(&xprt->transport_lock);
	/* Only the current write-lock holder can be blocked on buffer space. */
	if (xprt->snd_task) {
		dprintk("RPC: write space: waking waiting task on xprt %p\n",
				xprt);
		rpc_wake_up_task(xprt->snd_task);
	}
	spin_unlock_bh(&xprt->transport_lock);
}
396 | 396 | ||
397 | /** | 397 | /** |
398 | * xprt_set_retrans_timeout_def - set a request's retransmit timeout | 398 | * xprt_set_retrans_timeout_def - set a request's retransmit timeout |
399 | * @task: task whose timeout is to be set | 399 | * @task: task whose timeout is to be set |
400 | * | 400 | * |
401 | * Set a request's retransmit timeout based on the transport's | 401 | * Set a request's retransmit timeout based on the transport's |
402 | * default timeout parameters. Used by transports that don't adjust | 402 | * default timeout parameters. Used by transports that don't adjust |
403 | * the retransmit timeout based on round-trip time estimation. | 403 | * the retransmit timeout based on round-trip time estimation. |
404 | */ | 404 | */ |
405 | void xprt_set_retrans_timeout_def(struct rpc_task *task) | 405 | void xprt_set_retrans_timeout_def(struct rpc_task *task) |
406 | { | 406 | { |
407 | task->tk_timeout = task->tk_rqstp->rq_timeout; | 407 | task->tk_timeout = task->tk_rqstp->rq_timeout; |
408 | } | 408 | } |
409 | 409 | ||
/**
 * xprt_set_retrans_timeout_rtt - set a request's retransmit timeout
 * @task: task whose timeout is to be set
 *
 * Set a request's retransmit timeout using the RTT estimator.
 */
void xprt_set_retrans_timeout_rtt(struct rpc_task *task)
{
	int timer = task->tk_msg.rpc_proc->p_timer;
	struct rpc_rtt *rtt = task->tk_client->cl_rtt;
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long max_timeout = req->rq_xprt->timeout.to_maxval;

	/* Estimated RTO, doubled per backed-off (re)transmission. */
	task->tk_timeout = rpc_calc_rto(rtt, timer);
	task->tk_timeout <<= rpc_ntimeo(rtt, timer) + req->rq_retries;
	/* Clamp to the transport maximum; == 0 catches shift overflow. */
	if (task->tk_timeout > max_timeout || task->tk_timeout == 0)
		task->tk_timeout = max_timeout;
}
428 | 428 | ||
/*
 * Compute the absolute deadline (in jiffies) by which the request must
 * have received a reply before a major timeout is declared.
 */
static void xprt_reset_majortimeo(struct rpc_rqst *req)
{
	struct rpc_timeout *to = &req->rq_xprt->timeout;

	req->rq_majortimeo = req->rq_timeout;
	if (to->to_exponential)
		/* Exponential backoff: each retry doubles the interval. */
		req->rq_majortimeo <<= to->to_retries;
	else
		/* Linear backoff: each retry adds a fixed increment. */
		req->rq_majortimeo += to->to_increment * to->to_retries;
	/* Clamp to the maximum; == 0 catches shift overflow. */
	if (req->rq_majortimeo > to->to_maxval || req->rq_majortimeo == 0)
		req->rq_majortimeo = to->to_maxval;
	/* Convert the interval into an absolute jiffies deadline. */
	req->rq_majortimeo += jiffies;
}
442 | 442 | ||
/**
 * xprt_adjust_timeout - adjust timeout values for next retransmit
 * @req: RPC request containing parameters to use for the adjustment
 *
 * Returns 0 while minor timeouts remain; returns -ETIMEDOUT once the
 * major timeout has expired, after resetting the per-request timeout
 * state and the client's RTT estimator.
 */
int xprt_adjust_timeout(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	struct rpc_timeout *to = &xprt->timeout;
	int status = 0;

	if (time_before(jiffies, req->rq_majortimeo)) {
		/* Minor timeout: back off the per-retry interval and retry. */
		if (to->to_exponential)
			req->rq_timeout <<= 1;
		else
			req->rq_timeout += to->to_increment;
		if (to->to_maxval && req->rq_timeout >= to->to_maxval)
			req->rq_timeout = to->to_maxval;
		req->rq_retries++;
		pprintk("RPC: %lu retrans\n", jiffies);
	} else {
		/* Major timeout: start over from the initial values. */
		req->rq_timeout = to->to_initval;
		req->rq_retries = 0;
		xprt_reset_majortimeo(req);
		/* Reset the RTT counters == "slow start" */
		spin_lock_bh(&xprt->transport_lock);
		rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval);
		spin_unlock_bh(&xprt->transport_lock);
		pprintk("RPC: %lu timeout\n", jiffies);
		status = -ETIMEDOUT;
	}

	if (req->rq_timeout == 0) {
		/* Should never happen; recover with a sane default. */
		printk(KERN_WARNING "xprt_adjust_timeout: rq_timeout = 0!\n");
		req->rq_timeout = 5 * HZ;
	}
	return status;
}
481 | 481 | ||
/*
 * Close down a transport after marking it disconnected.
 *
 * NOTE(review): appears to run from the xprt->task_cleanup work item
 * (see schedule_work() in xprt_init_autodisconnect); the scheduler of
 * that work holds the transport write lock, which is released here —
 * confirm against the callers outside this view.
 */
static void xprt_autoclose(void *args)
{
	struct rpc_xprt *xprt = (struct rpc_xprt *)args;

	xprt_disconnect(xprt);
	xprt->ops->close(xprt);
	xprt_release_write(xprt, NULL);
}
490 | 490 | ||
/**
 * xprt_disconnect - mark a transport as disconnected
 * @xprt: transport to flag for disconnect
 *
 * Clears the connected state and fails every task waiting for a reply
 * with -ENOTCONN so it can react to the lost connection.
 */
void xprt_disconnect(struct rpc_xprt *xprt)
{
	dprintk("RPC: disconnected transport %p\n", xprt);
	spin_lock_bh(&xprt->transport_lock);
	xprt_clear_connected(xprt);
	xprt_wake_pending_tasks(xprt, -ENOTCONN);
	spin_unlock_bh(&xprt->transport_lock);
}
504 | 504 | ||
/*
 * Disconnect an idle transport.
 *
 * NOTE(review): the (unsigned long data) signature suggests this is a
 * timer callback — confirm where the timer is armed (outside this view).
 * Aborts if requests are still awaiting replies, if the transport is
 * shutting down, or if the write lock is already held. Otherwise either
 * drops the write lock (connect already in progress) or schedules the
 * task_cleanup work to do the actual close.
 */
static void
xprt_init_autodisconnect(unsigned long data)
{
	struct rpc_xprt *xprt = (struct rpc_xprt *)data;

	spin_lock(&xprt->transport_lock);
	/* Still-pending receives or shutdown: leave the transport alone. */
	if (!list_empty(&xprt->recv) || xprt->shutdown)
		goto out_abort;
	/* Somebody else owns the write lock: don't interfere. */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		goto out_abort;
	spin_unlock(&xprt->transport_lock);
	if (xprt_connecting(xprt))
		xprt_release_write(xprt, NULL);
	else
		schedule_work(&xprt->task_cleanup);
	return;
out_abort:
	spin_unlock(&xprt->transport_lock);
}
524 | 524 | ||
/**
 * xprt_connect - schedule a transport connect operation
 * @task: RPC task that is requesting the connect
 *
 * Fails the task with -EIO when no destination port is set. Otherwise
 * takes the transport write lock; if already connected the lock is
 * released again, else the task is put to sleep on the pending queue
 * while the transport-specific connect routine runs.
 */
void xprt_connect(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	dprintk("RPC: %4d xprt_connect xprt %p %s connected\n", task->tk_pid,
			xprt, (xprt_connected(xprt) ? "is" : "is not"));

	if (!xprt->addr.sin_port) {
		/* No destination port: cannot connect. */
		task->tk_status = -EIO;
		return;
	}
	if (!xprt_lock_write(xprt, task))
		return;
	if (xprt_connected(xprt))
		xprt_release_write(xprt, task);
	else {
		if (task->tk_rqstp)
			task->tk_rqstp->rq_bytes_sent = 0;

		/* Sleep until the connect completes or times out;
		 * xprt_connect_status runs on wake-up. */
		task->tk_timeout = xprt->connect_timeout;
		rpc_sleep_on(&xprt->pending, task, xprt_connect_status, NULL);
		xprt->stat.connect_start = jiffies;
		xprt->ops->connect(task);
	}
	return;
}
556 | 556 | ||
/*
 * Wake-up callback for tasks sleeping in xprt_connect: records connect
 * statistics on success; on failure logs the cause and decides whether
 * to fail the task outright with -EIO.
 */
static void xprt_connect_status(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	if (task->tk_status >= 0) {
		xprt->stat.connect_count++;
		xprt->stat.connect_time += (long)jiffies - xprt->stat.connect_start;
		dprintk("RPC: %4d xprt_connect_status: connection established\n",
				task->tk_pid);
		return;
	}

	/* Known transient errors fall through to the soft-mount check;
	 * anything else is treated as fatal. */
	switch (task->tk_status) {
	case -ECONNREFUSED:
	case -ECONNRESET:
		dprintk("RPC: %4d xprt_connect_status: server %s refused connection\n",
				task->tk_pid, task->tk_client->cl_server);
		break;
	case -ENOTCONN:
		dprintk("RPC: %4d xprt_connect_status: connection broken\n",
				task->tk_pid);
		break;
	case -ETIMEDOUT:
		dprintk("RPC: %4d xprt_connect_status: connect attempt timed out\n",
				task->tk_pid);
		break;
	default:
		/* Unexpected error: drop the write lock and fail the task. */
		dprintk("RPC: %4d xprt_connect_status: error %d connecting to server %s\n",
				task->tk_pid, -task->tk_status, task->tk_client->cl_server);
		xprt_release_write(xprt, task);
		task->tk_status = -EIO;
		return;
	}

	/* if soft mounted, just cause this RPC to fail */
	if (RPC_IS_SOFT(task)) {
		xprt_release_write(xprt, task);
		task->tk_status = -EIO;
	}
}
597 | 597 | ||
598 | /** | 598 | /** |
599 | * xprt_lookup_rqst - find an RPC request corresponding to an XID | 599 | * xprt_lookup_rqst - find an RPC request corresponding to an XID |
600 | * @xprt: transport on which the original request was transmitted | 600 | * @xprt: transport on which the original request was transmitted |
601 | * @xid: RPC XID of incoming reply | 601 | * @xid: RPC XID of incoming reply |
602 | * | 602 | * |
603 | */ | 603 | */ |
604 | struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, u32 xid) | 604 | struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, u32 xid) |
605 | { | 605 | { |
606 | struct list_head *pos; | 606 | struct list_head *pos; |
607 | 607 | ||
608 | list_for_each(pos, &xprt->recv) { | 608 | list_for_each(pos, &xprt->recv) { |
609 | struct rpc_rqst *entry = list_entry(pos, struct rpc_rqst, rq_list); | 609 | struct rpc_rqst *entry = list_entry(pos, struct rpc_rqst, rq_list); |
610 | if (entry->rq_xid == xid) | 610 | if (entry->rq_xid == xid) |
611 | return entry; | 611 | return entry; |
612 | } | 612 | } |
613 | xprt->stat.bad_xids++; | 613 | xprt->stat.bad_xids++; |
614 | return NULL; | 614 | return NULL; |
615 | } | 615 | } |
616 | 616 | ||
/**
 * xprt_update_rtt - update an RPC client's RTT state after receiving a reply
 * @task: RPC request that recently completed
 *
 * Only feeds the RTT estimator when the request was transmitted exactly
 * once: with retransmissions it is ambiguous which transmission the
 * reply matches (cf. Karn's algorithm).
 */
void xprt_update_rtt(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_rtt *rtt = task->tk_client->cl_rtt;
	unsigned timer = task->tk_msg.rpc_proc->p_timer;

	/* timer == 0 means this procedure is not timed. */
	if (timer) {
		if (req->rq_ntrans == 1)
			rpc_update_rtt(rtt, timer,
					(long)jiffies - req->rq_xtime);
		rpc_set_timeo(rtt, timer, req->rq_ntrans - 1);
	}
}
635 | 635 | ||
/**
 * xprt_complete_rqst - called when reply processing is complete
 * @task: RPC request that recently completed
 * @copied: actual number of bytes received from the transport
 *
 * Caller holds transport lock.
 */
void xprt_complete_rqst(struct rpc_task *task, int copied)
{
	struct rpc_rqst *req = task->tk_rqstp;

	dprintk("RPC: %5u xid %08x complete (%d bytes received)\n",
			task->tk_pid, ntohl(req->rq_xid), copied);

	task->tk_xprt->stat.recvs++;
	task->tk_rtt = (long)jiffies - req->rq_xtime;

	list_del_init(&req->rq_list);
	/* Ensure all writes are done before we update req->rq_received:
	 * rq_received is the "reply is ready" flag, so the reply data
	 * copied into the XDR buffers must be globally visible first.
	 * Pairs with a read barrier on the reader of rq_received
	 * (outside this view — confirm). */
	smp_wmb();
	req->rq_received = req->rq_private_buf.len = copied;
	rpc_wake_up_task(task);
}
657 | 659 | ||
/*
 * Per-request retransmit timer: if no reply has arrived yet, give the
 * transport's timer op a chance to react, then fail the task with
 * -ETIMEDOUT and wake it.
 */
static void xprt_timer(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	dprintk("RPC: %4d xprt_timer\n", task->tk_pid);

	spin_lock(&xprt->transport_lock);
	/* A reply may have arrived while we waited for the lock. */
	if (!req->rq_received) {
		if (xprt->ops->timer)
			xprt->ops->timer(task);
		task->tk_status = -ETIMEDOUT;
	}
	task->tk_timeout = 0;
	rpc_wake_up_task(task);
	spin_unlock(&xprt->transport_lock);
}
675 | 677 | ||
/**
 * xprt_prepare_transmit - reserve the transport before sending a request
 * @task: RPC task about to send a request
 *
 * Returns 0 on success, -EAGAIN if the transport is busy, -ENOTCONN if
 * it is disconnected, or the (positive) rq_received byte count when a
 * reply already arrived and no new transmission is needed.
 */
int xprt_prepare_transmit(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;
	int err = 0;

	dprintk("RPC: %4d xprt_prepare_transmit\n", task->tk_pid);

	spin_lock_bh(&xprt->transport_lock);
	/* Reply already received and no partial send in progress:
	 * nothing to transmit. */
	if (req->rq_received && !req->rq_bytes_sent) {
		err = req->rq_received;
		goto out_unlock;
	}
	/* Gain exclusive write access to the transport */
	if (!xprt->ops->reserve_xprt(task)) {
		err = -EAGAIN;
		goto out_unlock;
	}

	if (!xprt_connected(xprt)) {
		err = -ENOTCONN;
		goto out_unlock;
	}
out_unlock:
	spin_unlock_bh(&xprt->transport_lock);
	return err;
}
707 | 709 | ||
708 | void | 710 | void |
709 | xprt_abort_transmit(struct rpc_task *task) | 711 | xprt_abort_transmit(struct rpc_task *task) |
710 | { | 712 | { |
711 | struct rpc_xprt *xprt = task->tk_xprt; | 713 | struct rpc_xprt *xprt = task->tk_xprt; |
712 | 714 | ||
713 | xprt_release_write(xprt, task); | 715 | xprt_release_write(xprt, task); |
714 | } | 716 | } |
715 | 717 | ||
/**
 * xprt_transmit - send an RPC request on a transport
 * @task: controlling RPC task
 *
 * We have to copy the iovec because sendmsg fiddles with its contents.
 */
void xprt_transmit(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;
	int status;

	dprintk("RPC: %4d xprt_transmit(%u)\n", task->tk_pid, req->rq_slen);

	/* Pairs with the smp_wmb() in xprt_complete_rqst(): once we see a
	 * non-zero rq_received, the reply data writes are visible too. */
	smp_rmb();
	if (!req->rq_received) {
		if (list_empty(&req->rq_list)) {
			spin_lock_bh(&xprt->transport_lock);
			/* Update the softirq receive buffer */
			memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
					sizeof(req->rq_private_buf));
			/* Add request to the receive list */
			list_add_tail(&req->rq_list, &xprt->recv);
			spin_unlock_bh(&xprt->transport_lock);
			xprt_reset_majortimeo(req);
			/* Turn off autodisconnect */
			del_singleshot_timer_sync(&xprt->timer);
		}
	} else if (!req->rq_bytes_sent)
		/* Reply arrived and nothing was partially sent: done. */
		return;

	status = xprt->ops->send_request(task);
	if (status == 0) {
		dprintk("RPC: %4d xmit complete\n", task->tk_pid);
		spin_lock_bh(&xprt->transport_lock);

		xprt->ops->set_retrans_timeout(task);

		/* Transport-level send statistics */
		xprt->stat.sends++;
		xprt->stat.req_u += xprt->stat.sends - xprt->stat.recvs;
		xprt->stat.bklog_u += xprt->backlog.qlen;

		/* Don't race with disconnect */
		if (!xprt_connected(xprt))
			task->tk_status = -ENOTCONN;
		else if (!req->rq_received)
			/* Sleep until the reply arrives or xprt_timer fires */
			rpc_sleep_on(&xprt->pending, task, NULL, xprt_timer);

		xprt->ops->release_xprt(xprt, task);
		spin_unlock_bh(&xprt->transport_lock);
		return;
	}

	/* Note: at this point, task->tk_sleeping has not yet been set,
	 * hence there is no danger of the waking up task being put on
	 * schedq, and being picked up by a parallel run of rpciod().
	 */
	task->tk_status = status;

	switch (status) {
	case -ECONNREFUSED:
		rpc_sleep_on(&xprt->sending, task, NULL, NULL);
		/* fallthrough */
	case -EAGAIN:
	case -ENOTCONN:
		/* Keep the write reservation; the task will retry. */
		return;
	default:
		break;
	}
	xprt_release_write(xprt, task);
	return;
}
787 | 788 | ||
/* Allocate a request slot for @task from the transport's free list, or
 * queue the task on the backlog if none are available.
 * Caller holds xprt->reserve_lock (see xprt_reserve). */
static inline void do_xprt_reserve(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	task->tk_status = 0;
	/* Task already owns a slot: nothing to do */
	if (task->tk_rqstp)
		return;
	if (!list_empty(&xprt->free)) {
		struct rpc_rqst *req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
		list_del_init(&req->rq_list);
		task->tk_rqstp = req;
		xprt_request_init(task, xprt);
		return;
	}
	dprintk("RPC: waiting for request slot\n");
	/* No free slots: sleep on the backlog queue; xprt_release()
	 * wakes the next waiter when a slot is freed. */
	task->tk_status = -EAGAIN;
	task->tk_timeout = 0;
	rpc_sleep_on(&xprt->backlog, task, NULL, NULL);
}
807 | 808 | ||
/**
 * xprt_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If no more slots are available, place the task on the transport's
 * backlog queue.
 */
void xprt_reserve(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	/* Default status; overwritten by do_xprt_reserve() on both the
	 * success (0) and backlog (-EAGAIN) paths. */
	task->tk_status = -EIO;
	spin_lock(&xprt->reserve_lock);
	do_xprt_reserve(task);
	spin_unlock(&xprt->reserve_lock);
}
824 | 825 | ||
825 | static inline u32 xprt_alloc_xid(struct rpc_xprt *xprt) | 826 | static inline u32 xprt_alloc_xid(struct rpc_xprt *xprt) |
826 | { | 827 | { |
827 | return xprt->xid++; | 828 | return xprt->xid++; |
828 | } | 829 | } |
829 | 830 | ||
/* Seed the transport's XID counter with random bytes so XIDs are not
 * predictable across transport instances. */
static inline void xprt_init_xid(struct rpc_xprt *xprt)
{
	get_random_bytes(&xprt->xid, sizeof(xprt->xid));
}
834 | 835 | ||
/* Initialize a freshly reserved request slot for @task on @xprt:
 * timeout, back-pointers, empty send buffer, and a new XID. */
static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
{
	struct rpc_rqst *req = task->tk_rqstp;

	req->rq_timeout = xprt->timeout.to_initval;
	req->rq_task = task;
	req->rq_xprt = xprt;
	req->rq_buffer = NULL;
	req->rq_bufsize = 0;
	req->rq_xid = xprt_alloc_xid(xprt);
	req->rq_release_snd_buf = NULL;
	dprintk("RPC: %4d reserved req %p xid %08x\n", task->tk_pid,
			req, ntohl(req->rq_xid));
}
849 | 850 | ||
/**
 * xprt_release - release an RPC request slot
 * @task: task which is finished with the slot
 *
 */
void xprt_release(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;
	struct rpc_rqst *req;

	if (!(req = task->tk_rqstp))
		return;
	rpc_count_iostats(task);
	/* Detach the request from the transport under the transport lock */
	spin_lock_bh(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	if (xprt->ops->release_request)
		xprt->ops->release_request(task);
	/* Remove from the receive list, if still queued there */
	if (!list_empty(&req->rq_list))
		list_del(&req->rq_list);
	xprt->last_used = jiffies;
	/* Re-arm the autodisconnect timer once no replies are pending */
	if (list_empty(&xprt->recv))
		mod_timer(&xprt->timer,
				xprt->last_used + xprt->idle_timeout);
	spin_unlock_bh(&xprt->transport_lock);
	xprt->ops->buf_free(task);
	task->tk_rqstp = NULL;
	if (req->rq_release_snd_buf)
		req->rq_release_snd_buf(req);
	memset(req, 0, sizeof(*req)); /* mark unused */

	dprintk("RPC: %4d release request %p\n", task->tk_pid, req);

	/* Return the slot to the free list and wake a backlogged task;
	 * slot lists are protected by reserve_lock, not transport_lock. */
	spin_lock(&xprt->reserve_lock);
	list_add(&req->rq_list, &xprt->free);
	rpc_wake_up_next(&xprt->backlog);
	spin_unlock(&xprt->reserve_lock);
}
887 | 888 | ||
888 | /** | 889 | /** |
889 | * xprt_set_timeout - set constant RPC timeout | 890 | * xprt_set_timeout - set constant RPC timeout |
890 | * @to: RPC timeout parameters to set up | 891 | * @to: RPC timeout parameters to set up |
891 | * @retr: number of retries | 892 | * @retr: number of retries |
892 | * @incr: amount of increase after each retry | 893 | * @incr: amount of increase after each retry |
893 | * | 894 | * |
894 | */ | 895 | */ |
895 | void xprt_set_timeout(struct rpc_timeout *to, unsigned int retr, unsigned long incr) | 896 | void xprt_set_timeout(struct rpc_timeout *to, unsigned int retr, unsigned long incr) |
896 | { | 897 | { |
897 | to->to_initval = | 898 | to->to_initval = |
898 | to->to_increment = incr; | 899 | to->to_increment = incr; |
899 | to->to_maxval = to->to_initval + (incr * retr); | 900 | to->to_maxval = to->to_initval + (incr * retr); |
900 | to->to_retries = retr; | 901 | to->to_retries = retr; |
901 | to->to_exponential = 0; | 902 | to->to_exponential = 0; |
902 | } | 903 | } |
903 | 904 | ||
/* Allocate and initialize a transport for @proto connected to @ap,
 * using timeout parameters @to.  Returns the new transport or an
 * ERR_PTR() on failure. */
static struct rpc_xprt *xprt_setup(int proto, struct sockaddr_in *ap, struct rpc_timeout *to)
{
	int result;
	struct rpc_xprt *xprt;
	struct rpc_rqst *req;

	if ((xprt = kmalloc(sizeof(struct rpc_xprt), GFP_KERNEL)) == NULL)
		return ERR_PTR(-ENOMEM);
	memset(xprt, 0, sizeof(*xprt)); /* Nnnngh! */

	xprt->addr = *ap;

	/* Protocol-specific setup; presumably xs_setup_*() fills in
	 * xprt->slot and xprt->max_reqs used below -- confirm in xprtsock */
	switch (proto) {
	case IPPROTO_UDP:
		result = xs_setup_udp(xprt, to);
		break;
	case IPPROTO_TCP:
		result = xs_setup_tcp(xprt, to);
		break;
	default:
		printk(KERN_ERR "RPC: unrecognized transport protocol: %d\n",
				proto);
		result = -EIO;
		break;
	}
	if (result) {
		kfree(xprt);
		return ERR_PTR(result);
	}

	spin_lock_init(&xprt->transport_lock);
	spin_lock_init(&xprt->reserve_lock);

	INIT_LIST_HEAD(&xprt->free);
	INIT_LIST_HEAD(&xprt->recv);
	INIT_WORK(&xprt->task_cleanup, xprt_autoclose, xprt);
	/* Autodisconnect timer: fires after idle_timeout of inactivity */
	init_timer(&xprt->timer);
	xprt->timer.function = xprt_init_autodisconnect;
	xprt->timer.data = (unsigned long) xprt;
	xprt->last_used = jiffies;
	xprt->cwnd = RPC_INITCWND;

	rpc_init_wait_queue(&xprt->pending, "xprt_pending");
	rpc_init_wait_queue(&xprt->sending, "xprt_sending");
	rpc_init_wait_queue(&xprt->resend, "xprt_resend");
	rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");

	/* initialize free list */
	for (req = &xprt->slot[xprt->max_reqs-1]; req >= &xprt->slot[0]; req--)
		list_add(&req->rq_list, &xprt->free);

	xprt_init_xid(xprt);

	dprintk("RPC: created transport %p with %u slots\n", xprt,
			xprt->max_reqs);

	return xprt;
}
962 | 963 | ||
/**
 * xprt_create_proto - create an RPC client transport
 * @proto: requested transport protocol
 * @sap: remote peer's address
 * @to: timeout parameters for new transport
 *
 */
struct rpc_xprt *xprt_create_proto(int proto, struct sockaddr_in *sap, struct rpc_timeout *to)
{
	struct rpc_xprt *xprt = xprt_setup(proto, sap, to);

	if (!IS_ERR(xprt))
		dprintk("RPC: xprt_create_proto created xprt %p\n", xprt);
	else
		dprintk("RPC: xprt_create_proto failed\n");
	return xprt;
}
981 | 982 | ||
/**
 * xprt_destroy - destroy an RPC transport, killing off all requests.
 * @xprt: transport to destroy
 *
 */
int xprt_destroy(struct rpc_xprt *xprt)
{
	dprintk("RPC: destroying transport %p\n", xprt);
	/* Flag shutdown first, then wait for any running autodisconnect
	 * timer before tearing down the transport and freeing it. */
	xprt->shutdown = 1;
	del_timer_sync(&xprt->timer);
	xprt->ops->destroy(xprt);
	kfree(xprt);

	return 0;
}