Commit 5e1533c7880bb0df98f71fa683979ec296aa947d

Authored by Stanislav Kinsbursky
Committed by J. Bruce Fields
1 parent 08d44a35a9

NFSd: make nfsd4_manager allocated per network namespace context.

Signed-off-by: Stanislav Kinsbursky <skinsbursky@parallels.com>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>

Showing 2 changed files with 21 additions and 13 deletions
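The patch embeds the NFSv4 grace-period lock_manager in the per-network-namespace struct nfsd_net (first file in the diff below) instead of keeping a single global instance, so each net namespace gets its own manager. As a minimal sketch of how such a per-net field is reached from a caller that already holds a struct net (not part of this commit; the helper name is hypothetical, and net_generic()/nfsd_net_id come from the header shown below):

    /*
     * Sketch only -- not from the commit. nfsd4_grace_manager() is a
     * made-up helper name; net_generic() looks up this namespace's
     * nfsd_net by the id registered in nfsd_net_id.
     */
    #include <net/netns/generic.h>
    #include "netns.h"

    static struct lock_manager *nfsd4_grace_manager(struct net *net)
    {
    	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

    	return &nn->nfsd4_manager;
    }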

 1  1 /*
 2  2  * per net namespace data structures for nfsd
 3  3  *
 4  4  * Copyright (C) 2012, Jeff Layton <jlayton@redhat.com>
 5  5  *
 6  6  * This program is free software; you can redistribute it and/or modify it
 7  7  * under the terms of the GNU General Public License as published by the Free
 8  8  * Software Foundation; either version 2 of the License, or (at your option)
 9  9  * any later version.
10 10  *
11 11  * This program is distributed in the hope that it will be useful, but WITHOUT
12 12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 13  * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 14  * more details.
15 15  *
16 16  * You should have received a copy of the GNU General Public License along with
17 17  * this program; if not, write to the Free Software Foundation, Inc., 51
18 18  * Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
19 19  */
20 20
21 21 #ifndef __NFSD_NETNS_H__
22 22 #define __NFSD_NETNS_H__
23 23
24 24 #include <net/net_namespace.h>
25 25 #include <net/netns/generic.h>
26 26
27 27 struct cld_net;
28 28
29 29 struct nfsd_net {
30 30 	struct cld_net *cld_net;
31 31
32 32 	struct cache_detail *svc_expkey_cache;
33 33 	struct cache_detail *svc_export_cache;
34 34
35 35 	struct cache_detail *idtoname_cache;
36 36 	struct cache_detail *nametoid_cache;
   37
   38 	struct lock_manager nfsd4_manager;
37 39 };
38 40
39 41 extern int nfsd_net_id;
40 42 #endif /* __NFSD_NETNS_H__ */
41 43
1 /* 1 /*
2 * Copyright (c) 2001 The Regents of the University of Michigan. 2 * Copyright (c) 2001 The Regents of the University of Michigan.
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * Kendrick Smith <kmsmith@umich.edu> 5 * Kendrick Smith <kmsmith@umich.edu>
6 * Andy Adamson <kandros@umich.edu> 6 * Andy Adamson <kandros@umich.edu>
7 * 7 *
8 * Redistribution and use in source and binary forms, with or without 8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions 9 * modification, are permitted provided that the following conditions
10 * are met: 10 * are met:
11 * 11 *
12 * 1. Redistributions of source code must retain the above copyright 12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer. 13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright 14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the 15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution. 16 * documentation and/or other materials provided with the distribution.
17 * 3. Neither the name of the University nor the names of its 17 * 3. Neither the name of the University nor the names of its
18 * contributors may be used to endorse or promote products derived 18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission. 19 * from this software without specific prior written permission.
20 * 20 *
21 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED 21 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
22 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 22 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
23 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 23 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
24 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 24 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR 27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
28 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 28 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
29 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 29 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
30 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 30 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
31 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 31 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 * 32 *
33 */ 33 */
34 34
35 #include <linux/file.h> 35 #include <linux/file.h>
36 #include <linux/fs.h> 36 #include <linux/fs.h>
37 #include <linux/slab.h> 37 #include <linux/slab.h>
38 #include <linux/namei.h> 38 #include <linux/namei.h>
39 #include <linux/swap.h> 39 #include <linux/swap.h>
40 #include <linux/pagemap.h> 40 #include <linux/pagemap.h>
41 #include <linux/ratelimit.h> 41 #include <linux/ratelimit.h>
42 #include <linux/sunrpc/svcauth_gss.h> 42 #include <linux/sunrpc/svcauth_gss.h>
43 #include <linux/sunrpc/clnt.h> 43 #include <linux/sunrpc/clnt.h>
44 #include "xdr4.h" 44 #include "xdr4.h"
45 #include "vfs.h" 45 #include "vfs.h"
46 #include "current_stateid.h" 46 #include "current_stateid.h"
47 #include "fault_inject.h" 47 #include "fault_inject.h"
48 48
49 #include "netns.h"
50
49 #define NFSDDBG_FACILITY NFSDDBG_PROC 51 #define NFSDDBG_FACILITY NFSDDBG_PROC
50 52
51 /* Globals */ 53 /* Globals */
52 time_t nfsd4_lease = 90; /* default lease time */ 54 time_t nfsd4_lease = 90; /* default lease time */
53 time_t nfsd4_grace = 90; 55 time_t nfsd4_grace = 90;
54 static time_t boot_time; 56 static time_t boot_time;
55 57
56 #define all_ones {{~0,~0},~0} 58 #define all_ones {{~0,~0},~0}
57 static const stateid_t one_stateid = { 59 static const stateid_t one_stateid = {
58 .si_generation = ~0, 60 .si_generation = ~0,
59 .si_opaque = all_ones, 61 .si_opaque = all_ones,
60 }; 62 };
61 static const stateid_t zero_stateid = { 63 static const stateid_t zero_stateid = {
62 /* all fields zero */ 64 /* all fields zero */
63 }; 65 };
64 static const stateid_t currentstateid = { 66 static const stateid_t currentstateid = {
65 .si_generation = 1, 67 .si_generation = 1,
66 }; 68 };
67 69
68 static u64 current_sessionid = 1; 70 static u64 current_sessionid = 1;
69 71
70 #define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t))) 72 #define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t)))
71 #define ONE_STATEID(stateid) (!memcmp((stateid), &one_stateid, sizeof(stateid_t))) 73 #define ONE_STATEID(stateid) (!memcmp((stateid), &one_stateid, sizeof(stateid_t)))
72 #define CURRENT_STATEID(stateid) (!memcmp((stateid), &currentstateid, sizeof(stateid_t))) 74 #define CURRENT_STATEID(stateid) (!memcmp((stateid), &currentstateid, sizeof(stateid_t)))
73 75
74 /* forward declarations */ 76 /* forward declarations */
75 static int check_for_locks(struct nfs4_file *filp, struct nfs4_lockowner *lowner); 77 static int check_for_locks(struct nfs4_file *filp, struct nfs4_lockowner *lowner);
76 78
77 /* Locking: */ 79 /* Locking: */
78 80
79 /* Currently used for almost all code touching nfsv4 state: */ 81 /* Currently used for almost all code touching nfsv4 state: */
80 static DEFINE_MUTEX(client_mutex); 82 static DEFINE_MUTEX(client_mutex);
81 83
82 /* 84 /*
83 * Currently used for the del_recall_lru and file hash table. In an 85 * Currently used for the del_recall_lru and file hash table. In an
84 * effort to decrease the scope of the client_mutex, this spinlock may 86 * effort to decrease the scope of the client_mutex, this spinlock may
85 * eventually cover more: 87 * eventually cover more:
86 */ 88 */
87 static DEFINE_SPINLOCK(recall_lock); 89 static DEFINE_SPINLOCK(recall_lock);
88 90
89 static struct kmem_cache *openowner_slab = NULL; 91 static struct kmem_cache *openowner_slab = NULL;
90 static struct kmem_cache *lockowner_slab = NULL; 92 static struct kmem_cache *lockowner_slab = NULL;
91 static struct kmem_cache *file_slab = NULL; 93 static struct kmem_cache *file_slab = NULL;
92 static struct kmem_cache *stateid_slab = NULL; 94 static struct kmem_cache *stateid_slab = NULL;
93 static struct kmem_cache *deleg_slab = NULL; 95 static struct kmem_cache *deleg_slab = NULL;
94 96
95 void 97 void
96 nfs4_lock_state(void) 98 nfs4_lock_state(void)
97 { 99 {
98 mutex_lock(&client_mutex); 100 mutex_lock(&client_mutex);
99 } 101 }
100 102
101 static void free_session(struct kref *); 103 static void free_session(struct kref *);
102 104
103 /* Must be called under the client_lock */ 105 /* Must be called under the client_lock */
104 static void nfsd4_put_session_locked(struct nfsd4_session *ses) 106 static void nfsd4_put_session_locked(struct nfsd4_session *ses)
105 { 107 {
106 kref_put(&ses->se_ref, free_session); 108 kref_put(&ses->se_ref, free_session);
107 } 109 }
108 110
109 static void nfsd4_get_session(struct nfsd4_session *ses) 111 static void nfsd4_get_session(struct nfsd4_session *ses)
110 { 112 {
111 kref_get(&ses->se_ref); 113 kref_get(&ses->se_ref);
112 } 114 }
113 115
114 void 116 void
115 nfs4_unlock_state(void) 117 nfs4_unlock_state(void)
116 { 118 {
117 mutex_unlock(&client_mutex); 119 mutex_unlock(&client_mutex);
118 } 120 }
119 121
120 static inline u32 122 static inline u32
121 opaque_hashval(const void *ptr, int nbytes) 123 opaque_hashval(const void *ptr, int nbytes)
122 { 124 {
123 unsigned char *cptr = (unsigned char *) ptr; 125 unsigned char *cptr = (unsigned char *) ptr;
124 126
125 u32 x = 0; 127 u32 x = 0;
126 while (nbytes--) { 128 while (nbytes--) {
127 x *= 37; 129 x *= 37;
128 x += *cptr++; 130 x += *cptr++;
129 } 131 }
130 return x; 132 return x;
131 } 133 }
132 134
133 static struct list_head del_recall_lru; 135 static struct list_head del_recall_lru;
134 136
135 static void nfsd4_free_file(struct nfs4_file *f) 137 static void nfsd4_free_file(struct nfs4_file *f)
136 { 138 {
137 kmem_cache_free(file_slab, f); 139 kmem_cache_free(file_slab, f);
138 } 140 }
139 141
140 static inline void 142 static inline void
141 put_nfs4_file(struct nfs4_file *fi) 143 put_nfs4_file(struct nfs4_file *fi)
142 { 144 {
143 if (atomic_dec_and_lock(&fi->fi_ref, &recall_lock)) { 145 if (atomic_dec_and_lock(&fi->fi_ref, &recall_lock)) {
144 list_del(&fi->fi_hash); 146 list_del(&fi->fi_hash);
145 spin_unlock(&recall_lock); 147 spin_unlock(&recall_lock);
146 iput(fi->fi_inode); 148 iput(fi->fi_inode);
147 nfsd4_free_file(fi); 149 nfsd4_free_file(fi);
148 } 150 }
149 } 151 }
150 152
151 static inline void 153 static inline void
152 get_nfs4_file(struct nfs4_file *fi) 154 get_nfs4_file(struct nfs4_file *fi)
153 { 155 {
154 atomic_inc(&fi->fi_ref); 156 atomic_inc(&fi->fi_ref);
155 } 157 }
156 158
157 static int num_delegations; 159 static int num_delegations;
158 unsigned int max_delegations; 160 unsigned int max_delegations;
159 161
160 /* 162 /*
161 * Open owner state (share locks) 163 * Open owner state (share locks)
162 */ 164 */
163 165
164 /* hash tables for lock and open owners */ 166 /* hash tables for lock and open owners */
165 #define OWNER_HASH_BITS 8 167 #define OWNER_HASH_BITS 8
166 #define OWNER_HASH_SIZE (1 << OWNER_HASH_BITS) 168 #define OWNER_HASH_SIZE (1 << OWNER_HASH_BITS)
167 #define OWNER_HASH_MASK (OWNER_HASH_SIZE - 1) 169 #define OWNER_HASH_MASK (OWNER_HASH_SIZE - 1)
168 170
169 static unsigned int ownerstr_hashval(u32 clientid, struct xdr_netobj *ownername) 171 static unsigned int ownerstr_hashval(u32 clientid, struct xdr_netobj *ownername)
170 { 172 {
171 unsigned int ret; 173 unsigned int ret;
172 174
173 ret = opaque_hashval(ownername->data, ownername->len); 175 ret = opaque_hashval(ownername->data, ownername->len);
174 ret += clientid; 176 ret += clientid;
175 return ret & OWNER_HASH_MASK; 177 return ret & OWNER_HASH_MASK;
176 } 178 }
177 179
178 static struct list_head ownerstr_hashtbl[OWNER_HASH_SIZE]; 180 static struct list_head ownerstr_hashtbl[OWNER_HASH_SIZE];
179 181
180 /* hash table for nfs4_file */ 182 /* hash table for nfs4_file */
181 #define FILE_HASH_BITS 8 183 #define FILE_HASH_BITS 8
182 #define FILE_HASH_SIZE (1 << FILE_HASH_BITS) 184 #define FILE_HASH_SIZE (1 << FILE_HASH_BITS)
183 185
184 static unsigned int file_hashval(struct inode *ino) 186 static unsigned int file_hashval(struct inode *ino)
185 { 187 {
186 /* XXX: why are we hashing on inode pointer, anyway? */ 188 /* XXX: why are we hashing on inode pointer, anyway? */
187 return hash_ptr(ino, FILE_HASH_BITS); 189 return hash_ptr(ino, FILE_HASH_BITS);
188 } 190 }
189 191
190 static struct list_head file_hashtbl[FILE_HASH_SIZE]; 192 static struct list_head file_hashtbl[FILE_HASH_SIZE];
191 193
192 static void __nfs4_file_get_access(struct nfs4_file *fp, int oflag) 194 static void __nfs4_file_get_access(struct nfs4_file *fp, int oflag)
193 { 195 {
194 BUG_ON(!(fp->fi_fds[oflag] || fp->fi_fds[O_RDWR])); 196 BUG_ON(!(fp->fi_fds[oflag] || fp->fi_fds[O_RDWR]));
195 atomic_inc(&fp->fi_access[oflag]); 197 atomic_inc(&fp->fi_access[oflag]);
196 } 198 }
197 199
198 static void nfs4_file_get_access(struct nfs4_file *fp, int oflag) 200 static void nfs4_file_get_access(struct nfs4_file *fp, int oflag)
199 { 201 {
200 if (oflag == O_RDWR) { 202 if (oflag == O_RDWR) {
201 __nfs4_file_get_access(fp, O_RDONLY); 203 __nfs4_file_get_access(fp, O_RDONLY);
202 __nfs4_file_get_access(fp, O_WRONLY); 204 __nfs4_file_get_access(fp, O_WRONLY);
203 } else 205 } else
204 __nfs4_file_get_access(fp, oflag); 206 __nfs4_file_get_access(fp, oflag);
205 } 207 }
206 208
207 static void nfs4_file_put_fd(struct nfs4_file *fp, int oflag) 209 static void nfs4_file_put_fd(struct nfs4_file *fp, int oflag)
208 { 210 {
209 if (fp->fi_fds[oflag]) { 211 if (fp->fi_fds[oflag]) {
210 fput(fp->fi_fds[oflag]); 212 fput(fp->fi_fds[oflag]);
211 fp->fi_fds[oflag] = NULL; 213 fp->fi_fds[oflag] = NULL;
212 } 214 }
213 } 215 }
214 216
215 static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag) 217 static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag)
216 { 218 {
217 if (atomic_dec_and_test(&fp->fi_access[oflag])) { 219 if (atomic_dec_and_test(&fp->fi_access[oflag])) {
218 nfs4_file_put_fd(fp, oflag); 220 nfs4_file_put_fd(fp, oflag);
219 /* 221 /*
220 * It's also safe to get rid of the RDWR open *if* 222 * It's also safe to get rid of the RDWR open *if*
221 * we no longer have need of the other kind of access 223 * we no longer have need of the other kind of access
222 * or if we already have the other kind of open: 224 * or if we already have the other kind of open:
223 */ 225 */
224 if (fp->fi_fds[1-oflag] 226 if (fp->fi_fds[1-oflag]
225 || atomic_read(&fp->fi_access[1 - oflag]) == 0) 227 || atomic_read(&fp->fi_access[1 - oflag]) == 0)
226 nfs4_file_put_fd(fp, O_RDWR); 228 nfs4_file_put_fd(fp, O_RDWR);
227 } 229 }
228 } 230 }
229 231
230 static void nfs4_file_put_access(struct nfs4_file *fp, int oflag) 232 static void nfs4_file_put_access(struct nfs4_file *fp, int oflag)
231 { 233 {
232 if (oflag == O_RDWR) { 234 if (oflag == O_RDWR) {
233 __nfs4_file_put_access(fp, O_RDONLY); 235 __nfs4_file_put_access(fp, O_RDONLY);
234 __nfs4_file_put_access(fp, O_WRONLY); 236 __nfs4_file_put_access(fp, O_WRONLY);
235 } else 237 } else
236 __nfs4_file_put_access(fp, oflag); 238 __nfs4_file_put_access(fp, oflag);
237 } 239 }
238 240
239 static inline int get_new_stid(struct nfs4_stid *stid) 241 static inline int get_new_stid(struct nfs4_stid *stid)
240 { 242 {
241 static int min_stateid = 0; 243 static int min_stateid = 0;
242 struct idr *stateids = &stid->sc_client->cl_stateids; 244 struct idr *stateids = &stid->sc_client->cl_stateids;
243 int new_stid; 245 int new_stid;
244 int error; 246 int error;
245 247
246 error = idr_get_new_above(stateids, stid, min_stateid, &new_stid); 248 error = idr_get_new_above(stateids, stid, min_stateid, &new_stid);
247 /* 249 /*
248 * Note: the necessary preallocation was done in 250 * Note: the necessary preallocation was done in
249 * nfs4_alloc_stateid(). The idr code caps the number of 251 * nfs4_alloc_stateid(). The idr code caps the number of
250 * preallocations that can exist at a time, but the state lock 252 * preallocations that can exist at a time, but the state lock
251 * prevents anyone from using ours before we get here: 253 * prevents anyone from using ours before we get here:
252 */ 254 */
253 BUG_ON(error); 255 BUG_ON(error);
254 /* 256 /*
255 * It shouldn't be a problem to reuse an opaque stateid value. 257 * It shouldn't be a problem to reuse an opaque stateid value.
256 * I don't think it is for 4.1. But with 4.0 I worry that, for 258 * I don't think it is for 4.1. But with 4.0 I worry that, for
257 * example, a stray write retransmission could be accepted by 259 * example, a stray write retransmission could be accepted by
258 * the server when it should have been rejected. Therefore, 260 * the server when it should have been rejected. Therefore,
259 * adopt a trick from the sctp code to attempt to maximize the 261 * adopt a trick from the sctp code to attempt to maximize the
260 * amount of time until an id is reused, by ensuring they always 262 * amount of time until an id is reused, by ensuring they always
261 * "increase" (mod INT_MAX): 263 * "increase" (mod INT_MAX):
262 */ 264 */
263 265
264 min_stateid = new_stid+1; 266 min_stateid = new_stid+1;
265 if (min_stateid == INT_MAX) 267 if (min_stateid == INT_MAX)
266 min_stateid = 0; 268 min_stateid = 0;
267 return new_stid; 269 return new_stid;
268 } 270 }
269 271
270 static void init_stid(struct nfs4_stid *stid, struct nfs4_client *cl, unsigned char type) 272 static void init_stid(struct nfs4_stid *stid, struct nfs4_client *cl, unsigned char type)
271 { 273 {
272 stateid_t *s = &stid->sc_stateid; 274 stateid_t *s = &stid->sc_stateid;
273 int new_id; 275 int new_id;
274 276
275 stid->sc_type = type; 277 stid->sc_type = type;
276 stid->sc_client = cl; 278 stid->sc_client = cl;
277 s->si_opaque.so_clid = cl->cl_clientid; 279 s->si_opaque.so_clid = cl->cl_clientid;
278 new_id = get_new_stid(stid); 280 new_id = get_new_stid(stid);
279 s->si_opaque.so_id = (u32)new_id; 281 s->si_opaque.so_id = (u32)new_id;
280 /* Will be incremented before return to client: */ 282 /* Will be incremented before return to client: */
281 s->si_generation = 0; 283 s->si_generation = 0;
282 } 284 }
283 285
284 static struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab) 286 static struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab)
285 { 287 {
286 struct idr *stateids = &cl->cl_stateids; 288 struct idr *stateids = &cl->cl_stateids;
287 289
288 if (!idr_pre_get(stateids, GFP_KERNEL)) 290 if (!idr_pre_get(stateids, GFP_KERNEL))
289 return NULL; 291 return NULL;
290 /* 292 /*
291 * Note: if we fail here (or any time between now and the time 293 * Note: if we fail here (or any time between now and the time
292 * we actually get the new idr), we won't need to undo the idr 294 * we actually get the new idr), we won't need to undo the idr
293 * preallocation, since the idr code caps the number of 295 * preallocation, since the idr code caps the number of
294 * preallocated entries. 296 * preallocated entries.
295 */ 297 */
296 return kmem_cache_alloc(slab, GFP_KERNEL); 298 return kmem_cache_alloc(slab, GFP_KERNEL);
297 } 299 }
298 300
299 static struct nfs4_ol_stateid * nfs4_alloc_stateid(struct nfs4_client *clp) 301 static struct nfs4_ol_stateid * nfs4_alloc_stateid(struct nfs4_client *clp)
300 { 302 {
301 return openlockstateid(nfs4_alloc_stid(clp, stateid_slab)); 303 return openlockstateid(nfs4_alloc_stid(clp, stateid_slab));
302 } 304 }
303 305
304 static struct nfs4_delegation * 306 static struct nfs4_delegation *
305 alloc_init_deleg(struct nfs4_client *clp, struct nfs4_ol_stateid *stp, struct svc_fh *current_fh, u32 type) 307 alloc_init_deleg(struct nfs4_client *clp, struct nfs4_ol_stateid *stp, struct svc_fh *current_fh, u32 type)
306 { 308 {
307 struct nfs4_delegation *dp; 309 struct nfs4_delegation *dp;
308 struct nfs4_file *fp = stp->st_file; 310 struct nfs4_file *fp = stp->st_file;
309 311
310 dprintk("NFSD alloc_init_deleg\n"); 312 dprintk("NFSD alloc_init_deleg\n");
311 /* 313 /*
312 * Major work on the lease subsystem (for example, to support 314 * Major work on the lease subsystem (for example, to support
313 * calbacks on stat) will be required before we can support 315 * calbacks on stat) will be required before we can support
314 * write delegations properly. 316 * write delegations properly.
315 */ 317 */
316 if (type != NFS4_OPEN_DELEGATE_READ) 318 if (type != NFS4_OPEN_DELEGATE_READ)
317 return NULL; 319 return NULL;
318 if (fp->fi_had_conflict) 320 if (fp->fi_had_conflict)
319 return NULL; 321 return NULL;
320 if (num_delegations > max_delegations) 322 if (num_delegations > max_delegations)
321 return NULL; 323 return NULL;
322 dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab)); 324 dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab));
323 if (dp == NULL) 325 if (dp == NULL)
324 return dp; 326 return dp;
325 init_stid(&dp->dl_stid, clp, NFS4_DELEG_STID); 327 init_stid(&dp->dl_stid, clp, NFS4_DELEG_STID);
326 /* 328 /*
327 * delegation seqid's are never incremented. The 4.1 special 329 * delegation seqid's are never incremented. The 4.1 special
328 * meaning of seqid 0 isn't meaningful, really, but let's avoid 330 * meaning of seqid 0 isn't meaningful, really, but let's avoid
329 * 0 anyway just for consistency and use 1: 331 * 0 anyway just for consistency and use 1:
330 */ 332 */
331 dp->dl_stid.sc_stateid.si_generation = 1; 333 dp->dl_stid.sc_stateid.si_generation = 1;
332 num_delegations++; 334 num_delegations++;
333 INIT_LIST_HEAD(&dp->dl_perfile); 335 INIT_LIST_HEAD(&dp->dl_perfile);
334 INIT_LIST_HEAD(&dp->dl_perclnt); 336 INIT_LIST_HEAD(&dp->dl_perclnt);
335 INIT_LIST_HEAD(&dp->dl_recall_lru); 337 INIT_LIST_HEAD(&dp->dl_recall_lru);
336 get_nfs4_file(fp); 338 get_nfs4_file(fp);
337 dp->dl_file = fp; 339 dp->dl_file = fp;
338 dp->dl_type = type; 340 dp->dl_type = type;
339 fh_copy_shallow(&dp->dl_fh, &current_fh->fh_handle); 341 fh_copy_shallow(&dp->dl_fh, &current_fh->fh_handle);
340 dp->dl_time = 0; 342 dp->dl_time = 0;
341 atomic_set(&dp->dl_count, 1); 343 atomic_set(&dp->dl_count, 1);
342 INIT_WORK(&dp->dl_recall.cb_work, nfsd4_do_callback_rpc); 344 INIT_WORK(&dp->dl_recall.cb_work, nfsd4_do_callback_rpc);
343 return dp; 345 return dp;
344 } 346 }
345 347
346 void 348 void
347 nfs4_put_delegation(struct nfs4_delegation *dp) 349 nfs4_put_delegation(struct nfs4_delegation *dp)
348 { 350 {
349 if (atomic_dec_and_test(&dp->dl_count)) { 351 if (atomic_dec_and_test(&dp->dl_count)) {
350 dprintk("NFSD: freeing dp %p\n",dp); 352 dprintk("NFSD: freeing dp %p\n",dp);
351 put_nfs4_file(dp->dl_file); 353 put_nfs4_file(dp->dl_file);
352 kmem_cache_free(deleg_slab, dp); 354 kmem_cache_free(deleg_slab, dp);
353 num_delegations--; 355 num_delegations--;
354 } 356 }
355 } 357 }
356 358
357 static void nfs4_put_deleg_lease(struct nfs4_file *fp) 359 static void nfs4_put_deleg_lease(struct nfs4_file *fp)
358 { 360 {
359 if (atomic_dec_and_test(&fp->fi_delegees)) { 361 if (atomic_dec_and_test(&fp->fi_delegees)) {
360 vfs_setlease(fp->fi_deleg_file, F_UNLCK, &fp->fi_lease); 362 vfs_setlease(fp->fi_deleg_file, F_UNLCK, &fp->fi_lease);
361 fp->fi_lease = NULL; 363 fp->fi_lease = NULL;
362 fput(fp->fi_deleg_file); 364 fput(fp->fi_deleg_file);
363 fp->fi_deleg_file = NULL; 365 fp->fi_deleg_file = NULL;
364 } 366 }
365 } 367 }
366 368
367 static void unhash_stid(struct nfs4_stid *s) 369 static void unhash_stid(struct nfs4_stid *s)
368 { 370 {
369 struct idr *stateids = &s->sc_client->cl_stateids; 371 struct idr *stateids = &s->sc_client->cl_stateids;
370 372
371 idr_remove(stateids, s->sc_stateid.si_opaque.so_id); 373 idr_remove(stateids, s->sc_stateid.si_opaque.so_id);
372 } 374 }
373 375
374 /* Called under the state lock. */ 376 /* Called under the state lock. */
375 static void 377 static void
376 unhash_delegation(struct nfs4_delegation *dp) 378 unhash_delegation(struct nfs4_delegation *dp)
377 { 379 {
378 unhash_stid(&dp->dl_stid); 380 unhash_stid(&dp->dl_stid);
379 list_del_init(&dp->dl_perclnt); 381 list_del_init(&dp->dl_perclnt);
380 spin_lock(&recall_lock); 382 spin_lock(&recall_lock);
381 list_del_init(&dp->dl_perfile); 383 list_del_init(&dp->dl_perfile);
382 list_del_init(&dp->dl_recall_lru); 384 list_del_init(&dp->dl_recall_lru);
383 spin_unlock(&recall_lock); 385 spin_unlock(&recall_lock);
384 nfs4_put_deleg_lease(dp->dl_file); 386 nfs4_put_deleg_lease(dp->dl_file);
385 nfs4_put_delegation(dp); 387 nfs4_put_delegation(dp);
386 } 388 }
387 389
388 /* 390 /*
389 * SETCLIENTID state 391 * SETCLIENTID state
390 */ 392 */
391 393
392 /* client_lock protects the client lru list and session hash table */ 394 /* client_lock protects the client lru list and session hash table */
393 static DEFINE_SPINLOCK(client_lock); 395 static DEFINE_SPINLOCK(client_lock);
394 396
395 /* Hash tables for nfs4_clientid state */ 397 /* Hash tables for nfs4_clientid state */
396 #define CLIENT_HASH_BITS 4 398 #define CLIENT_HASH_BITS 4
397 #define CLIENT_HASH_SIZE (1 << CLIENT_HASH_BITS) 399 #define CLIENT_HASH_SIZE (1 << CLIENT_HASH_BITS)
398 #define CLIENT_HASH_MASK (CLIENT_HASH_SIZE - 1) 400 #define CLIENT_HASH_MASK (CLIENT_HASH_SIZE - 1)
399 401
400 static unsigned int clientid_hashval(u32 id) 402 static unsigned int clientid_hashval(u32 id)
401 { 403 {
402 return id & CLIENT_HASH_MASK; 404 return id & CLIENT_HASH_MASK;
403 } 405 }
404 406
405 static unsigned int clientstr_hashval(const char *name) 407 static unsigned int clientstr_hashval(const char *name)
406 { 408 {
407 return opaque_hashval(name, 8) & CLIENT_HASH_MASK; 409 return opaque_hashval(name, 8) & CLIENT_HASH_MASK;
408 } 410 }
409 411
410 /* 412 /*
411 * reclaim_str_hashtbl[] holds known client info from previous reset/reboot 413 * reclaim_str_hashtbl[] holds known client info from previous reset/reboot
412 * used in reboot/reset lease grace period processing 414 * used in reboot/reset lease grace period processing
413 * 415 *
414 * conf_id_hashtbl[], and conf_str_hashtbl[] hold confirmed 416 * conf_id_hashtbl[], and conf_str_hashtbl[] hold confirmed
415 * setclientid_confirmed info. 417 * setclientid_confirmed info.
416 * 418 *
417 * unconf_str_hastbl[] and unconf_id_hashtbl[] hold unconfirmed 419 * unconf_str_hastbl[] and unconf_id_hashtbl[] hold unconfirmed
418 * setclientid info. 420 * setclientid info.
419 * 421 *
420 * client_lru holds client queue ordered by nfs4_client.cl_time 422 * client_lru holds client queue ordered by nfs4_client.cl_time
421 * for lease renewal. 423 * for lease renewal.
422 * 424 *
423 * close_lru holds (open) stateowner queue ordered by nfs4_stateowner.so_time 425 * close_lru holds (open) stateowner queue ordered by nfs4_stateowner.so_time
424 * for last close replay. 426 * for last close replay.
425 */ 427 */
426 static struct list_head reclaim_str_hashtbl[CLIENT_HASH_SIZE]; 428 static struct list_head reclaim_str_hashtbl[CLIENT_HASH_SIZE];
427 static int reclaim_str_hashtbl_size = 0; 429 static int reclaim_str_hashtbl_size = 0;
428 static struct list_head conf_id_hashtbl[CLIENT_HASH_SIZE]; 430 static struct list_head conf_id_hashtbl[CLIENT_HASH_SIZE];
429 static struct list_head conf_str_hashtbl[CLIENT_HASH_SIZE]; 431 static struct list_head conf_str_hashtbl[CLIENT_HASH_SIZE];
430 static struct list_head unconf_str_hashtbl[CLIENT_HASH_SIZE]; 432 static struct list_head unconf_str_hashtbl[CLIENT_HASH_SIZE];
431 static struct list_head unconf_id_hashtbl[CLIENT_HASH_SIZE]; 433 static struct list_head unconf_id_hashtbl[CLIENT_HASH_SIZE];
432 static struct list_head client_lru; 434 static struct list_head client_lru;
433 static struct list_head close_lru; 435 static struct list_head close_lru;
434 436
435 /* 437 /*
436 * We store the NONE, READ, WRITE, and BOTH bits separately in the 438 * We store the NONE, READ, WRITE, and BOTH bits separately in the
437 * st_{access,deny}_bmap field of the stateid, in order to track not 439 * st_{access,deny}_bmap field of the stateid, in order to track not
438 * only what share bits are currently in force, but also what 440 * only what share bits are currently in force, but also what
439 * combinations of share bits previous opens have used. This allows us 441 * combinations of share bits previous opens have used. This allows us
440 * to enforce the recommendation of rfc 3530 14.2.19 that the server 442 * to enforce the recommendation of rfc 3530 14.2.19 that the server
441 * return an error if the client attempt to downgrade to a combination 443 * return an error if the client attempt to downgrade to a combination
442 * of share bits not explicable by closing some of its previous opens. 444 * of share bits not explicable by closing some of its previous opens.
443 * 445 *
444 * XXX: This enforcement is actually incomplete, since we don't keep 446 * XXX: This enforcement is actually incomplete, since we don't keep
445 * track of access/deny bit combinations; so, e.g., we allow: 447 * track of access/deny bit combinations; so, e.g., we allow:
446 * 448 *
447 * OPEN allow read, deny write 449 * OPEN allow read, deny write
448 * OPEN allow both, deny none 450 * OPEN allow both, deny none
449 * DOWNGRADE allow read, deny none 451 * DOWNGRADE allow read, deny none
450 * 452 *
451 * which we should reject. 453 * which we should reject.
452 */ 454 */
453 static unsigned int 455 static unsigned int
454 bmap_to_share_mode(unsigned long bmap) { 456 bmap_to_share_mode(unsigned long bmap) {
455 int i; 457 int i;
456 unsigned int access = 0; 458 unsigned int access = 0;
457 459
458 for (i = 1; i < 4; i++) { 460 for (i = 1; i < 4; i++) {
459 if (test_bit(i, &bmap)) 461 if (test_bit(i, &bmap))
460 access |= i; 462 access |= i;
461 } 463 }
462 return access; 464 return access;
463 } 465 }
464 466
465 static bool 467 static bool
466 test_share(struct nfs4_ol_stateid *stp, struct nfsd4_open *open) { 468 test_share(struct nfs4_ol_stateid *stp, struct nfsd4_open *open) {
467 unsigned int access, deny; 469 unsigned int access, deny;
468 470
469 access = bmap_to_share_mode(stp->st_access_bmap); 471 access = bmap_to_share_mode(stp->st_access_bmap);
470 deny = bmap_to_share_mode(stp->st_deny_bmap); 472 deny = bmap_to_share_mode(stp->st_deny_bmap);
471 if ((access & open->op_share_deny) || (deny & open->op_share_access)) 473 if ((access & open->op_share_deny) || (deny & open->op_share_access))
472 return false; 474 return false;
473 return true; 475 return true;
474 } 476 }
475 477
476 /* set share access for a given stateid */ 478 /* set share access for a given stateid */
477 static inline void 479 static inline void
478 set_access(u32 access, struct nfs4_ol_stateid *stp) 480 set_access(u32 access, struct nfs4_ol_stateid *stp)
479 { 481 {
480 __set_bit(access, &stp->st_access_bmap); 482 __set_bit(access, &stp->st_access_bmap);
481 } 483 }
482 484
483 /* clear share access for a given stateid */ 485 /* clear share access for a given stateid */
484 static inline void 486 static inline void
485 clear_access(u32 access, struct nfs4_ol_stateid *stp) 487 clear_access(u32 access, struct nfs4_ol_stateid *stp)
486 { 488 {
487 __clear_bit(access, &stp->st_access_bmap); 489 __clear_bit(access, &stp->st_access_bmap);
488 } 490 }
489 491
490 /* test whether a given stateid has access */ 492 /* test whether a given stateid has access */
491 static inline bool 493 static inline bool
492 test_access(u32 access, struct nfs4_ol_stateid *stp) 494 test_access(u32 access, struct nfs4_ol_stateid *stp)
493 { 495 {
494 return test_bit(access, &stp->st_access_bmap); 496 return test_bit(access, &stp->st_access_bmap);
495 } 497 }
496 498
497 /* set share deny for a given stateid */ 499 /* set share deny for a given stateid */
498 static inline void 500 static inline void
499 set_deny(u32 access, struct nfs4_ol_stateid *stp) 501 set_deny(u32 access, struct nfs4_ol_stateid *stp)
500 { 502 {
501 __set_bit(access, &stp->st_deny_bmap); 503 __set_bit(access, &stp->st_deny_bmap);
502 } 504 }
503 505
504 /* clear share deny for a given stateid */ 506 /* clear share deny for a given stateid */
505 static inline void 507 static inline void
506 clear_deny(u32 access, struct nfs4_ol_stateid *stp) 508 clear_deny(u32 access, struct nfs4_ol_stateid *stp)
507 { 509 {
508 __clear_bit(access, &stp->st_deny_bmap); 510 __clear_bit(access, &stp->st_deny_bmap);
509 } 511 }
510 512
511 /* test whether a given stateid is denying specific access */ 513 /* test whether a given stateid is denying specific access */
512 static inline bool 514 static inline bool
513 test_deny(u32 access, struct nfs4_ol_stateid *stp) 515 test_deny(u32 access, struct nfs4_ol_stateid *stp)
514 { 516 {
515 return test_bit(access, &stp->st_deny_bmap); 517 return test_bit(access, &stp->st_deny_bmap);
516 } 518 }
517 519
518 static int nfs4_access_to_omode(u32 access) 520 static int nfs4_access_to_omode(u32 access)
519 { 521 {
520 switch (access & NFS4_SHARE_ACCESS_BOTH) { 522 switch (access & NFS4_SHARE_ACCESS_BOTH) {
521 case NFS4_SHARE_ACCESS_READ: 523 case NFS4_SHARE_ACCESS_READ:
522 return O_RDONLY; 524 return O_RDONLY;
523 case NFS4_SHARE_ACCESS_WRITE: 525 case NFS4_SHARE_ACCESS_WRITE:
524 return O_WRONLY; 526 return O_WRONLY;
525 case NFS4_SHARE_ACCESS_BOTH: 527 case NFS4_SHARE_ACCESS_BOTH:
526 return O_RDWR; 528 return O_RDWR;
527 } 529 }
528 BUG(); 530 BUG();
529 } 531 }
530 532
531 /* release all access and file references for a given stateid */ 533 /* release all access and file references for a given stateid */
532 static void 534 static void
533 release_all_access(struct nfs4_ol_stateid *stp) 535 release_all_access(struct nfs4_ol_stateid *stp)
534 { 536 {
535 int i; 537 int i;
536 538
537 for (i = 1; i < 4; i++) { 539 for (i = 1; i < 4; i++) {
538 if (test_access(i, stp)) 540 if (test_access(i, stp))
539 nfs4_file_put_access(stp->st_file, 541 nfs4_file_put_access(stp->st_file,
540 nfs4_access_to_omode(i)); 542 nfs4_access_to_omode(i));
541 clear_access(i, stp); 543 clear_access(i, stp);
542 } 544 }
543 } 545 }
544 546
545 static void unhash_generic_stateid(struct nfs4_ol_stateid *stp) 547 static void unhash_generic_stateid(struct nfs4_ol_stateid *stp)
546 { 548 {
547 list_del(&stp->st_perfile); 549 list_del(&stp->st_perfile);
548 list_del(&stp->st_perstateowner); 550 list_del(&stp->st_perstateowner);
549 } 551 }
550 552
551 static void close_generic_stateid(struct nfs4_ol_stateid *stp) 553 static void close_generic_stateid(struct nfs4_ol_stateid *stp)
552 { 554 {
553 release_all_access(stp); 555 release_all_access(stp);
554 put_nfs4_file(stp->st_file); 556 put_nfs4_file(stp->st_file);
555 stp->st_file = NULL; 557 stp->st_file = NULL;
556 } 558 }
557 559
558 static void free_generic_stateid(struct nfs4_ol_stateid *stp) 560 static void free_generic_stateid(struct nfs4_ol_stateid *stp)
559 { 561 {
560 kmem_cache_free(stateid_slab, stp); 562 kmem_cache_free(stateid_slab, stp);
561 } 563 }
562 564
563 static void release_lock_stateid(struct nfs4_ol_stateid *stp) 565 static void release_lock_stateid(struct nfs4_ol_stateid *stp)
564 { 566 {
565 struct file *file; 567 struct file *file;
566 568
567 unhash_generic_stateid(stp); 569 unhash_generic_stateid(stp);
568 unhash_stid(&stp->st_stid); 570 unhash_stid(&stp->st_stid);
569 file = find_any_file(stp->st_file); 571 file = find_any_file(stp->st_file);
570 if (file) 572 if (file)
571 locks_remove_posix(file, (fl_owner_t)lockowner(stp->st_stateowner)); 573 locks_remove_posix(file, (fl_owner_t)lockowner(stp->st_stateowner));
572 close_generic_stateid(stp); 574 close_generic_stateid(stp);
573 free_generic_stateid(stp); 575 free_generic_stateid(stp);
574 } 576 }
575 577
576 static void unhash_lockowner(struct nfs4_lockowner *lo) 578 static void unhash_lockowner(struct nfs4_lockowner *lo)
577 { 579 {
578 struct nfs4_ol_stateid *stp; 580 struct nfs4_ol_stateid *stp;
579 581
580 list_del(&lo->lo_owner.so_strhash); 582 list_del(&lo->lo_owner.so_strhash);
581 list_del(&lo->lo_perstateid); 583 list_del(&lo->lo_perstateid);
582 list_del(&lo->lo_owner_ino_hash); 584 list_del(&lo->lo_owner_ino_hash);
583 while (!list_empty(&lo->lo_owner.so_stateids)) { 585 while (!list_empty(&lo->lo_owner.so_stateids)) {
584 stp = list_first_entry(&lo->lo_owner.so_stateids, 586 stp = list_first_entry(&lo->lo_owner.so_stateids,
585 struct nfs4_ol_stateid, st_perstateowner); 587 struct nfs4_ol_stateid, st_perstateowner);
586 release_lock_stateid(stp); 588 release_lock_stateid(stp);
587 } 589 }
588 } 590 }
589 591
590 static void release_lockowner(struct nfs4_lockowner *lo) 592 static void release_lockowner(struct nfs4_lockowner *lo)
591 { 593 {
592 unhash_lockowner(lo); 594 unhash_lockowner(lo);
593 nfs4_free_lockowner(lo); 595 nfs4_free_lockowner(lo);
594 } 596 }
595 597
596 static void 598 static void
597 release_stateid_lockowners(struct nfs4_ol_stateid *open_stp) 599 release_stateid_lockowners(struct nfs4_ol_stateid *open_stp)
598 { 600 {
599 struct nfs4_lockowner *lo; 601 struct nfs4_lockowner *lo;
600 602
601 while (!list_empty(&open_stp->st_lockowners)) { 603 while (!list_empty(&open_stp->st_lockowners)) {
602 lo = list_entry(open_stp->st_lockowners.next, 604 lo = list_entry(open_stp->st_lockowners.next,
603 struct nfs4_lockowner, lo_perstateid); 605 struct nfs4_lockowner, lo_perstateid);
604 release_lockowner(lo); 606 release_lockowner(lo);
605 } 607 }
606 } 608 }
607 609
608 static void unhash_open_stateid(struct nfs4_ol_stateid *stp) 610 static void unhash_open_stateid(struct nfs4_ol_stateid *stp)
609 { 611 {
610 unhash_generic_stateid(stp); 612 unhash_generic_stateid(stp);
611 release_stateid_lockowners(stp); 613 release_stateid_lockowners(stp);
612 close_generic_stateid(stp); 614 close_generic_stateid(stp);
613 } 615 }
614 616
615 static void release_open_stateid(struct nfs4_ol_stateid *stp) 617 static void release_open_stateid(struct nfs4_ol_stateid *stp)
616 { 618 {
617 unhash_open_stateid(stp); 619 unhash_open_stateid(stp);
618 unhash_stid(&stp->st_stid); 620 unhash_stid(&stp->st_stid);
619 free_generic_stateid(stp); 621 free_generic_stateid(stp);
620 } 622 }
621 623
622 static void unhash_openowner(struct nfs4_openowner *oo) 624 static void unhash_openowner(struct nfs4_openowner *oo)
623 { 625 {
624 struct nfs4_ol_stateid *stp; 626 struct nfs4_ol_stateid *stp;
625 627
626 list_del(&oo->oo_owner.so_strhash); 628 list_del(&oo->oo_owner.so_strhash);
627 list_del(&oo->oo_perclient); 629 list_del(&oo->oo_perclient);
628 while (!list_empty(&oo->oo_owner.so_stateids)) { 630 while (!list_empty(&oo->oo_owner.so_stateids)) {
629 stp = list_first_entry(&oo->oo_owner.so_stateids, 631 stp = list_first_entry(&oo->oo_owner.so_stateids,
630 struct nfs4_ol_stateid, st_perstateowner); 632 struct nfs4_ol_stateid, st_perstateowner);
631 release_open_stateid(stp); 633 release_open_stateid(stp);
632 } 634 }
633 } 635 }
634 636
635 static void release_last_closed_stateid(struct nfs4_openowner *oo) 637 static void release_last_closed_stateid(struct nfs4_openowner *oo)
636 { 638 {
637 struct nfs4_ol_stateid *s = oo->oo_last_closed_stid; 639 struct nfs4_ol_stateid *s = oo->oo_last_closed_stid;
638 640
639 if (s) { 641 if (s) {
640 unhash_stid(&s->st_stid); 642 unhash_stid(&s->st_stid);
641 free_generic_stateid(s); 643 free_generic_stateid(s);
642 oo->oo_last_closed_stid = NULL; 644 oo->oo_last_closed_stid = NULL;
643 } 645 }
644 } 646 }
645 647
646 static void release_openowner(struct nfs4_openowner *oo) 648 static void release_openowner(struct nfs4_openowner *oo)
647 { 649 {
648 unhash_openowner(oo); 650 unhash_openowner(oo);
649 list_del(&oo->oo_close_lru); 651 list_del(&oo->oo_close_lru);
650 release_last_closed_stateid(oo); 652 release_last_closed_stateid(oo);
651 nfs4_free_openowner(oo); 653 nfs4_free_openowner(oo);
652 } 654 }
653 655
654 #define SESSION_HASH_SIZE 512 656 #define SESSION_HASH_SIZE 512
655 static struct list_head sessionid_hashtbl[SESSION_HASH_SIZE]; 657 static struct list_head sessionid_hashtbl[SESSION_HASH_SIZE];
656 658
657 static inline int 659 static inline int
658 hash_sessionid(struct nfs4_sessionid *sessionid) 660 hash_sessionid(struct nfs4_sessionid *sessionid)
659 { 661 {
660 struct nfsd4_sessionid *sid = (struct nfsd4_sessionid *)sessionid; 662 struct nfsd4_sessionid *sid = (struct nfsd4_sessionid *)sessionid;
661 663
662 return sid->sequence % SESSION_HASH_SIZE; 664 return sid->sequence % SESSION_HASH_SIZE;
663 } 665 }
664 666
665 #ifdef NFSD_DEBUG 667 #ifdef NFSD_DEBUG
666 static inline void 668 static inline void
667 dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid) 669 dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
668 { 670 {
669 u32 *ptr = (u32 *)(&sessionid->data[0]); 671 u32 *ptr = (u32 *)(&sessionid->data[0]);
670 dprintk("%s: %u:%u:%u:%u\n", fn, ptr[0], ptr[1], ptr[2], ptr[3]); 672 dprintk("%s: %u:%u:%u:%u\n", fn, ptr[0], ptr[1], ptr[2], ptr[3]);
671 } 673 }
672 #else 674 #else
673 static inline void 675 static inline void
674 dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid) 676 dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
675 { 677 {
676 } 678 }
677 #endif 679 #endif
678 680
679 681
680 static void 682 static void
681 gen_sessionid(struct nfsd4_session *ses) 683 gen_sessionid(struct nfsd4_session *ses)
682 { 684 {
683 struct nfs4_client *clp = ses->se_client; 685 struct nfs4_client *clp = ses->se_client;
684 struct nfsd4_sessionid *sid; 686 struct nfsd4_sessionid *sid;
685 687
686 sid = (struct nfsd4_sessionid *)ses->se_sessionid.data; 688 sid = (struct nfsd4_sessionid *)ses->se_sessionid.data;
687 sid->clientid = clp->cl_clientid; 689 sid->clientid = clp->cl_clientid;
688 sid->sequence = current_sessionid++; 690 sid->sequence = current_sessionid++;
689 sid->reserved = 0; 691 sid->reserved = 0;
690 } 692 }
691 693
692 /* 694 /*
693 * The protocol defines ca_maxresponssize_cached to include the size of 695 * The protocol defines ca_maxresponssize_cached to include the size of
694 * the rpc header, but all we need to cache is the data starting after 696 * the rpc header, but all we need to cache is the data starting after
695 * the end of the initial SEQUENCE operation--the rest we regenerate 697 * the end of the initial SEQUENCE operation--the rest we regenerate
696 * each time. Therefore we can advertise a ca_maxresponssize_cached 698 * each time. Therefore we can advertise a ca_maxresponssize_cached
697 * value that is the number of bytes in our cache plus a few additional 699 * value that is the number of bytes in our cache plus a few additional
698 * bytes. In order to stay on the safe side, and not promise more than 700 * bytes. In order to stay on the safe side, and not promise more than
699 * we can cache, those additional bytes must be the minimum possible: 24 701 * we can cache, those additional bytes must be the minimum possible: 24
700 * bytes of rpc header (xid through accept state, with AUTH_NULL 702 * bytes of rpc header (xid through accept state, with AUTH_NULL
701 * verifier), 12 for the compound header (with zero-length tag), and 44 703 * verifier), 12 for the compound header (with zero-length tag), and 44
702 * for the SEQUENCE op response: 704 * for the SEQUENCE op response:
703 */ 705 */
704 #define NFSD_MIN_HDR_SEQ_SZ (24 + 12 + 44) 706 #define NFSD_MIN_HDR_SEQ_SZ (24 + 12 + 44)
705 707
706 static void 708 static void
707 free_session_slots(struct nfsd4_session *ses) 709 free_session_slots(struct nfsd4_session *ses)
708 { 710 {
709 int i; 711 int i;
710 712
711 for (i = 0; i < ses->se_fchannel.maxreqs; i++) 713 for (i = 0; i < ses->se_fchannel.maxreqs; i++)
712 kfree(ses->se_slots[i]); 714 kfree(ses->se_slots[i]);
713 } 715 }
714 716
715 /* 717 /*
716 * We don't actually need to cache the rpc and session headers, so we 718 * We don't actually need to cache the rpc and session headers, so we
717 * can allocate a little less for each slot: 719 * can allocate a little less for each slot:
718 */ 720 */
719 static inline int slot_bytes(struct nfsd4_channel_attrs *ca) 721 static inline int slot_bytes(struct nfsd4_channel_attrs *ca)
720 { 722 {
721 return ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ; 723 return ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ;
722 } 724 }
723 725
724 static int nfsd4_sanitize_slot_size(u32 size) 726 static int nfsd4_sanitize_slot_size(u32 size)
725 { 727 {
726 size -= NFSD_MIN_HDR_SEQ_SZ; /* We don't cache the rpc header */ 728 size -= NFSD_MIN_HDR_SEQ_SZ; /* We don't cache the rpc header */
727 size = min_t(u32, size, NFSD_SLOT_CACHE_SIZE); 729 size = min_t(u32, size, NFSD_SLOT_CACHE_SIZE);
728 730
729 return size; 731 return size;
730 } 732 }
731 733
732 /* 734 /*
733 * XXX: If we run out of reserved DRC memory we could (up to a point) 735 * XXX: If we run out of reserved DRC memory we could (up to a point)
734 * re-negotiate active sessions and reduce their slot usage to make 736 * re-negotiate active sessions and reduce their slot usage to make
735 * room for new connections. For now we just fail the create session. 737 * room for new connections. For now we just fail the create session.
736 */ 738 */
737 static int nfsd4_get_drc_mem(int slotsize, u32 num) 739 static int nfsd4_get_drc_mem(int slotsize, u32 num)
738 { 740 {
739 int avail; 741 int avail;
740 742
741 num = min_t(u32, num, NFSD_MAX_SLOTS_PER_SESSION); 743 num = min_t(u32, num, NFSD_MAX_SLOTS_PER_SESSION);
742 744
743 spin_lock(&nfsd_drc_lock); 745 spin_lock(&nfsd_drc_lock);
744 avail = min_t(int, NFSD_MAX_MEM_PER_SESSION, 746 avail = min_t(int, NFSD_MAX_MEM_PER_SESSION,
745 nfsd_drc_max_mem - nfsd_drc_mem_used); 747 nfsd_drc_max_mem - nfsd_drc_mem_used);
746 num = min_t(int, num, avail / slotsize); 748 num = min_t(int, num, avail / slotsize);
747 nfsd_drc_mem_used += num * slotsize; 749 nfsd_drc_mem_used += num * slotsize;
748 spin_unlock(&nfsd_drc_lock); 750 spin_unlock(&nfsd_drc_lock);
749 751
750 return num; 752 return num;
751 } 753 }
752 754
753 static void nfsd4_put_drc_mem(int slotsize, int num) 755 static void nfsd4_put_drc_mem(int slotsize, int num)
754 { 756 {
755 spin_lock(&nfsd_drc_lock); 757 spin_lock(&nfsd_drc_lock);
756 nfsd_drc_mem_used -= slotsize * num; 758 nfsd_drc_mem_used -= slotsize * num;
757 spin_unlock(&nfsd_drc_lock); 759 spin_unlock(&nfsd_drc_lock);
758 } 760 }
759 761
760 static struct nfsd4_session *alloc_session(int slotsize, int numslots) 762 static struct nfsd4_session *alloc_session(int slotsize, int numslots)
761 { 763 {
762 struct nfsd4_session *new; 764 struct nfsd4_session *new;
763 int mem, i; 765 int mem, i;
764 766
765 BUILD_BUG_ON(NFSD_MAX_SLOTS_PER_SESSION * sizeof(struct nfsd4_slot *) 767 BUILD_BUG_ON(NFSD_MAX_SLOTS_PER_SESSION * sizeof(struct nfsd4_slot *)
766 + sizeof(struct nfsd4_session) > PAGE_SIZE); 768 + sizeof(struct nfsd4_session) > PAGE_SIZE);
767 mem = numslots * sizeof(struct nfsd4_slot *); 769 mem = numslots * sizeof(struct nfsd4_slot *);
768 770
769 new = kzalloc(sizeof(*new) + mem, GFP_KERNEL); 771 new = kzalloc(sizeof(*new) + mem, GFP_KERNEL);
770 if (!new) 772 if (!new)
771 return NULL; 773 return NULL;
772 /* allocate each struct nfsd4_slot and data cache in one piece */ 774 /* allocate each struct nfsd4_slot and data cache in one piece */
773 for (i = 0; i < numslots; i++) { 775 for (i = 0; i < numslots; i++) {
774 mem = sizeof(struct nfsd4_slot) + slotsize; 776 mem = sizeof(struct nfsd4_slot) + slotsize;
775 new->se_slots[i] = kzalloc(mem, GFP_KERNEL); 777 new->se_slots[i] = kzalloc(mem, GFP_KERNEL);
776 if (!new->se_slots[i]) 778 if (!new->se_slots[i])
777 goto out_free; 779 goto out_free;
778 } 780 }
779 return new; 781 return new;
780 out_free: 782 out_free:
781 while (i--) 783 while (i--)
782 kfree(new->se_slots[i]); 784 kfree(new->se_slots[i]);
783 kfree(new); 785 kfree(new);
784 return NULL; 786 return NULL;
785 } 787 }
786 788
787 static void init_forechannel_attrs(struct nfsd4_channel_attrs *new, struct nfsd4_channel_attrs *req, int numslots, int slotsize) 789 static void init_forechannel_attrs(struct nfsd4_channel_attrs *new, struct nfsd4_channel_attrs *req, int numslots, int slotsize)
788 { 790 {
789 u32 maxrpc = nfsd_serv->sv_max_mesg; 791 u32 maxrpc = nfsd_serv->sv_max_mesg;
790 792
791 new->maxreqs = numslots; 793 new->maxreqs = numslots;
792 new->maxresp_cached = min_t(u32, req->maxresp_cached, 794 new->maxresp_cached = min_t(u32, req->maxresp_cached,
793 slotsize + NFSD_MIN_HDR_SEQ_SZ); 795 slotsize + NFSD_MIN_HDR_SEQ_SZ);
794 new->maxreq_sz = min_t(u32, req->maxreq_sz, maxrpc); 796 new->maxreq_sz = min_t(u32, req->maxreq_sz, maxrpc);
795 new->maxresp_sz = min_t(u32, req->maxresp_sz, maxrpc); 797 new->maxresp_sz = min_t(u32, req->maxresp_sz, maxrpc);
796 new->maxops = min_t(u32, req->maxops, NFSD_MAX_OPS_PER_COMPOUND); 798 new->maxops = min_t(u32, req->maxops, NFSD_MAX_OPS_PER_COMPOUND);
797 } 799 }
798 800
799 static void free_conn(struct nfsd4_conn *c) 801 static void free_conn(struct nfsd4_conn *c)
800 { 802 {
801 svc_xprt_put(c->cn_xprt); 803 svc_xprt_put(c->cn_xprt);
802 kfree(c); 804 kfree(c);
803 } 805 }
804 806
805 static void nfsd4_conn_lost(struct svc_xpt_user *u) 807 static void nfsd4_conn_lost(struct svc_xpt_user *u)
806 { 808 {
807 struct nfsd4_conn *c = container_of(u, struct nfsd4_conn, cn_xpt_user); 809 struct nfsd4_conn *c = container_of(u, struct nfsd4_conn, cn_xpt_user);
808 struct nfs4_client *clp = c->cn_session->se_client; 810 struct nfs4_client *clp = c->cn_session->se_client;
809 811
810 spin_lock(&clp->cl_lock); 812 spin_lock(&clp->cl_lock);
811 if (!list_empty(&c->cn_persession)) { 813 if (!list_empty(&c->cn_persession)) {
812 list_del(&c->cn_persession); 814 list_del(&c->cn_persession);
813 free_conn(c); 815 free_conn(c);
814 } 816 }
815 spin_unlock(&clp->cl_lock); 817 spin_unlock(&clp->cl_lock);
816 nfsd4_probe_callback(clp); 818 nfsd4_probe_callback(clp);
817 } 819 }
818 820
819 static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags) 821 static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags)
820 { 822 {
821 struct nfsd4_conn *conn; 823 struct nfsd4_conn *conn;
822 824
823 conn = kmalloc(sizeof(struct nfsd4_conn), GFP_KERNEL); 825 conn = kmalloc(sizeof(struct nfsd4_conn), GFP_KERNEL);
824 if (!conn) 826 if (!conn)
825 return NULL; 827 return NULL;
826 svc_xprt_get(rqstp->rq_xprt); 828 svc_xprt_get(rqstp->rq_xprt);
827 conn->cn_xprt = rqstp->rq_xprt; 829 conn->cn_xprt = rqstp->rq_xprt;
828 conn->cn_flags = flags; 830 conn->cn_flags = flags;
829 INIT_LIST_HEAD(&conn->cn_xpt_user.list); 831 INIT_LIST_HEAD(&conn->cn_xpt_user.list);
830 return conn; 832 return conn;
831 } 833 }
832 834
833 static void __nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses) 835 static void __nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
834 { 836 {
835 conn->cn_session = ses; 837 conn->cn_session = ses;
836 list_add(&conn->cn_persession, &ses->se_conns); 838 list_add(&conn->cn_persession, &ses->se_conns);
837 } 839 }
838 840
839 static void nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses) 841 static void nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
840 { 842 {
841 struct nfs4_client *clp = ses->se_client; 843 struct nfs4_client *clp = ses->se_client;
842 844
843 spin_lock(&clp->cl_lock); 845 spin_lock(&clp->cl_lock);
844 __nfsd4_hash_conn(conn, ses); 846 __nfsd4_hash_conn(conn, ses);
845 spin_unlock(&clp->cl_lock); 847 spin_unlock(&clp->cl_lock);
846 } 848 }
847 849
848 static int nfsd4_register_conn(struct nfsd4_conn *conn) 850 static int nfsd4_register_conn(struct nfsd4_conn *conn)
849 { 851 {
850 conn->cn_xpt_user.callback = nfsd4_conn_lost; 852 conn->cn_xpt_user.callback = nfsd4_conn_lost;
851 return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user); 853 return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user);
852 } 854 }
853 855
854 static __be32 nfsd4_new_conn(struct svc_rqst *rqstp, struct nfsd4_session *ses, u32 dir) 856 static __be32 nfsd4_new_conn(struct svc_rqst *rqstp, struct nfsd4_session *ses, u32 dir)
855 { 857 {
856 struct nfsd4_conn *conn; 858 struct nfsd4_conn *conn;
857 int ret; 859 int ret;
858 860
859 conn = alloc_conn(rqstp, dir); 861 conn = alloc_conn(rqstp, dir);
860 if (!conn) 862 if (!conn)
861 return nfserr_jukebox; 863 return nfserr_jukebox;
862 nfsd4_hash_conn(conn, ses); 864 nfsd4_hash_conn(conn, ses);
863 ret = nfsd4_register_conn(conn); 865 ret = nfsd4_register_conn(conn);
864 if (ret) 866 if (ret)
865 /* oops; xprt is already down: */ 867 /* oops; xprt is already down: */
866 nfsd4_conn_lost(&conn->cn_xpt_user); 868 nfsd4_conn_lost(&conn->cn_xpt_user);
867 if (ses->se_client->cl_cb_state == NFSD4_CB_DOWN && 869 if (ses->se_client->cl_cb_state == NFSD4_CB_DOWN &&
868 dir & NFS4_CDFC4_BACK) { 870 dir & NFS4_CDFC4_BACK) {
869 /* callback channel may be back up */ 871 /* callback channel may be back up */
870 nfsd4_probe_callback(ses->se_client); 872 nfsd4_probe_callback(ses->se_client);
871 } 873 }
872 return nfs_ok; 874 return nfs_ok;
873 } 875 }
874 876
875 static __be32 nfsd4_new_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_session *ses) 877 static __be32 nfsd4_new_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_session *ses)
876 { 878 {
877 u32 dir = NFS4_CDFC4_FORE; 879 u32 dir = NFS4_CDFC4_FORE;
878 880
879 if (ses->se_flags & SESSION4_BACK_CHAN) 881 if (ses->se_flags & SESSION4_BACK_CHAN)
880 dir |= NFS4_CDFC4_BACK; 882 dir |= NFS4_CDFC4_BACK;
881 883
882 return nfsd4_new_conn(rqstp, ses, dir); 884 return nfsd4_new_conn(rqstp, ses, dir);
883 } 885 }
884 886
885 /* must be called under client_lock */ 887 /* must be called under client_lock */
886 static void nfsd4_del_conns(struct nfsd4_session *s) 888 static void nfsd4_del_conns(struct nfsd4_session *s)
887 { 889 {
888 struct nfs4_client *clp = s->se_client; 890 struct nfs4_client *clp = s->se_client;
889 struct nfsd4_conn *c; 891 struct nfsd4_conn *c;
890 892
891 spin_lock(&clp->cl_lock); 893 spin_lock(&clp->cl_lock);
892 while (!list_empty(&s->se_conns)) { 894 while (!list_empty(&s->se_conns)) {
893 c = list_first_entry(&s->se_conns, struct nfsd4_conn, cn_persession); 895 c = list_first_entry(&s->se_conns, struct nfsd4_conn, cn_persession);
894 list_del_init(&c->cn_persession); 896 list_del_init(&c->cn_persession);
895 spin_unlock(&clp->cl_lock); 897 spin_unlock(&clp->cl_lock);
896 898
897 unregister_xpt_user(c->cn_xprt, &c->cn_xpt_user); 899 unregister_xpt_user(c->cn_xprt, &c->cn_xpt_user);
898 free_conn(c); 900 free_conn(c);
899 901
900 spin_lock(&clp->cl_lock); 902 spin_lock(&clp->cl_lock);
901 } 903 }
902 spin_unlock(&clp->cl_lock); 904 spin_unlock(&clp->cl_lock);
903 } 905 }
904 906
905 static void free_session(struct kref *kref) 907 static void free_session(struct kref *kref)
906 { 908 {
907 struct nfsd4_session *ses; 909 struct nfsd4_session *ses;
908 int mem; 910 int mem;
909 911
910 lockdep_assert_held(&client_lock); 912 lockdep_assert_held(&client_lock);
911 ses = container_of(kref, struct nfsd4_session, se_ref); 913 ses = container_of(kref, struct nfsd4_session, se_ref);
912 nfsd4_del_conns(ses); 914 nfsd4_del_conns(ses);
913 spin_lock(&nfsd_drc_lock); 915 spin_lock(&nfsd_drc_lock);
914 mem = ses->se_fchannel.maxreqs * slot_bytes(&ses->se_fchannel); 916 mem = ses->se_fchannel.maxreqs * slot_bytes(&ses->se_fchannel);
915 nfsd_drc_mem_used -= mem; 917 nfsd_drc_mem_used -= mem;
916 spin_unlock(&nfsd_drc_lock); 918 spin_unlock(&nfsd_drc_lock);
917 free_session_slots(ses); 919 free_session_slots(ses);
918 kfree(ses); 920 kfree(ses);
919 } 921 }
920 922
921 void nfsd4_put_session(struct nfsd4_session *ses) 923 void nfsd4_put_session(struct nfsd4_session *ses)
922 { 924 {
923 spin_lock(&client_lock); 925 spin_lock(&client_lock);
924 nfsd4_put_session_locked(ses); 926 nfsd4_put_session_locked(ses);
925 spin_unlock(&client_lock); 927 spin_unlock(&client_lock);
926 } 928 }
927 929
928 static struct nfsd4_session *alloc_init_session(struct svc_rqst *rqstp, struct nfs4_client *clp, struct nfsd4_create_session *cses) 930 static struct nfsd4_session *alloc_init_session(struct svc_rqst *rqstp, struct nfs4_client *clp, struct nfsd4_create_session *cses)
929 { 931 {
930 struct nfsd4_session *new; 932 struct nfsd4_session *new;
931 struct nfsd4_channel_attrs *fchan = &cses->fore_channel; 933 struct nfsd4_channel_attrs *fchan = &cses->fore_channel;
932 int numslots, slotsize; 934 int numslots, slotsize;
933 __be32 status; 935 __be32 status;
934 int idx; 936 int idx;
935 937
936 /* 938 /*
937 * Note decreasing slot size below client's request may 939 * Note decreasing slot size below client's request may
938 * make it difficult for client to function correctly, whereas 940 * make it difficult for client to function correctly, whereas
939 * decreasing the number of slots will (just?) affect 941 * decreasing the number of slots will (just?) affect
940 * performance. When short on memory we therefore prefer to 942 * performance. When short on memory we therefore prefer to
941 * decrease number of slots instead of their size. 943 * decrease number of slots instead of their size.
942 */ 944 */
943 slotsize = nfsd4_sanitize_slot_size(fchan->maxresp_cached); 945 slotsize = nfsd4_sanitize_slot_size(fchan->maxresp_cached);
944 numslots = nfsd4_get_drc_mem(slotsize, fchan->maxreqs); 946 numslots = nfsd4_get_drc_mem(slotsize, fchan->maxreqs);
945 if (numslots < 1) 947 if (numslots < 1)
946 return NULL; 948 return NULL;
947 949
948 new = alloc_session(slotsize, numslots); 950 new = alloc_session(slotsize, numslots);
949 if (!new) { 951 if (!new) {
950 nfsd4_put_drc_mem(slotsize, fchan->maxreqs); 952 nfsd4_put_drc_mem(slotsize, fchan->maxreqs);
951 return NULL; 953 return NULL;
952 } 954 }
953 init_forechannel_attrs(&new->se_fchannel, fchan, numslots, slotsize); 955 init_forechannel_attrs(&new->se_fchannel, fchan, numslots, slotsize);
954 956
955 new->se_client = clp; 957 new->se_client = clp;
956 gen_sessionid(new); 958 gen_sessionid(new);
957 959
958 INIT_LIST_HEAD(&new->se_conns); 960 INIT_LIST_HEAD(&new->se_conns);
959 961
960 new->se_cb_seq_nr = 1; 962 new->se_cb_seq_nr = 1;
961 new->se_flags = cses->flags; 963 new->se_flags = cses->flags;
962 new->se_cb_prog = cses->callback_prog; 964 new->se_cb_prog = cses->callback_prog;
963 kref_init(&new->se_ref); 965 kref_init(&new->se_ref);
964 idx = hash_sessionid(&new->se_sessionid); 966 idx = hash_sessionid(&new->se_sessionid);
965 spin_lock(&client_lock); 967 spin_lock(&client_lock);
966 list_add(&new->se_hash, &sessionid_hashtbl[idx]); 968 list_add(&new->se_hash, &sessionid_hashtbl[idx]);
967 spin_lock(&clp->cl_lock); 969 spin_lock(&clp->cl_lock);
968 list_add(&new->se_perclnt, &clp->cl_sessions); 970 list_add(&new->se_perclnt, &clp->cl_sessions);
969 spin_unlock(&clp->cl_lock); 971 spin_unlock(&clp->cl_lock);
970 spin_unlock(&client_lock); 972 spin_unlock(&client_lock);
971 973
972 status = nfsd4_new_conn_from_crses(rqstp, new); 974 status = nfsd4_new_conn_from_crses(rqstp, new);
973 /* whoops: benny points out, status is ignored! (err, or bogus) */ 975 /* whoops: benny points out, status is ignored! (err, or bogus) */
974 if (status) { 976 if (status) {
975 spin_lock(&client_lock); 977 spin_lock(&client_lock);
976 free_session(&new->se_ref); 978 free_session(&new->se_ref);
977 spin_unlock(&client_lock); 979 spin_unlock(&client_lock);
978 return NULL; 980 return NULL;
979 } 981 }
980 if (cses->flags & SESSION4_BACK_CHAN) { 982 if (cses->flags & SESSION4_BACK_CHAN) {
981 struct sockaddr *sa = svc_addr(rqstp); 983 struct sockaddr *sa = svc_addr(rqstp);
982 /* 984 /*
983 * This is a little silly; with sessions there's no real 985 * This is a little silly; with sessions there's no real
984 * use for the callback address. Use the peer address 986 * use for the callback address. Use the peer address
985 * as a reasonable default for now, but consider fixing 987 * as a reasonable default for now, but consider fixing
986 * the rpc client not to require an address in the 988 * the rpc client not to require an address in the
987 * future: 989 * future:
988 */ 990 */
989 rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa); 991 rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa);
990 clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa); 992 clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa);
991 } 993 }
992 nfsd4_probe_callback(clp); 994 nfsd4_probe_callback(clp);
993 return new; 995 return new;
994 } 996 }
995 997
996 /* caller must hold client_lock */ 998 /* caller must hold client_lock */
997 static struct nfsd4_session * 999 static struct nfsd4_session *
998 find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid) 1000 find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid)
999 { 1001 {
1000 struct nfsd4_session *elem; 1002 struct nfsd4_session *elem;
1001 int idx; 1003 int idx;
1002 1004
1003 dump_sessionid(__func__, sessionid); 1005 dump_sessionid(__func__, sessionid);
1004 idx = hash_sessionid(sessionid); 1006 idx = hash_sessionid(sessionid);
1005 /* Search in the appropriate list */ 1007 /* Search in the appropriate list */
1006 list_for_each_entry(elem, &sessionid_hashtbl[idx], se_hash) { 1008 list_for_each_entry(elem, &sessionid_hashtbl[idx], se_hash) {
1007 if (!memcmp(elem->se_sessionid.data, sessionid->data, 1009 if (!memcmp(elem->se_sessionid.data, sessionid->data,
1008 NFS4_MAX_SESSIONID_LEN)) { 1010 NFS4_MAX_SESSIONID_LEN)) {
1009 return elem; 1011 return elem;
1010 } 1012 }
1011 } 1013 }
1012 1014
1013 dprintk("%s: session not found\n", __func__); 1015 dprintk("%s: session not found\n", __func__);
1014 return NULL; 1016 return NULL;
1015 } 1017 }
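
As an illustrative aside (not code from this commit), find_in_sessionid_hashtbl() above is a plain hash-bucket lookup: hash the opaque sessionid into a bucket, then memcmp() the full id against each entry on that chain. A minimal stand-alone sketch of the same pattern, with made-up names and a toy hash:

#include <stdio.h>
#include <string.h>

#define ID_LEN    16            /* stands in for NFS4_MAX_SESSIONID_LEN */
#define HASH_SIZE 8             /* illustrative bucket count */

struct entry {
        unsigned char id[ID_LEN];
        struct entry *next;     /* singly linked bucket chain */
};

static struct entry *table[HASH_SIZE];

/* Trivial hash: XOR all bytes, mask into the table. */
static unsigned int hash_id(const unsigned char *id)
{
        unsigned int h = 0;
        for (int i = 0; i < ID_LEN; i++)
                h ^= id[i];
        return h & (HASH_SIZE - 1);
}

static struct entry *find_entry(const unsigned char *id)
{
        /* Search only the bucket the id hashes to, comparing full ids. */
        for (struct entry *e = table[hash_id(id)]; e; e = e->next)
                if (!memcmp(e->id, id, ID_LEN))
                        return e;
        return NULL;
}

int main(void)
{
        static struct entry a = { .id = "session-a" };
        unsigned int bucket = hash_id(a.id);

        a.next = table[bucket];
        table[bucket] = &a;
        printf("found: %s\n", find_entry(a.id) ? "yes" : "no");
        return 0;
}
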
1016 1018
1017 /* caller must hold client_lock */ 1019 /* caller must hold client_lock */
1018 static void 1020 static void
1019 unhash_session(struct nfsd4_session *ses) 1021 unhash_session(struct nfsd4_session *ses)
1020 { 1022 {
1021 list_del(&ses->se_hash); 1023 list_del(&ses->se_hash);
1022 spin_lock(&ses->se_client->cl_lock); 1024 spin_lock(&ses->se_client->cl_lock);
1023 list_del(&ses->se_perclnt); 1025 list_del(&ses->se_perclnt);
1024 spin_unlock(&ses->se_client->cl_lock); 1026 spin_unlock(&ses->se_client->cl_lock);
1025 } 1027 }
1026 1028
1027 /* must be called under the client_lock */ 1029 /* must be called under the client_lock */
1028 static inline void 1030 static inline void
1029 renew_client_locked(struct nfs4_client *clp) 1031 renew_client_locked(struct nfs4_client *clp)
1030 { 1032 {
1031 if (is_client_expired(clp)) { 1033 if (is_client_expired(clp)) {
1032 WARN_ON(1); 1034 WARN_ON(1);
1033 printk("%s: client (clientid %08x/%08x) already expired\n", 1035 printk("%s: client (clientid %08x/%08x) already expired\n",
1034 __func__, 1036 __func__,
1035 clp->cl_clientid.cl_boot, 1037 clp->cl_clientid.cl_boot,
1036 clp->cl_clientid.cl_id); 1038 clp->cl_clientid.cl_id);
1037 return; 1039 return;
1038 } 1040 }
1039 1041
1040 dprintk("renewing client (clientid %08x/%08x)\n", 1042 dprintk("renewing client (clientid %08x/%08x)\n",
1041 clp->cl_clientid.cl_boot, 1043 clp->cl_clientid.cl_boot,
1042 clp->cl_clientid.cl_id); 1044 clp->cl_clientid.cl_id);
1043 list_move_tail(&clp->cl_lru, &client_lru); 1045 list_move_tail(&clp->cl_lru, &client_lru);
1044 clp->cl_time = get_seconds(); 1046 clp->cl_time = get_seconds();
1045 } 1047 }
1046 1048
1047 static inline void 1049 static inline void
1048 renew_client(struct nfs4_client *clp) 1050 renew_client(struct nfs4_client *clp)
1049 { 1051 {
1050 spin_lock(&client_lock); 1052 spin_lock(&client_lock);
1051 renew_client_locked(clp); 1053 renew_client_locked(clp);
1052 spin_unlock(&client_lock); 1054 spin_unlock(&client_lock);
1053 } 1055 }
1054 1056
1055 /* SETCLIENTID and SETCLIENTID_CONFIRM Helper functions */ 1057 /* SETCLIENTID and SETCLIENTID_CONFIRM Helper functions */
1056 static int 1058 static int
1057 STALE_CLIENTID(clientid_t *clid) 1059 STALE_CLIENTID(clientid_t *clid)
1058 { 1060 {
1059 if (clid->cl_boot == boot_time) 1061 if (clid->cl_boot == boot_time)
1060 return 0; 1062 return 0;
1061 dprintk("NFSD stale clientid (%08x/%08x) boot_time %08lx\n", 1063 dprintk("NFSD stale clientid (%08x/%08x) boot_time %08lx\n",
1062 clid->cl_boot, clid->cl_id, boot_time); 1064 clid->cl_boot, clid->cl_id, boot_time);
1063 return 1; 1065 return 1;
1064 } 1066 }
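
For illustration only (not part of this patch): STALE_CLIENTID() works because the boot-time epoch is baked into every clientid the server hands out (see gen_clid() further down); after a reboot the epochs no longer match and the client must re-establish its state. A small user-space sketch of that check, with assumed names:

#include <stdio.h>
#include <time.h>

/* Set once at (simulated) server start; a clientid minted before a
 * restart carries the old epoch and is therefore stale. */
static time_t boot_time;

struct clientid {
        unsigned int cl_boot;   /* boot epoch the id was handed out under */
        unsigned int cl_id;     /* per-boot counter */
};

static int clientid_is_stale(const struct clientid *clid)
{
        return clid->cl_boot != (unsigned int)boot_time;
}

int main(void)
{
        boot_time = time(NULL);

        struct clientid fresh = { (unsigned int)boot_time, 1 };
        struct clientid old   = { (unsigned int)boot_time - 100, 1 };

        printf("fresh stale? %d\n", clientid_is_stale(&fresh)); /* 0 */
        printf("old   stale? %d\n", clientid_is_stale(&old));   /* 1 */
        return 0;
}
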
1065 1067
1066 /* 1068 /*
1067 * XXX Should we use a slab cache? 1069 * XXX Should we use a slab cache?
1068 * This type of memory management is somewhat inefficient, but we use it 1070 * This type of memory management is somewhat inefficient, but we use it
1069 * anyway since SETCLIENTID is not a common operation. 1071 * anyway since SETCLIENTID is not a common operation.
1070 */ 1072 */
1071 static struct nfs4_client *alloc_client(struct xdr_netobj name) 1073 static struct nfs4_client *alloc_client(struct xdr_netobj name)
1072 { 1074 {
1073 struct nfs4_client *clp; 1075 struct nfs4_client *clp;
1074 1076
1075 clp = kzalloc(sizeof(struct nfs4_client), GFP_KERNEL); 1077 clp = kzalloc(sizeof(struct nfs4_client), GFP_KERNEL);
1076 if (clp == NULL) 1078 if (clp == NULL)
1077 return NULL; 1079 return NULL;
1078 clp->cl_name.data = kmemdup(name.data, name.len, GFP_KERNEL); 1080 clp->cl_name.data = kmemdup(name.data, name.len, GFP_KERNEL);
1079 if (clp->cl_name.data == NULL) { 1081 if (clp->cl_name.data == NULL) {
1080 kfree(clp); 1082 kfree(clp);
1081 return NULL; 1083 return NULL;
1082 } 1084 }
1083 clp->cl_name.len = name.len; 1085 clp->cl_name.len = name.len;
1084 return clp; 1086 return clp;
1085 } 1087 }
1086 1088
1087 static inline void 1089 static inline void
1088 free_client(struct nfs4_client *clp) 1090 free_client(struct nfs4_client *clp)
1089 { 1091 {
1090 lockdep_assert_held(&client_lock); 1092 lockdep_assert_held(&client_lock);
1091 while (!list_empty(&clp->cl_sessions)) { 1093 while (!list_empty(&clp->cl_sessions)) {
1092 struct nfsd4_session *ses; 1094 struct nfsd4_session *ses;
1093 ses = list_entry(clp->cl_sessions.next, struct nfsd4_session, 1095 ses = list_entry(clp->cl_sessions.next, struct nfsd4_session,
1094 se_perclnt); 1096 se_perclnt);
1095 list_del(&ses->se_perclnt); 1097 list_del(&ses->se_perclnt);
1096 nfsd4_put_session_locked(ses); 1098 nfsd4_put_session_locked(ses);
1097 } 1099 }
1098 free_svc_cred(&clp->cl_cred); 1100 free_svc_cred(&clp->cl_cred);
1099 kfree(clp->cl_name.data); 1101 kfree(clp->cl_name.data);
1100 kfree(clp); 1102 kfree(clp);
1101 } 1103 }
1102 1104
1103 void 1105 void
1104 release_session_client(struct nfsd4_session *session) 1106 release_session_client(struct nfsd4_session *session)
1105 { 1107 {
1106 struct nfs4_client *clp = session->se_client; 1108 struct nfs4_client *clp = session->se_client;
1107 1109
1108 if (!atomic_dec_and_lock(&clp->cl_refcount, &client_lock)) 1110 if (!atomic_dec_and_lock(&clp->cl_refcount, &client_lock))
1109 return; 1111 return;
1110 if (is_client_expired(clp)) { 1112 if (is_client_expired(clp)) {
1111 free_client(clp); 1113 free_client(clp);
1112 session->se_client = NULL; 1114 session->se_client = NULL;
1113 } else 1115 } else
1114 renew_client_locked(clp); 1116 renew_client_locked(clp);
1115 spin_unlock(&client_lock); 1117 spin_unlock(&client_lock);
1116 } 1118 }
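
An aside for readers unfamiliar with the primitive: release_session_client() leans on atomic_dec_and_lock(), which takes client_lock only when the reference count actually falls to zero, so the common put stays cheap. Below is a rough pthreads sketch of the dec-and-lock pattern; it is not the kernel implementation (the real primitive skips the lock entirely on the fast path, this simplified version always takes it):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct client {
        atomic_int refcount;
        pthread_mutex_t *lock;  /* protects the table the client lives on */
        int expired;
};

/* Decrement; if the count hits zero, return 1 with *lock held. */
static int dec_and_lock(atomic_int *cnt, pthread_mutex_t *lock)
{
        pthread_mutex_lock(lock);
        if (atomic_fetch_sub(cnt, 1) == 1)
                return 1;               /* dropped to zero, keep the lock */
        pthread_mutex_unlock(lock);
        return 0;
}

static void put_client(struct client *clp)
{
        if (!dec_and_lock(&clp->refcount, clp->lock))
                return;                 /* others still hold references */
        if (clp->expired)
                printf("last reference dropped: freeing client\n");
        else
                printf("last reference dropped: renewing client\n");
        pthread_mutex_unlock(clp->lock);
}

int main(void)
{
        static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
        struct client clp = { .lock = &lock, .expired = 1 };

        atomic_store(&clp.refcount, 2);
        put_client(&clp);               /* 2 -> 1: nothing happens */
        put_client(&clp);               /* 1 -> 0: cleanup runs    */
        return 0;
}
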
1117 1119
1118 /* must be called under the client_lock */ 1120 /* must be called under the client_lock */
1119 static inline void 1121 static inline void
1120 unhash_client_locked(struct nfs4_client *clp) 1122 unhash_client_locked(struct nfs4_client *clp)
1121 { 1123 {
1122 struct nfsd4_session *ses; 1124 struct nfsd4_session *ses;
1123 1125
1124 mark_client_expired(clp); 1126 mark_client_expired(clp);
1125 list_del(&clp->cl_lru); 1127 list_del(&clp->cl_lru);
1126 spin_lock(&clp->cl_lock); 1128 spin_lock(&clp->cl_lock);
1127 list_for_each_entry(ses, &clp->cl_sessions, se_perclnt) 1129 list_for_each_entry(ses, &clp->cl_sessions, se_perclnt)
1128 list_del_init(&ses->se_hash); 1130 list_del_init(&ses->se_hash);
1129 spin_unlock(&clp->cl_lock); 1131 spin_unlock(&clp->cl_lock);
1130 } 1132 }
1131 1133
1132 static void 1134 static void
1133 expire_client(struct nfs4_client *clp) 1135 expire_client(struct nfs4_client *clp)
1134 { 1136 {
1135 struct nfs4_openowner *oo; 1137 struct nfs4_openowner *oo;
1136 struct nfs4_delegation *dp; 1138 struct nfs4_delegation *dp;
1137 struct list_head reaplist; 1139 struct list_head reaplist;
1138 1140
1139 INIT_LIST_HEAD(&reaplist); 1141 INIT_LIST_HEAD(&reaplist);
1140 spin_lock(&recall_lock); 1142 spin_lock(&recall_lock);
1141 while (!list_empty(&clp->cl_delegations)) { 1143 while (!list_empty(&clp->cl_delegations)) {
1142 dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt); 1144 dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt);
1143 list_del_init(&dp->dl_perclnt); 1145 list_del_init(&dp->dl_perclnt);
1144 list_move(&dp->dl_recall_lru, &reaplist); 1146 list_move(&dp->dl_recall_lru, &reaplist);
1145 } 1147 }
1146 spin_unlock(&recall_lock); 1148 spin_unlock(&recall_lock);
1147 while (!list_empty(&reaplist)) { 1149 while (!list_empty(&reaplist)) {
1148 dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru); 1150 dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
1149 unhash_delegation(dp); 1151 unhash_delegation(dp);
1150 } 1152 }
1151 while (!list_empty(&clp->cl_openowners)) { 1153 while (!list_empty(&clp->cl_openowners)) {
1152 oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient); 1154 oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient);
1153 release_openowner(oo); 1155 release_openowner(oo);
1154 } 1156 }
1155 nfsd4_shutdown_callback(clp); 1157 nfsd4_shutdown_callback(clp);
1156 if (clp->cl_cb_conn.cb_xprt) 1158 if (clp->cl_cb_conn.cb_xprt)
1157 svc_xprt_put(clp->cl_cb_conn.cb_xprt); 1159 svc_xprt_put(clp->cl_cb_conn.cb_xprt);
1158 list_del(&clp->cl_idhash); 1160 list_del(&clp->cl_idhash);
1159 list_del(&clp->cl_strhash); 1161 list_del(&clp->cl_strhash);
1160 spin_lock(&client_lock); 1162 spin_lock(&client_lock);
1161 unhash_client_locked(clp); 1163 unhash_client_locked(clp);
1162 if (atomic_read(&clp->cl_refcount) == 0) 1164 if (atomic_read(&clp->cl_refcount) == 0)
1163 free_client(clp); 1165 free_client(clp);
1164 spin_unlock(&client_lock); 1166 spin_unlock(&client_lock);
1165 } 1167 }
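
Worth noting as an aside (not commit code): expire_client() only moves each delegation onto a private reaplist while recall_lock is held, then drops the lock and does the heavier unhash_delegation() work on the reaplist afterwards. A toy user-space version of that move-then-process idiom, with invented names:

#include <pthread.h>
#include <stdio.h>

struct node {
        int id;
        struct node *next;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *active;     /* protected by 'lock' */

/* Move every node to a private reap list while holding the lock,
 * then do the (potentially slow) teardown with the lock dropped. */
static void expire_all(void)
{
        struct node *reaplist;

        pthread_mutex_lock(&lock);
        reaplist = active;
        active = NULL;
        pthread_mutex_unlock(&lock);

        while (reaplist) {
                struct node *n = reaplist;
                reaplist = n->next;
                printf("tearing down node %d outside the lock\n", n->id);
        }
}

int main(void)
{
        static struct node n2 = { 2, NULL };
        static struct node n1 = { 1, &n2 };

        active = &n1;
        expire_all();
        return 0;
}
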
1166 1168
1167 static void copy_verf(struct nfs4_client *target, nfs4_verifier *source) 1169 static void copy_verf(struct nfs4_client *target, nfs4_verifier *source)
1168 { 1170 {
1169 memcpy(target->cl_verifier.data, source->data, 1171 memcpy(target->cl_verifier.data, source->data,
1170 sizeof(target->cl_verifier.data)); 1172 sizeof(target->cl_verifier.data));
1171 } 1173 }
1172 1174
1173 static void copy_clid(struct nfs4_client *target, struct nfs4_client *source) 1175 static void copy_clid(struct nfs4_client *target, struct nfs4_client *source)
1174 { 1176 {
1175 target->cl_clientid.cl_boot = source->cl_clientid.cl_boot; 1177 target->cl_clientid.cl_boot = source->cl_clientid.cl_boot;
1176 target->cl_clientid.cl_id = source->cl_clientid.cl_id; 1178 target->cl_clientid.cl_id = source->cl_clientid.cl_id;
1177 } 1179 }
1178 1180
1179 static int copy_cred(struct svc_cred *target, struct svc_cred *source) 1181 static int copy_cred(struct svc_cred *target, struct svc_cred *source)
1180 { 1182 {
1181 if (source->cr_principal) { 1183 if (source->cr_principal) {
1182 target->cr_principal = 1184 target->cr_principal =
1183 kstrdup(source->cr_principal, GFP_KERNEL); 1185 kstrdup(source->cr_principal, GFP_KERNEL);
1184 if (target->cr_principal == NULL) 1186 if (target->cr_principal == NULL)
1185 return -ENOMEM; 1187 return -ENOMEM;
1186 } else 1188 } else
1187 target->cr_principal = NULL; 1189 target->cr_principal = NULL;
1188 target->cr_flavor = source->cr_flavor; 1190 target->cr_flavor = source->cr_flavor;
1189 target->cr_uid = source->cr_uid; 1191 target->cr_uid = source->cr_uid;
1190 target->cr_gid = source->cr_gid; 1192 target->cr_gid = source->cr_gid;
1191 target->cr_group_info = source->cr_group_info; 1193 target->cr_group_info = source->cr_group_info;
1192 get_group_info(target->cr_group_info); 1194 get_group_info(target->cr_group_info);
1193 return 0; 1195 return 0;
1194 } 1196 }
1195 1197
1196 static int same_name(const char *n1, const char *n2) 1198 static int same_name(const char *n1, const char *n2)
1197 { 1199 {
1198 return 0 == memcmp(n1, n2, HEXDIR_LEN); 1200 return 0 == memcmp(n1, n2, HEXDIR_LEN);
1199 } 1201 }
1200 1202
1201 static int 1203 static int
1202 same_verf(nfs4_verifier *v1, nfs4_verifier *v2) 1204 same_verf(nfs4_verifier *v1, nfs4_verifier *v2)
1203 { 1205 {
1204 return 0 == memcmp(v1->data, v2->data, sizeof(v1->data)); 1206 return 0 == memcmp(v1->data, v2->data, sizeof(v1->data));
1205 } 1207 }
1206 1208
1207 static int 1209 static int
1208 same_clid(clientid_t *cl1, clientid_t *cl2) 1210 same_clid(clientid_t *cl1, clientid_t *cl2)
1209 { 1211 {
1210 return (cl1->cl_boot == cl2->cl_boot) && (cl1->cl_id == cl2->cl_id); 1212 return (cl1->cl_boot == cl2->cl_boot) && (cl1->cl_id == cl2->cl_id);
1211 } 1213 }
1212 1214
1213 static bool groups_equal(struct group_info *g1, struct group_info *g2) 1215 static bool groups_equal(struct group_info *g1, struct group_info *g2)
1214 { 1216 {
1215 int i; 1217 int i;
1216 1218
1217 if (g1->ngroups != g2->ngroups) 1219 if (g1->ngroups != g2->ngroups)
1218 return false; 1220 return false;
1219 for (i=0; i<g1->ngroups; i++) 1221 for (i=0; i<g1->ngroups; i++)
1220 if (GROUP_AT(g1, i) != GROUP_AT(g2, i)) 1222 if (GROUP_AT(g1, i) != GROUP_AT(g2, i))
1221 return false; 1223 return false;
1222 return true; 1224 return true;
1223 } 1225 }
1224 1226
1225 static bool 1227 static bool
1226 same_creds(struct svc_cred *cr1, struct svc_cred *cr2) 1228 same_creds(struct svc_cred *cr1, struct svc_cred *cr2)
1227 { 1229 {
1228 if ((cr1->cr_flavor != cr2->cr_flavor) 1230 if ((cr1->cr_flavor != cr2->cr_flavor)
1229 || (cr1->cr_uid != cr2->cr_uid) 1231 || (cr1->cr_uid != cr2->cr_uid)
1230 || (cr1->cr_gid != cr2->cr_gid) 1232 || (cr1->cr_gid != cr2->cr_gid)
1231 || !groups_equal(cr1->cr_group_info, cr2->cr_group_info)) 1233 || !groups_equal(cr1->cr_group_info, cr2->cr_group_info))
1232 return false; 1234 return false;
1233 if (cr1->cr_principal == cr2->cr_principal) 1235 if (cr1->cr_principal == cr2->cr_principal)
1234 return true; 1236 return true;
1235 if (!cr1->cr_principal || !cr2->cr_principal) 1237 if (!cr1->cr_principal || !cr2->cr_principal)
1236 return false; 1238 return false;
1237 return 0 == strcmp(cr1->cr_principal, cr2->cr_principal); 1239 return 0 == strcmp(cr1->cr_principal, cr2->cr_principal);
1238 } 1240 }
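
For illustration (not from this patch): same_creds() requires flavor, uid, gid, the whole group vector, and, when both sides have one, the principal string to match; a credential with a principal never matches one without. A stand-alone sketch of the same comparison rules, with simplified field and type names:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct cred {
        int flavor;
        unsigned int uid, gid;
        int ngroups;
        const unsigned int *groups;     /* compared positionally */
        const char *principal;          /* may be NULL (e.g. AUTH_SYS) */
};

static bool groups_equal(const struct cred *a, const struct cred *b)
{
        if (a->ngroups != b->ngroups)
                return false;
        for (int i = 0; i < a->ngroups; i++)
                if (a->groups[i] != b->groups[i])
                        return false;
        return true;
}

static bool same_creds(const struct cred *a, const struct cred *b)
{
        if (a->flavor != b->flavor || a->uid != b->uid ||
            a->gid != b->gid || !groups_equal(a, b))
                return false;
        if (a->principal == b->principal)       /* both NULL, or same pointer */
                return true;
        if (!a->principal || !b->principal)     /* only one has a principal */
                return false;
        return strcmp(a->principal, b->principal) == 0;
}

int main(void)
{
        unsigned int g[] = { 100, 101 };
        struct cred c1 = { 1, 1000, 1000, 2, g, "alice@EXAMPLE" };
        struct cred c2 = { 1, 1000, 1000, 2, g, NULL };

        printf("c1 == c1: %d\n", same_creds(&c1, &c1));  /* 1 */
        printf("c1 == c2: %d\n", same_creds(&c1, &c2));  /* 0: one lacks a principal */
        return 0;
}
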
1239 1241
1240 static void gen_clid(struct nfs4_client *clp) 1242 static void gen_clid(struct nfs4_client *clp)
1241 { 1243 {
1242 static u32 current_clientid = 1; 1244 static u32 current_clientid = 1;
1243 1245
1244 clp->cl_clientid.cl_boot = boot_time; 1246 clp->cl_clientid.cl_boot = boot_time;
1245 clp->cl_clientid.cl_id = current_clientid++; 1247 clp->cl_clientid.cl_id = current_clientid++;
1246 } 1248 }
1247 1249
1248 static void gen_confirm(struct nfs4_client *clp) 1250 static void gen_confirm(struct nfs4_client *clp)
1249 { 1251 {
1250 __be32 verf[2]; 1252 __be32 verf[2];
1251 static u32 i; 1253 static u32 i;
1252 1254
1253 verf[0] = (__be32)get_seconds(); 1255 verf[0] = (__be32)get_seconds();
1254 verf[1] = (__be32)i++; 1256 verf[1] = (__be32)i++;
1255 memcpy(clp->cl_confirm.data, verf, sizeof(clp->cl_confirm.data)); 1257 memcpy(clp->cl_confirm.data, verf, sizeof(clp->cl_confirm.data));
1256 } 1258 }
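
A brief aside, not part of the commit: gen_clid() and gen_confirm() above both derive their values from the current time plus a counter. The clientid carries the boot epoch (which is exactly what STALE_CLIENTID() tests), and the confirm verifier mixes in a counter so that two verifiers minted within the same second still differ. A compact sketch of both constructions, with assumed names:

#include <stdio.h>
#include <string.h>
#include <time.h>

struct clientid { unsigned int cl_boot, cl_id; };

static unsigned int boot_epoch;
static unsigned int next_clientid = 1;  /* per-boot clientid counter */
static unsigned int confirm_counter;    /* disambiguates same-second verifiers */

static struct clientid gen_clid(void)
{
        struct clientid clid = { boot_epoch, next_clientid++ };
        return clid;
}

static void gen_confirm(unsigned char confirm[8])
{
        unsigned int verf[2];

        verf[0] = (unsigned int)time(NULL);
        verf[1] = confirm_counter++;
        memcpy(confirm, verf, sizeof(verf));
}

int main(void)
{
        unsigned char confirm[8];

        boot_epoch = (unsigned int)time(NULL);
        struct clientid clid = gen_clid();
        gen_confirm(confirm);
        printf("clientid %08x/%08x, confirm starts %02x%02x\n",
               clid.cl_boot, clid.cl_id, confirm[0], confirm[1]);
        return 0;
}
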
1257 1259
1258 static struct nfs4_stid *find_stateid(struct nfs4_client *cl, stateid_t *t) 1260 static struct nfs4_stid *find_stateid(struct nfs4_client *cl, stateid_t *t)
1259 { 1261 {
1260 return idr_find(&cl->cl_stateids, t->si_opaque.so_id); 1262 return idr_find(&cl->cl_stateids, t->si_opaque.so_id);
1261 } 1263 }
1262 1264
1263 static struct nfs4_stid *find_stateid_by_type(struct nfs4_client *cl, stateid_t *t, char typemask) 1265 static struct nfs4_stid *find_stateid_by_type(struct nfs4_client *cl, stateid_t *t, char typemask)
1264 { 1266 {
1265 struct nfs4_stid *s; 1267 struct nfs4_stid *s;
1266 1268
1267 s = find_stateid(cl, t); 1269 s = find_stateid(cl, t);
1268 if (!s) 1270 if (!s)
1269 return NULL; 1271 return NULL;
1270 if (typemask & s->sc_type) 1272 if (typemask & s->sc_type)
1271 return s; 1273 return s;
1272 return NULL; 1274 return NULL;
1273 } 1275 }
1274 1276
1275 static struct nfs4_client *create_client(struct xdr_netobj name, char *recdir, 1277 static struct nfs4_client *create_client(struct xdr_netobj name, char *recdir,
1276 struct svc_rqst *rqstp, nfs4_verifier *verf) 1278 struct svc_rqst *rqstp, nfs4_verifier *verf)
1277 { 1279 {
1278 struct nfs4_client *clp; 1280 struct nfs4_client *clp;
1279 struct sockaddr *sa = svc_addr(rqstp); 1281 struct sockaddr *sa = svc_addr(rqstp);
1280 int ret; 1282 int ret;
1281 1283
1282 clp = alloc_client(name); 1284 clp = alloc_client(name);
1283 if (clp == NULL) 1285 if (clp == NULL)
1284 return NULL; 1286 return NULL;
1285 1287
1286 INIT_LIST_HEAD(&clp->cl_sessions); 1288 INIT_LIST_HEAD(&clp->cl_sessions);
1287 ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred); 1289 ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred);
1288 if (ret) { 1290 if (ret) {
1289 spin_lock(&client_lock); 1291 spin_lock(&client_lock);
1290 free_client(clp); 1292 free_client(clp);
1291 spin_unlock(&client_lock); 1293 spin_unlock(&client_lock);
1292 return NULL; 1294 return NULL;
1293 } 1295 }
1294 idr_init(&clp->cl_stateids); 1296 idr_init(&clp->cl_stateids);
1295 memcpy(clp->cl_recdir, recdir, HEXDIR_LEN); 1297 memcpy(clp->cl_recdir, recdir, HEXDIR_LEN);
1296 atomic_set(&clp->cl_refcount, 0); 1298 atomic_set(&clp->cl_refcount, 0);
1297 clp->cl_cb_state = NFSD4_CB_UNKNOWN; 1299 clp->cl_cb_state = NFSD4_CB_UNKNOWN;
1298 INIT_LIST_HEAD(&clp->cl_idhash); 1300 INIT_LIST_HEAD(&clp->cl_idhash);
1299 INIT_LIST_HEAD(&clp->cl_strhash); 1301 INIT_LIST_HEAD(&clp->cl_strhash);
1300 INIT_LIST_HEAD(&clp->cl_openowners); 1302 INIT_LIST_HEAD(&clp->cl_openowners);
1301 INIT_LIST_HEAD(&clp->cl_delegations); 1303 INIT_LIST_HEAD(&clp->cl_delegations);
1302 INIT_LIST_HEAD(&clp->cl_lru); 1304 INIT_LIST_HEAD(&clp->cl_lru);
1303 INIT_LIST_HEAD(&clp->cl_callbacks); 1305 INIT_LIST_HEAD(&clp->cl_callbacks);
1304 spin_lock_init(&clp->cl_lock); 1306 spin_lock_init(&clp->cl_lock);
1305 INIT_WORK(&clp->cl_cb_null.cb_work, nfsd4_do_callback_rpc); 1307 INIT_WORK(&clp->cl_cb_null.cb_work, nfsd4_do_callback_rpc);
1306 clp->cl_time = get_seconds(); 1308 clp->cl_time = get_seconds();
1307 clear_bit(0, &clp->cl_cb_slot_busy); 1309 clear_bit(0, &clp->cl_cb_slot_busy);
1308 rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table"); 1310 rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
1309 copy_verf(clp, verf); 1311 copy_verf(clp, verf);
1310 rpc_copy_addr((struct sockaddr *) &clp->cl_addr, sa); 1312 rpc_copy_addr((struct sockaddr *) &clp->cl_addr, sa);
1311 gen_confirm(clp); 1313 gen_confirm(clp);
1312 clp->cl_cb_session = NULL; 1314 clp->cl_cb_session = NULL;
1313 return clp; 1315 return clp;
1314 } 1316 }
1315 1317
1316 static void 1318 static void
1317 add_to_unconfirmed(struct nfs4_client *clp, unsigned int strhashval) 1319 add_to_unconfirmed(struct nfs4_client *clp, unsigned int strhashval)
1318 { 1320 {
1319 unsigned int idhashval; 1321 unsigned int idhashval;
1320 1322
1321 list_add(&clp->cl_strhash, &unconf_str_hashtbl[strhashval]); 1323 list_add(&clp->cl_strhash, &unconf_str_hashtbl[strhashval]);
1322 idhashval = clientid_hashval(clp->cl_clientid.cl_id); 1324 idhashval = clientid_hashval(clp->cl_clientid.cl_id);
1323 list_add(&clp->cl_idhash, &unconf_id_hashtbl[idhashval]); 1325 list_add(&clp->cl_idhash, &unconf_id_hashtbl[idhashval]);
1324 renew_client(clp); 1326 renew_client(clp);
1325 } 1327 }
1326 1328
1327 static void 1329 static void
1328 move_to_confirmed(struct nfs4_client *clp) 1330 move_to_confirmed(struct nfs4_client *clp)
1329 { 1331 {
1330 unsigned int idhashval = clientid_hashval(clp->cl_clientid.cl_id); 1332 unsigned int idhashval = clientid_hashval(clp->cl_clientid.cl_id);
1331 unsigned int strhashval; 1333 unsigned int strhashval;
1332 1334
1333 dprintk("NFSD: move_to_confirm nfs4_client %p\n", clp); 1335 dprintk("NFSD: move_to_confirm nfs4_client %p\n", clp);
1334 list_move(&clp->cl_idhash, &conf_id_hashtbl[idhashval]); 1336 list_move(&clp->cl_idhash, &conf_id_hashtbl[idhashval]);
1335 strhashval = clientstr_hashval(clp->cl_recdir); 1337 strhashval = clientstr_hashval(clp->cl_recdir);
1336 list_move(&clp->cl_strhash, &conf_str_hashtbl[strhashval]); 1338 list_move(&clp->cl_strhash, &conf_str_hashtbl[strhashval]);
1337 renew_client(clp); 1339 renew_client(clp);
1338 } 1340 }
1339 1341
1340 static struct nfs4_client * 1342 static struct nfs4_client *
1341 find_confirmed_client(clientid_t *clid) 1343 find_confirmed_client(clientid_t *clid)
1342 { 1344 {
1343 struct nfs4_client *clp; 1345 struct nfs4_client *clp;
1344 unsigned int idhashval = clientid_hashval(clid->cl_id); 1346 unsigned int idhashval = clientid_hashval(clid->cl_id);
1345 1347
1346 list_for_each_entry(clp, &conf_id_hashtbl[idhashval], cl_idhash) { 1348 list_for_each_entry(clp, &conf_id_hashtbl[idhashval], cl_idhash) {
1347 if (same_clid(&clp->cl_clientid, clid)) { 1349 if (same_clid(&clp->cl_clientid, clid)) {
1348 renew_client(clp); 1350 renew_client(clp);
1349 return clp; 1351 return clp;
1350 } 1352 }
1351 } 1353 }
1352 return NULL; 1354 return NULL;
1353 } 1355 }
1354 1356
1355 static struct nfs4_client * 1357 static struct nfs4_client *
1356 find_unconfirmed_client(clientid_t *clid) 1358 find_unconfirmed_client(clientid_t *clid)
1357 { 1359 {
1358 struct nfs4_client *clp; 1360 struct nfs4_client *clp;
1359 unsigned int idhashval = clientid_hashval(clid->cl_id); 1361 unsigned int idhashval = clientid_hashval(clid->cl_id);
1360 1362
1361 list_for_each_entry(clp, &unconf_id_hashtbl[idhashval], cl_idhash) { 1363 list_for_each_entry(clp, &unconf_id_hashtbl[idhashval], cl_idhash) {
1362 if (same_clid(&clp->cl_clientid, clid)) 1364 if (same_clid(&clp->cl_clientid, clid))
1363 return clp; 1365 return clp;
1364 } 1366 }
1365 return NULL; 1367 return NULL;
1366 } 1368 }
1367 1369
1368 static bool clp_used_exchangeid(struct nfs4_client *clp) 1370 static bool clp_used_exchangeid(struct nfs4_client *clp)
1369 { 1371 {
1370 return clp->cl_exchange_flags != 0; 1372 return clp->cl_exchange_flags != 0;
1371 } 1373 }
1372 1374
1373 static struct nfs4_client * 1375 static struct nfs4_client *
1374 find_confirmed_client_by_str(const char *dname, unsigned int hashval) 1376 find_confirmed_client_by_str(const char *dname, unsigned int hashval)
1375 { 1377 {
1376 struct nfs4_client *clp; 1378 struct nfs4_client *clp;
1377 1379
1378 list_for_each_entry(clp, &conf_str_hashtbl[hashval], cl_strhash) { 1380 list_for_each_entry(clp, &conf_str_hashtbl[hashval], cl_strhash) {
1379 if (same_name(clp->cl_recdir, dname)) 1381 if (same_name(clp->cl_recdir, dname))
1380 return clp; 1382 return clp;
1381 } 1383 }
1382 return NULL; 1384 return NULL;
1383 } 1385 }
1384 1386
1385 static struct nfs4_client * 1387 static struct nfs4_client *
1386 find_unconfirmed_client_by_str(const char *dname, unsigned int hashval) 1388 find_unconfirmed_client_by_str(const char *dname, unsigned int hashval)
1387 { 1389 {
1388 struct nfs4_client *clp; 1390 struct nfs4_client *clp;
1389 1391
1390 list_for_each_entry(clp, &unconf_str_hashtbl[hashval], cl_strhash) { 1392 list_for_each_entry(clp, &unconf_str_hashtbl[hashval], cl_strhash) {
1391 if (same_name(clp->cl_recdir, dname)) 1393 if (same_name(clp->cl_recdir, dname))
1392 return clp; 1394 return clp;
1393 } 1395 }
1394 return NULL; 1396 return NULL;
1395 } 1397 }
1396 1398
1397 static void 1399 static void
1398 gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_rqst *rqstp) 1400 gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_rqst *rqstp)
1399 { 1401 {
1400 struct nfs4_cb_conn *conn = &clp->cl_cb_conn; 1402 struct nfs4_cb_conn *conn = &clp->cl_cb_conn;
1401 struct sockaddr *sa = svc_addr(rqstp); 1403 struct sockaddr *sa = svc_addr(rqstp);
1402 u32 scopeid = rpc_get_scope_id(sa); 1404 u32 scopeid = rpc_get_scope_id(sa);
1403 unsigned short expected_family; 1405 unsigned short expected_family;
1404 1406
1405 /* Currently, we only support tcp and tcp6 for the callback channel */ 1407 /* Currently, we only support tcp and tcp6 for the callback channel */
1406 if (se->se_callback_netid_len == 3 && 1408 if (se->se_callback_netid_len == 3 &&
1407 !memcmp(se->se_callback_netid_val, "tcp", 3)) 1409 !memcmp(se->se_callback_netid_val, "tcp", 3))
1408 expected_family = AF_INET; 1410 expected_family = AF_INET;
1409 else if (se->se_callback_netid_len == 4 && 1411 else if (se->se_callback_netid_len == 4 &&
1410 !memcmp(se->se_callback_netid_val, "tcp6", 4)) 1412 !memcmp(se->se_callback_netid_val, "tcp6", 4))
1411 expected_family = AF_INET6; 1413 expected_family = AF_INET6;
1412 else 1414 else
1413 goto out_err; 1415 goto out_err;
1414 1416
1415 conn->cb_addrlen = rpc_uaddr2sockaddr(&init_net, se->se_callback_addr_val, 1417 conn->cb_addrlen = rpc_uaddr2sockaddr(&init_net, se->se_callback_addr_val,
1416 se->se_callback_addr_len, 1418 se->se_callback_addr_len,
1417 (struct sockaddr *)&conn->cb_addr, 1419 (struct sockaddr *)&conn->cb_addr,
1418 sizeof(conn->cb_addr)); 1420 sizeof(conn->cb_addr));
1419 1421
1420 if (!conn->cb_addrlen || conn->cb_addr.ss_family != expected_family) 1422 if (!conn->cb_addrlen || conn->cb_addr.ss_family != expected_family)
1421 goto out_err; 1423 goto out_err;
1422 1424
1423 if (conn->cb_addr.ss_family == AF_INET6) 1425 if (conn->cb_addr.ss_family == AF_INET6)
1424 ((struct sockaddr_in6 *)&conn->cb_addr)->sin6_scope_id = scopeid; 1426 ((struct sockaddr_in6 *)&conn->cb_addr)->sin6_scope_id = scopeid;
1425 1427
1426 conn->cb_prog = se->se_callback_prog; 1428 conn->cb_prog = se->se_callback_prog;
1427 conn->cb_ident = se->se_callback_ident; 1429 conn->cb_ident = se->se_callback_ident;
1428 memcpy(&conn->cb_saddr, &rqstp->rq_daddr, rqstp->rq_daddrlen); 1430 memcpy(&conn->cb_saddr, &rqstp->rq_daddr, rqstp->rq_daddrlen);
1429 return; 1431 return;
1430 out_err: 1432 out_err:
1431 conn->cb_addr.ss_family = AF_UNSPEC; 1433 conn->cb_addr.ss_family = AF_UNSPEC;
1432 conn->cb_addrlen = 0; 1434 conn->cb_addrlen = 0;
1433 dprintk(KERN_INFO "NFSD: this client (clientid %08x/%08x) " 1435 dprintk(KERN_INFO "NFSD: this client (clientid %08x/%08x) "
1434 "will not receive delegations\n", 1436 "will not receive delegations\n",
1435 clp->cl_clientid.cl_boot, clp->cl_clientid.cl_id); 1437 clp->cl_clientid.cl_boot, clp->cl_clientid.cl_id);
1436 1438
1437 return; 1439 return;
1438 } 1440 }
1439 1441
1440 /* 1442 /*
1441 * Cache a reply. nfsd4_check_drc_limit() has bounded the cache size. 1443 * Cache a reply. nfsd4_check_drc_limit() has bounded the cache size.
1442 */ 1444 */
1443 void 1445 void
1444 nfsd4_store_cache_entry(struct nfsd4_compoundres *resp) 1446 nfsd4_store_cache_entry(struct nfsd4_compoundres *resp)
1445 { 1447 {
1446 struct nfsd4_slot *slot = resp->cstate.slot; 1448 struct nfsd4_slot *slot = resp->cstate.slot;
1447 unsigned int base; 1449 unsigned int base;
1448 1450
1449 dprintk("--> %s slot %p\n", __func__, slot); 1451 dprintk("--> %s slot %p\n", __func__, slot);
1450 1452
1451 slot->sl_opcnt = resp->opcnt; 1453 slot->sl_opcnt = resp->opcnt;
1452 slot->sl_status = resp->cstate.status; 1454 slot->sl_status = resp->cstate.status;
1453 1455
1454 slot->sl_flags |= NFSD4_SLOT_INITIALIZED; 1456 slot->sl_flags |= NFSD4_SLOT_INITIALIZED;
1455 if (nfsd4_not_cached(resp)) { 1457 if (nfsd4_not_cached(resp)) {
1456 slot->sl_datalen = 0; 1458 slot->sl_datalen = 0;
1457 return; 1459 return;
1458 } 1460 }
1459 slot->sl_datalen = (char *)resp->p - (char *)resp->cstate.datap; 1461 slot->sl_datalen = (char *)resp->p - (char *)resp->cstate.datap;
1460 base = (char *)resp->cstate.datap - 1462 base = (char *)resp->cstate.datap -
1461 (char *)resp->xbuf->head[0].iov_base; 1463 (char *)resp->xbuf->head[0].iov_base;
1462 if (read_bytes_from_xdr_buf(resp->xbuf, base, slot->sl_data, 1464 if (read_bytes_from_xdr_buf(resp->xbuf, base, slot->sl_data,
1463 slot->sl_datalen)) 1465 slot->sl_datalen))
1464 WARN(1, "%s: sessions DRC could not cache compound\n", __func__); 1466 WARN(1, "%s: sessions DRC could not cache compound\n", __func__);
1465 return; 1467 return;
1466 } 1468 }
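
As an illustrative aside (not kernel code): nfsd4_store_cache_entry() caches everything encoded after the SEQUENCE reply. sl_datalen is the distance from cstate.datap to the current write pointer, and base is that span's offset within the reply buffer. A flat-buffer sketch of the same bookkeeping follows; the names and the fixed-size cache are assumptions, not the kernel xdr_buf API:

#include <stdio.h>
#include <string.h>

#define SLOT_CACHE_MAX 128

struct slot {
        size_t datalen;
        unsigned char data[SLOT_CACHE_MAX];
};

/* Cache everything encoded after 'datap' (the byte just past the
 * SEQUENCE reply) up to the current write pointer 'p'. */
static int cache_reply(struct slot *slot,
                       const unsigned char *buf_head,
                       const unsigned char *datap,
                       const unsigned char *p)
{
        size_t datalen = (size_t)(p - datap);
        size_t base = (size_t)(datap - buf_head); /* offset of the cached span */

        if (datalen > SLOT_CACHE_MAX)
                return -1;      /* the real code bounds this via maxresp_cached */
        memcpy(slot->data, buf_head + base, datalen);
        slot->datalen = datalen;
        return 0;
}

int main(void)
{
        unsigned char reply[64] = "SEQUENCE....PUTFH....GETATTR....";
        struct slot slot;

        /* Pretend the SEQUENCE reply ends at offset 12 and encoding
         * stopped at offset 32. */
        if (cache_reply(&slot, reply, reply + 12, reply + 32) == 0)
                printf("cached %zu bytes: %.20s\n",
                       slot.datalen, (const char *)slot.data);
        return 0;
}
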
1467 1469
1468 /* 1470 /*
1469 * Encode the replay sequence operation from the slot values. 1471 * Encode the replay sequence operation from the slot values.
1470 * If cachethis is FALSE encode the uncached rep error on the next 1472 * If cachethis is FALSE encode the uncached rep error on the next
1471 * operation which sets resp->p and increments resp->opcnt for 1473 * operation which sets resp->p and increments resp->opcnt for
1472 * nfs4svc_encode_compoundres. 1474 * nfs4svc_encode_compoundres.
1473 * 1475 *
1474 */ 1476 */
1475 static __be32 1477 static __be32
1476 nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args, 1478 nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args,
1477 struct nfsd4_compoundres *resp) 1479 struct nfsd4_compoundres *resp)
1478 { 1480 {
1479 struct nfsd4_op *op; 1481 struct nfsd4_op *op;
1480 struct nfsd4_slot *slot = resp->cstate.slot; 1482 struct nfsd4_slot *slot = resp->cstate.slot;
1481 1483
1482 /* Encode the replayed sequence operation */ 1484 /* Encode the replayed sequence operation */
1483 op = &args->ops[resp->opcnt - 1]; 1485 op = &args->ops[resp->opcnt - 1];
1484 nfsd4_encode_operation(resp, op); 1486 nfsd4_encode_operation(resp, op);
1485 1487
1486 /* Return nfserr_retry_uncached_rep in next operation. */ 1488 /* Return nfserr_retry_uncached_rep in next operation. */
1487 if (args->opcnt > 1 && !(slot->sl_flags & NFSD4_SLOT_CACHETHIS)) { 1489 if (args->opcnt > 1 && !(slot->sl_flags & NFSD4_SLOT_CACHETHIS)) {
1488 op = &args->ops[resp->opcnt++]; 1490 op = &args->ops[resp->opcnt++];
1489 op->status = nfserr_retry_uncached_rep; 1491 op->status = nfserr_retry_uncached_rep;
1490 nfsd4_encode_operation(resp, op); 1492 nfsd4_encode_operation(resp, op);
1491 } 1493 }
1492 return op->status; 1494 return op->status;
1493 } 1495 }
1494 1496
1495 /* 1497 /*
1496 * The sequence operation is not cached because we can use the slot and 1498 * The sequence operation is not cached because we can use the slot and
1497 * session values. 1499 * session values.
1498 */ 1500 */
1499 __be32 1501 __be32
1500 nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp, 1502 nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp,
1501 struct nfsd4_sequence *seq) 1503 struct nfsd4_sequence *seq)
1502 { 1504 {
1503 struct nfsd4_slot *slot = resp->cstate.slot; 1505 struct nfsd4_slot *slot = resp->cstate.slot;
1504 __be32 status; 1506 __be32 status;
1505 1507
1506 dprintk("--> %s slot %p\n", __func__, slot); 1508 dprintk("--> %s slot %p\n", __func__, slot);
1507 1509
1508 /* Either returns 0 or nfserr_retry_uncached */ 1510 /* Either returns 0 or nfserr_retry_uncached */
1509 status = nfsd4_enc_sequence_replay(resp->rqstp->rq_argp, resp); 1511 status = nfsd4_enc_sequence_replay(resp->rqstp->rq_argp, resp);
1510 if (status == nfserr_retry_uncached_rep) 1512 if (status == nfserr_retry_uncached_rep)
1511 return status; 1513 return status;
1512 1514
1513 /* The sequence operation has been encoded, cstate->datap set. */ 1515 /* The sequence operation has been encoded, cstate->datap set. */
1514 memcpy(resp->cstate.datap, slot->sl_data, slot->sl_datalen); 1516 memcpy(resp->cstate.datap, slot->sl_data, slot->sl_datalen);
1515 1517
1516 resp->opcnt = slot->sl_opcnt; 1518 resp->opcnt = slot->sl_opcnt;
1517 resp->p = resp->cstate.datap + XDR_QUADLEN(slot->sl_datalen); 1519 resp->p = resp->cstate.datap + XDR_QUADLEN(slot->sl_datalen);
1518 status = slot->sl_status; 1520 status = slot->sl_status;
1519 1521
1520 return status; 1522 return status;
1521 } 1523 }
1522 1524
1523 /* 1525 /*
1524 * Set the exchange_id flags returned by the server. 1526 * Set the exchange_id flags returned by the server.
1525 */ 1527 */
1526 static void 1528 static void
1527 nfsd4_set_ex_flags(struct nfs4_client *new, struct nfsd4_exchange_id *clid) 1529 nfsd4_set_ex_flags(struct nfs4_client *new, struct nfsd4_exchange_id *clid)
1528 { 1530 {
1529 /* pNFS is not supported */ 1531 /* pNFS is not supported */
1530 new->cl_exchange_flags |= EXCHGID4_FLAG_USE_NON_PNFS; 1532 new->cl_exchange_flags |= EXCHGID4_FLAG_USE_NON_PNFS;
1531 1533
1532 /* Referrals are supported, Migration is not. */ 1534 /* Referrals are supported, Migration is not. */
1533 new->cl_exchange_flags |= EXCHGID4_FLAG_SUPP_MOVED_REFER; 1535 new->cl_exchange_flags |= EXCHGID4_FLAG_SUPP_MOVED_REFER;
1534 1536
1535 /* set the wire flags to return to client. */ 1537 /* set the wire flags to return to client. */
1536 clid->flags = new->cl_exchange_flags; 1538 clid->flags = new->cl_exchange_flags;
1537 } 1539 }
1538 1540
1539 static bool client_has_state(struct nfs4_client *clp) 1541 static bool client_has_state(struct nfs4_client *clp)
1540 { 1542 {
1541 /* 1543 /*
1542 * Note clp->cl_openowners check isn't quite right: there's no 1544 * Note clp->cl_openowners check isn't quite right: there's no
1543 * need to count owners without stateid's. 1545 * need to count owners without stateid's.
1544 * 1546 *
1545 * Also note we should probably be using this in 4.0 case too. 1547 * Also note we should probably be using this in 4.0 case too.
1546 */ 1548 */
1547 return !list_empty(&clp->cl_openowners) 1549 return !list_empty(&clp->cl_openowners)
1548 || !list_empty(&clp->cl_delegations) 1550 || !list_empty(&clp->cl_delegations)
1549 || !list_empty(&clp->cl_sessions); 1551 || !list_empty(&clp->cl_sessions);
1550 } 1552 }
1551 1553
1552 __be32 1554 __be32
1553 nfsd4_exchange_id(struct svc_rqst *rqstp, 1555 nfsd4_exchange_id(struct svc_rqst *rqstp,
1554 struct nfsd4_compound_state *cstate, 1556 struct nfsd4_compound_state *cstate,
1555 struct nfsd4_exchange_id *exid) 1557 struct nfsd4_exchange_id *exid)
1556 { 1558 {
1557 struct nfs4_client *unconf, *conf, *new; 1559 struct nfs4_client *unconf, *conf, *new;
1558 __be32 status; 1560 __be32 status;
1559 unsigned int strhashval; 1561 unsigned int strhashval;
1560 char dname[HEXDIR_LEN]; 1562 char dname[HEXDIR_LEN];
1561 char addr_str[INET6_ADDRSTRLEN]; 1563 char addr_str[INET6_ADDRSTRLEN];
1562 nfs4_verifier verf = exid->verifier; 1564 nfs4_verifier verf = exid->verifier;
1563 struct sockaddr *sa = svc_addr(rqstp); 1565 struct sockaddr *sa = svc_addr(rqstp);
1564 bool update = exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A; 1566 bool update = exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A;
1565 1567
1566 rpc_ntop(sa, addr_str, sizeof(addr_str)); 1568 rpc_ntop(sa, addr_str, sizeof(addr_str));
1567 dprintk("%s rqstp=%p exid=%p clname.len=%u clname.data=%p " 1569 dprintk("%s rqstp=%p exid=%p clname.len=%u clname.data=%p "
1568 "ip_addr=%s flags %x, spa_how %d\n", 1570 "ip_addr=%s flags %x, spa_how %d\n",
1569 __func__, rqstp, exid, exid->clname.len, exid->clname.data, 1571 __func__, rqstp, exid, exid->clname.len, exid->clname.data,
1570 addr_str, exid->flags, exid->spa_how); 1572 addr_str, exid->flags, exid->spa_how);
1571 1573
1572 if (exid->flags & ~EXCHGID4_FLAG_MASK_A) 1574 if (exid->flags & ~EXCHGID4_FLAG_MASK_A)
1573 return nfserr_inval; 1575 return nfserr_inval;
1574 1576
1575 /* Currently only support SP4_NONE */ 1577 /* Currently only support SP4_NONE */
1576 switch (exid->spa_how) { 1578 switch (exid->spa_how) {
1577 case SP4_NONE: 1579 case SP4_NONE:
1578 break; 1580 break;
1579 case SP4_SSV: 1581 case SP4_SSV:
1580 return nfserr_serverfault; 1582 return nfserr_serverfault;
1581 default: 1583 default:
1582 BUG(); /* checked by xdr code */ 1584 BUG(); /* checked by xdr code */
1583 case SP4_MACH_CRED: 1585 case SP4_MACH_CRED:
1584 return nfserr_serverfault; /* no excuse :-/ */ 1586 return nfserr_serverfault; /* no excuse :-/ */
1585 } 1587 }
1586 1588
1587 status = nfs4_make_rec_clidname(dname, &exid->clname); 1589 status = nfs4_make_rec_clidname(dname, &exid->clname);
1588 1590
1589 if (status) 1591 if (status)
1590 return status; 1592 return status;
1591 1593
1592 strhashval = clientstr_hashval(dname); 1594 strhashval = clientstr_hashval(dname);
1593 1595
1594 /* Cases below refer to rfc 5661 section 18.35.4: */ 1596 /* Cases below refer to rfc 5661 section 18.35.4: */
1595 nfs4_lock_state(); 1597 nfs4_lock_state();
1596 conf = find_confirmed_client_by_str(dname, strhashval); 1598 conf = find_confirmed_client_by_str(dname, strhashval);
1597 if (conf) { 1599 if (conf) {
1598 bool creds_match = same_creds(&conf->cl_cred, &rqstp->rq_cred); 1600 bool creds_match = same_creds(&conf->cl_cred, &rqstp->rq_cred);
1599 bool verfs_match = same_verf(&verf, &conf->cl_verifier); 1601 bool verfs_match = same_verf(&verf, &conf->cl_verifier);
1600 1602
1601 if (update) { 1603 if (update) {
1602 if (!clp_used_exchangeid(conf)) { /* buggy client */ 1604 if (!clp_used_exchangeid(conf)) { /* buggy client */
1603 status = nfserr_inval; 1605 status = nfserr_inval;
1604 goto out; 1606 goto out;
1605 } 1607 }
1606 if (!creds_match) { /* case 9 */ 1608 if (!creds_match) { /* case 9 */
1607 status = nfserr_perm; 1609 status = nfserr_perm;
1608 goto out; 1610 goto out;
1609 } 1611 }
1610 if (!verfs_match) { /* case 8 */ 1612 if (!verfs_match) { /* case 8 */
1611 status = nfserr_not_same; 1613 status = nfserr_not_same;
1612 goto out; 1614 goto out;
1613 } 1615 }
1614 /* case 6 */ 1616 /* case 6 */
1615 exid->flags |= EXCHGID4_FLAG_CONFIRMED_R; 1617 exid->flags |= EXCHGID4_FLAG_CONFIRMED_R;
1616 new = conf; 1618 new = conf;
1617 goto out_copy; 1619 goto out_copy;
1618 } 1620 }
1619 if (!creds_match) { /* case 3 */ 1621 if (!creds_match) { /* case 3 */
1620 if (client_has_state(conf)) { 1622 if (client_has_state(conf)) {
1621 status = nfserr_clid_inuse; 1623 status = nfserr_clid_inuse;
1622 goto out; 1624 goto out;
1623 } 1625 }
1624 expire_client(conf); 1626 expire_client(conf);
1625 goto out_new; 1627 goto out_new;
1626 } 1628 }
1627 if (verfs_match) { /* case 2 */ 1629 if (verfs_match) { /* case 2 */
1628 conf->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R; 1630 conf->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
1629 new = conf; 1631 new = conf;
1630 goto out_copy; 1632 goto out_copy;
1631 } 1633 }
1632 /* case 5, client reboot */ 1634 /* case 5, client reboot */
1633 goto out_new; 1635 goto out_new;
1634 } 1636 }
1635 1637
1636 if (update) { /* case 7 */ 1638 if (update) { /* case 7 */
1637 status = nfserr_noent; 1639 status = nfserr_noent;
1638 goto out; 1640 goto out;
1639 } 1641 }
1640 1642
1641 unconf = find_unconfirmed_client_by_str(dname, strhashval); 1643 unconf = find_unconfirmed_client_by_str(dname, strhashval);
1642 if (unconf) /* case 4, possible retry or client restart */ 1644 if (unconf) /* case 4, possible retry or client restart */
1643 expire_client(unconf); 1645 expire_client(unconf);
1644 1646
1645 /* case 1 (normal case) */ 1647 /* case 1 (normal case) */
1646 out_new: 1648 out_new:
1647 new = create_client(exid->clname, dname, rqstp, &verf); 1649 new = create_client(exid->clname, dname, rqstp, &verf);
1648 if (new == NULL) { 1650 if (new == NULL) {
1649 status = nfserr_jukebox; 1651 status = nfserr_jukebox;
1650 goto out; 1652 goto out;
1651 } 1653 }
1652 1654
1653 gen_clid(new); 1655 gen_clid(new);
1654 add_to_unconfirmed(new, strhashval); 1656 add_to_unconfirmed(new, strhashval);
1655 out_copy: 1657 out_copy:
1656 exid->clientid.cl_boot = new->cl_clientid.cl_boot; 1658 exid->clientid.cl_boot = new->cl_clientid.cl_boot;
1657 exid->clientid.cl_id = new->cl_clientid.cl_id; 1659 exid->clientid.cl_id = new->cl_clientid.cl_id;
1658 1660
1659 exid->seqid = new->cl_cs_slot.sl_seqid + 1; 1661 exid->seqid = new->cl_cs_slot.sl_seqid + 1;
1660 nfsd4_set_ex_flags(new, exid); 1662 nfsd4_set_ex_flags(new, exid);
1661 1663
1662 dprintk("nfsd4_exchange_id seqid %d flags %x\n", 1664 dprintk("nfsd4_exchange_id seqid %d flags %x\n",
1663 new->cl_cs_slot.sl_seqid, new->cl_exchange_flags); 1665 new->cl_cs_slot.sl_seqid, new->cl_exchange_flags);
1664 status = nfs_ok; 1666 status = nfs_ok;
1665 1667
1666 out: 1668 out:
1667 nfs4_unlock_state(); 1669 nfs4_unlock_state();
1668 return status; 1670 return status;
1669 } 1671 }
1670 1672
1671 static __be32 1673 static __be32
1672 check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse) 1674 check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse)
1673 { 1675 {
1674 dprintk("%s enter. seqid %d slot_seqid %d\n", __func__, seqid, 1676 dprintk("%s enter. seqid %d slot_seqid %d\n", __func__, seqid,
1675 slot_seqid); 1677 slot_seqid);
1676 1678
1677 /* The slot is in use, and no response has been sent. */ 1679 /* The slot is in use, and no response has been sent. */
1678 if (slot_inuse) { 1680 if (slot_inuse) {
1679 if (seqid == slot_seqid) 1681 if (seqid == slot_seqid)
1680 return nfserr_jukebox; 1682 return nfserr_jukebox;
1681 else 1683 else
1682 return nfserr_seq_misordered; 1684 return nfserr_seq_misordered;
1683 } 1685 }
1684 /* Note unsigned 32-bit arithmetic handles wraparound: */ 1686 /* Note unsigned 32-bit arithmetic handles wraparound: */
1685 if (likely(seqid == slot_seqid + 1)) 1687 if (likely(seqid == slot_seqid + 1))
1686 return nfs_ok; 1688 return nfs_ok;
1687 if (seqid == slot_seqid) 1689 if (seqid == slot_seqid)
1688 return nfserr_replay_cache; 1690 return nfserr_replay_cache;
1689 return nfserr_seq_misordered; 1691 return nfserr_seq_misordered;
1690 } 1692 }
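
A short demonstration, not part of the patch: the comment in check_slot_seqid() about unsigned 32-bit arithmetic matters at the wrap point. When slot_seqid is 0xffffffff, slot_seqid + 1 wraps to 0, so the next in-order request legitimately carries seqid 0:

#include <stdint.h>
#include <stdio.h>

/* Returns 1 for "next in sequence", 2 for "replay", 0 for misordered. */
static int classify_seqid(uint32_t seqid, uint32_t slot_seqid)
{
        if (seqid == (uint32_t)(slot_seqid + 1))        /* wraps modulo 2^32 */
                return 1;
        if (seqid == slot_seqid)
                return 2;
        return 0;
}

int main(void)
{
        /* Around the wrap point the arithmetic still does the right thing. */
        printf("%d\n", classify_seqid(0x00000000u, 0xffffffffu)); /* 1: in order   */
        printf("%d\n", classify_seqid(0xffffffffu, 0xffffffffu)); /* 2: replay     */
        printf("%d\n", classify_seqid(0x00000001u, 0xffffffffu)); /* 0: misordered */
        return 0;
}
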
1691 1693
1692 /* 1694 /*
1693 * Cache the create session result into the create session single DRC 1695 * Cache the create session result into the create session single DRC
1694 * slot cache by saving the xdr structure. sl_seqid has been set. 1696 * slot cache by saving the xdr structure. sl_seqid has been set.
1695 * Do this for solo or embedded create session operations. 1697 * Do this for solo or embedded create session operations.
1696 */ 1698 */
1697 static void 1699 static void
1698 nfsd4_cache_create_session(struct nfsd4_create_session *cr_ses, 1700 nfsd4_cache_create_session(struct nfsd4_create_session *cr_ses,
1699 struct nfsd4_clid_slot *slot, __be32 nfserr) 1701 struct nfsd4_clid_slot *slot, __be32 nfserr)
1700 { 1702 {
1701 slot->sl_status = nfserr; 1703 slot->sl_status = nfserr;
1702 memcpy(&slot->sl_cr_ses, cr_ses, sizeof(*cr_ses)); 1704 memcpy(&slot->sl_cr_ses, cr_ses, sizeof(*cr_ses));
1703 } 1705 }
1704 1706
1705 static __be32 1707 static __be32
1706 nfsd4_replay_create_session(struct nfsd4_create_session *cr_ses, 1708 nfsd4_replay_create_session(struct nfsd4_create_session *cr_ses,
1707 struct nfsd4_clid_slot *slot) 1709 struct nfsd4_clid_slot *slot)
1708 { 1710 {
1709 memcpy(cr_ses, &slot->sl_cr_ses, sizeof(*cr_ses)); 1711 memcpy(cr_ses, &slot->sl_cr_ses, sizeof(*cr_ses));
1710 return slot->sl_status; 1712 return slot->sl_status;
1711 } 1713 }
1712 1714
1713 #define NFSD_MIN_REQ_HDR_SEQ_SZ ((\ 1715 #define NFSD_MIN_REQ_HDR_SEQ_SZ ((\
1714 2 * 2 + /* credential,verifier: AUTH_NULL, length 0 */ \ 1716 2 * 2 + /* credential,verifier: AUTH_NULL, length 0 */ \
1715 1 + /* MIN tag is length with zero, only length */ \ 1717 1 + /* MIN tag is length with zero, only length */ \
1716 3 + /* version, opcount, opcode */ \ 1718 3 + /* version, opcount, opcode */ \
1717 XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \ 1719 XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
1718 /* seqid, slotID, slotID, cache */ \ 1720 /* seqid, slotID, slotID, cache */ \
1719 4 ) * sizeof(__be32)) 1721 4 ) * sizeof(__be32))
1720 1722
1721 #define NFSD_MIN_RESP_HDR_SEQ_SZ ((\ 1723 #define NFSD_MIN_RESP_HDR_SEQ_SZ ((\
1722 2 + /* verifier: AUTH_NULL, length 0 */\ 1724 2 + /* verifier: AUTH_NULL, length 0 */\
1723 1 + /* status */ \ 1725 1 + /* status */ \
1724 1 + /* MIN tag is length with zero, only length */ \ 1726 1 + /* MIN tag is length with zero, only length */ \
1725 3 + /* opcount, opcode, opstatus*/ \ 1727 3 + /* opcount, opcode, opstatus*/ \
1726 XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \ 1728 XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
1727 /* seqid, slotID, slotID, slotID, status */ \ 1729 /* seqid, slotID, slotID, slotID, status */ \
1728 5 ) * sizeof(__be32)) 1730 5 ) * sizeof(__be32))
1729 1731
1730 static bool check_forechannel_attrs(struct nfsd4_channel_attrs fchannel) 1732 static bool check_forechannel_attrs(struct nfsd4_channel_attrs fchannel)
1731 { 1733 {
1732 return fchannel.maxreq_sz < NFSD_MIN_REQ_HDR_SEQ_SZ 1734 return fchannel.maxreq_sz < NFSD_MIN_REQ_HDR_SEQ_SZ
1733 || fchannel.maxresp_sz < NFSD_MIN_RESP_HDR_SEQ_SZ; 1735 || fchannel.maxresp_sz < NFSD_MIN_RESP_HDR_SEQ_SZ;
1734 } 1736 }
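
An arithmetic aside, not from this commit: with NFS4_MAX_SESSIONID_LEN at 16 bytes (so XDR_QUADLEN(16) is 4), both NFSD_MIN_REQ_HDR_SEQ_SZ and NFSD_MIN_RESP_HDR_SEQ_SZ evaluate to 16 four-byte XDR words, i.e. 64 bytes, and check_forechannel_attrs() rejects any fore channel advertising less. The sketch below reproduces that arithmetic; the constants are assumptions spelled out in the code, not pulled from the kernel headers:

#include <stdio.h>

/* Assumed values: NFS4_MAX_SESSIONID_LEN is 16 in NFSv4.1, and
 * XDR_QUADLEN() converts a byte count to 4-byte XDR words. */
#define NFS4_MAX_SESSIONID_LEN  16
#define XDR_QUADLEN(n)          (((n) + 3) >> 2)

#define MIN_REQ_HDR_SEQ_SZ  ((2 * 2 + 1 + 3 + XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + 4) * 4)
#define MIN_RESP_HDR_SEQ_SZ ((2 + 1 + 1 + 3 + XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + 5) * 4)

int main(void)
{
        /* Both come out to 16 XDR words = 64 bytes; a CREATE_SESSION with a
         * smaller maxreq_sz or maxresp_sz is answered with NFS4ERR_TOOSMALL. */
        printf("min SEQUENCE request header:  %d bytes\n", MIN_REQ_HDR_SEQ_SZ);
        printf("min SEQUENCE response header: %d bytes\n", MIN_RESP_HDR_SEQ_SZ);
        return 0;
}
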
1735 1737
1736 __be32 1738 __be32
1737 nfsd4_create_session(struct svc_rqst *rqstp, 1739 nfsd4_create_session(struct svc_rqst *rqstp,
1738 struct nfsd4_compound_state *cstate, 1740 struct nfsd4_compound_state *cstate,
1739 struct nfsd4_create_session *cr_ses) 1741 struct nfsd4_create_session *cr_ses)
1740 { 1742 {
1741 struct sockaddr *sa = svc_addr(rqstp); 1743 struct sockaddr *sa = svc_addr(rqstp);
1742 struct nfs4_client *conf, *unconf; 1744 struct nfs4_client *conf, *unconf;
1743 struct nfsd4_session *new; 1745 struct nfsd4_session *new;
1744 struct nfsd4_clid_slot *cs_slot = NULL; 1746 struct nfsd4_clid_slot *cs_slot = NULL;
1745 bool confirm_me = false; 1747 bool confirm_me = false;
1746 __be32 status = 0; 1748 __be32 status = 0;
1747 1749
1748 if (cr_ses->flags & ~SESSION4_FLAG_MASK_A) 1750 if (cr_ses->flags & ~SESSION4_FLAG_MASK_A)
1749 return nfserr_inval; 1751 return nfserr_inval;
1750 1752
1751 nfs4_lock_state(); 1753 nfs4_lock_state();
1752 unconf = find_unconfirmed_client(&cr_ses->clientid); 1754 unconf = find_unconfirmed_client(&cr_ses->clientid);
1753 conf = find_confirmed_client(&cr_ses->clientid); 1755 conf = find_confirmed_client(&cr_ses->clientid);
1754 1756
1755 if (conf) { 1757 if (conf) {
1756 cs_slot = &conf->cl_cs_slot; 1758 cs_slot = &conf->cl_cs_slot;
1757 status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0); 1759 status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
1758 if (status == nfserr_replay_cache) { 1760 if (status == nfserr_replay_cache) {
1759 status = nfsd4_replay_create_session(cr_ses, cs_slot); 1761 status = nfsd4_replay_create_session(cr_ses, cs_slot);
1760 goto out; 1762 goto out;
1761 } else if (cr_ses->seqid != cs_slot->sl_seqid + 1) { 1763 } else if (cr_ses->seqid != cs_slot->sl_seqid + 1) {
1762 status = nfserr_seq_misordered; 1764 status = nfserr_seq_misordered;
1763 goto out; 1765 goto out;
1764 } 1766 }
1765 } else if (unconf) { 1767 } else if (unconf) {
1766 if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) || 1768 if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) ||
1767 !rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) { 1769 !rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) {
1768 status = nfserr_clid_inuse; 1770 status = nfserr_clid_inuse;
1769 goto out; 1771 goto out;
1770 } 1772 }
1771 cs_slot = &unconf->cl_cs_slot; 1773 cs_slot = &unconf->cl_cs_slot;
1772 status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0); 1774 status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
1773 if (status) { 1775 if (status) {
1774 /* an unconfirmed replay returns misordered */ 1776 /* an unconfirmed replay returns misordered */
1775 status = nfserr_seq_misordered; 1777 status = nfserr_seq_misordered;
1776 goto out; 1778 goto out;
1777 } 1779 }
1778 confirm_me = true; 1780 confirm_me = true;
1779 conf = unconf; 1781 conf = unconf;
1780 } else { 1782 } else {
1781 status = nfserr_stale_clientid; 1783 status = nfserr_stale_clientid;
1782 goto out; 1784 goto out;
1783 } 1785 }
1784 1786
1785 /* 1787 /*
1786 * XXX: we should probably set this at creation time, and check 1788 * XXX: we should probably set this at creation time, and check
1787 * for consistent minorversion use throughout: 1789 * for consistent minorversion use throughout:
1788 */ 1790 */
1789 conf->cl_minorversion = 1; 1791 conf->cl_minorversion = 1;
1790 /* 1792 /*
1791 * We do not support RDMA or persistent sessions 1793 * We do not support RDMA or persistent sessions
1792 */ 1794 */
1793 cr_ses->flags &= ~SESSION4_PERSIST; 1795 cr_ses->flags &= ~SESSION4_PERSIST;
1794 cr_ses->flags &= ~SESSION4_RDMA; 1796 cr_ses->flags &= ~SESSION4_RDMA;
1795 1797
1796 status = nfserr_toosmall; 1798 status = nfserr_toosmall;
1797 if (check_forechannel_attrs(cr_ses->fore_channel)) 1799 if (check_forechannel_attrs(cr_ses->fore_channel))
1798 goto out; 1800 goto out;
1799 1801
1800 status = nfserr_jukebox; 1802 status = nfserr_jukebox;
1801 new = alloc_init_session(rqstp, conf, cr_ses); 1803 new = alloc_init_session(rqstp, conf, cr_ses);
1802 if (!new) 1804 if (!new)
1803 goto out; 1805 goto out;
1804 status = nfs_ok; 1806 status = nfs_ok;
1805 memcpy(cr_ses->sessionid.data, new->se_sessionid.data, 1807 memcpy(cr_ses->sessionid.data, new->se_sessionid.data,
1806 NFS4_MAX_SESSIONID_LEN); 1808 NFS4_MAX_SESSIONID_LEN);
1807 memcpy(&cr_ses->fore_channel, &new->se_fchannel, 1809 memcpy(&cr_ses->fore_channel, &new->se_fchannel,
1808 sizeof(struct nfsd4_channel_attrs)); 1810 sizeof(struct nfsd4_channel_attrs));
1809 cs_slot->sl_seqid++; 1811 cs_slot->sl_seqid++;
1810 cr_ses->seqid = cs_slot->sl_seqid; 1812 cr_ses->seqid = cs_slot->sl_seqid;
1811 1813
1812 /* cache solo and embedded create sessions under the state lock */ 1814 /* cache solo and embedded create sessions under the state lock */
1813 nfsd4_cache_create_session(cr_ses, cs_slot, status); 1815 nfsd4_cache_create_session(cr_ses, cs_slot, status);
1814 if (confirm_me) { 1816 if (confirm_me) {
1815 unsigned int hash = clientstr_hashval(unconf->cl_recdir); 1817 unsigned int hash = clientstr_hashval(unconf->cl_recdir);
1816 struct nfs4_client *old = 1818 struct nfs4_client *old =
1817 find_confirmed_client_by_str(conf->cl_recdir, hash); 1819 find_confirmed_client_by_str(conf->cl_recdir, hash);
1818 if (old) 1820 if (old)
1819 expire_client(old); 1821 expire_client(old);
1820 move_to_confirmed(conf); 1822 move_to_confirmed(conf);
1821 } 1823 }
1822 out: 1824 out:
1823 nfs4_unlock_state(); 1825 nfs4_unlock_state();
1824 dprintk("%s returns %d\n", __func__, ntohl(status)); 1826 dprintk("%s returns %d\n", __func__, ntohl(status));
1825 return status; 1827 return status;
1826 } 1828 }
1827 1829
1828 static bool nfsd4_last_compound_op(struct svc_rqst *rqstp) 1830 static bool nfsd4_last_compound_op(struct svc_rqst *rqstp)
1829 { 1831 {
1830 struct nfsd4_compoundres *resp = rqstp->rq_resp; 1832 struct nfsd4_compoundres *resp = rqstp->rq_resp;
1831 struct nfsd4_compoundargs *argp = rqstp->rq_argp; 1833 struct nfsd4_compoundargs *argp = rqstp->rq_argp;
1832 1834
1833 return argp->opcnt == resp->opcnt; 1835 return argp->opcnt == resp->opcnt;
1834 } 1836 }
1835 1837
1836 static __be32 nfsd4_map_bcts_dir(u32 *dir) 1838 static __be32 nfsd4_map_bcts_dir(u32 *dir)
1837 { 1839 {
1838 switch (*dir) { 1840 switch (*dir) {
1839 case NFS4_CDFC4_FORE: 1841 case NFS4_CDFC4_FORE:
1840 case NFS4_CDFC4_BACK: 1842 case NFS4_CDFC4_BACK:
1841 return nfs_ok; 1843 return nfs_ok;
1842 case NFS4_CDFC4_FORE_OR_BOTH: 1844 case NFS4_CDFC4_FORE_OR_BOTH:
1843 case NFS4_CDFC4_BACK_OR_BOTH: 1845 case NFS4_CDFC4_BACK_OR_BOTH:
1844 *dir = NFS4_CDFC4_BOTH; 1846 *dir = NFS4_CDFC4_BOTH;
1845 return nfs_ok; 1847 return nfs_ok;
1846 }; 1848 };
1847 return nfserr_inval; 1849 return nfserr_inval;
1848 } 1850 }
1849 1851
1850 __be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp, 1852 __be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
1851 struct nfsd4_compound_state *cstate, 1853 struct nfsd4_compound_state *cstate,
1852 struct nfsd4_bind_conn_to_session *bcts) 1854 struct nfsd4_bind_conn_to_session *bcts)
1853 { 1855 {
1854 __be32 status; 1856 __be32 status;
1855 1857
1856 if (!nfsd4_last_compound_op(rqstp)) 1858 if (!nfsd4_last_compound_op(rqstp))
1857 return nfserr_not_only_op; 1859 return nfserr_not_only_op;
1858 spin_lock(&client_lock); 1860 spin_lock(&client_lock);
1859 cstate->session = find_in_sessionid_hashtbl(&bcts->sessionid); 1861 cstate->session = find_in_sessionid_hashtbl(&bcts->sessionid);
1860 /* Sorta weird: we only need the refcnt'ing because new_conn acquires 1862 /* Sorta weird: we only need the refcnt'ing because new_conn acquires
1861 * client_lock itself: */ 1863 * client_lock itself: */
1862 if (cstate->session) { 1864 if (cstate->session) {
1863 nfsd4_get_session(cstate->session); 1865 nfsd4_get_session(cstate->session);
1864 atomic_inc(&cstate->session->se_client->cl_refcount); 1866 atomic_inc(&cstate->session->se_client->cl_refcount);
1865 } 1867 }
1866 spin_unlock(&client_lock); 1868 spin_unlock(&client_lock);
1867 if (!cstate->session) 1869 if (!cstate->session)
1868 return nfserr_badsession; 1870 return nfserr_badsession;
1869 1871
1870 status = nfsd4_map_bcts_dir(&bcts->dir); 1872 status = nfsd4_map_bcts_dir(&bcts->dir);
1871 if (!status) 1873 if (!status)
1872 nfsd4_new_conn(rqstp, cstate->session, bcts->dir); 1874 nfsd4_new_conn(rqstp, cstate->session, bcts->dir);
1873 return status; 1875 return status;
1874 } 1876 }
1875 1877
1876 static bool nfsd4_compound_in_session(struct nfsd4_session *session, struct nfs4_sessionid *sid) 1878 static bool nfsd4_compound_in_session(struct nfsd4_session *session, struct nfs4_sessionid *sid)
1877 { 1879 {
1878 if (!session) 1880 if (!session)
1879 return 0; 1881 return 0;
1880 return !memcmp(sid, &session->se_sessionid, sizeof(*sid)); 1882 return !memcmp(sid, &session->se_sessionid, sizeof(*sid));
1881 } 1883 }
1882 1884
1883 __be32 1885 __be32
1884 nfsd4_destroy_session(struct svc_rqst *r, 1886 nfsd4_destroy_session(struct svc_rqst *r,
1885 struct nfsd4_compound_state *cstate, 1887 struct nfsd4_compound_state *cstate,
1886 struct nfsd4_destroy_session *sessionid) 1888 struct nfsd4_destroy_session *sessionid)
1887 { 1889 {
1888 struct nfsd4_session *ses; 1890 struct nfsd4_session *ses;
1889 __be32 status = nfserr_badsession; 1891 __be32 status = nfserr_badsession;
1890 1892
1891 /* Notes: 1893 /* Notes:
1892 * - The confirmed nfs4_client->cl_sessionid holds the destroyed sessionid 1894 * - The confirmed nfs4_client->cl_sessionid holds the destroyed sessionid
1893 * - Should we return nfserr_back_chan_busy if waiting for 1895 * - Should we return nfserr_back_chan_busy if waiting for
1894 * callbacks on to-be-destroyed session? 1896 * callbacks on to-be-destroyed session?
1895 * - Do we need to clear any callback info from previous session? 1897 * - Do we need to clear any callback info from previous session?
1896 */ 1898 */
1897 1899
1898 if (nfsd4_compound_in_session(cstate->session, &sessionid->sessionid)) { 1900 if (nfsd4_compound_in_session(cstate->session, &sessionid->sessionid)) {
1899 if (!nfsd4_last_compound_op(r)) 1901 if (!nfsd4_last_compound_op(r))
1900 return nfserr_not_only_op; 1902 return nfserr_not_only_op;
1901 } 1903 }
1902 dump_sessionid(__func__, &sessionid->sessionid); 1904 dump_sessionid(__func__, &sessionid->sessionid);
1903 spin_lock(&client_lock); 1905 spin_lock(&client_lock);
1904 ses = find_in_sessionid_hashtbl(&sessionid->sessionid); 1906 ses = find_in_sessionid_hashtbl(&sessionid->sessionid);
1905 if (!ses) { 1907 if (!ses) {
1906 spin_unlock(&client_lock); 1908 spin_unlock(&client_lock);
1907 goto out; 1909 goto out;
1908 } 1910 }
1909 1911
1910 unhash_session(ses); 1912 unhash_session(ses);
1911 spin_unlock(&client_lock); 1913 spin_unlock(&client_lock);
1912 1914
1913 nfs4_lock_state(); 1915 nfs4_lock_state();
1914 nfsd4_probe_callback_sync(ses->se_client); 1916 nfsd4_probe_callback_sync(ses->se_client);
1915 nfs4_unlock_state(); 1917 nfs4_unlock_state();
1916 1918
1917 spin_lock(&client_lock); 1919 spin_lock(&client_lock);
1918 nfsd4_del_conns(ses); 1920 nfsd4_del_conns(ses);
1919 nfsd4_put_session_locked(ses); 1921 nfsd4_put_session_locked(ses);
1920 spin_unlock(&client_lock); 1922 spin_unlock(&client_lock);
1921 status = nfs_ok; 1923 status = nfs_ok;
1922 out: 1924 out:
1923 dprintk("%s returns %d\n", __func__, ntohl(status)); 1925 dprintk("%s returns %d\n", __func__, ntohl(status));
1924 return status; 1926 return status;
1925 } 1927 }
1926 1928
1927 static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s) 1929 static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s)
1928 { 1930 {
1929 struct nfsd4_conn *c; 1931 struct nfsd4_conn *c;
1930 1932
1931 list_for_each_entry(c, &s->se_conns, cn_persession) { 1933 list_for_each_entry(c, &s->se_conns, cn_persession) {
1932 if (c->cn_xprt == xpt) { 1934 if (c->cn_xprt == xpt) {
1933 return c; 1935 return c;
1934 } 1936 }
1935 } 1937 }
1936 return NULL; 1938 return NULL;
1937 } 1939 }
1938 1940
1939 static void nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_session *ses) 1941 static void nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_session *ses)
1940 { 1942 {
1941 struct nfs4_client *clp = ses->se_client; 1943 struct nfs4_client *clp = ses->se_client;
1942 struct nfsd4_conn *c; 1944 struct nfsd4_conn *c;
1943 int ret; 1945 int ret;
1944 1946
1945 spin_lock(&clp->cl_lock); 1947 spin_lock(&clp->cl_lock);
1946 c = __nfsd4_find_conn(new->cn_xprt, ses); 1948 c = __nfsd4_find_conn(new->cn_xprt, ses);
1947 if (c) { 1949 if (c) {
1948 spin_unlock(&clp->cl_lock); 1950 spin_unlock(&clp->cl_lock);
1949 free_conn(new); 1951 free_conn(new);
1950 return; 1952 return;
1951 } 1953 }
1952 __nfsd4_hash_conn(new, ses); 1954 __nfsd4_hash_conn(new, ses);
1953 spin_unlock(&clp->cl_lock); 1955 spin_unlock(&clp->cl_lock);
1954 ret = nfsd4_register_conn(new); 1956 ret = nfsd4_register_conn(new);
1955 if (ret) 1957 if (ret)
1956 /* oops; xprt is already down: */ 1958 /* oops; xprt is already down: */
1957 nfsd4_conn_lost(&new->cn_xpt_user); 1959 nfsd4_conn_lost(&new->cn_xpt_user);
1958 return; 1960 return;
1959 } 1961 }
1960 1962
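nfsd4_sequence_check_conn() above follows a common kernel idiom: the caller allocates the connection before taking any lock, the function then searches the session's connection list under cl_lock, frees the speculative allocation if an entry for the same transport already exists, and otherwise hashes it and finishes registration after dropping the lock. Allocating up front matters because the allocation can sleep, which is not allowed once the spinlock is held. A minimal userspace sketch of that allocate-outside, insert-if-absent shape (the names, the pthread mutex and the linked list are invented for illustration; this is not kernel code):

/* Illustrative sketch only: allocate outside the lock, insert if absent,
 * free the new node when an equivalent entry already exists. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct conn {
        int xprt_id;                    /* stands in for cn_xprt */
        struct conn *next;
};

static struct conn *conns;              /* per-session connection list */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static struct conn *find_conn_locked(int xprt_id)
{
        struct conn *c;

        for (c = conns; c; c = c->next)
                if (c->xprt_id == xprt_id)
                        return c;
        return NULL;
}

/* Mirrors the shape of nfsd4_sequence_check_conn(): 'new' was allocated
 * by the caller before the lock was taken. */
static void check_conn(struct conn *new)
{
        pthread_mutex_lock(&lock);
        if (find_conn_locked(new->xprt_id)) {
                pthread_mutex_unlock(&lock);
                free(new);              /* duplicate: drop the speculative allocation */
                return;
        }
        new->next = conns;              /* hash/insert while still locked */
        conns = new;
        pthread_mutex_unlock(&lock);
        /* transport registration would happen here, outside the lock */
}

int main(void)
{
        struct conn *a = calloc(1, sizeof(*a));
        struct conn *b = calloc(1, sizeof(*b));

        a->xprt_id = 1;
        b->xprt_id = 1;
        check_conn(a);
        check_conn(b);                  /* freed as a duplicate */
        printf("entries: %d\n", conns && !conns->next ? 1 : -1);
        return 0;
}
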
1961 static bool nfsd4_session_too_many_ops(struct svc_rqst *rqstp, struct nfsd4_session *session) 1963 static bool nfsd4_session_too_many_ops(struct svc_rqst *rqstp, struct nfsd4_session *session)
1962 { 1964 {
1963 struct nfsd4_compoundargs *args = rqstp->rq_argp; 1965 struct nfsd4_compoundargs *args = rqstp->rq_argp;
1964 1966
1965 return args->opcnt > session->se_fchannel.maxops; 1967 return args->opcnt > session->se_fchannel.maxops;
1966 } 1968 }
1967 1969
1968 static bool nfsd4_request_too_big(struct svc_rqst *rqstp, 1970 static bool nfsd4_request_too_big(struct svc_rqst *rqstp,
1969 struct nfsd4_session *session) 1971 struct nfsd4_session *session)
1970 { 1972 {
1971 struct xdr_buf *xb = &rqstp->rq_arg; 1973 struct xdr_buf *xb = &rqstp->rq_arg;
1972 1974
1973 return xb->len > session->se_fchannel.maxreq_sz; 1975 return xb->len > session->se_fchannel.maxreq_sz;
1974 } 1976 }
1975 1977
1976 __be32 1978 __be32
1977 nfsd4_sequence(struct svc_rqst *rqstp, 1979 nfsd4_sequence(struct svc_rqst *rqstp,
1978 struct nfsd4_compound_state *cstate, 1980 struct nfsd4_compound_state *cstate,
1979 struct nfsd4_sequence *seq) 1981 struct nfsd4_sequence *seq)
1980 { 1982 {
1981 struct nfsd4_compoundres *resp = rqstp->rq_resp; 1983 struct nfsd4_compoundres *resp = rqstp->rq_resp;
1982 struct nfsd4_session *session; 1984 struct nfsd4_session *session;
1983 struct nfsd4_slot *slot; 1985 struct nfsd4_slot *slot;
1984 struct nfsd4_conn *conn; 1986 struct nfsd4_conn *conn;
1985 __be32 status; 1987 __be32 status;
1986 1988
1987 if (resp->opcnt != 1) 1989 if (resp->opcnt != 1)
1988 return nfserr_sequence_pos; 1990 return nfserr_sequence_pos;
1989 1991
1990 /* 1992 /*
1991 * Will be either used or freed by nfsd4_sequence_check_conn 1993 * Will be either used or freed by nfsd4_sequence_check_conn
1992 * below. 1994 * below.
1993 */ 1995 */
1994 conn = alloc_conn(rqstp, NFS4_CDFC4_FORE); 1996 conn = alloc_conn(rqstp, NFS4_CDFC4_FORE);
1995 if (!conn) 1997 if (!conn)
1996 return nfserr_jukebox; 1998 return nfserr_jukebox;
1997 1999
1998 spin_lock(&client_lock); 2000 spin_lock(&client_lock);
1999 status = nfserr_badsession; 2001 status = nfserr_badsession;
2000 session = find_in_sessionid_hashtbl(&seq->sessionid); 2002 session = find_in_sessionid_hashtbl(&seq->sessionid);
2001 if (!session) 2003 if (!session)
2002 goto out; 2004 goto out;
2003 2005
2004 status = nfserr_too_many_ops; 2006 status = nfserr_too_many_ops;
2005 if (nfsd4_session_too_many_ops(rqstp, session)) 2007 if (nfsd4_session_too_many_ops(rqstp, session))
2006 goto out; 2008 goto out;
2007 2009
2008 status = nfserr_req_too_big; 2010 status = nfserr_req_too_big;
2009 if (nfsd4_request_too_big(rqstp, session)) 2011 if (nfsd4_request_too_big(rqstp, session))
2010 goto out; 2012 goto out;
2011 2013
2012 status = nfserr_badslot; 2014 status = nfserr_badslot;
2013 if (seq->slotid >= session->se_fchannel.maxreqs) 2015 if (seq->slotid >= session->se_fchannel.maxreqs)
2014 goto out; 2016 goto out;
2015 2017
2016 slot = session->se_slots[seq->slotid]; 2018 slot = session->se_slots[seq->slotid];
2017 dprintk("%s: slotid %d\n", __func__, seq->slotid); 2019 dprintk("%s: slotid %d\n", __func__, seq->slotid);
2018 2020
2019 /* We do not negotiate the number of slots yet, so set the 2021 /* We do not negotiate the number of slots yet, so set the
2020 * maxslots to the session maxreqs which is used to encode 2022 * maxslots to the session maxreqs which is used to encode
2021 * sr_highest_slotid and the sr_target_slotid to maxslots */ 2023 * sr_highest_slotid and the sr_target_slotid to maxslots */
2022 seq->maxslots = session->se_fchannel.maxreqs; 2024 seq->maxslots = session->se_fchannel.maxreqs;
2023 2025
2024 status = check_slot_seqid(seq->seqid, slot->sl_seqid, 2026 status = check_slot_seqid(seq->seqid, slot->sl_seqid,
2025 slot->sl_flags & NFSD4_SLOT_INUSE); 2027 slot->sl_flags & NFSD4_SLOT_INUSE);
2026 if (status == nfserr_replay_cache) { 2028 if (status == nfserr_replay_cache) {
2027 status = nfserr_seq_misordered; 2029 status = nfserr_seq_misordered;
2028 if (!(slot->sl_flags & NFSD4_SLOT_INITIALIZED)) 2030 if (!(slot->sl_flags & NFSD4_SLOT_INITIALIZED))
2029 goto out; 2031 goto out;
2030 cstate->slot = slot; 2032 cstate->slot = slot;
2031 cstate->session = session; 2033 cstate->session = session;
2032 /* Return the cached reply status and set cstate->status 2034 /* Return the cached reply status and set cstate->status
2033 * for nfsd4_proc_compound processing */ 2035 * for nfsd4_proc_compound processing */
2034 status = nfsd4_replay_cache_entry(resp, seq); 2036 status = nfsd4_replay_cache_entry(resp, seq);
2035 cstate->status = nfserr_replay_cache; 2037 cstate->status = nfserr_replay_cache;
2036 goto out; 2038 goto out;
2037 } 2039 }
2038 if (status) 2040 if (status)
2039 goto out; 2041 goto out;
2040 2042
2041 nfsd4_sequence_check_conn(conn, session); 2043 nfsd4_sequence_check_conn(conn, session);
2042 conn = NULL; 2044 conn = NULL;
2043 2045
2044 /* Success! bump slot seqid */ 2046 /* Success! bump slot seqid */
2045 slot->sl_seqid = seq->seqid; 2047 slot->sl_seqid = seq->seqid;
2046 slot->sl_flags |= NFSD4_SLOT_INUSE; 2048 slot->sl_flags |= NFSD4_SLOT_INUSE;
2047 if (seq->cachethis) 2049 if (seq->cachethis)
2048 slot->sl_flags |= NFSD4_SLOT_CACHETHIS; 2050 slot->sl_flags |= NFSD4_SLOT_CACHETHIS;
2049 else 2051 else
2050 slot->sl_flags &= ~NFSD4_SLOT_CACHETHIS; 2052 slot->sl_flags &= ~NFSD4_SLOT_CACHETHIS;
2051 2053
2052 cstate->slot = slot; 2054 cstate->slot = slot;
2053 cstate->session = session; 2055 cstate->session = session;
2054 2056
2055 out: 2057 out:
2056 /* Hold a session reference until done processing the compound. */ 2058 /* Hold a session reference until done processing the compound. */
2057 if (cstate->session) { 2059 if (cstate->session) {
2058 struct nfs4_client *clp = session->se_client; 2060 struct nfs4_client *clp = session->se_client;
2059 2061
2060 nfsd4_get_session(cstate->session); 2062 nfsd4_get_session(cstate->session);
2061 atomic_inc(&clp->cl_refcount); 2063 atomic_inc(&clp->cl_refcount);
2062 switch (clp->cl_cb_state) { 2064 switch (clp->cl_cb_state) {
2063 case NFSD4_CB_DOWN: 2065 case NFSD4_CB_DOWN:
2064 seq->status_flags = SEQ4_STATUS_CB_PATH_DOWN; 2066 seq->status_flags = SEQ4_STATUS_CB_PATH_DOWN;
2065 break; 2067 break;
2066 case NFSD4_CB_FAULT: 2068 case NFSD4_CB_FAULT:
2067 seq->status_flags = SEQ4_STATUS_BACKCHANNEL_FAULT; 2069 seq->status_flags = SEQ4_STATUS_BACKCHANNEL_FAULT;
2068 break; 2070 break;
2069 default: 2071 default:
2070 seq->status_flags = 0; 2072 seq->status_flags = 0;
2071 } 2073 }
2072 } 2074 }
2073 kfree(conn); 2075 kfree(conn);
2074 spin_unlock(&client_lock); 2076 spin_unlock(&client_lock);
2075 dprintk("%s: return %d\n", __func__, ntohl(status)); 2077 dprintk("%s: return %d\n", __func__, ntohl(status));
2076 return status; 2078 return status;
2077 } 2079 }
2078 2080
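The slot handling in nfsd4_sequence() above leans on check_slot_seqid(), defined earlier in this file: for a slot that is not in use, a seqid one greater than the recorded sl_seqid starts a new request, an equal seqid is a retransmission answered from the per-slot reply cache via nfsd4_replay_cache_entry(), and anything else is a sequence error. A rough standalone restatement of just that comparison, with invented names and the slot-in-use case left out:

/* Illustrative sketch of the NFSv4.1 slot seqid comparison for a free slot. */
#include <stdio.h>

enum verdict { NEW_REQUEST, REPLAY_FROM_CACHE, SEQ_MISORDERED };

static enum verdict classify(unsigned int seqid, unsigned int sl_seqid)
{
        if (seqid == sl_seqid + 1)      /* unsigned wraparound handled for free */
                return NEW_REQUEST;     /* execute, then cache the reply */
        if (seqid == sl_seqid)
                return REPLAY_FROM_CACHE;  /* retransmit: return the cached reply */
        return SEQ_MISORDERED;          /* client bug or lost state */
}

int main(void)
{
        printf("%d %d %d\n",
               classify(5, 4),          /* NEW_REQUEST */
               classify(4, 4),          /* REPLAY_FROM_CACHE */
               classify(7, 4));         /* SEQ_MISORDERED */
        return 0;
}

On success the code above stores seq->seqid back into sl_seqid and, when the client set cachethis, flags the slot so the reply is cached for a possible retransmission.
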
2079 __be32 2081 __be32
2080 nfsd4_destroy_clientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_destroy_clientid *dc) 2082 nfsd4_destroy_clientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_destroy_clientid *dc)
2081 { 2083 {
2082 struct nfs4_client *conf, *unconf, *clp; 2084 struct nfs4_client *conf, *unconf, *clp;
2083 __be32 status = 0; 2085 __be32 status = 0;
2084 2086
2085 nfs4_lock_state(); 2087 nfs4_lock_state();
2086 unconf = find_unconfirmed_client(&dc->clientid); 2088 unconf = find_unconfirmed_client(&dc->clientid);
2087 conf = find_confirmed_client(&dc->clientid); 2089 conf = find_confirmed_client(&dc->clientid);
2088 2090
2089 if (conf) { 2091 if (conf) {
2090 clp = conf; 2092 clp = conf;
2091 2093
2092 if (!is_client_expired(conf) && client_has_state(conf)) { 2094 if (!is_client_expired(conf) && client_has_state(conf)) {
2093 status = nfserr_clientid_busy; 2095 status = nfserr_clientid_busy;
2094 goto out; 2096 goto out;
2095 } 2097 }
2096 2098
2097 /* rfc5661 18.50.3 */ 2099 /* rfc5661 18.50.3 */
2098 if (cstate->session && conf == cstate->session->se_client) { 2100 if (cstate->session && conf == cstate->session->se_client) {
2099 status = nfserr_clientid_busy; 2101 status = nfserr_clientid_busy;
2100 goto out; 2102 goto out;
2101 } 2103 }
2102 } else if (unconf) 2104 } else if (unconf)
2103 clp = unconf; 2105 clp = unconf;
2104 else { 2106 else {
2105 status = nfserr_stale_clientid; 2107 status = nfserr_stale_clientid;
2106 goto out; 2108 goto out;
2107 } 2109 }
2108 2110
2109 expire_client(clp); 2111 expire_client(clp);
2110 out: 2112 out:
2111 nfs4_unlock_state(); 2113 nfs4_unlock_state();
2112 dprintk("%s return %d\n", __func__, ntohl(status)); 2114 dprintk("%s return %d\n", __func__, ntohl(status));
2113 return status; 2115 return status;
2114 } 2116 }
2115 2117
2116 __be32 2118 __be32
2117 nfsd4_reclaim_complete(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_reclaim_complete *rc) 2119 nfsd4_reclaim_complete(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_reclaim_complete *rc)
2118 { 2120 {
2119 __be32 status = 0; 2121 __be32 status = 0;
2120 2122
2121 if (rc->rca_one_fs) { 2123 if (rc->rca_one_fs) {
2122 if (!cstate->current_fh.fh_dentry) 2124 if (!cstate->current_fh.fh_dentry)
2123 return nfserr_nofilehandle; 2125 return nfserr_nofilehandle;
2124 /* 2126 /*
2125 * We don't take advantage of the rca_one_fs case. 2127 * We don't take advantage of the rca_one_fs case.
2126 * That's OK, it's optional, we can safely ignore it. 2128 * That's OK, it's optional, we can safely ignore it.
2127 */ 2129 */
2128 return nfs_ok; 2130 return nfs_ok;
2129 } 2131 }
2130 2132
2131 nfs4_lock_state(); 2133 nfs4_lock_state();
2132 status = nfserr_complete_already; 2134 status = nfserr_complete_already;
2133 if (test_and_set_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, 2135 if (test_and_set_bit(NFSD4_CLIENT_RECLAIM_COMPLETE,
2134 &cstate->session->se_client->cl_flags)) 2136 &cstate->session->se_client->cl_flags))
2135 goto out; 2137 goto out;
2136 2138
2137 status = nfserr_stale_clientid; 2139 status = nfserr_stale_clientid;
2138 if (is_client_expired(cstate->session->se_client)) 2140 if (is_client_expired(cstate->session->se_client))
2139 /* 2141 /*
2140 * The following error isn't really legal. 2142 * The following error isn't really legal.
2141 * But we only get here if the client just explicitly 2143 * But we only get here if the client just explicitly
2142 * destroyed that same client. Surely it no longer cares what 2144 * destroyed that same client. Surely it no longer cares what
2143 * error it gets back on an operation for the dead 2145 * error it gets back on an operation for the dead
2144 * client. 2146 * client.
2145 */ 2147 */
2146 goto out; 2148 goto out;
2147 2149
2148 status = nfs_ok; 2150 status = nfs_ok;
2149 nfsd4_client_record_create(cstate->session->se_client); 2151 nfsd4_client_record_create(cstate->session->se_client);
2150 out: 2152 out:
2151 nfs4_unlock_state(); 2153 nfs4_unlock_state();
2152 return status; 2154 return status;
2153 } 2155 }
2154 2156
2155 __be32 2157 __be32
2156 nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 2158 nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
2157 struct nfsd4_setclientid *setclid) 2159 struct nfsd4_setclientid *setclid)
2158 { 2160 {
2159 struct xdr_netobj clname = setclid->se_name; 2161 struct xdr_netobj clname = setclid->se_name;
2160 nfs4_verifier clverifier = setclid->se_verf; 2162 nfs4_verifier clverifier = setclid->se_verf;
2161 unsigned int strhashval; 2163 unsigned int strhashval;
2162 struct nfs4_client *conf, *unconf, *new; 2164 struct nfs4_client *conf, *unconf, *new;
2163 __be32 status; 2165 __be32 status;
2164 char dname[HEXDIR_LEN]; 2166 char dname[HEXDIR_LEN];
2165 2167
2166 status = nfs4_make_rec_clidname(dname, &clname); 2168 status = nfs4_make_rec_clidname(dname, &clname);
2167 if (status) 2169 if (status)
2168 return status; 2170 return status;
2169 2171
2170 strhashval = clientstr_hashval(dname); 2172 strhashval = clientstr_hashval(dname);
2171 2173
2172 /* Cases below refer to rfc 3530 section 14.2.33: */ 2174 /* Cases below refer to rfc 3530 section 14.2.33: */
2173 nfs4_lock_state(); 2175 nfs4_lock_state();
2174 conf = find_confirmed_client_by_str(dname, strhashval); 2176 conf = find_confirmed_client_by_str(dname, strhashval);
2175 if (conf) { 2177 if (conf) {
2176 /* case 0: */ 2178 /* case 0: */
2177 status = nfserr_clid_inuse; 2179 status = nfserr_clid_inuse;
2178 if (clp_used_exchangeid(conf)) 2180 if (clp_used_exchangeid(conf))
2179 goto out; 2181 goto out;
2180 if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) { 2182 if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
2181 char addr_str[INET6_ADDRSTRLEN]; 2183 char addr_str[INET6_ADDRSTRLEN];
2182 rpc_ntop((struct sockaddr *) &conf->cl_addr, addr_str, 2184 rpc_ntop((struct sockaddr *) &conf->cl_addr, addr_str,
2183 sizeof(addr_str)); 2185 sizeof(addr_str));
2184 dprintk("NFSD: setclientid: string in use by client " 2186 dprintk("NFSD: setclientid: string in use by client "
2185 "at %s\n", addr_str); 2187 "at %s\n", addr_str);
2186 goto out; 2188 goto out;
2187 } 2189 }
2188 } 2190 }
2189 unconf = find_unconfirmed_client_by_str(dname, strhashval); 2191 unconf = find_unconfirmed_client_by_str(dname, strhashval);
2190 if (unconf) 2192 if (unconf)
2191 expire_client(unconf); 2193 expire_client(unconf);
2192 status = nfserr_jukebox; 2194 status = nfserr_jukebox;
2193 new = create_client(clname, dname, rqstp, &clverifier); 2195 new = create_client(clname, dname, rqstp, &clverifier);
2194 if (new == NULL) 2196 if (new == NULL)
2195 goto out; 2197 goto out;
2196 if (conf && same_verf(&conf->cl_verifier, &clverifier)) 2198 if (conf && same_verf(&conf->cl_verifier, &clverifier))
2197 /* case 1: probable callback update */ 2199 /* case 1: probable callback update */
2198 copy_clid(new, conf); 2200 copy_clid(new, conf);
2199 else /* case 4 (new client) or cases 2, 3 (client reboot): */ 2201 else /* case 4 (new client) or cases 2, 3 (client reboot): */
2200 gen_clid(new); 2202 gen_clid(new);
2201 /* 2203 /*
2202 * XXX: we should probably set this at creation time, and check 2204 * XXX: we should probably set this at creation time, and check
2203 * for consistent minorversion use throughout: 2205 * for consistent minorversion use throughout:
2204 */ 2206 */
2205 new->cl_minorversion = 0; 2207 new->cl_minorversion = 0;
2206 gen_callback(new, setclid, rqstp); 2208 gen_callback(new, setclid, rqstp);
2207 add_to_unconfirmed(new, strhashval); 2209 add_to_unconfirmed(new, strhashval);
2208 setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot; 2210 setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot;
2209 setclid->se_clientid.cl_id = new->cl_clientid.cl_id; 2211 setclid->se_clientid.cl_id = new->cl_clientid.cl_id;
2210 memcpy(setclid->se_confirm.data, new->cl_confirm.data, sizeof(setclid->se_confirm.data)); 2212 memcpy(setclid->se_confirm.data, new->cl_confirm.data, sizeof(setclid->se_confirm.data));
2211 status = nfs_ok; 2213 status = nfs_ok;
2212 out: 2214 out:
2213 nfs4_unlock_state(); 2215 nfs4_unlock_state();
2214 return status; 2216 return status;
2215 } 2217 }
2216 2218
2217 2219
2218 __be32 2220 __be32
2219 nfsd4_setclientid_confirm(struct svc_rqst *rqstp, 2221 nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
2220 struct nfsd4_compound_state *cstate, 2222 struct nfsd4_compound_state *cstate,
2221 struct nfsd4_setclientid_confirm *setclientid_confirm) 2223 struct nfsd4_setclientid_confirm *setclientid_confirm)
2222 { 2224 {
2223 struct nfs4_client *conf, *unconf; 2225 struct nfs4_client *conf, *unconf;
2224 nfs4_verifier confirm = setclientid_confirm->sc_confirm; 2226 nfs4_verifier confirm = setclientid_confirm->sc_confirm;
2225 clientid_t * clid = &setclientid_confirm->sc_clientid; 2227 clientid_t * clid = &setclientid_confirm->sc_clientid;
2226 __be32 status; 2228 __be32 status;
2227 2229
2228 if (STALE_CLIENTID(clid)) 2230 if (STALE_CLIENTID(clid))
2229 return nfserr_stale_clientid; 2231 return nfserr_stale_clientid;
2230 nfs4_lock_state(); 2232 nfs4_lock_state();
2231 2233
2232 conf = find_confirmed_client(clid); 2234 conf = find_confirmed_client(clid);
2233 unconf = find_unconfirmed_client(clid); 2235 unconf = find_unconfirmed_client(clid);
2234 /* 2236 /*
2235 * We try hard to give out unique clientid's, so if we get an 2237 * We try hard to give out unique clientid's, so if we get an
2236 * attempt to confirm the same clientid with a different cred, 2238 * attempt to confirm the same clientid with a different cred,
2237 * there's a bug somewhere. Let's charitably assume it's our 2239 * there's a bug somewhere. Let's charitably assume it's our
2238 * bug. 2240 * bug.
2239 */ 2241 */
2240 status = nfserr_serverfault; 2242 status = nfserr_serverfault;
2241 if (unconf && !same_creds(&unconf->cl_cred, &rqstp->rq_cred)) 2243 if (unconf && !same_creds(&unconf->cl_cred, &rqstp->rq_cred))
2242 goto out; 2244 goto out;
2243 if (conf && !same_creds(&conf->cl_cred, &rqstp->rq_cred)) 2245 if (conf && !same_creds(&conf->cl_cred, &rqstp->rq_cred))
2244 goto out; 2246 goto out;
2245 /* cases below refer to rfc 3530 section 14.2.34: */ 2247 /* cases below refer to rfc 3530 section 14.2.34: */
2246 if (!unconf || !same_verf(&confirm, &unconf->cl_confirm)) { 2248 if (!unconf || !same_verf(&confirm, &unconf->cl_confirm)) {
2247 if (conf && !unconf) /* case 2: probable retransmit */ 2249 if (conf && !unconf) /* case 2: probable retransmit */
2248 status = nfs_ok; 2250 status = nfs_ok;
2249 else /* case 4: client hasn't noticed we rebooted yet? */ 2251 else /* case 4: client hasn't noticed we rebooted yet? */
2250 status = nfserr_stale_clientid; 2252 status = nfserr_stale_clientid;
2251 goto out; 2253 goto out;
2252 } 2254 }
2253 status = nfs_ok; 2255 status = nfs_ok;
2254 if (conf) { /* case 1: callback update */ 2256 if (conf) { /* case 1: callback update */
2255 nfsd4_change_callback(conf, &unconf->cl_cb_conn); 2257 nfsd4_change_callback(conf, &unconf->cl_cb_conn);
2256 nfsd4_probe_callback(conf); 2258 nfsd4_probe_callback(conf);
2257 expire_client(unconf); 2259 expire_client(unconf);
2258 } else { /* case 3: normal case; new or rebooted client */ 2260 } else { /* case 3: normal case; new or rebooted client */
2259 unsigned int hash = clientstr_hashval(unconf->cl_recdir); 2261 unsigned int hash = clientstr_hashval(unconf->cl_recdir);
2260 2262
2261 conf = find_confirmed_client_by_str(unconf->cl_recdir, hash); 2263 conf = find_confirmed_client_by_str(unconf->cl_recdir, hash);
2262 if (conf) { 2264 if (conf) {
2263 nfsd4_client_record_remove(conf); 2265 nfsd4_client_record_remove(conf);
2264 expire_client(conf); 2266 expire_client(conf);
2265 } 2267 }
2266 move_to_confirmed(unconf); 2268 move_to_confirmed(unconf);
2267 nfsd4_probe_callback(unconf); 2269 nfsd4_probe_callback(unconf);
2268 } 2270 }
2269 out: 2271 out:
2270 nfs4_unlock_state(); 2272 nfs4_unlock_state();
2271 return status; 2273 return status;
2272 } 2274 }
2273 2275
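The numbered cases in nfsd4_setclientid() and nfsd4_setclientid_confirm() above follow RFC 3530 sections 14.2.33 and 14.2.34. Stripped of the credential checks and hashing, the confirm-side decision reduces to: a missing unconfirmed record or a verifier mismatch means either a probable retransmit or a stale clientid, while a matching unconfirmed record is promoted, as a callback update when a confirmed record already exists. A condensed, illustrative restatement with plain flags in place of the real client structures (the enum and function names are invented):

/* Illustrative restatement of the SETCLIENTID_CONFIRM branches above. */
#include <stdio.h>

enum outcome {
        OK_CALLBACK_UPDATE,     /* case 1: conf exists, unconf confirms it  */
        OK_NEW_OR_REBOOTED,     /* case 3: promote the unconfirmed record   */
        OK_RETRANSMIT,          /* case 2: conf only, likely a retransmit   */
        STALE_CLIENTID          /* case 4: nothing usable to confirm        */
};

static enum outcome confirm(int have_conf, int have_unconf, int verf_matches_unconf)
{
        if (!have_unconf || !verf_matches_unconf)
                return have_conf && !have_unconf ? OK_RETRANSMIT : STALE_CLIENTID;
        if (have_conf)
                return OK_CALLBACK_UPDATE;      /* move callback info, expire unconf */
        return OK_NEW_OR_REBOOTED;              /* move_to_confirmed(unconf)         */
}

int main(void)
{
        printf("%d %d %d %d\n",
               confirm(1, 1, 1),        /* case 1 */
               confirm(0, 1, 1),        /* case 3 */
               confirm(1, 0, 0),        /* case 2 */
               confirm(0, 0, 0));       /* case 4 */
        return 0;
}
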
2274 static struct nfs4_file *nfsd4_alloc_file(void) 2276 static struct nfs4_file *nfsd4_alloc_file(void)
2275 { 2277 {
2276 return kmem_cache_alloc(file_slab, GFP_KERNEL); 2278 return kmem_cache_alloc(file_slab, GFP_KERNEL);
2277 } 2279 }
2278 2280
2279 /* OPEN Share state helper functions */ 2281 /* OPEN Share state helper functions */
2280 static void nfsd4_init_file(struct nfs4_file *fp, struct inode *ino) 2282 static void nfsd4_init_file(struct nfs4_file *fp, struct inode *ino)
2281 { 2283 {
2282 unsigned int hashval = file_hashval(ino); 2284 unsigned int hashval = file_hashval(ino);
2283 2285
2284 atomic_set(&fp->fi_ref, 1); 2286 atomic_set(&fp->fi_ref, 1);
2285 INIT_LIST_HEAD(&fp->fi_hash); 2287 INIT_LIST_HEAD(&fp->fi_hash);
2286 INIT_LIST_HEAD(&fp->fi_stateids); 2288 INIT_LIST_HEAD(&fp->fi_stateids);
2287 INIT_LIST_HEAD(&fp->fi_delegations); 2289 INIT_LIST_HEAD(&fp->fi_delegations);
2288 fp->fi_inode = igrab(ino); 2290 fp->fi_inode = igrab(ino);
2289 fp->fi_had_conflict = false; 2291 fp->fi_had_conflict = false;
2290 fp->fi_lease = NULL; 2292 fp->fi_lease = NULL;
2291 memset(fp->fi_fds, 0, sizeof(fp->fi_fds)); 2293 memset(fp->fi_fds, 0, sizeof(fp->fi_fds));
2292 memset(fp->fi_access, 0, sizeof(fp->fi_access)); 2294 memset(fp->fi_access, 0, sizeof(fp->fi_access));
2293 spin_lock(&recall_lock); 2295 spin_lock(&recall_lock);
2294 list_add(&fp->fi_hash, &file_hashtbl[hashval]); 2296 list_add(&fp->fi_hash, &file_hashtbl[hashval]);
2295 spin_unlock(&recall_lock); 2297 spin_unlock(&recall_lock);
2296 } 2298 }
2297 2299
2298 static void 2300 static void
2299 nfsd4_free_slab(struct kmem_cache **slab) 2301 nfsd4_free_slab(struct kmem_cache **slab)
2300 { 2302 {
2301 if (*slab == NULL) 2303 if (*slab == NULL)
2302 return; 2304 return;
2303 kmem_cache_destroy(*slab); 2305 kmem_cache_destroy(*slab);
2304 *slab = NULL; 2306 *slab = NULL;
2305 } 2307 }
2306 2308
2307 void 2309 void
2308 nfsd4_free_slabs(void) 2310 nfsd4_free_slabs(void)
2309 { 2311 {
2310 nfsd4_free_slab(&openowner_slab); 2312 nfsd4_free_slab(&openowner_slab);
2311 nfsd4_free_slab(&lockowner_slab); 2313 nfsd4_free_slab(&lockowner_slab);
2312 nfsd4_free_slab(&file_slab); 2314 nfsd4_free_slab(&file_slab);
2313 nfsd4_free_slab(&stateid_slab); 2315 nfsd4_free_slab(&stateid_slab);
2314 nfsd4_free_slab(&deleg_slab); 2316 nfsd4_free_slab(&deleg_slab);
2315 } 2317 }
2316 2318
2317 int 2319 int
2318 nfsd4_init_slabs(void) 2320 nfsd4_init_slabs(void)
2319 { 2321 {
2320 openowner_slab = kmem_cache_create("nfsd4_openowners", 2322 openowner_slab = kmem_cache_create("nfsd4_openowners",
2321 sizeof(struct nfs4_openowner), 0, 0, NULL); 2323 sizeof(struct nfs4_openowner), 0, 0, NULL);
2322 if (openowner_slab == NULL) 2324 if (openowner_slab == NULL)
2323 goto out_nomem; 2325 goto out_nomem;
2324 lockowner_slab = kmem_cache_create("nfsd4_lockowners", 2326 lockowner_slab = kmem_cache_create("nfsd4_lockowners",
2325 sizeof(struct nfs4_lockowner), 0, 0, NULL); 2327 sizeof(struct nfs4_lockowner), 0, 0, NULL);
2326 if (lockowner_slab == NULL) 2328 if (lockowner_slab == NULL)
2327 goto out_nomem; 2329 goto out_nomem;
2328 file_slab = kmem_cache_create("nfsd4_files", 2330 file_slab = kmem_cache_create("nfsd4_files",
2329 sizeof(struct nfs4_file), 0, 0, NULL); 2331 sizeof(struct nfs4_file), 0, 0, NULL);
2330 if (file_slab == NULL) 2332 if (file_slab == NULL)
2331 goto out_nomem; 2333 goto out_nomem;
2332 stateid_slab = kmem_cache_create("nfsd4_stateids", 2334 stateid_slab = kmem_cache_create("nfsd4_stateids",
2333 sizeof(struct nfs4_ol_stateid), 0, 0, NULL); 2335 sizeof(struct nfs4_ol_stateid), 0, 0, NULL);
2334 if (stateid_slab == NULL) 2336 if (stateid_slab == NULL)
2335 goto out_nomem; 2337 goto out_nomem;
2336 deleg_slab = kmem_cache_create("nfsd4_delegations", 2338 deleg_slab = kmem_cache_create("nfsd4_delegations",
2337 sizeof(struct nfs4_delegation), 0, 0, NULL); 2339 sizeof(struct nfs4_delegation), 0, 0, NULL);
2338 if (deleg_slab == NULL) 2340 if (deleg_slab == NULL)
2339 goto out_nomem; 2341 goto out_nomem;
2340 return 0; 2342 return 0;
2341 out_nomem: 2343 out_nomem:
2342 nfsd4_free_slabs(); 2344 nfsd4_free_slabs();
2343 dprintk("nfsd4: out of memory while initializing nfsv4\n"); 2345 dprintk("nfsd4: out of memory while initializing nfsv4\n");
2344 return -ENOMEM; 2346 return -ENOMEM;
2345 } 2347 }
2346 2348
2347 void nfs4_free_openowner(struct nfs4_openowner *oo) 2349 void nfs4_free_openowner(struct nfs4_openowner *oo)
2348 { 2350 {
2349 kfree(oo->oo_owner.so_owner.data); 2351 kfree(oo->oo_owner.so_owner.data);
2350 kmem_cache_free(openowner_slab, oo); 2352 kmem_cache_free(openowner_slab, oo);
2351 } 2353 }
2352 2354
2353 void nfs4_free_lockowner(struct nfs4_lockowner *lo) 2355 void nfs4_free_lockowner(struct nfs4_lockowner *lo)
2354 { 2356 {
2355 kfree(lo->lo_owner.so_owner.data); 2357 kfree(lo->lo_owner.so_owner.data);
2356 kmem_cache_free(lockowner_slab, lo); 2358 kmem_cache_free(lockowner_slab, lo);
2357 } 2359 }
2358 2360
2359 static void init_nfs4_replay(struct nfs4_replay *rp) 2361 static void init_nfs4_replay(struct nfs4_replay *rp)
2360 { 2362 {
2361 rp->rp_status = nfserr_serverfault; 2363 rp->rp_status = nfserr_serverfault;
2362 rp->rp_buflen = 0; 2364 rp->rp_buflen = 0;
2363 rp->rp_buf = rp->rp_ibuf; 2365 rp->rp_buf = rp->rp_ibuf;
2364 } 2366 }
2365 2367
2366 static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj *owner, struct nfs4_client *clp) 2368 static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj *owner, struct nfs4_client *clp)
2367 { 2369 {
2368 struct nfs4_stateowner *sop; 2370 struct nfs4_stateowner *sop;
2369 2371
2370 sop = kmem_cache_alloc(slab, GFP_KERNEL); 2372 sop = kmem_cache_alloc(slab, GFP_KERNEL);
2371 if (!sop) 2373 if (!sop)
2372 return NULL; 2374 return NULL;
2373 2375
2374 sop->so_owner.data = kmemdup(owner->data, owner->len, GFP_KERNEL); 2376 sop->so_owner.data = kmemdup(owner->data, owner->len, GFP_KERNEL);
2375 if (!sop->so_owner.data) { 2377 if (!sop->so_owner.data) {
2376 kmem_cache_free(slab, sop); 2378 kmem_cache_free(slab, sop);
2377 return NULL; 2379 return NULL;
2378 } 2380 }
2379 sop->so_owner.len = owner->len; 2381 sop->so_owner.len = owner->len;
2380 2382
2381 INIT_LIST_HEAD(&sop->so_stateids); 2383 INIT_LIST_HEAD(&sop->so_stateids);
2382 sop->so_client = clp; 2384 sop->so_client = clp;
2383 init_nfs4_replay(&sop->so_replay); 2385 init_nfs4_replay(&sop->so_replay);
2384 return sop; 2386 return sop;
2385 } 2387 }
2386 2388
2387 static void hash_openowner(struct nfs4_openowner *oo, struct nfs4_client *clp, unsigned int strhashval) 2389 static void hash_openowner(struct nfs4_openowner *oo, struct nfs4_client *clp, unsigned int strhashval)
2388 { 2390 {
2389 list_add(&oo->oo_owner.so_strhash, &ownerstr_hashtbl[strhashval]); 2391 list_add(&oo->oo_owner.so_strhash, &ownerstr_hashtbl[strhashval]);
2390 list_add(&oo->oo_perclient, &clp->cl_openowners); 2392 list_add(&oo->oo_perclient, &clp->cl_openowners);
2391 } 2393 }
2392 2394
2393 static struct nfs4_openowner * 2395 static struct nfs4_openowner *
2394 alloc_init_open_stateowner(unsigned int strhashval, struct nfs4_client *clp, struct nfsd4_open *open) { 2396 alloc_init_open_stateowner(unsigned int strhashval, struct nfs4_client *clp, struct nfsd4_open *open) {
2395 struct nfs4_openowner *oo; 2397 struct nfs4_openowner *oo;
2396 2398
2397 oo = alloc_stateowner(openowner_slab, &open->op_owner, clp); 2399 oo = alloc_stateowner(openowner_slab, &open->op_owner, clp);
2398 if (!oo) 2400 if (!oo)
2399 return NULL; 2401 return NULL;
2400 oo->oo_owner.so_is_open_owner = 1; 2402 oo->oo_owner.so_is_open_owner = 1;
2401 oo->oo_owner.so_seqid = open->op_seqid; 2403 oo->oo_owner.so_seqid = open->op_seqid;
2402 oo->oo_flags = NFS4_OO_NEW; 2404 oo->oo_flags = NFS4_OO_NEW;
2403 oo->oo_time = 0; 2405 oo->oo_time = 0;
2404 oo->oo_last_closed_stid = NULL; 2406 oo->oo_last_closed_stid = NULL;
2405 INIT_LIST_HEAD(&oo->oo_close_lru); 2407 INIT_LIST_HEAD(&oo->oo_close_lru);
2406 hash_openowner(oo, clp, strhashval); 2408 hash_openowner(oo, clp, strhashval);
2407 return oo; 2409 return oo;
2408 } 2410 }
2409 2411
2410 static void init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp, struct nfsd4_open *open) { 2412 static void init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp, struct nfsd4_open *open) {
2411 struct nfs4_openowner *oo = open->op_openowner; 2413 struct nfs4_openowner *oo = open->op_openowner;
2412 struct nfs4_client *clp = oo->oo_owner.so_client; 2414 struct nfs4_client *clp = oo->oo_owner.so_client;
2413 2415
2414 init_stid(&stp->st_stid, clp, NFS4_OPEN_STID); 2416 init_stid(&stp->st_stid, clp, NFS4_OPEN_STID);
2415 INIT_LIST_HEAD(&stp->st_lockowners); 2417 INIT_LIST_HEAD(&stp->st_lockowners);
2416 list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids); 2418 list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids);
2417 list_add(&stp->st_perfile, &fp->fi_stateids); 2419 list_add(&stp->st_perfile, &fp->fi_stateids);
2418 stp->st_stateowner = &oo->oo_owner; 2420 stp->st_stateowner = &oo->oo_owner;
2419 get_nfs4_file(fp); 2421 get_nfs4_file(fp);
2420 stp->st_file = fp; 2422 stp->st_file = fp;
2421 stp->st_access_bmap = 0; 2423 stp->st_access_bmap = 0;
2422 stp->st_deny_bmap = 0; 2424 stp->st_deny_bmap = 0;
2423 set_access(open->op_share_access, stp); 2425 set_access(open->op_share_access, stp);
2424 set_deny(open->op_share_deny, stp); 2426 set_deny(open->op_share_deny, stp);
2425 stp->st_openstp = NULL; 2427 stp->st_openstp = NULL;
2426 } 2428 }
2427 2429
2428 static void 2430 static void
2429 move_to_close_lru(struct nfs4_openowner *oo) 2431 move_to_close_lru(struct nfs4_openowner *oo)
2430 { 2432 {
2431 dprintk("NFSD: move_to_close_lru nfs4_openowner %p\n", oo); 2433 dprintk("NFSD: move_to_close_lru nfs4_openowner %p\n", oo);
2432 2434
2433 list_move_tail(&oo->oo_close_lru, &close_lru); 2435 list_move_tail(&oo->oo_close_lru, &close_lru);
2434 oo->oo_time = get_seconds(); 2436 oo->oo_time = get_seconds();
2435 } 2437 }
2436 2438
2437 static int 2439 static int
2438 same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner, 2440 same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner,
2439 clientid_t *clid) 2441 clientid_t *clid)
2440 { 2442 {
2441 return (sop->so_owner.len == owner->len) && 2443 return (sop->so_owner.len == owner->len) &&
2442 0 == memcmp(sop->so_owner.data, owner->data, owner->len) && 2444 0 == memcmp(sop->so_owner.data, owner->data, owner->len) &&
2443 (sop->so_client->cl_clientid.cl_id == clid->cl_id); 2445 (sop->so_client->cl_clientid.cl_id == clid->cl_id);
2444 } 2446 }
2445 2447
2446 static struct nfs4_openowner * 2448 static struct nfs4_openowner *
2447 find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open) 2449 find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open)
2448 { 2450 {
2449 struct nfs4_stateowner *so; 2451 struct nfs4_stateowner *so;
2450 struct nfs4_openowner *oo; 2452 struct nfs4_openowner *oo;
2451 2453
2452 list_for_each_entry(so, &ownerstr_hashtbl[hashval], so_strhash) { 2454 list_for_each_entry(so, &ownerstr_hashtbl[hashval], so_strhash) {
2453 if (!so->so_is_open_owner) 2455 if (!so->so_is_open_owner)
2454 continue; 2456 continue;
2455 if (same_owner_str(so, &open->op_owner, &open->op_clientid)) { 2457 if (same_owner_str(so, &open->op_owner, &open->op_clientid)) {
2456 oo = openowner(so); 2458 oo = openowner(so);
2457 renew_client(oo->oo_owner.so_client); 2459 renew_client(oo->oo_owner.so_client);
2458 return oo; 2460 return oo;
2459 } 2461 }
2460 } 2462 }
2461 return NULL; 2463 return NULL;
2462 } 2464 }
2463 2465
2464 /* search file_hashtbl[] for file */ 2466 /* search file_hashtbl[] for file */
2465 static struct nfs4_file * 2467 static struct nfs4_file *
2466 find_file(struct inode *ino) 2468 find_file(struct inode *ino)
2467 { 2469 {
2468 unsigned int hashval = file_hashval(ino); 2470 unsigned int hashval = file_hashval(ino);
2469 struct nfs4_file *fp; 2471 struct nfs4_file *fp;
2470 2472
2471 spin_lock(&recall_lock); 2473 spin_lock(&recall_lock);
2472 list_for_each_entry(fp, &file_hashtbl[hashval], fi_hash) { 2474 list_for_each_entry(fp, &file_hashtbl[hashval], fi_hash) {
2473 if (fp->fi_inode == ino) { 2475 if (fp->fi_inode == ino) {
2474 get_nfs4_file(fp); 2476 get_nfs4_file(fp);
2475 spin_unlock(&recall_lock); 2477 spin_unlock(&recall_lock);
2476 return fp; 2478 return fp;
2477 } 2479 }
2478 } 2480 }
2479 spin_unlock(&recall_lock); 2481 spin_unlock(&recall_lock);
2480 return NULL; 2482 return NULL;
2481 } 2483 }
2482 2484
2483 /* 2485 /*
2484 * Called to check deny when READ with the all-zeros stateid or 2486 * Called to check deny when READ with the all-zeros stateid or
2485 * WRITE with the all-zeros or all-ones stateid 2487 * WRITE with the all-zeros or all-ones stateid
2486 */ 2488 */
2487 static __be32 2489 static __be32
2488 nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type) 2490 nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type)
2489 { 2491 {
2490 struct inode *ino = current_fh->fh_dentry->d_inode; 2492 struct inode *ino = current_fh->fh_dentry->d_inode;
2491 struct nfs4_file *fp; 2493 struct nfs4_file *fp;
2492 struct nfs4_ol_stateid *stp; 2494 struct nfs4_ol_stateid *stp;
2493 __be32 ret; 2495 __be32 ret;
2494 2496
2495 dprintk("NFSD: nfs4_share_conflict\n"); 2497 dprintk("NFSD: nfs4_share_conflict\n");
2496 2498
2497 fp = find_file(ino); 2499 fp = find_file(ino);
2498 if (!fp) 2500 if (!fp)
2499 return nfs_ok; 2501 return nfs_ok;
2500 ret = nfserr_locked; 2502 ret = nfserr_locked;
2501 /* Search for conflicting share reservations */ 2503 /* Search for conflicting share reservations */
2502 list_for_each_entry(stp, &fp->fi_stateids, st_perfile) { 2504 list_for_each_entry(stp, &fp->fi_stateids, st_perfile) {
2503 if (test_deny(deny_type, stp) || 2505 if (test_deny(deny_type, stp) ||
2504 test_deny(NFS4_SHARE_DENY_BOTH, stp)) 2506 test_deny(NFS4_SHARE_DENY_BOTH, stp))
2505 goto out; 2507 goto out;
2506 } 2508 }
2507 ret = nfs_ok; 2509 ret = nfs_ok;
2508 out: 2510 out:
2509 put_nfs4_file(fp); 2511 put_nfs4_file(fp);
2510 return ret; 2512 return ret;
2511 } 2513 }
2512 2514
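nfs4_share_conflict() above is what turns an earlier OPEN's deny reservation into NFS4ERR_LOCKED for a later anonymous READ or WRITE. A toy bitmask version of the idea follows; it simplifies one detail: deny-both is modelled as deny-read plus deny-write, whereas st_deny_bmap in this file is indexed by deny value, which is why the real code tests NFS4_SHARE_DENY_BOTH separately. The constants and names below are placeholders, not the NFS4_SHARE_* values:

/* Illustrative share-reservation check: deny bits recorded by earlier
 * OPENs block later access of the denied kind. Access and deny use the
 * same bit values in this sketch. */
#include <stdio.h>

#define DENY_READ  0x1          /* placeholder values, for illustration only */
#define DENY_WRITE 0x2
#define DENY_BOTH  (DENY_READ | DENY_WRITE)

struct open_state { unsigned int deny_bmap; };

static int share_conflict(const struct open_state *states, int n,
                          unsigned int requested_access)
{
        int i;

        for (i = 0; i < n; i++)
                if (states[i].deny_bmap & requested_access)
                        return 1;       /* would be NFS4ERR_LOCKED */
        return 0;
}

int main(void)
{
        struct open_state opens[] = { { DENY_WRITE }, { 0 } };

        printf("read: %d, write: %d\n",
               share_conflict(opens, 2, DENY_READ),    /* 0: reads allowed */
               share_conflict(opens, 2, DENY_WRITE));  /* 1: writes denied */
        return 0;
}
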
2513 static void nfsd_break_one_deleg(struct nfs4_delegation *dp) 2515 static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
2514 { 2516 {
2515 /* We're assuming the state code never drops its reference 2517 /* We're assuming the state code never drops its reference
2516 * without first removing the lease. Since we're in this lease 2518 * without first removing the lease. Since we're in this lease
2517 * callback (and since the lease code is serialized by the kernel 2519 * callback (and since the lease code is serialized by the kernel
2518 * lock) we know the server hasn't removed the lease yet, so it's 2520 * lock) we know the server hasn't removed the lease yet, so it's
2519 * safe to take a reference: */ 2521 * safe to take a reference: */
2520 atomic_inc(&dp->dl_count); 2522 atomic_inc(&dp->dl_count);
2521 2523
2522 list_add_tail(&dp->dl_recall_lru, &del_recall_lru); 2524 list_add_tail(&dp->dl_recall_lru, &del_recall_lru);
2523 2525
2524 /* Only place dl_time is set; protected by lock_flocks(). */ 2526 /* Only place dl_time is set; protected by lock_flocks(). */
2525 dp->dl_time = get_seconds(); 2527 dp->dl_time = get_seconds();
2526 2528
2527 nfsd4_cb_recall(dp); 2529 nfsd4_cb_recall(dp);
2528 } 2530 }
2529 2531
2530 /* Called from break_lease() with lock_flocks() held. */ 2532 /* Called from break_lease() with lock_flocks() held. */
2531 static void nfsd_break_deleg_cb(struct file_lock *fl) 2533 static void nfsd_break_deleg_cb(struct file_lock *fl)
2532 { 2534 {
2533 struct nfs4_file *fp = (struct nfs4_file *)fl->fl_owner; 2535 struct nfs4_file *fp = (struct nfs4_file *)fl->fl_owner;
2534 struct nfs4_delegation *dp; 2536 struct nfs4_delegation *dp;
2535 2537
2536 BUG_ON(!fp); 2538 BUG_ON(!fp);
2537 /* We assume break_lease is only called once per lease: */ 2539 /* We assume break_lease is only called once per lease: */
2538 BUG_ON(fp->fi_had_conflict); 2540 BUG_ON(fp->fi_had_conflict);
2539 /* 2541 /*
2540 * We don't want the locks code to timeout the lease for us; 2542 * We don't want the locks code to timeout the lease for us;
2541 * we'll remove it ourselves if a delegation isn't returned 2543 * we'll remove it ourselves if a delegation isn't returned
2542 * in time: 2544 * in time:
2543 */ 2545 */
2544 fl->fl_break_time = 0; 2546 fl->fl_break_time = 0;
2545 2547
2546 spin_lock(&recall_lock); 2548 spin_lock(&recall_lock);
2547 fp->fi_had_conflict = true; 2549 fp->fi_had_conflict = true;
2548 list_for_each_entry(dp, &fp->fi_delegations, dl_perfile) 2550 list_for_each_entry(dp, &fp->fi_delegations, dl_perfile)
2549 nfsd_break_one_deleg(dp); 2551 nfsd_break_one_deleg(dp);
2550 spin_unlock(&recall_lock); 2552 spin_unlock(&recall_lock);
2551 } 2553 }
2552 2554
2553 static 2555 static
2554 int nfsd_change_deleg_cb(struct file_lock **onlist, int arg) 2556 int nfsd_change_deleg_cb(struct file_lock **onlist, int arg)
2555 { 2557 {
2556 if (arg & F_UNLCK) 2558 if (arg & F_UNLCK)
2557 return lease_modify(onlist, arg); 2559 return lease_modify(onlist, arg);
2558 else 2560 else
2559 return -EAGAIN; 2561 return -EAGAIN;
2560 } 2562 }
2561 2563
2562 static const struct lock_manager_operations nfsd_lease_mng_ops = { 2564 static const struct lock_manager_operations nfsd_lease_mng_ops = {
2563 .lm_break = nfsd_break_deleg_cb, 2565 .lm_break = nfsd_break_deleg_cb,
2564 .lm_change = nfsd_change_deleg_cb, 2566 .lm_change = nfsd_change_deleg_cb,
2565 }; 2567 };
2566 2568
2567 static __be32 nfsd4_check_seqid(struct nfsd4_compound_state *cstate, struct nfs4_stateowner *so, u32 seqid) 2569 static __be32 nfsd4_check_seqid(struct nfsd4_compound_state *cstate, struct nfs4_stateowner *so, u32 seqid)
2568 { 2570 {
2569 if (nfsd4_has_session(cstate)) 2571 if (nfsd4_has_session(cstate))
2570 return nfs_ok; 2572 return nfs_ok;
2571 if (seqid == so->so_seqid - 1) 2573 if (seqid == so->so_seqid - 1)
2572 return nfserr_replay_me; 2574 return nfserr_replay_me;
2573 if (seqid == so->so_seqid) 2575 if (seqid == so->so_seqid)
2574 return nfs_ok; 2576 return nfs_ok;
2575 return nfserr_bad_seqid; 2577 return nfserr_bad_seqid;
2576 } 2578 }
2577 2579
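nfsd4_check_seqid() above encodes the NFSv4.0 serialization rule for open- and lock-owners, which sessions (NFSv4.1) make unnecessary: the owner's current seqid is in order, the immediately preceding seqid is treated as a retransmission to be answered from so_replay, and anything else is NFS4ERR_BAD_SEQID. In sketch form, with invented names and the replay buffer omitted:

/* Illustrative NFSv4.0 open/lock-owner seqid check (sessions skip this). */
#include <stdio.h>

enum v40_verdict { IN_ORDER, REPLAY_LAST_REPLY, BAD_SEQID };

static enum v40_verdict check_owner_seqid(unsigned int seqid, unsigned int so_seqid)
{
        if (seqid == so_seqid)
                return IN_ORDER;            /* matches the owner's expected value */
        if (seqid == so_seqid - 1)
                return REPLAY_LAST_REPLY;   /* serve the reply cached in so_replay */
        return BAD_SEQID;
}

int main(void)
{
        printf("%d %d %d\n",
               check_owner_seqid(8, 8),     /* IN_ORDER */
               check_owner_seqid(7, 8),     /* REPLAY_LAST_REPLY */
               check_owner_seqid(3, 8));    /* BAD_SEQID */
        return 0;
}
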
2578 __be32 2580 __be32
2579 nfsd4_process_open1(struct nfsd4_compound_state *cstate, 2581 nfsd4_process_open1(struct nfsd4_compound_state *cstate,
2580 struct nfsd4_open *open) 2582 struct nfsd4_open *open)
2581 { 2583 {
2582 clientid_t *clientid = &open->op_clientid; 2584 clientid_t *clientid = &open->op_clientid;
2583 struct nfs4_client *clp = NULL; 2585 struct nfs4_client *clp = NULL;
2584 unsigned int strhashval; 2586 unsigned int strhashval;
2585 struct nfs4_openowner *oo = NULL; 2587 struct nfs4_openowner *oo = NULL;
2586 __be32 status; 2588 __be32 status;
2587 2589
2588 if (STALE_CLIENTID(&open->op_clientid)) 2590 if (STALE_CLIENTID(&open->op_clientid))
2589 return nfserr_stale_clientid; 2591 return nfserr_stale_clientid;
2590 /* 2592 /*
2591 * In case we need it later, after we've already created the 2593 * In case we need it later, after we've already created the
2592 * file and don't want to risk a further failure: 2594 * file and don't want to risk a further failure:
2593 */ 2595 */
2594 open->op_file = nfsd4_alloc_file(); 2596 open->op_file = nfsd4_alloc_file();
2595 if (open->op_file == NULL) 2597 if (open->op_file == NULL)
2596 return nfserr_jukebox; 2598 return nfserr_jukebox;
2597 2599
2598 strhashval = ownerstr_hashval(clientid->cl_id, &open->op_owner); 2600 strhashval = ownerstr_hashval(clientid->cl_id, &open->op_owner);
2599 oo = find_openstateowner_str(strhashval, open); 2601 oo = find_openstateowner_str(strhashval, open);
2600 open->op_openowner = oo; 2602 open->op_openowner = oo;
2601 if (!oo) { 2603 if (!oo) {
2602 clp = find_confirmed_client(clientid); 2604 clp = find_confirmed_client(clientid);
2603 if (clp == NULL) 2605 if (clp == NULL)
2604 return nfserr_expired; 2606 return nfserr_expired;
2605 goto new_owner; 2607 goto new_owner;
2606 } 2608 }
2607 if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) { 2609 if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
2608 /* Replace unconfirmed owners without checking for replay. */ 2610 /* Replace unconfirmed owners without checking for replay. */
2609 clp = oo->oo_owner.so_client; 2611 clp = oo->oo_owner.so_client;
2610 release_openowner(oo); 2612 release_openowner(oo);
2611 open->op_openowner = NULL; 2613 open->op_openowner = NULL;
2612 goto new_owner; 2614 goto new_owner;
2613 } 2615 }
2614 status = nfsd4_check_seqid(cstate, &oo->oo_owner, open->op_seqid); 2616 status = nfsd4_check_seqid(cstate, &oo->oo_owner, open->op_seqid);
2615 if (status) 2617 if (status)
2616 return status; 2618 return status;
2617 clp = oo->oo_owner.so_client; 2619 clp = oo->oo_owner.so_client;
2618 goto alloc_stateid; 2620 goto alloc_stateid;
2619 new_owner: 2621 new_owner:
2620 oo = alloc_init_open_stateowner(strhashval, clp, open); 2622 oo = alloc_init_open_stateowner(strhashval, clp, open);
2621 if (oo == NULL) 2623 if (oo == NULL)
2622 return nfserr_jukebox; 2624 return nfserr_jukebox;
2623 open->op_openowner = oo; 2625 open->op_openowner = oo;
2624 alloc_stateid: 2626 alloc_stateid:
2625 open->op_stp = nfs4_alloc_stateid(clp); 2627 open->op_stp = nfs4_alloc_stateid(clp);
2626 if (!open->op_stp) 2628 if (!open->op_stp)
2627 return nfserr_jukebox; 2629 return nfserr_jukebox;
2628 return nfs_ok; 2630 return nfs_ok;
2629 } 2631 }
2630 2632
2631 static inline __be32 2633 static inline __be32
2632 nfs4_check_delegmode(struct nfs4_delegation *dp, int flags) 2634 nfs4_check_delegmode(struct nfs4_delegation *dp, int flags)
2633 { 2635 {
2634 if ((flags & WR_STATE) && (dp->dl_type == NFS4_OPEN_DELEGATE_READ)) 2636 if ((flags & WR_STATE) && (dp->dl_type == NFS4_OPEN_DELEGATE_READ))
2635 return nfserr_openmode; 2637 return nfserr_openmode;
2636 else 2638 else
2637 return nfs_ok; 2639 return nfs_ok;
2638 } 2640 }
2639 2641
2640 static int share_access_to_flags(u32 share_access) 2642 static int share_access_to_flags(u32 share_access)
2641 { 2643 {
2642 return share_access == NFS4_SHARE_ACCESS_READ ? RD_STATE : WR_STATE; 2644 return share_access == NFS4_SHARE_ACCESS_READ ? RD_STATE : WR_STATE;
2643 } 2645 }
2644 2646
2645 static struct nfs4_delegation *find_deleg_stateid(struct nfs4_client *cl, stateid_t *s) 2647 static struct nfs4_delegation *find_deleg_stateid(struct nfs4_client *cl, stateid_t *s)
2646 { 2648 {
2647 struct nfs4_stid *ret; 2649 struct nfs4_stid *ret;
2648 2650
2649 ret = find_stateid_by_type(cl, s, NFS4_DELEG_STID); 2651 ret = find_stateid_by_type(cl, s, NFS4_DELEG_STID);
2650 if (!ret) 2652 if (!ret)
2651 return NULL; 2653 return NULL;
2652 return delegstateid(ret); 2654 return delegstateid(ret);
2653 } 2655 }
2654 2656
2655 static bool nfsd4_is_deleg_cur(struct nfsd4_open *open) 2657 static bool nfsd4_is_deleg_cur(struct nfsd4_open *open)
2656 { 2658 {
2657 return open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR || 2659 return open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR ||
2658 open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH; 2660 open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH;
2659 } 2661 }
2660 2662
2661 static __be32 2663 static __be32
2662 nfs4_check_deleg(struct nfs4_client *cl, struct nfs4_file *fp, struct nfsd4_open *open, 2664 nfs4_check_deleg(struct nfs4_client *cl, struct nfs4_file *fp, struct nfsd4_open *open,
2663 struct nfs4_delegation **dp) 2665 struct nfs4_delegation **dp)
2664 { 2666 {
2665 int flags; 2667 int flags;
2666 __be32 status = nfserr_bad_stateid; 2668 __be32 status = nfserr_bad_stateid;
2667 2669
2668 *dp = find_deleg_stateid(cl, &open->op_delegate_stateid); 2670 *dp = find_deleg_stateid(cl, &open->op_delegate_stateid);
2669 if (*dp == NULL) 2671 if (*dp == NULL)
2670 goto out; 2672 goto out;
2671 flags = share_access_to_flags(open->op_share_access); 2673 flags = share_access_to_flags(open->op_share_access);
2672 status = nfs4_check_delegmode(*dp, flags); 2674 status = nfs4_check_delegmode(*dp, flags);
2673 if (status) 2675 if (status)
2674 *dp = NULL; 2676 *dp = NULL;
2675 out: 2677 out:
2676 if (!nfsd4_is_deleg_cur(open)) 2678 if (!nfsd4_is_deleg_cur(open))
2677 return nfs_ok; 2679 return nfs_ok;
2678 if (status) 2680 if (status)
2679 return status; 2681 return status;
2680 open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED; 2682 open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
2681 return nfs_ok; 2683 return nfs_ok;
2682 } 2684 }
2683 2685
2684 static __be32 2686 static __be32
2685 nfs4_check_open(struct nfs4_file *fp, struct nfsd4_open *open, struct nfs4_ol_stateid **stpp) 2687 nfs4_check_open(struct nfs4_file *fp, struct nfsd4_open *open, struct nfs4_ol_stateid **stpp)
2686 { 2688 {
2687 struct nfs4_ol_stateid *local; 2689 struct nfs4_ol_stateid *local;
2688 struct nfs4_openowner *oo = open->op_openowner; 2690 struct nfs4_openowner *oo = open->op_openowner;
2689 2691
2690 list_for_each_entry(local, &fp->fi_stateids, st_perfile) { 2692 list_for_each_entry(local, &fp->fi_stateids, st_perfile) {
2691 /* ignore lock owners */ 2693 /* ignore lock owners */
2692 if (local->st_stateowner->so_is_open_owner == 0) 2694 if (local->st_stateowner->so_is_open_owner == 0)
2693 continue; 2695 continue;
2694 /* remember if we have seen this open owner */ 2696 /* remember if we have seen this open owner */
2695 if (local->st_stateowner == &oo->oo_owner) 2697 if (local->st_stateowner == &oo->oo_owner)
2696 *stpp = local; 2698 *stpp = local;
2697 /* check for conflicting share reservations */ 2699 /* check for conflicting share reservations */
2698 if (!test_share(local, open)) 2700 if (!test_share(local, open))
2699 return nfserr_share_denied; 2701 return nfserr_share_denied;
2700 } 2702 }
2701 return nfs_ok; 2703 return nfs_ok;
2702 } 2704 }
2703 2705
2704 static void nfs4_free_stateid(struct nfs4_ol_stateid *s) 2706 static void nfs4_free_stateid(struct nfs4_ol_stateid *s)
2705 { 2707 {
2706 kmem_cache_free(stateid_slab, s); 2708 kmem_cache_free(stateid_slab, s);
2707 } 2709 }
2708 2710
2709 static inline int nfs4_access_to_access(u32 nfs4_access) 2711 static inline int nfs4_access_to_access(u32 nfs4_access)
2710 { 2712 {
2711 int flags = 0; 2713 int flags = 0;
2712 2714
2713 if (nfs4_access & NFS4_SHARE_ACCESS_READ) 2715 if (nfs4_access & NFS4_SHARE_ACCESS_READ)
2714 flags |= NFSD_MAY_READ; 2716 flags |= NFSD_MAY_READ;
2715 if (nfs4_access & NFS4_SHARE_ACCESS_WRITE) 2717 if (nfs4_access & NFS4_SHARE_ACCESS_WRITE)
2716 flags |= NFSD_MAY_WRITE; 2718 flags |= NFSD_MAY_WRITE;
2717 return flags; 2719 return flags;
2718 } 2720 }
2719 2721
2720 static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp, 2722 static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp,
2721 struct svc_fh *cur_fh, struct nfsd4_open *open) 2723 struct svc_fh *cur_fh, struct nfsd4_open *open)
2722 { 2724 {
2723 __be32 status; 2725 __be32 status;
2724 int oflag = nfs4_access_to_omode(open->op_share_access); 2726 int oflag = nfs4_access_to_omode(open->op_share_access);
2725 int access = nfs4_access_to_access(open->op_share_access); 2727 int access = nfs4_access_to_access(open->op_share_access);
2726 2728
2727 if (!fp->fi_fds[oflag]) { 2729 if (!fp->fi_fds[oflag]) {
2728 status = nfsd_open(rqstp, cur_fh, S_IFREG, access, 2730 status = nfsd_open(rqstp, cur_fh, S_IFREG, access,
2729 &fp->fi_fds[oflag]); 2731 &fp->fi_fds[oflag]);
2730 if (status) 2732 if (status)
2731 return status; 2733 return status;
2732 } 2734 }
2733 nfs4_file_get_access(fp, oflag); 2735 nfs4_file_get_access(fp, oflag);
2734 2736
2735 return nfs_ok; 2737 return nfs_ok;
2736 } 2738 }
2737 2739
2738 static inline __be32 2740 static inline __be32
2739 nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh, 2741 nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh,
2740 struct nfsd4_open *open) 2742 struct nfsd4_open *open)
2741 { 2743 {
2742 struct iattr iattr = { 2744 struct iattr iattr = {
2743 .ia_valid = ATTR_SIZE, 2745 .ia_valid = ATTR_SIZE,
2744 .ia_size = 0, 2746 .ia_size = 0,
2745 }; 2747 };
2746 if (!open->op_truncate) 2748 if (!open->op_truncate)
2747 return 0; 2749 return 0;
2748 if (!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE)) 2750 if (!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE))
2749 return nfserr_inval; 2751 return nfserr_inval;
2750 return nfsd_setattr(rqstp, fh, &iattr, 0, (time_t)0); 2752 return nfsd_setattr(rqstp, fh, &iattr, 0, (time_t)0);
2751 } 2753 }
2752 2754
2753 static __be32 2755 static __be32
2754 nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp, struct nfsd4_open *open) 2756 nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp, struct nfsd4_open *open)
2755 { 2757 {
2756 u32 op_share_access = open->op_share_access; 2758 u32 op_share_access = open->op_share_access;
2757 bool new_access; 2759 bool new_access;
2758 __be32 status; 2760 __be32 status;
2759 2761
2760 new_access = !test_access(op_share_access, stp); 2762 new_access = !test_access(op_share_access, stp);
2761 if (new_access) { 2763 if (new_access) {
2762 status = nfs4_get_vfs_file(rqstp, fp, cur_fh, open); 2764 status = nfs4_get_vfs_file(rqstp, fp, cur_fh, open);
2763 if (status) 2765 if (status)
2764 return status; 2766 return status;
2765 } 2767 }
2766 status = nfsd4_truncate(rqstp, cur_fh, open); 2768 status = nfsd4_truncate(rqstp, cur_fh, open);
2767 if (status) { 2769 if (status) {
2768 if (new_access) { 2770 if (new_access) {
2769 int oflag = nfs4_access_to_omode(op_share_access); 2771 int oflag = nfs4_access_to_omode(op_share_access);
2770 nfs4_file_put_access(fp, oflag); 2772 nfs4_file_put_access(fp, oflag);
2771 } 2773 }
2772 return status; 2774 return status;
2773 } 2775 }
2774 /* remember the open */ 2776 /* remember the open */
2775 set_access(op_share_access, stp); 2777 set_access(op_share_access, stp);
2776 set_deny(open->op_share_deny, stp); 2778 set_deny(open->op_share_deny, stp);
2777 2779
2778 return nfs_ok; 2780 return nfs_ok;
2779 } 2781 }
2780 2782
2781 2783
2782 static void 2784 static void
2783 nfs4_set_claim_prev(struct nfsd4_open *open, bool has_session) 2785 nfs4_set_claim_prev(struct nfsd4_open *open, bool has_session)
2784 { 2786 {
2785 open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED; 2787 open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
2786 } 2788 }
2787 2789
2788 /* Should we give out recallable state?: */ 2790 /* Should we give out recallable state?: */
2789 static bool nfsd4_cb_channel_good(struct nfs4_client *clp) 2791 static bool nfsd4_cb_channel_good(struct nfs4_client *clp)
2790 { 2792 {
2791 if (clp->cl_cb_state == NFSD4_CB_UP) 2793 if (clp->cl_cb_state == NFSD4_CB_UP)
2792 return true; 2794 return true;
2793 /* 2795 /*
2794 * In the sessions case, since we don't have to establish a 2796 * In the sessions case, since we don't have to establish a
2795 * separate connection for callbacks, we assume it's OK 2797 * separate connection for callbacks, we assume it's OK
2796 * until we hear otherwise: 2798 * until we hear otherwise:
2797 */ 2799 */
2798 return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN; 2800 return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN;
2799 } 2801 }
2800 2802
2801 static struct file_lock *nfs4_alloc_init_lease(struct nfs4_delegation *dp, int flag) 2803 static struct file_lock *nfs4_alloc_init_lease(struct nfs4_delegation *dp, int flag)
2802 { 2804 {
2803 struct file_lock *fl; 2805 struct file_lock *fl;
2804 2806
2805 fl = locks_alloc_lock(); 2807 fl = locks_alloc_lock();
2806 if (!fl) 2808 if (!fl)
2807 return NULL; 2809 return NULL;
2808 locks_init_lock(fl); 2810 locks_init_lock(fl);
2809 fl->fl_lmops = &nfsd_lease_mng_ops; 2811 fl->fl_lmops = &nfsd_lease_mng_ops;
2810 fl->fl_flags = FL_LEASE; 2812 fl->fl_flags = FL_LEASE;
2811 fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ? F_RDLCK: F_WRLCK; 2813 fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ? F_RDLCK: F_WRLCK;
2812 fl->fl_end = OFFSET_MAX; 2814 fl->fl_end = OFFSET_MAX;
2813 fl->fl_owner = (fl_owner_t)(dp->dl_file); 2815 fl->fl_owner = (fl_owner_t)(dp->dl_file);
2814 fl->fl_pid = current->tgid; 2816 fl->fl_pid = current->tgid;
2815 return fl; 2817 return fl;
2816 } 2818 }
2817 2819
2818 static int nfs4_setlease(struct nfs4_delegation *dp, int flag) 2820 static int nfs4_setlease(struct nfs4_delegation *dp, int flag)
2819 { 2821 {
2820 struct nfs4_file *fp = dp->dl_file; 2822 struct nfs4_file *fp = dp->dl_file;
2821 struct file_lock *fl; 2823 struct file_lock *fl;
2822 int status; 2824 int status;
2823 2825
2824 fl = nfs4_alloc_init_lease(dp, flag); 2826 fl = nfs4_alloc_init_lease(dp, flag);
2825 if (!fl) 2827 if (!fl)
2826 return -ENOMEM; 2828 return -ENOMEM;
2827 fl->fl_file = find_readable_file(fp); 2829 fl->fl_file = find_readable_file(fp);
2828 list_add(&dp->dl_perclnt, &dp->dl_stid.sc_client->cl_delegations); 2830 list_add(&dp->dl_perclnt, &dp->dl_stid.sc_client->cl_delegations);
2829 status = vfs_setlease(fl->fl_file, fl->fl_type, &fl); 2831 status = vfs_setlease(fl->fl_file, fl->fl_type, &fl);
2830 if (status) { 2832 if (status) {
2831 list_del_init(&dp->dl_perclnt); 2833 list_del_init(&dp->dl_perclnt);
2832 locks_free_lock(fl); 2834 locks_free_lock(fl);
2833 return -ENOMEM; 2835 return -ENOMEM;
2834 } 2836 }
2835 fp->fi_lease = fl; 2837 fp->fi_lease = fl;
2836 fp->fi_deleg_file = fl->fl_file; 2838 fp->fi_deleg_file = fl->fl_file;
2837 get_file(fp->fi_deleg_file); 2839 get_file(fp->fi_deleg_file);
2838 atomic_set(&fp->fi_delegees, 1); 2840 atomic_set(&fp->fi_delegees, 1);
2839 list_add(&dp->dl_perfile, &fp->fi_delegations); 2841 list_add(&dp->dl_perfile, &fp->fi_delegations);
2840 return 0; 2842 return 0;
2841 } 2843 }
2842 2844
2843 static int nfs4_set_delegation(struct nfs4_delegation *dp, int flag) 2845 static int nfs4_set_delegation(struct nfs4_delegation *dp, int flag)
2844 { 2846 {
2845 struct nfs4_file *fp = dp->dl_file; 2847 struct nfs4_file *fp = dp->dl_file;
2846 2848
2847 if (!fp->fi_lease) 2849 if (!fp->fi_lease)
2848 return nfs4_setlease(dp, flag); 2850 return nfs4_setlease(dp, flag);
2849 spin_lock(&recall_lock); 2851 spin_lock(&recall_lock);
2850 if (fp->fi_had_conflict) { 2852 if (fp->fi_had_conflict) {
2851 spin_unlock(&recall_lock); 2853 spin_unlock(&recall_lock);
2852 return -EAGAIN; 2854 return -EAGAIN;
2853 } 2855 }
2854 atomic_inc(&fp->fi_delegees); 2856 atomic_inc(&fp->fi_delegees);
2855 list_add(&dp->dl_perfile, &fp->fi_delegations); 2857 list_add(&dp->dl_perfile, &fp->fi_delegations);
2856 spin_unlock(&recall_lock); 2858 spin_unlock(&recall_lock);
2857 list_add(&dp->dl_perclnt, &dp->dl_stid.sc_client->cl_delegations); 2859 list_add(&dp->dl_perclnt, &dp->dl_stid.sc_client->cl_delegations);
2858 return 0; 2860 return 0;
2859 } 2861 }
2860 2862
2861 static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status) 2863 static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status)
2862 { 2864 {
2863 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT; 2865 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
2864 if (status == -EAGAIN) 2866 if (status == -EAGAIN)
2865 open->op_why_no_deleg = WND4_CONTENTION; 2867 open->op_why_no_deleg = WND4_CONTENTION;
2866 else { 2868 else {
2867 open->op_why_no_deleg = WND4_RESOURCE; 2869 open->op_why_no_deleg = WND4_RESOURCE;
2868 switch (open->op_deleg_want) { 2870 switch (open->op_deleg_want) {
2869 case NFS4_SHARE_WANT_READ_DELEG: 2871 case NFS4_SHARE_WANT_READ_DELEG:
2870 case NFS4_SHARE_WANT_WRITE_DELEG: 2872 case NFS4_SHARE_WANT_WRITE_DELEG:
2871 case NFS4_SHARE_WANT_ANY_DELEG: 2873 case NFS4_SHARE_WANT_ANY_DELEG:
2872 break; 2874 break;
2873 case NFS4_SHARE_WANT_CANCEL: 2875 case NFS4_SHARE_WANT_CANCEL:
2874 open->op_why_no_deleg = WND4_CANCELLED; 2876 open->op_why_no_deleg = WND4_CANCELLED;
2875 break; 2877 break;
2876 case NFS4_SHARE_WANT_NO_DELEG: 2878 case NFS4_SHARE_WANT_NO_DELEG:
2877 BUG(); /* not supposed to get here */ 2879 BUG(); /* not supposed to get here */
2878 } 2880 }
2879 } 2881 }
2880 } 2882 }
2881 2883
2882 /* 2884 /*
2883 * Attempt to hand out a delegation. 2885 * Attempt to hand out a delegation.
2884 */ 2886 */
2885 static void 2887 static void
2886 nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, struct nfs4_ol_stateid *stp) 2888 nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, struct nfs4_ol_stateid *stp)
2887 { 2889 {
2888 struct nfs4_delegation *dp; 2890 struct nfs4_delegation *dp;
2889 struct nfs4_openowner *oo = container_of(stp->st_stateowner, struct nfs4_openowner, oo_owner); 2891 struct nfs4_openowner *oo = container_of(stp->st_stateowner, struct nfs4_openowner, oo_owner);
2890 int cb_up; 2892 int cb_up;
2891 int status = 0, flag = 0; 2893 int status = 0, flag = 0;
2892 2894
2893 cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client); 2895 cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client);
2894 flag = NFS4_OPEN_DELEGATE_NONE; 2896 flag = NFS4_OPEN_DELEGATE_NONE;
2895 open->op_recall = 0; 2897 open->op_recall = 0;
2896 switch (open->op_claim_type) { 2898 switch (open->op_claim_type) {
2897 case NFS4_OPEN_CLAIM_PREVIOUS: 2899 case NFS4_OPEN_CLAIM_PREVIOUS:
2898 if (!cb_up) 2900 if (!cb_up)
2899 open->op_recall = 1; 2901 open->op_recall = 1;
2900 flag = open->op_delegate_type; 2902 flag = open->op_delegate_type;
2901 if (flag == NFS4_OPEN_DELEGATE_NONE) 2903 if (flag == NFS4_OPEN_DELEGATE_NONE)
2902 goto out; 2904 goto out;
2903 break; 2905 break;
2904 case NFS4_OPEN_CLAIM_NULL: 2906 case NFS4_OPEN_CLAIM_NULL:
2905 /* Let's not give out any delegations till everyone's 2907 /* Let's not give out any delegations till everyone's
2906 * had the chance to reclaim theirs.... */ 2908 * had the chance to reclaim theirs.... */
2907 if (locks_in_grace()) 2909 if (locks_in_grace())
2908 goto out; 2910 goto out;
2909 if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED)) 2911 if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED))
2910 goto out; 2912 goto out;
2911 if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE) 2913 if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE)
2912 flag = NFS4_OPEN_DELEGATE_WRITE; 2914 flag = NFS4_OPEN_DELEGATE_WRITE;
2913 else 2915 else
2914 flag = NFS4_OPEN_DELEGATE_READ; 2916 flag = NFS4_OPEN_DELEGATE_READ;
2915 break; 2917 break;
2916 default: 2918 default:
2917 goto out; 2919 goto out;
2918 } 2920 }
2919 2921
2920 dp = alloc_init_deleg(oo->oo_owner.so_client, stp, fh, flag); 2922 dp = alloc_init_deleg(oo->oo_owner.so_client, stp, fh, flag);
2921 if (dp == NULL) 2923 if (dp == NULL)
2922 goto out_no_deleg; 2924 goto out_no_deleg;
2923 status = nfs4_set_delegation(dp, flag); 2925 status = nfs4_set_delegation(dp, flag);
2924 if (status) 2926 if (status)
2925 goto out_free; 2927 goto out_free;
2926 2928
2927 memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid)); 2929 memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid));
2928 2930
2929 dprintk("NFSD: delegation stateid=" STATEID_FMT "\n", 2931 dprintk("NFSD: delegation stateid=" STATEID_FMT "\n",
2930 STATEID_VAL(&dp->dl_stid.sc_stateid)); 2932 STATEID_VAL(&dp->dl_stid.sc_stateid));
2931 out: 2933 out:
2932 open->op_delegate_type = flag; 2934 open->op_delegate_type = flag;
2933 if (flag == NFS4_OPEN_DELEGATE_NONE) { 2935 if (flag == NFS4_OPEN_DELEGATE_NONE) {
2934 if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS && 2936 if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS &&
2935 open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE) 2937 open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE)
2936 dprintk("NFSD: WARNING: refusing delegation reclaim\n"); 2938 dprintk("NFSD: WARNING: refusing delegation reclaim\n");
2937 2939
2938 /* 4.1 client asking for a delegation? */ 2940 /* 4.1 client asking for a delegation? */
2939 if (open->op_deleg_want) 2941 if (open->op_deleg_want)
2940 nfsd4_open_deleg_none_ext(open, status); 2942 nfsd4_open_deleg_none_ext(open, status);
2941 } 2943 }
2942 return; 2944 return;
2943 out_free: 2945 out_free:
2944 nfs4_put_delegation(dp); 2946 nfs4_put_delegation(dp);
2945 out_no_deleg: 2947 out_no_deleg:
2946 flag = NFS4_OPEN_DELEGATE_NONE; 2948 flag = NFS4_OPEN_DELEGATE_NONE;
2947 goto out; 2949 goto out;
2948 } 2950 }
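Note on the decision above: for a CLAIM_NULL open, nfs4_open_delegation() only hands out a delegation when the callback channel looks usable, the grace period is over, and the openowner is confirmed; a write open gets a write delegation, anything else a read delegation. The locks_in_grace() test here is still the host-global one; with the grace manager now living in the per-namespace nfsd_net, this check would presumably also need a per-namespace form eventually (an assumption, not part of this commit). A condensed, illustrative restatement of the CLAIM_NULL branch only, not a kernel helper:

	static int example_claim_null_deleg(bool cb_up, bool oo_confirmed,
					    bool in_grace, u32 share_access)
	{
		/* Mirrors the CLAIM_NULL case above: no callback channel, an
		 * unconfirmed openowner, or an active grace period means no
		 * delegation at all. */
		if (in_grace || !cb_up || !oo_confirmed)
			return NFS4_OPEN_DELEGATE_NONE;
		/* Otherwise the delegation type follows the requested access. */
		return (share_access & NFS4_SHARE_ACCESS_WRITE) ?
			NFS4_OPEN_DELEGATE_WRITE : NFS4_OPEN_DELEGATE_READ;
	}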
2949 2951
2950 static void nfsd4_deleg_xgrade_none_ext(struct nfsd4_open *open, 2952 static void nfsd4_deleg_xgrade_none_ext(struct nfsd4_open *open,
2951 struct nfs4_delegation *dp) 2953 struct nfs4_delegation *dp)
2952 { 2954 {
2953 if (open->op_deleg_want == NFS4_SHARE_WANT_READ_DELEG && 2955 if (open->op_deleg_want == NFS4_SHARE_WANT_READ_DELEG &&
2954 dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) { 2956 dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
2955 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT; 2957 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
2956 open->op_why_no_deleg = WND4_NOT_SUPP_DOWNGRADE; 2958 open->op_why_no_deleg = WND4_NOT_SUPP_DOWNGRADE;
2957 } else if (open->op_deleg_want == NFS4_SHARE_WANT_WRITE_DELEG && 2959 } else if (open->op_deleg_want == NFS4_SHARE_WANT_WRITE_DELEG &&
2958 dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) { 2960 dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
2959 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT; 2961 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
2960 open->op_why_no_deleg = WND4_NOT_SUPP_UPGRADE; 2962 open->op_why_no_deleg = WND4_NOT_SUPP_UPGRADE;
2961 } 2963 }
2962 /* Otherwise the client must be confused wanting a delegation 2964 /* Otherwise the client must be confused wanting a delegation
2963 * it already has, therefore we don't return 2965 * it already has, therefore we don't return
2964 * NFS4_OPEN_DELEGATE_NONE_EXT and reason. 2966 * NFS4_OPEN_DELEGATE_NONE_EXT and reason.
2965 */ 2967 */
2966 } 2968 }
2967 2969
2968 /* 2970 /*
2969 * called with nfs4_lock_state() held. 2971 * called with nfs4_lock_state() held.
2970 */ 2972 */
2971 __be32 2973 __be32
2972 nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open) 2974 nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
2973 { 2975 {
2974 struct nfsd4_compoundres *resp = rqstp->rq_resp; 2976 struct nfsd4_compoundres *resp = rqstp->rq_resp;
2975 struct nfs4_client *cl = open->op_openowner->oo_owner.so_client; 2977 struct nfs4_client *cl = open->op_openowner->oo_owner.so_client;
2976 struct nfs4_file *fp = NULL; 2978 struct nfs4_file *fp = NULL;
2977 struct inode *ino = current_fh->fh_dentry->d_inode; 2979 struct inode *ino = current_fh->fh_dentry->d_inode;
2978 struct nfs4_ol_stateid *stp = NULL; 2980 struct nfs4_ol_stateid *stp = NULL;
2979 struct nfs4_delegation *dp = NULL; 2981 struct nfs4_delegation *dp = NULL;
2980 __be32 status; 2982 __be32 status;
2981 2983
2982 /* 2984 /*
2983 * Lookup file; if found, lookup stateid and check open request, 2985 * Lookup file; if found, lookup stateid and check open request,
2984 * and check for delegations in the process of being recalled. 2986 * and check for delegations in the process of being recalled.
2985 * If not found, create the nfs4_file struct 2987 * If not found, create the nfs4_file struct
2986 */ 2988 */
2987 fp = find_file(ino); 2989 fp = find_file(ino);
2988 if (fp) { 2990 if (fp) {
2989 if ((status = nfs4_check_open(fp, open, &stp))) 2991 if ((status = nfs4_check_open(fp, open, &stp)))
2990 goto out; 2992 goto out;
2991 status = nfs4_check_deleg(cl, fp, open, &dp); 2993 status = nfs4_check_deleg(cl, fp, open, &dp);
2992 if (status) 2994 if (status)
2993 goto out; 2995 goto out;
2994 } else { 2996 } else {
2995 status = nfserr_bad_stateid; 2997 status = nfserr_bad_stateid;
2996 if (nfsd4_is_deleg_cur(open)) 2998 if (nfsd4_is_deleg_cur(open))
2997 goto out; 2999 goto out;
2998 status = nfserr_jukebox; 3000 status = nfserr_jukebox;
2999 fp = open->op_file; 3001 fp = open->op_file;
3000 open->op_file = NULL; 3002 open->op_file = NULL;
3001 nfsd4_init_file(fp, ino); 3003 nfsd4_init_file(fp, ino);
3002 } 3004 }
3003 3005
3004 /* 3006 /*
3005 * OPEN the file, or upgrade an existing OPEN. 3007 * OPEN the file, or upgrade an existing OPEN.
3006 * If truncate fails, the OPEN fails. 3008 * If truncate fails, the OPEN fails.
3007 */ 3009 */
3008 if (stp) { 3010 if (stp) {
3009 /* Stateid was found, this is an OPEN upgrade */ 3011 /* Stateid was found, this is an OPEN upgrade */
3010 status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open); 3012 status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
3011 if (status) 3013 if (status)
3012 goto out; 3014 goto out;
3013 } else { 3015 } else {
3014 status = nfs4_get_vfs_file(rqstp, fp, current_fh, open); 3016 status = nfs4_get_vfs_file(rqstp, fp, current_fh, open);
3015 if (status) 3017 if (status)
3016 goto out; 3018 goto out;
3017 status = nfsd4_truncate(rqstp, current_fh, open); 3019 status = nfsd4_truncate(rqstp, current_fh, open);
3018 if (status) 3020 if (status)
3019 goto out; 3021 goto out;
3020 stp = open->op_stp; 3022 stp = open->op_stp;
3021 open->op_stp = NULL; 3023 open->op_stp = NULL;
3022 init_open_stateid(stp, fp, open); 3024 init_open_stateid(stp, fp, open);
3023 } 3025 }
3024 update_stateid(&stp->st_stid.sc_stateid); 3026 update_stateid(&stp->st_stid.sc_stateid);
3025 memcpy(&open->op_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t)); 3027 memcpy(&open->op_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
3026 3028
3027 if (nfsd4_has_session(&resp->cstate)) { 3029 if (nfsd4_has_session(&resp->cstate)) {
3028 open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED; 3030 open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
3029 3031
3030 if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) { 3032 if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) {
3031 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT; 3033 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
3032 open->op_why_no_deleg = WND4_NOT_WANTED; 3034 open->op_why_no_deleg = WND4_NOT_WANTED;
3033 goto nodeleg; 3035 goto nodeleg;
3034 } 3036 }
3035 } 3037 }
3036 3038
3037 /* 3039 /*
3038 * Attempt to hand out a delegation. No error return, because the 3040 * Attempt to hand out a delegation. No error return, because the
3039 * OPEN succeeds even if we fail. 3041 * OPEN succeeds even if we fail.
3040 */ 3042 */
3041 nfs4_open_delegation(current_fh, open, stp); 3043 nfs4_open_delegation(current_fh, open, stp);
3042 nodeleg: 3044 nodeleg:
3043 status = nfs_ok; 3045 status = nfs_ok;
3044 3046
3045 dprintk("%s: stateid=" STATEID_FMT "\n", __func__, 3047 dprintk("%s: stateid=" STATEID_FMT "\n", __func__,
3046 STATEID_VAL(&stp->st_stid.sc_stateid)); 3048 STATEID_VAL(&stp->st_stid.sc_stateid));
3047 out: 3049 out:
3048 /* 4.1 client trying to upgrade/downgrade delegation? */ 3050 /* 4.1 client trying to upgrade/downgrade delegation? */
3049 if (open->op_delegate_type == NFS4_OPEN_DELEGATE_NONE && dp && 3051 if (open->op_delegate_type == NFS4_OPEN_DELEGATE_NONE && dp &&
3050 open->op_deleg_want) 3052 open->op_deleg_want)
3051 nfsd4_deleg_xgrade_none_ext(open, dp); 3053 nfsd4_deleg_xgrade_none_ext(open, dp);
3052 3054
3053 if (fp) 3055 if (fp)
3054 put_nfs4_file(fp); 3056 put_nfs4_file(fp);
3055 if (status == 0 && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS) 3057 if (status == 0 && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS)
3056 nfs4_set_claim_prev(open, nfsd4_has_session(&resp->cstate)); 3058 nfs4_set_claim_prev(open, nfsd4_has_session(&resp->cstate));
3057 /* 3059 /*
3058 * To finish the open response, we just need to set the rflags. 3060 * To finish the open response, we just need to set the rflags.
3059 */ 3061 */
3060 open->op_rflags = NFS4_OPEN_RESULT_LOCKTYPE_POSIX; 3062 open->op_rflags = NFS4_OPEN_RESULT_LOCKTYPE_POSIX;
3061 if (!(open->op_openowner->oo_flags & NFS4_OO_CONFIRMED) && 3063 if (!(open->op_openowner->oo_flags & NFS4_OO_CONFIRMED) &&
3062 !nfsd4_has_session(&resp->cstate)) 3064 !nfsd4_has_session(&resp->cstate))
3063 open->op_rflags |= NFS4_OPEN_RESULT_CONFIRM; 3065 open->op_rflags |= NFS4_OPEN_RESULT_CONFIRM;
3064 3066
3065 return status; 3067 return status;
3066 } 3068 }
3067 3069
3068 void nfsd4_cleanup_open_state(struct nfsd4_open *open, __be32 status) 3070 void nfsd4_cleanup_open_state(struct nfsd4_open *open, __be32 status)
3069 { 3071 {
3070 if (open->op_openowner) { 3072 if (open->op_openowner) {
3071 struct nfs4_openowner *oo = open->op_openowner; 3073 struct nfs4_openowner *oo = open->op_openowner;
3072 3074
3073 if (!list_empty(&oo->oo_owner.so_stateids)) 3075 if (!list_empty(&oo->oo_owner.so_stateids))
3074 list_del_init(&oo->oo_close_lru); 3076 list_del_init(&oo->oo_close_lru);
3075 if (oo->oo_flags & NFS4_OO_NEW) { 3077 if (oo->oo_flags & NFS4_OO_NEW) {
3076 if (status) { 3078 if (status) {
3077 release_openowner(oo); 3079 release_openowner(oo);
3078 open->op_openowner = NULL; 3080 open->op_openowner = NULL;
3079 } else 3081 } else
3080 oo->oo_flags &= ~NFS4_OO_NEW; 3082 oo->oo_flags &= ~NFS4_OO_NEW;
3081 } 3083 }
3082 } 3084 }
3083 if (open->op_file) 3085 if (open->op_file)
3084 nfsd4_free_file(open->op_file); 3086 nfsd4_free_file(open->op_file);
3085 if (open->op_stp) 3087 if (open->op_stp)
3086 nfs4_free_stateid(open->op_stp); 3088 nfs4_free_stateid(open->op_stp);
3087 } 3089 }
3088 3090
3089 __be32 3091 __be32
3090 nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 3092 nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3091 clientid_t *clid) 3093 clientid_t *clid)
3092 { 3094 {
3093 struct nfs4_client *clp; 3095 struct nfs4_client *clp;
3094 __be32 status; 3096 __be32 status;
3095 3097
3096 nfs4_lock_state(); 3098 nfs4_lock_state();
3097 dprintk("process_renew(%08x/%08x): starting\n", 3099 dprintk("process_renew(%08x/%08x): starting\n",
3098 clid->cl_boot, clid->cl_id); 3100 clid->cl_boot, clid->cl_id);
3099 status = nfserr_stale_clientid; 3101 status = nfserr_stale_clientid;
3100 if (STALE_CLIENTID(clid)) 3102 if (STALE_CLIENTID(clid))
3101 goto out; 3103 goto out;
3102 clp = find_confirmed_client(clid); 3104 clp = find_confirmed_client(clid);
3103 status = nfserr_expired; 3105 status = nfserr_expired;
3104 if (clp == NULL) { 3106 if (clp == NULL) {
3105 /* We assume the client took too long to RENEW. */ 3107 /* We assume the client took too long to RENEW. */
3106 dprintk("nfsd4_renew: clientid not found!\n"); 3108 dprintk("nfsd4_renew: clientid not found!\n");
3107 goto out; 3109 goto out;
3108 } 3110 }
3109 status = nfserr_cb_path_down; 3111 status = nfserr_cb_path_down;
3110 if (!list_empty(&clp->cl_delegations) 3112 if (!list_empty(&clp->cl_delegations)
3111 && clp->cl_cb_state != NFSD4_CB_UP) 3113 && clp->cl_cb_state != NFSD4_CB_UP)
3112 goto out; 3114 goto out;
3113 status = nfs_ok; 3115 status = nfs_ok;
3114 out: 3116 out:
3115 nfs4_unlock_state(); 3117 nfs4_unlock_state();
3116 return status; 3118 return status;
3117 } 3119 }
3118 3120
3119 static struct lock_manager nfsd4_manager = {
3120 };
3121
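The file-scope nfsd4_manager dropped here is what previously made the NFSv4 grace period a host-global affair. The manager now lives in the per-network-namespace nfsd_net and is reached through net_generic(), as nfsd4_end_grace() below shows. For reference, a minimal sketch of the generic per-net data pattern this relies on; the names are illustrative, not nfsd's actual registration code:

	#include <linux/fs.h>			/* struct lock_manager */
	#include <net/net_namespace.h>
	#include <net/netns/generic.h>

	static int example_net_id;		/* slot assigned by the pernet core */

	struct example_net {
		struct lock_manager grace_mgr;	/* one grace manager per namespace */
	};

	static struct pernet_operations example_net_ops = {
		.id   = &example_net_id,
		.size = sizeof(struct example_net),	/* allocated for every net */
	};

	/* On module init:   register_pernet_subsys(&example_net_ops);
	 * At any call site: struct example_net *en = net_generic(net, example_net_id);
	 */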
3122 static bool grace_ended; 3121 static bool grace_ended;
3123 3122
3124 static void 3123 static void
3125 nfsd4_end_grace(void) 3124 nfsd4_end_grace(struct net *net)
3126 { 3125 {
3126 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
3127
3127 /* do nothing if grace period already ended */ 3128 /* do nothing if grace period already ended */
3128 if (grace_ended) 3129 if (grace_ended)
3129 return; 3130 return;
3130 3131
3131 dprintk("NFSD: end of grace period\n"); 3132 dprintk("NFSD: end of grace period\n");
3132 grace_ended = true; 3133 grace_ended = true;
3133 nfsd4_record_grace_done(&init_net, boot_time); 3134 nfsd4_record_grace_done(net, boot_time);
3134 locks_end_grace(&nfsd4_manager); 3135 locks_end_grace(&nn->nfsd4_manager);
3135 /* 3136 /*
3136 * Now that every NFSv4 client has had the chance to recover and 3137 * Now that every NFSv4 client has had the chance to recover and
3137 * to see the (possibly new, possibly shorter) lease time, we 3138 * to see the (possibly new, possibly shorter) lease time, we
3138 * can safely set the next grace time to the current lease time: 3139 * can safely set the next grace time to the current lease time:
3139 */ 3140 */
3140 nfsd4_grace = nfsd4_lease; 3141 nfsd4_grace = nfsd4_lease;
3141 } 3142 }
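nfsd4_end_grace() now takes the struct net whose grace period it is ending: it fetches the per-namespace nfsd_net with net_generic() and calls locks_end_grace() on nn->nfsd4_manager instead of on the old file-scope manager. The matching start side is not in this hunk; presumably grace is started on the same per-net instance, along these lines (a sketch under that assumption, not code from this commit):

	static void example_start_grace(struct net *net)
	{
		struct nfsd_net *nn = net_generic(net, nfsd_net_id);

		/* Begin the grace period on this namespace's own manager. */
		locks_start_grace(&nn->nfsd4_manager);
	}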
3142 3143
3143 static time_t 3144 static time_t
3144 nfs4_laundromat(void) 3145 nfs4_laundromat(void)
3145 { 3146 {
3146 struct nfs4_client *clp; 3147 struct nfs4_client *clp;
3147 struct nfs4_openowner *oo; 3148 struct nfs4_openowner *oo;
3148 struct nfs4_delegation *dp; 3149 struct nfs4_delegation *dp;
3149 struct list_head *pos, *next, reaplist; 3150 struct list_head *pos, *next, reaplist;
3150 time_t cutoff = get_seconds() - nfsd4_lease; 3151 time_t cutoff = get_seconds() - nfsd4_lease;
3151 time_t t, clientid_val = nfsd4_lease; 3152 time_t t, clientid_val = nfsd4_lease;
3152 time_t u, test_val = nfsd4_lease; 3153 time_t u, test_val = nfsd4_lease;
3153 3154
3154 nfs4_lock_state(); 3155 nfs4_lock_state();
3155 3156
3156 dprintk("NFSD: laundromat service - starting\n"); 3157 dprintk("NFSD: laundromat service - starting\n");
3157 nfsd4_end_grace(); 3158 nfsd4_end_grace(&init_net);
3158 INIT_LIST_HEAD(&reaplist); 3159 INIT_LIST_HEAD(&reaplist);
3159 spin_lock(&client_lock); 3160 spin_lock(&client_lock);
3160 list_for_each_safe(pos, next, &client_lru) { 3161 list_for_each_safe(pos, next, &client_lru) {
3161 clp = list_entry(pos, struct nfs4_client, cl_lru); 3162 clp = list_entry(pos, struct nfs4_client, cl_lru);
3162 if (time_after((unsigned long)clp->cl_time, (unsigned long)cutoff)) { 3163 if (time_after((unsigned long)clp->cl_time, (unsigned long)cutoff)) {
3163 t = clp->cl_time - cutoff; 3164 t = clp->cl_time - cutoff;
3164 if (clientid_val > t) 3165 if (clientid_val > t)
3165 clientid_val = t; 3166 clientid_val = t;
3166 break; 3167 break;
3167 } 3168 }
3168 if (atomic_read(&clp->cl_refcount)) { 3169 if (atomic_read(&clp->cl_refcount)) {
3169 dprintk("NFSD: client in use (clientid %08x)\n", 3170 dprintk("NFSD: client in use (clientid %08x)\n",
3170 clp->cl_clientid.cl_id); 3171 clp->cl_clientid.cl_id);
3171 continue; 3172 continue;
3172 } 3173 }
3173 unhash_client_locked(clp); 3174 unhash_client_locked(clp);
3174 list_add(&clp->cl_lru, &reaplist); 3175 list_add(&clp->cl_lru, &reaplist);
3175 } 3176 }
3176 spin_unlock(&client_lock); 3177 spin_unlock(&client_lock);
3177 list_for_each_safe(pos, next, &reaplist) { 3178 list_for_each_safe(pos, next, &reaplist) {
3178 clp = list_entry(pos, struct nfs4_client, cl_lru); 3179 clp = list_entry(pos, struct nfs4_client, cl_lru);
3179 dprintk("NFSD: purging unused client (clientid %08x)\n", 3180 dprintk("NFSD: purging unused client (clientid %08x)\n",
3180 clp->cl_clientid.cl_id); 3181 clp->cl_clientid.cl_id);
3181 nfsd4_client_record_remove(clp); 3182 nfsd4_client_record_remove(clp);
3182 expire_client(clp); 3183 expire_client(clp);
3183 } 3184 }
3184 spin_lock(&recall_lock); 3185 spin_lock(&recall_lock);
3185 list_for_each_safe(pos, next, &del_recall_lru) { 3186 list_for_each_safe(pos, next, &del_recall_lru) {
3186 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru); 3187 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
3187 if (time_after((unsigned long)dp->dl_time, (unsigned long)cutoff)) { 3188 if (time_after((unsigned long)dp->dl_time, (unsigned long)cutoff)) {
3188 u = dp->dl_time - cutoff; 3189 u = dp->dl_time - cutoff;
3189 if (test_val > u) 3190 if (test_val > u)
3190 test_val = u; 3191 test_val = u;
3191 break; 3192 break;
3192 } 3193 }
3193 list_move(&dp->dl_recall_lru, &reaplist); 3194 list_move(&dp->dl_recall_lru, &reaplist);
3194 } 3195 }
3195 spin_unlock(&recall_lock); 3196 spin_unlock(&recall_lock);
3196 list_for_each_safe(pos, next, &reaplist) { 3197 list_for_each_safe(pos, next, &reaplist) {
3197 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru); 3198 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
3198 unhash_delegation(dp); 3199 unhash_delegation(dp);
3199 } 3200 }
3200 test_val = nfsd4_lease; 3201 test_val = nfsd4_lease;
3201 list_for_each_safe(pos, next, &close_lru) { 3202 list_for_each_safe(pos, next, &close_lru) {
3202 oo = container_of(pos, struct nfs4_openowner, oo_close_lru); 3203 oo = container_of(pos, struct nfs4_openowner, oo_close_lru);
3203 if (time_after((unsigned long)oo->oo_time, (unsigned long)cutoff)) { 3204 if (time_after((unsigned long)oo->oo_time, (unsigned long)cutoff)) {
3204 u = oo->oo_time - cutoff; 3205 u = oo->oo_time - cutoff;
3205 if (test_val > u) 3206 if (test_val > u)
3206 test_val = u; 3207 test_val = u;
3207 break; 3208 break;
3208 } 3209 }
3209 release_openowner(oo); 3210 release_openowner(oo);
3210 } 3211 }
3211 if (clientid_val < NFSD_LAUNDROMAT_MINTIMEOUT) 3212 if (clientid_val < NFSD_LAUNDROMAT_MINTIMEOUT)
3212 clientid_val = NFSD_LAUNDROMAT_MINTIMEOUT; 3213 clientid_val = NFSD_LAUNDROMAT_MINTIMEOUT;
3213 nfs4_unlock_state(); 3214 nfs4_unlock_state();
3214 return clientid_val; 3215 return clientid_val;
3215 } 3216 }
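Two things worth noting about the laundromat: it still ends grace for &init_net only, so this patch converts the data structure while the call sites are generalized incrementally, and its return value tells laundromat_main() how long it may sleep before the next client could expire (clamped to NFSD_LAUNDROMAT_MINTIMEOUT). A worked example of the expiry arithmetic, assuming an illustrative 90-second lease:

	/*
	 * cutoff = get_seconds() - nfsd4_lease          e.g. now - 90
	 * A client last renewed 30 seconds ago has cl_time = now - 30, which is
	 * after the cutoff, so it is kept and bounds the next wakeup:
	 *     clientid_val = min(clientid_val, cl_time - cutoff) = 60 seconds.
	 * A client silent for 120 seconds has cl_time = now - 120, before the
	 * cutoff, so it is unhashed and moved to the reap list.
	 */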
3216 3217
3217 static struct workqueue_struct *laundry_wq; 3218 static struct workqueue_struct *laundry_wq;
3218 static void laundromat_main(struct work_struct *); 3219 static void laundromat_main(struct work_struct *);
3219 static DECLARE_DELAYED_WORK(laundromat_work, laundromat_main); 3220 static DECLARE_DELAYED_WORK(laundromat_work, laundromat_main);
3220 3221
3221 static void 3222 static void
3222 laundromat_main(struct work_struct *not_used) 3223 laundromat_main(struct work_struct *not_used)
3223 { 3224 {
3224 time_t t; 3225 time_t t;
3225 3226
3226 t = nfs4_laundromat(); 3227 t = nfs4_laundromat();
3227 dprintk("NFSD: laundromat_main - sleeping for %ld seconds\n", t); 3228 dprintk("NFSD: laundromat_main - sleeping for %ld seconds\n", t);
3228 queue_delayed_work(laundry_wq, &laundromat_work, t*HZ); 3229 queue_delayed_work(laundry_wq, &laundromat_work, t*HZ);
3229 } 3230 }
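laundromat_main() is a self-rearming delayed work item: each pass runs nfs4_laundromat() and then re-queues itself for however many seconds the laundromat said it can safely sleep. A bare-bones sketch of that generic pattern, with illustrative names:

	#include <linux/workqueue.h>

	static struct workqueue_struct *example_wq;
	static void example_main(struct work_struct *w);
	static DECLARE_DELAYED_WORK(example_work, example_main);

	static void example_main(struct work_struct *w)
	{
		long secs = 60;		/* do the periodic work, pick the next interval */

		/* Re-arm for the next pass. */
		queue_delayed_work(example_wq, &example_work, secs * HZ);
	}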
3230 3231
3231 static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_ol_stateid *stp) 3232 static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_ol_stateid *stp)
3232 { 3233 {
3233 if (fhp->fh_dentry->d_inode != stp->st_file->fi_inode) 3234 if (fhp->fh_dentry->d_inode != stp->st_file->fi_inode)
3234 return nfserr_bad_stateid; 3235 return nfserr_bad_stateid;
3235 return nfs_ok; 3236 return nfs_ok;
3236 } 3237 }
3237 3238
3238 static int 3239 static int
3239 STALE_STATEID(stateid_t *stateid) 3240 STALE_STATEID(stateid_t *stateid)
3240 { 3241 {
3241 if (stateid->si_opaque.so_clid.cl_boot == boot_time) 3242 if (stateid->si_opaque.so_clid.cl_boot == boot_time)
3242 return 0; 3243 return 0;
3243 dprintk("NFSD: stale stateid " STATEID_FMT "!\n", 3244 dprintk("NFSD: stale stateid " STATEID_FMT "!\n",
3244 STATEID_VAL(stateid)); 3245 STATEID_VAL(stateid));
3245 return 1; 3246 return 1;
3246 } 3247 }
3247 3248
3248 static inline int 3249 static inline int
3249 access_permit_read(struct nfs4_ol_stateid *stp) 3250 access_permit_read(struct nfs4_ol_stateid *stp)
3250 { 3251 {
3251 return test_access(NFS4_SHARE_ACCESS_READ, stp) || 3252 return test_access(NFS4_SHARE_ACCESS_READ, stp) ||
3252 test_access(NFS4_SHARE_ACCESS_BOTH, stp) || 3253 test_access(NFS4_SHARE_ACCESS_BOTH, stp) ||
3253 test_access(NFS4_SHARE_ACCESS_WRITE, stp); 3254 test_access(NFS4_SHARE_ACCESS_WRITE, stp);
3254 } 3255 }
3255 3256
3256 static inline int 3257 static inline int
3257 access_permit_write(struct nfs4_ol_stateid *stp) 3258 access_permit_write(struct nfs4_ol_stateid *stp)
3258 { 3259 {
3259 return test_access(NFS4_SHARE_ACCESS_WRITE, stp) || 3260 return test_access(NFS4_SHARE_ACCESS_WRITE, stp) ||
3260 test_access(NFS4_SHARE_ACCESS_BOTH, stp); 3261 test_access(NFS4_SHARE_ACCESS_BOTH, stp);
3261 } 3262 }
3262 3263
3263 static 3264 static
3264 __be32 nfs4_check_openmode(struct nfs4_ol_stateid *stp, int flags) 3265 __be32 nfs4_check_openmode(struct nfs4_ol_stateid *stp, int flags)
3265 { 3266 {
3266 __be32 status = nfserr_openmode; 3267 __be32 status = nfserr_openmode;
3267 3268
3268 /* For lock stateid's, we test the parent open, not the lock: */ 3269 /* For lock stateid's, we test the parent open, not the lock: */
3269 if (stp->st_openstp) 3270 if (stp->st_openstp)
3270 stp = stp->st_openstp; 3271 stp = stp->st_openstp;
3271 if ((flags & WR_STATE) && !access_permit_write(stp)) 3272 if ((flags & WR_STATE) && !access_permit_write(stp))
3272 goto out; 3273 goto out;
3273 if ((flags & RD_STATE) && !access_permit_read(stp)) 3274 if ((flags & RD_STATE) && !access_permit_read(stp))
3274 goto out; 3275 goto out;
3275 status = nfs_ok; 3276 status = nfs_ok;
3276 out: 3277 out:
3277 return status; 3278 return status;
3278 } 3279 }
3279 3280
3280 static inline __be32 3281 static inline __be32
3281 check_special_stateids(svc_fh *current_fh, stateid_t *stateid, int flags) 3282 check_special_stateids(svc_fh *current_fh, stateid_t *stateid, int flags)
3282 { 3283 {
3283 if (ONE_STATEID(stateid) && (flags & RD_STATE)) 3284 if (ONE_STATEID(stateid) && (flags & RD_STATE))
3284 return nfs_ok; 3285 return nfs_ok;
3285 else if (locks_in_grace()) { 3286 else if (locks_in_grace()) {
3286 /* Answer in remaining cases depends on existence of 3287 /* Answer in remaining cases depends on existence of
3287 * conflicting state; so we must wait out the grace period. */ 3288 * conflicting state; so we must wait out the grace period. */
3288 return nfserr_grace; 3289 return nfserr_grace;
3289 } else if (flags & WR_STATE) 3290 } else if (flags & WR_STATE)
3290 return nfs4_share_conflict(current_fh, 3291 return nfs4_share_conflict(current_fh,
3291 NFS4_SHARE_DENY_WRITE); 3292 NFS4_SHARE_DENY_WRITE);
3292 else /* (flags & RD_STATE) && ZERO_STATEID(stateid) */ 3293 else /* (flags & RD_STATE) && ZERO_STATEID(stateid) */
3293 return nfs4_share_conflict(current_fh, 3294 return nfs4_share_conflict(current_fh,
3294 NFS4_SHARE_DENY_READ); 3295 NFS4_SHARE_DENY_READ);
3295 } 3296 }
3296 3297
3297 /* 3298 /*
3298 * Allow READ/WRITE during grace period on recovered state only for files 3299 * Allow READ/WRITE during grace period on recovered state only for files
3299 * that are not able to provide mandatory locking. 3300 * that are not able to provide mandatory locking.
3300 */ 3301 */
3301 static inline int 3302 static inline int
3302 grace_disallows_io(struct inode *inode) 3303 grace_disallows_io(struct inode *inode)
3303 { 3304 {
3304 return locks_in_grace() && mandatory_lock(inode); 3305 return locks_in_grace() && mandatory_lock(inode);
3305 } 3306 }
3306 3307
3307 /* Returns true iff a is later than b: */ 3308 /* Returns true iff a is later than b: */
3308 static bool stateid_generation_after(stateid_t *a, stateid_t *b) 3309 static bool stateid_generation_after(stateid_t *a, stateid_t *b)
3309 { 3310 {
3310 return (s32)a->si_generation - (s32)b->si_generation > 0; 3311 return (s32)a->si_generation - (s32)b->si_generation > 0;
3311 } 3312 }
3312 3313
3313 static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session) 3314 static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session)
3314 { 3315 {
3315 /* 3316 /*
3316 * When sessions are used the stateid generation number is ignored 3317 * When sessions are used the stateid generation number is ignored
3317 * when it is zero. 3318 * when it is zero.
3318 */ 3319 */
3319 if (has_session && in->si_generation == 0) 3320 if (has_session && in->si_generation == 0)
3320 return nfs_ok; 3321 return nfs_ok;
3321 3322
3322 if (in->si_generation == ref->si_generation) 3323 if (in->si_generation == ref->si_generation)
3323 return nfs_ok; 3324 return nfs_ok;
3324 3325
3325 /* If the client sends us a stateid from the future, it's buggy: */ 3326 /* If the client sends us a stateid from the future, it's buggy: */
3326 if (stateid_generation_after(in, ref)) 3327 if (stateid_generation_after(in, ref))
3327 return nfserr_bad_stateid; 3328 return nfserr_bad_stateid;
3328 /* 3329 /*
3329 * However, we could see a stateid from the past, even from a 3330 * However, we could see a stateid from the past, even from a
3330 * non-buggy client. For example, if the client sends a lock 3331 * non-buggy client. For example, if the client sends a lock
3331 * while some IO is outstanding, the lock may bump si_generation 3332 * while some IO is outstanding, the lock may bump si_generation
3332 * while the IO is still in flight. The client could avoid that 3333 * while the IO is still in flight. The client could avoid that
3333 * situation by waiting for responses on all the IO requests, 3334 * situation by waiting for responses on all the IO requests,
3334 * but better performance may result in retrying IO that 3335 * but better performance may result in retrying IO that
3335 * receives an old_stateid error if requests are rarely 3336 * receives an old_stateid error if requests are rarely
3336 * reordered in flight: 3337 * reordered in flight:
3337 */ 3338 */
3338 return nfserr_old_stateid; 3339 return nfserr_old_stateid;
3339 } 3340 }
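stateid_generation_after() compares generations with a signed 32-bit difference, so the ordering stays correct even after si_generation wraps past zero. A small standalone illustration of the same trick (plain userspace C, not kernel code; the cast is applied to the unsigned difference here, which yields the same ordering):

	#include <stdint.h>
	#include <stdio.h>

	/* True if generation a is "later" than b, tolerating u32 wraparound. */
	static int generation_after(uint32_t a, uint32_t b)
	{
		return (int32_t)(a - b) > 0;
	}

	int main(void)
	{
		/* 2 is later than 0xfffffffe: the counter wrapped, but the
		 * signed difference (4) is still positive. */
		printf("%d\n", generation_after(2u, 0xfffffffeu));	/* prints 1 */
		printf("%d\n", generation_after(0xfffffffeu, 2u));	/* prints 0 */
		return 0;
	}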
3340 3341
3341 static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid) 3342 static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
3342 { 3343 {
3343 struct nfs4_stid *s; 3344 struct nfs4_stid *s;
3344 struct nfs4_ol_stateid *ols; 3345 struct nfs4_ol_stateid *ols;
3345 __be32 status; 3346 __be32 status;
3346 3347
3347 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) 3348 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
3348 return nfserr_bad_stateid; 3349 return nfserr_bad_stateid;
3349 /* Client debugging aid. */ 3350 /* Client debugging aid. */
3350 if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid)) { 3351 if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid)) {
3351 char addr_str[INET6_ADDRSTRLEN]; 3352 char addr_str[INET6_ADDRSTRLEN];
3352 rpc_ntop((struct sockaddr *)&cl->cl_addr, addr_str, 3353 rpc_ntop((struct sockaddr *)&cl->cl_addr, addr_str,
3353 sizeof(addr_str)); 3354 sizeof(addr_str));
3354 pr_warn_ratelimited("NFSD: client %s testing state ID " 3355 pr_warn_ratelimited("NFSD: client %s testing state ID "
3355 "with incorrect client ID\n", addr_str); 3356 "with incorrect client ID\n", addr_str);
3356 return nfserr_bad_stateid; 3357 return nfserr_bad_stateid;
3357 } 3358 }
3358 s = find_stateid(cl, stateid); 3359 s = find_stateid(cl, stateid);
3359 if (!s) 3360 if (!s)
3360 return nfserr_bad_stateid; 3361 return nfserr_bad_stateid;
3361 status = check_stateid_generation(stateid, &s->sc_stateid, 1); 3362 status = check_stateid_generation(stateid, &s->sc_stateid, 1);
3362 if (status) 3363 if (status)
3363 return status; 3364 return status;
3364 if (!(s->sc_type & (NFS4_OPEN_STID | NFS4_LOCK_STID))) 3365 if (!(s->sc_type & (NFS4_OPEN_STID | NFS4_LOCK_STID)))
3365 return nfs_ok; 3366 return nfs_ok;
3366 ols = openlockstateid(s); 3367 ols = openlockstateid(s);
3367 if (ols->st_stateowner->so_is_open_owner 3368 if (ols->st_stateowner->so_is_open_owner
3368 && !(openowner(ols->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED)) 3369 && !(openowner(ols->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED))
3369 return nfserr_bad_stateid; 3370 return nfserr_bad_stateid;
3370 return nfs_ok; 3371 return nfs_ok;
3371 } 3372 }
3372 3373
3373 static __be32 nfsd4_lookup_stateid(stateid_t *stateid, unsigned char typemask, struct nfs4_stid **s) 3374 static __be32 nfsd4_lookup_stateid(stateid_t *stateid, unsigned char typemask, struct nfs4_stid **s)
3374 { 3375 {
3375 struct nfs4_client *cl; 3376 struct nfs4_client *cl;
3376 3377
3377 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) 3378 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
3378 return nfserr_bad_stateid; 3379 return nfserr_bad_stateid;
3379 if (STALE_STATEID(stateid)) 3380 if (STALE_STATEID(stateid))
3380 return nfserr_stale_stateid; 3381 return nfserr_stale_stateid;
3381 cl = find_confirmed_client(&stateid->si_opaque.so_clid); 3382 cl = find_confirmed_client(&stateid->si_opaque.so_clid);
3382 if (!cl) 3383 if (!cl)
3383 return nfserr_expired; 3384 return nfserr_expired;
3384 *s = find_stateid_by_type(cl, stateid, typemask); 3385 *s = find_stateid_by_type(cl, stateid, typemask);
3385 if (!*s) 3386 if (!*s)
3386 return nfserr_bad_stateid; 3387 return nfserr_bad_stateid;
3387 return nfs_ok; 3388 return nfs_ok;
3388 3389
3389 } 3390 }
3390 3391
3391 /* 3392 /*
3392 * Checks for stateid operations 3393 * Checks for stateid operations
3393 */ 3394 */
3394 __be32 3395 __be32
3395 nfs4_preprocess_stateid_op(struct nfsd4_compound_state *cstate, 3396 nfs4_preprocess_stateid_op(struct nfsd4_compound_state *cstate,
3396 stateid_t *stateid, int flags, struct file **filpp) 3397 stateid_t *stateid, int flags, struct file **filpp)
3397 { 3398 {
3398 struct nfs4_stid *s; 3399 struct nfs4_stid *s;
3399 struct nfs4_ol_stateid *stp = NULL; 3400 struct nfs4_ol_stateid *stp = NULL;
3400 struct nfs4_delegation *dp = NULL; 3401 struct nfs4_delegation *dp = NULL;
3401 struct svc_fh *current_fh = &cstate->current_fh; 3402 struct svc_fh *current_fh = &cstate->current_fh;
3402 struct inode *ino = current_fh->fh_dentry->d_inode; 3403 struct inode *ino = current_fh->fh_dentry->d_inode;
3403 __be32 status; 3404 __be32 status;
3404 3405
3405 if (filpp) 3406 if (filpp)
3406 *filpp = NULL; 3407 *filpp = NULL;
3407 3408
3408 if (grace_disallows_io(ino)) 3409 if (grace_disallows_io(ino))
3409 return nfserr_grace; 3410 return nfserr_grace;
3410 3411
3411 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) 3412 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
3412 return check_special_stateids(current_fh, stateid, flags); 3413 return check_special_stateids(current_fh, stateid, flags);
3413 3414
3414 status = nfsd4_lookup_stateid(stateid, NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID, &s); 3415 status = nfsd4_lookup_stateid(stateid, NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID, &s);
3415 if (status) 3416 if (status)
3416 return status; 3417 return status;
3417 status = check_stateid_generation(stateid, &s->sc_stateid, nfsd4_has_session(cstate)); 3418 status = check_stateid_generation(stateid, &s->sc_stateid, nfsd4_has_session(cstate));
3418 if (status) 3419 if (status)
3419 goto out; 3420 goto out;
3420 switch (s->sc_type) { 3421 switch (s->sc_type) {
3421 case NFS4_DELEG_STID: 3422 case NFS4_DELEG_STID:
3422 dp = delegstateid(s); 3423 dp = delegstateid(s);
3423 status = nfs4_check_delegmode(dp, flags); 3424 status = nfs4_check_delegmode(dp, flags);
3424 if (status) 3425 if (status)
3425 goto out; 3426 goto out;
3426 if (filpp) { 3427 if (filpp) {
3427 *filpp = dp->dl_file->fi_deleg_file; 3428 *filpp = dp->dl_file->fi_deleg_file;
3428 BUG_ON(!*filpp); 3429 BUG_ON(!*filpp);
3429 } 3430 }
3430 break; 3431 break;
3431 case NFS4_OPEN_STID: 3432 case NFS4_OPEN_STID:
3432 case NFS4_LOCK_STID: 3433 case NFS4_LOCK_STID:
3433 stp = openlockstateid(s); 3434 stp = openlockstateid(s);
3434 status = nfs4_check_fh(current_fh, stp); 3435 status = nfs4_check_fh(current_fh, stp);
3435 if (status) 3436 if (status)
3436 goto out; 3437 goto out;
3437 if (stp->st_stateowner->so_is_open_owner 3438 if (stp->st_stateowner->so_is_open_owner
3438 && !(openowner(stp->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED)) 3439 && !(openowner(stp->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED))
3439 goto out; 3440 goto out;
3440 status = nfs4_check_openmode(stp, flags); 3441 status = nfs4_check_openmode(stp, flags);
3441 if (status) 3442 if (status)
3442 goto out; 3443 goto out;
3443 if (filpp) { 3444 if (filpp) {
3444 if (flags & RD_STATE) 3445 if (flags & RD_STATE)
3445 *filpp = find_readable_file(stp->st_file); 3446 *filpp = find_readable_file(stp->st_file);
3446 else 3447 else
3447 *filpp = find_writeable_file(stp->st_file); 3448 *filpp = find_writeable_file(stp->st_file);
3448 } 3449 }
3449 break; 3450 break;
3450 default: 3451 default:
3451 return nfserr_bad_stateid; 3452 return nfserr_bad_stateid;
3452 } 3453 }
3453 status = nfs_ok; 3454 status = nfs_ok;
3454 out: 3455 out:
3455 return status; 3456 return status;
3456 } 3457 }
3457 3458
3458 static __be32 3459 static __be32
3459 nfsd4_free_lock_stateid(struct nfs4_ol_stateid *stp) 3460 nfsd4_free_lock_stateid(struct nfs4_ol_stateid *stp)
3460 { 3461 {
3461 if (check_for_locks(stp->st_file, lockowner(stp->st_stateowner))) 3462 if (check_for_locks(stp->st_file, lockowner(stp->st_stateowner)))
3462 return nfserr_locks_held; 3463 return nfserr_locks_held;
3463 release_lock_stateid(stp); 3464 release_lock_stateid(stp);
3464 return nfs_ok; 3465 return nfs_ok;
3465 } 3466 }
3466 3467
3467 /* 3468 /*
3468 * Test if the stateid is valid 3469 * Test if the stateid is valid
3469 */ 3470 */
3470 __be32 3471 __be32
3471 nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 3472 nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3472 struct nfsd4_test_stateid *test_stateid) 3473 struct nfsd4_test_stateid *test_stateid)
3473 { 3474 {
3474 struct nfsd4_test_stateid_id *stateid; 3475 struct nfsd4_test_stateid_id *stateid;
3475 struct nfs4_client *cl = cstate->session->se_client; 3476 struct nfs4_client *cl = cstate->session->se_client;
3476 3477
3477 nfs4_lock_state(); 3478 nfs4_lock_state();
3478 list_for_each_entry(stateid, &test_stateid->ts_stateid_list, ts_id_list) 3479 list_for_each_entry(stateid, &test_stateid->ts_stateid_list, ts_id_list)
3479 stateid->ts_id_status = 3480 stateid->ts_id_status =
3480 nfsd4_validate_stateid(cl, &stateid->ts_id_stateid); 3481 nfsd4_validate_stateid(cl, &stateid->ts_id_stateid);
3481 nfs4_unlock_state(); 3482 nfs4_unlock_state();
3482 3483
3483 return nfs_ok; 3484 return nfs_ok;
3484 } 3485 }
3485 3486
3486 __be32 3487 __be32
3487 nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 3488 nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3488 struct nfsd4_free_stateid *free_stateid) 3489 struct nfsd4_free_stateid *free_stateid)
3489 { 3490 {
3490 stateid_t *stateid = &free_stateid->fr_stateid; 3491 stateid_t *stateid = &free_stateid->fr_stateid;
3491 struct nfs4_stid *s; 3492 struct nfs4_stid *s;
3492 struct nfs4_client *cl = cstate->session->se_client; 3493 struct nfs4_client *cl = cstate->session->se_client;
3493 __be32 ret = nfserr_bad_stateid; 3494 __be32 ret = nfserr_bad_stateid;
3494 3495
3495 nfs4_lock_state(); 3496 nfs4_lock_state();
3496 s = find_stateid(cl, stateid); 3497 s = find_stateid(cl, stateid);
3497 if (!s) 3498 if (!s)
3498 goto out; 3499 goto out;
3499 switch (s->sc_type) { 3500 switch (s->sc_type) {
3500 case NFS4_DELEG_STID: 3501 case NFS4_DELEG_STID:
3501 ret = nfserr_locks_held; 3502 ret = nfserr_locks_held;
3502 goto out; 3503 goto out;
3503 case NFS4_OPEN_STID: 3504 case NFS4_OPEN_STID:
3504 case NFS4_LOCK_STID: 3505 case NFS4_LOCK_STID:
3505 ret = check_stateid_generation(stateid, &s->sc_stateid, 1); 3506 ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
3506 if (ret) 3507 if (ret)
3507 goto out; 3508 goto out;
3508 if (s->sc_type == NFS4_LOCK_STID) 3509 if (s->sc_type == NFS4_LOCK_STID)
3509 ret = nfsd4_free_lock_stateid(openlockstateid(s)); 3510 ret = nfsd4_free_lock_stateid(openlockstateid(s));
3510 else 3511 else
3511 ret = nfserr_locks_held; 3512 ret = nfserr_locks_held;
3512 break; 3513 break;
3513 default: 3514 default:
3514 ret = nfserr_bad_stateid; 3515 ret = nfserr_bad_stateid;
3515 } 3516 }
3516 out: 3517 out:
3517 nfs4_unlock_state(); 3518 nfs4_unlock_state();
3518 return ret; 3519 return ret;
3519 } 3520 }
3520 3521
3521 static inline int 3522 static inline int
3522 setlkflg (int type) 3523 setlkflg (int type)
3523 { 3524 {
3524 return (type == NFS4_READW_LT || type == NFS4_READ_LT) ? 3525 return (type == NFS4_READW_LT || type == NFS4_READ_LT) ?
3525 RD_STATE : WR_STATE; 3526 RD_STATE : WR_STATE;
3526 } 3527 }
3527 3528
3528 static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_t *stateid, u32 seqid, struct nfs4_ol_stateid *stp) 3529 static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_t *stateid, u32 seqid, struct nfs4_ol_stateid *stp)
3529 { 3530 {
3530 struct svc_fh *current_fh = &cstate->current_fh; 3531 struct svc_fh *current_fh = &cstate->current_fh;
3531 struct nfs4_stateowner *sop = stp->st_stateowner; 3532 struct nfs4_stateowner *sop = stp->st_stateowner;
3532 __be32 status; 3533 __be32 status;
3533 3534
3534 status = nfsd4_check_seqid(cstate, sop, seqid); 3535 status = nfsd4_check_seqid(cstate, sop, seqid);
3535 if (status) 3536 if (status)
3536 return status; 3537 return status;
3537 if (stp->st_stid.sc_type == NFS4_CLOSED_STID) 3538 if (stp->st_stid.sc_type == NFS4_CLOSED_STID)
3538 /* 3539 /*
3539 * "Closed" stateid's exist *only* to return 3540 * "Closed" stateid's exist *only* to return
3540 * nfserr_replay_me from the previous step. 3541 * nfserr_replay_me from the previous step.
3541 */ 3542 */
3542 return nfserr_bad_stateid; 3543 return nfserr_bad_stateid;
3543 status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate)); 3544 status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
3544 if (status) 3545 if (status)
3545 return status; 3546 return status;
3546 return nfs4_check_fh(current_fh, stp); 3547 return nfs4_check_fh(current_fh, stp);
3547 } 3548 }
3548 3549
3549 /* 3550 /*
3550 * Checks for sequence id mutating operations. 3551 * Checks for sequence id mutating operations.
3551 */ 3552 */
3552 static __be32 3553 static __be32
3553 nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid, 3554 nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
3554 stateid_t *stateid, char typemask, 3555 stateid_t *stateid, char typemask,
3555 struct nfs4_ol_stateid **stpp) 3556 struct nfs4_ol_stateid **stpp)
3556 { 3557 {
3557 __be32 status; 3558 __be32 status;
3558 struct nfs4_stid *s; 3559 struct nfs4_stid *s;
3559 3560
3560 dprintk("NFSD: %s: seqid=%d stateid = " STATEID_FMT "\n", __func__, 3561 dprintk("NFSD: %s: seqid=%d stateid = " STATEID_FMT "\n", __func__,
3561 seqid, STATEID_VAL(stateid)); 3562 seqid, STATEID_VAL(stateid));
3562 3563
3563 *stpp = NULL; 3564 *stpp = NULL;
3564 status = nfsd4_lookup_stateid(stateid, typemask, &s); 3565 status = nfsd4_lookup_stateid(stateid, typemask, &s);
3565 if (status) 3566 if (status)
3566 return status; 3567 return status;
3567 *stpp = openlockstateid(s); 3568 *stpp = openlockstateid(s);
3568 cstate->replay_owner = (*stpp)->st_stateowner; 3569 cstate->replay_owner = (*stpp)->st_stateowner;
3569 3570
3570 return nfs4_seqid_op_checks(cstate, stateid, seqid, *stpp); 3571 return nfs4_seqid_op_checks(cstate, stateid, seqid, *stpp);
3571 } 3572 }
3572 3573
3573 static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid, stateid_t *stateid, struct nfs4_ol_stateid **stpp) 3574 static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid, stateid_t *stateid, struct nfs4_ol_stateid **stpp)
3574 { 3575 {
3575 __be32 status; 3576 __be32 status;
3576 struct nfs4_openowner *oo; 3577 struct nfs4_openowner *oo;
3577 3578
3578 status = nfs4_preprocess_seqid_op(cstate, seqid, stateid, 3579 status = nfs4_preprocess_seqid_op(cstate, seqid, stateid,
3579 NFS4_OPEN_STID, stpp); 3580 NFS4_OPEN_STID, stpp);
3580 if (status) 3581 if (status)
3581 return status; 3582 return status;
3582 oo = openowner((*stpp)->st_stateowner); 3583 oo = openowner((*stpp)->st_stateowner);
3583 if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) 3584 if (!(oo->oo_flags & NFS4_OO_CONFIRMED))
3584 return nfserr_bad_stateid; 3585 return nfserr_bad_stateid;
3585 return nfs_ok; 3586 return nfs_ok;
3586 } 3587 }
3587 3588
3588 __be32 3589 __be32
3589 nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 3590 nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3590 struct nfsd4_open_confirm *oc) 3591 struct nfsd4_open_confirm *oc)
3591 { 3592 {
3592 __be32 status; 3593 __be32 status;
3593 struct nfs4_openowner *oo; 3594 struct nfs4_openowner *oo;
3594 struct nfs4_ol_stateid *stp; 3595 struct nfs4_ol_stateid *stp;
3595 3596
3596 dprintk("NFSD: nfsd4_open_confirm on file %.*s\n", 3597 dprintk("NFSD: nfsd4_open_confirm on file %.*s\n",
3597 (int)cstate->current_fh.fh_dentry->d_name.len, 3598 (int)cstate->current_fh.fh_dentry->d_name.len,
3598 cstate->current_fh.fh_dentry->d_name.name); 3599 cstate->current_fh.fh_dentry->d_name.name);
3599 3600
3600 status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0); 3601 status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0);
3601 if (status) 3602 if (status)
3602 return status; 3603 return status;
3603 3604
3604 nfs4_lock_state(); 3605 nfs4_lock_state();
3605 3606
3606 status = nfs4_preprocess_seqid_op(cstate, 3607 status = nfs4_preprocess_seqid_op(cstate,
3607 oc->oc_seqid, &oc->oc_req_stateid, 3608 oc->oc_seqid, &oc->oc_req_stateid,
3608 NFS4_OPEN_STID, &stp); 3609 NFS4_OPEN_STID, &stp);
3609 if (status) 3610 if (status)
3610 goto out; 3611 goto out;
3611 oo = openowner(stp->st_stateowner); 3612 oo = openowner(stp->st_stateowner);
3612 status = nfserr_bad_stateid; 3613 status = nfserr_bad_stateid;
3613 if (oo->oo_flags & NFS4_OO_CONFIRMED) 3614 if (oo->oo_flags & NFS4_OO_CONFIRMED)
3614 goto out; 3615 goto out;
3615 oo->oo_flags |= NFS4_OO_CONFIRMED; 3616 oo->oo_flags |= NFS4_OO_CONFIRMED;
3616 update_stateid(&stp->st_stid.sc_stateid); 3617 update_stateid(&stp->st_stid.sc_stateid);
3617 memcpy(&oc->oc_resp_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t)); 3618 memcpy(&oc->oc_resp_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
3618 dprintk("NFSD: %s: success, seqid=%d stateid=" STATEID_FMT "\n", 3619 dprintk("NFSD: %s: success, seqid=%d stateid=" STATEID_FMT "\n",
3619 __func__, oc->oc_seqid, STATEID_VAL(&stp->st_stid.sc_stateid)); 3620 __func__, oc->oc_seqid, STATEID_VAL(&stp->st_stid.sc_stateid));
3620 3621
3621 nfsd4_client_record_create(oo->oo_owner.so_client); 3622 nfsd4_client_record_create(oo->oo_owner.so_client);
3622 status = nfs_ok; 3623 status = nfs_ok;
3623 out: 3624 out:
3624 if (!cstate->replay_owner) 3625 if (!cstate->replay_owner)
3625 nfs4_unlock_state(); 3626 nfs4_unlock_state();
3626 return status; 3627 return status;
3627 } 3628 }
3628 3629
3629 static inline void nfs4_stateid_downgrade_bit(struct nfs4_ol_stateid *stp, u32 access) 3630 static inline void nfs4_stateid_downgrade_bit(struct nfs4_ol_stateid *stp, u32 access)
3630 { 3631 {
3631 if (!test_access(access, stp)) 3632 if (!test_access(access, stp))
3632 return; 3633 return;
3633 nfs4_file_put_access(stp->st_file, nfs4_access_to_omode(access)); 3634 nfs4_file_put_access(stp->st_file, nfs4_access_to_omode(access));
3634 clear_access(access, stp); 3635 clear_access(access, stp);
3635 } 3636 }
3636 3637
3637 static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_access) 3638 static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_access)
3638 { 3639 {
3639 switch (to_access) { 3640 switch (to_access) {
3640 case NFS4_SHARE_ACCESS_READ: 3641 case NFS4_SHARE_ACCESS_READ:
3641 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_WRITE); 3642 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_WRITE);
3642 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH); 3643 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
3643 break; 3644 break;
3644 case NFS4_SHARE_ACCESS_WRITE: 3645 case NFS4_SHARE_ACCESS_WRITE:
3645 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_READ); 3646 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_READ);
3646 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH); 3647 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
3647 break; 3648 break;
3648 case NFS4_SHARE_ACCESS_BOTH: 3649 case NFS4_SHARE_ACCESS_BOTH:
3649 break; 3650 break;
3650 default: 3651 default:
3651 BUG(); 3652 BUG();
3652 } 3653 }
3653 } 3654 }
3654 3655
3655 static void 3656 static void
3656 reset_union_bmap_deny(unsigned long deny, struct nfs4_ol_stateid *stp) 3657 reset_union_bmap_deny(unsigned long deny, struct nfs4_ol_stateid *stp)
3657 { 3658 {
3658 int i; 3659 int i;
3659 for (i = 0; i < 4; i++) { 3660 for (i = 0; i < 4; i++) {
3660 if ((i & deny) != i) 3661 if ((i & deny) != i)
3661 clear_deny(i, stp); 3662 clear_deny(i, stp);
3662 } 3663 }
3663 } 3664 }
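reset_union_bmap_deny() walks the four possible deny combinations (bit 0 = deny read, bit 1 = deny write) and clears every combination that is not a subset of the deny mask requested by the downgrade. A worked example for od_share_deny == NFS4_SHARE_DENY_READ (0x1):

	/*
	 *   i = 0 (deny nothing):    0 & 1 == 0 == i  -> kept
	 *   i = 1 (deny read):       1 & 1 == 1 == i  -> kept
	 *   i = 2 (deny write):      2 & 1 == 0 != i  -> cleared
	 *   i = 3 (deny read+write): 3 & 1 == 1 != i  -> cleared
	 */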
3664 3665
3665 __be32 3666 __be32
3666 nfsd4_open_downgrade(struct svc_rqst *rqstp, 3667 nfsd4_open_downgrade(struct svc_rqst *rqstp,
3667 struct nfsd4_compound_state *cstate, 3668 struct nfsd4_compound_state *cstate,
3668 struct nfsd4_open_downgrade *od) 3669 struct nfsd4_open_downgrade *od)
3669 { 3670 {
3670 __be32 status; 3671 __be32 status;
3671 struct nfs4_ol_stateid *stp; 3672 struct nfs4_ol_stateid *stp;
3672 3673
3673 dprintk("NFSD: nfsd4_open_downgrade on file %.*s\n", 3674 dprintk("NFSD: nfsd4_open_downgrade on file %.*s\n",
3674 (int)cstate->current_fh.fh_dentry->d_name.len, 3675 (int)cstate->current_fh.fh_dentry->d_name.len,
3675 cstate->current_fh.fh_dentry->d_name.name); 3676 cstate->current_fh.fh_dentry->d_name.name);
3676 3677
3677 /* We don't yet support WANT bits: */ 3678 /* We don't yet support WANT bits: */
3678 if (od->od_deleg_want) 3679 if (od->od_deleg_want)
3679 dprintk("NFSD: %s: od_deleg_want=0x%x ignored\n", __func__, 3680 dprintk("NFSD: %s: od_deleg_want=0x%x ignored\n", __func__,
3680 od->od_deleg_want); 3681 od->od_deleg_want);
3681 3682
3682 nfs4_lock_state(); 3683 nfs4_lock_state();
3683 status = nfs4_preprocess_confirmed_seqid_op(cstate, od->od_seqid, 3684 status = nfs4_preprocess_confirmed_seqid_op(cstate, od->od_seqid,
3684 &od->od_stateid, &stp); 3685 &od->od_stateid, &stp);
3685 if (status) 3686 if (status)
3686 goto out; 3687 goto out;
3687 status = nfserr_inval; 3688 status = nfserr_inval;
3688 if (!test_access(od->od_share_access, stp)) { 3689 if (!test_access(od->od_share_access, stp)) {
3689 dprintk("NFSD: access not a subset current bitmap: 0x%lx, input access=%08x\n", 3690 dprintk("NFSD: access not a subset current bitmap: 0x%lx, input access=%08x\n",
3690 stp->st_access_bmap, od->od_share_access); 3691 stp->st_access_bmap, od->od_share_access);
3691 goto out; 3692 goto out;
3692 } 3693 }
3693 if (!test_deny(od->od_share_deny, stp)) { 3694 if (!test_deny(od->od_share_deny, stp)) {
3694 dprintk("NFSD:deny not a subset current bitmap: 0x%lx, input deny=%08x\n", 3695 dprintk("NFSD:deny not a subset current bitmap: 0x%lx, input deny=%08x\n",
3695 stp->st_deny_bmap, od->od_share_deny); 3696 stp->st_deny_bmap, od->od_share_deny);
3696 goto out; 3697 goto out;
3697 } 3698 }
3698 nfs4_stateid_downgrade(stp, od->od_share_access); 3699 nfs4_stateid_downgrade(stp, od->od_share_access);
3699 3700
3700 reset_union_bmap_deny(od->od_share_deny, stp); 3701 reset_union_bmap_deny(od->od_share_deny, stp);
3701 3702
3702 update_stateid(&stp->st_stid.sc_stateid); 3703 update_stateid(&stp->st_stid.sc_stateid);
3703 memcpy(&od->od_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t)); 3704 memcpy(&od->od_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
3704 status = nfs_ok; 3705 status = nfs_ok;
3705 out: 3706 out:
3706 if (!cstate->replay_owner) 3707 if (!cstate->replay_owner)
3707 nfs4_unlock_state(); 3708 nfs4_unlock_state();
3708 return status; 3709 return status;
3709 } 3710 }
3710 3711
3711 void nfsd4_purge_closed_stateid(struct nfs4_stateowner *so) 3712 void nfsd4_purge_closed_stateid(struct nfs4_stateowner *so)
3712 { 3713 {
3713 struct nfs4_openowner *oo; 3714 struct nfs4_openowner *oo;
3714 struct nfs4_ol_stateid *s; 3715 struct nfs4_ol_stateid *s;
3715 3716
3716 if (!so->so_is_open_owner) 3717 if (!so->so_is_open_owner)
3717 return; 3718 return;
3718 oo = openowner(so); 3719 oo = openowner(so);
3719 s = oo->oo_last_closed_stid; 3720 s = oo->oo_last_closed_stid;
3720 if (!s) 3721 if (!s)
3721 return; 3722 return;
3722 if (!(oo->oo_flags & NFS4_OO_PURGE_CLOSE)) { 3723 if (!(oo->oo_flags & NFS4_OO_PURGE_CLOSE)) {
3723 /* Release the last_closed_stid on the next seqid bump: */ 3724 /* Release the last_closed_stid on the next seqid bump: */
3724 oo->oo_flags |= NFS4_OO_PURGE_CLOSE; 3725 oo->oo_flags |= NFS4_OO_PURGE_CLOSE;
3725 return; 3726 return;
3726 } 3727 }
3727 oo->oo_flags &= ~NFS4_OO_PURGE_CLOSE; 3728 oo->oo_flags &= ~NFS4_OO_PURGE_CLOSE;
3728 release_last_closed_stateid(oo); 3729 release_last_closed_stateid(oo);
3729 } 3730 }
3730 3731
3731 static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s) 3732 static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
3732 { 3733 {
3733 unhash_open_stateid(s); 3734 unhash_open_stateid(s);
3734 s->st_stid.sc_type = NFS4_CLOSED_STID; 3735 s->st_stid.sc_type = NFS4_CLOSED_STID;
3735 } 3736 }
3736 3737
3737 /* 3738 /*
3738 * nfs4_unlock_state() called after encode 3739 * nfs4_unlock_state() called after encode
3739 */ 3740 */
3740 __be32 3741 __be32
3741 nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 3742 nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3742 struct nfsd4_close *close) 3743 struct nfsd4_close *close)
3743 { 3744 {
3744 __be32 status; 3745 __be32 status;
3745 struct nfs4_openowner *oo; 3746 struct nfs4_openowner *oo;
3746 struct nfs4_ol_stateid *stp; 3747 struct nfs4_ol_stateid *stp;
3747 3748
3748 dprintk("NFSD: nfsd4_close on file %.*s\n", 3749 dprintk("NFSD: nfsd4_close on file %.*s\n",
3749 (int)cstate->current_fh.fh_dentry->d_name.len, 3750 (int)cstate->current_fh.fh_dentry->d_name.len,
3750 cstate->current_fh.fh_dentry->d_name.name); 3751 cstate->current_fh.fh_dentry->d_name.name);
3751 3752
3752 nfs4_lock_state(); 3753 nfs4_lock_state();
3753 status = nfs4_preprocess_seqid_op(cstate, close->cl_seqid, 3754 status = nfs4_preprocess_seqid_op(cstate, close->cl_seqid,
3754 &close->cl_stateid, 3755 &close->cl_stateid,
3755 NFS4_OPEN_STID|NFS4_CLOSED_STID, 3756 NFS4_OPEN_STID|NFS4_CLOSED_STID,
3756 &stp); 3757 &stp);
3757 if (status) 3758 if (status)
3758 goto out; 3759 goto out;
3759 oo = openowner(stp->st_stateowner); 3760 oo = openowner(stp->st_stateowner);
3760 status = nfs_ok; 3761 status = nfs_ok;
3761 update_stateid(&stp->st_stid.sc_stateid); 3762 update_stateid(&stp->st_stid.sc_stateid);
3762 memcpy(&close->cl_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t)); 3763 memcpy(&close->cl_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
3763 3764
3764 nfsd4_close_open_stateid(stp); 3765 nfsd4_close_open_stateid(stp);
3765 oo->oo_last_closed_stid = stp; 3766 oo->oo_last_closed_stid = stp;
3766 3767
3767 if (list_empty(&oo->oo_owner.so_stateids)) { 3768 if (list_empty(&oo->oo_owner.so_stateids)) {
3768 if (cstate->minorversion) { 3769 if (cstate->minorversion) {
3769 release_openowner(oo); 3770 release_openowner(oo);
3770 cstate->replay_owner = NULL; 3771 cstate->replay_owner = NULL;
3771 } else { 3772 } else {
3772 /* 3773 /*
3773 * In the 4.0 case we need to keep the owners around a 3774 * In the 4.0 case we need to keep the owners around a
3774 * little while to handle CLOSE replay. 3775 * little while to handle CLOSE replay.
3775 */ 3776 */
3776 if (list_empty(&oo->oo_owner.so_stateids)) 3777 if (list_empty(&oo->oo_owner.so_stateids))
3777 move_to_close_lru(oo); 3778 move_to_close_lru(oo);
3778 } 3779 }
3779 } 3780 }
3780 out: 3781 out:
3781 if (!cstate->replay_owner) 3782 if (!cstate->replay_owner)
3782 nfs4_unlock_state(); 3783 nfs4_unlock_state();
3783 return status; 3784 return status;
3784 } 3785 }
3785 3786
3786 __be32 3787 __be32
3787 nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 3788 nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3788 struct nfsd4_delegreturn *dr) 3789 struct nfsd4_delegreturn *dr)
3789 { 3790 {
3790 struct nfs4_delegation *dp; 3791 struct nfs4_delegation *dp;
3791 stateid_t *stateid = &dr->dr_stateid; 3792 stateid_t *stateid = &dr->dr_stateid;
3792 struct nfs4_stid *s; 3793 struct nfs4_stid *s;
3793 struct inode *inode; 3794 struct inode *inode;
3794 __be32 status; 3795 __be32 status;
3795 3796
3796 if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0))) 3797 if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
3797 return status; 3798 return status;
3798 inode = cstate->current_fh.fh_dentry->d_inode; 3799 inode = cstate->current_fh.fh_dentry->d_inode;
3799 3800
3800 nfs4_lock_state(); 3801 nfs4_lock_state();
3801 status = nfsd4_lookup_stateid(stateid, NFS4_DELEG_STID, &s); 3802 status = nfsd4_lookup_stateid(stateid, NFS4_DELEG_STID, &s);
3802 if (status) 3803 if (status)
3803 goto out; 3804 goto out;
3804 dp = delegstateid(s); 3805 dp = delegstateid(s);
3805 status = check_stateid_generation(stateid, &dp->dl_stid.sc_stateid, nfsd4_has_session(cstate)); 3806 status = check_stateid_generation(stateid, &dp->dl_stid.sc_stateid, nfsd4_has_session(cstate));
3806 if (status) 3807 if (status)
3807 goto out; 3808 goto out;
3808 3809
3809 unhash_delegation(dp); 3810 unhash_delegation(dp);
3810 out: 3811 out:
3811 nfs4_unlock_state(); 3812 nfs4_unlock_state();
3812 3813
3813 return status; 3814 return status;
3814 } 3815 }
3815 3816
3816 3817
3817 #define LOFF_OVERFLOW(start, len) ((u64)(len) > ~(u64)(start)) 3818 #define LOFF_OVERFLOW(start, len) ((u64)(len) > ~(u64)(start))
3818 3819
3819 #define LOCKOWNER_INO_HASH_BITS 8 3820 #define LOCKOWNER_INO_HASH_BITS 8
3820 #define LOCKOWNER_INO_HASH_SIZE (1 << LOCKOWNER_INO_HASH_BITS) 3821 #define LOCKOWNER_INO_HASH_SIZE (1 << LOCKOWNER_INO_HASH_BITS)
3821 #define LOCKOWNER_INO_HASH_MASK (LOCKOWNER_INO_HASH_SIZE - 1) 3822 #define LOCKOWNER_INO_HASH_MASK (LOCKOWNER_INO_HASH_SIZE - 1)
3822 3823
3823 static inline u64 3824 static inline u64
3824 end_offset(u64 start, u64 len) 3825 end_offset(u64 start, u64 len)
3825 { 3826 {
3826 u64 end; 3827 u64 end;
3827 3828
3828 end = start + len; 3829 end = start + len;
3829 return end >= start ? end: NFS4_MAX_UINT64; 3830 return end >= start ? end: NFS4_MAX_UINT64;
3830 } 3831 }
3831 3832
3832 /* last octet in a range */ 3833 /* last octet in a range */
3833 static inline u64 3834 static inline u64
3834 last_byte_offset(u64 start, u64 len) 3835 last_byte_offset(u64 start, u64 len)
3835 { 3836 {
3836 u64 end; 3837 u64 end;
3837 3838
3838 BUG_ON(!len); 3839 BUG_ON(!len);
3839 end = start + len; 3840 end = start + len;
3840 return end > start ? end - 1: NFS4_MAX_UINT64; 3841 return end > start ? end - 1: NFS4_MAX_UINT64;
3841 } 3842 }
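
A user-space restatement of the two range helpers, to make the overflow handling concrete (NFS4_MAX_UINT64 doubles as "to end of file"); this is a sketch for illustration, not kernel code:

#include <stdint.h>
#include <stdio.h>

#define NFS4_MAX_UINT64 (~(uint64_t)0)
#define LOFF_OVERFLOW(start, len) ((uint64_t)(len) > ~(uint64_t)(start))

/* Same arithmetic as last_byte_offset(): inclusive end, clamped on wrap. */
static uint64_t last_byte(uint64_t start, uint64_t len)
{
	uint64_t end = start + len;	/* may wrap around */
	return end > start ? end - 1 : NFS4_MAX_UINT64;
}

int main(void)
{
	/* a 100-byte lock starting at offset 4096 covers bytes 4096..4195 */
	printf("%llu\n", (unsigned long long)last_byte(4096, 100));
	/* "lock to EOF": length NFS4_MAX_UINT64 wraps, so the end clamps to max */
	printf("%llu\n", (unsigned long long)last_byte(4096, NFS4_MAX_UINT64));
	/* the overflow test used by check_lock_length() */
	printf("%d\n", LOFF_OVERFLOW((uint64_t)-2, (uint64_t)100)); /* 1: invalid range */
	return 0;
}
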
3842 3843
3843 static unsigned int lockowner_ino_hashval(struct inode *inode, u32 cl_id, struct xdr_netobj *ownername) 3844 static unsigned int lockowner_ino_hashval(struct inode *inode, u32 cl_id, struct xdr_netobj *ownername)
3844 { 3845 {
3845 return (file_hashval(inode) + cl_id 3846 return (file_hashval(inode) + cl_id
3846 + opaque_hashval(ownername->data, ownername->len)) 3847 + opaque_hashval(ownername->data, ownername->len))
3847 & LOCKOWNER_INO_HASH_MASK; 3848 & LOCKOWNER_INO_HASH_MASK;
3848 } 3849 }
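
The bucket index is just the summed hashes folded into a power-of-two table by masking with size - 1. A minimal standalone version, with plain integers standing in for file_hashval() and opaque_hashval():

#include <stdio.h>

#define HASH_BITS 8
#define HASH_SIZE (1 << HASH_BITS)
#define HASH_MASK (HASH_SIZE - 1)

static unsigned int bucket(unsigned int file_hash, unsigned int cl_id,
			   unsigned int owner_hash)
{
	/* masking keeps the result in 0..HASH_SIZE-1 without a division */
	return (file_hash + cl_id + owner_hash) & HASH_MASK;
}

int main(void)
{
	printf("%u\n", bucket(12345, 42, 999)); /* always falls in 0..255 */
	return 0;
}
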
3849 3850
3850 static struct list_head lockowner_ino_hashtbl[LOCKOWNER_INO_HASH_SIZE]; 3851 static struct list_head lockowner_ino_hashtbl[LOCKOWNER_INO_HASH_SIZE];
3851 3852
3852 /* 3853 /*
3853 * TODO: Linux file offsets are _signed_ 64-bit quantities, which means that 3854 * TODO: Linux file offsets are _signed_ 64-bit quantities, which means that
3854 * we can't properly handle lock requests that go beyond the (2^63 - 1)-th 3855 * we can't properly handle lock requests that go beyond the (2^63 - 1)-th
3855 * byte, because of sign extension problems. Since NFSv4 calls for 64-bit 3856 * byte, because of sign extension problems. Since NFSv4 calls for 64-bit
3856 * locking, this prevents us from being completely protocol-compliant. The 3857 * locking, this prevents us from being completely protocol-compliant. The
3857 * real solution to this problem is to start using unsigned file offsets in 3858 * real solution to this problem is to start using unsigned file offsets in
3858 * the VFS, but this is a very deep change! 3859 * the VFS, but this is a very deep change!
3859 */ 3860 */
3860 static inline void 3861 static inline void
3861 nfs4_transform_lock_offset(struct file_lock *lock) 3862 nfs4_transform_lock_offset(struct file_lock *lock)
3862 { 3863 {
3863 if (lock->fl_start < 0) 3864 if (lock->fl_start < 0)
3864 lock->fl_start = OFFSET_MAX; 3865 lock->fl_start = OFFSET_MAX;
3865 if (lock->fl_end < 0) 3866 if (lock->fl_end < 0)
3866 lock->fl_end = OFFSET_MAX; 3867 lock->fl_end = OFFSET_MAX;
3867 } 3868 }
3868 3869
3869 /* Hack!: For now, we're defining this just so we can use a pointer to it 3870 /* Hack!: For now, we're defining this just so we can use a pointer to it
3870 * as a unique cookie to identify our (NFSv4's) posix locks. */ 3871 * as a unique cookie to identify our (NFSv4's) posix locks. */
3871 static const struct lock_manager_operations nfsd_posix_mng_ops = { 3872 static const struct lock_manager_operations nfsd_posix_mng_ops = {
3872 }; 3873 };
3873 3874
3874 static inline void 3875 static inline void
3875 nfs4_set_lock_denied(struct file_lock *fl, struct nfsd4_lock_denied *deny) 3876 nfs4_set_lock_denied(struct file_lock *fl, struct nfsd4_lock_denied *deny)
3876 { 3877 {
3877 struct nfs4_lockowner *lo; 3878 struct nfs4_lockowner *lo;
3878 3879
3879 if (fl->fl_lmops == &nfsd_posix_mng_ops) { 3880 if (fl->fl_lmops == &nfsd_posix_mng_ops) {
3880 lo = (struct nfs4_lockowner *) fl->fl_owner; 3881 lo = (struct nfs4_lockowner *) fl->fl_owner;
3881 deny->ld_owner.data = kmemdup(lo->lo_owner.so_owner.data, 3882 deny->ld_owner.data = kmemdup(lo->lo_owner.so_owner.data,
3882 lo->lo_owner.so_owner.len, GFP_KERNEL); 3883 lo->lo_owner.so_owner.len, GFP_KERNEL);
3883 if (!deny->ld_owner.data) 3884 if (!deny->ld_owner.data)
3884 /* We just don't care that much */ 3885 /* We just don't care that much */
3885 goto nevermind; 3886 goto nevermind;
3886 deny->ld_owner.len = lo->lo_owner.so_owner.len; 3887 deny->ld_owner.len = lo->lo_owner.so_owner.len;
3887 deny->ld_clientid = lo->lo_owner.so_client->cl_clientid; 3888 deny->ld_clientid = lo->lo_owner.so_client->cl_clientid;
3888 } else { 3889 } else {
3889 nevermind: 3890 nevermind:
3890 deny->ld_owner.len = 0; 3891 deny->ld_owner.len = 0;
3891 deny->ld_owner.data = NULL; 3892 deny->ld_owner.data = NULL;
3892 deny->ld_clientid.cl_boot = 0; 3893 deny->ld_clientid.cl_boot = 0;
3893 deny->ld_clientid.cl_id = 0; 3894 deny->ld_clientid.cl_id = 0;
3894 } 3895 }
3895 deny->ld_start = fl->fl_start; 3896 deny->ld_start = fl->fl_start;
3896 deny->ld_length = NFS4_MAX_UINT64; 3897 deny->ld_length = NFS4_MAX_UINT64;
3897 if (fl->fl_end != NFS4_MAX_UINT64) 3898 if (fl->fl_end != NFS4_MAX_UINT64)
3898 deny->ld_length = fl->fl_end - fl->fl_start + 1; 3899 deny->ld_length = fl->fl_end - fl->fl_start + 1;
3899 deny->ld_type = NFS4_READ_LT; 3900 deny->ld_type = NFS4_READ_LT;
3900 if (fl->fl_type != F_RDLCK) 3901 if (fl->fl_type != F_RDLCK)
3901 deny->ld_type = NFS4_WRITE_LT; 3902 deny->ld_type = NFS4_WRITE_LT;
3902 } 3903 }
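
The tail of nfs4_set_lock_denied() converts the VFS's inclusive [fl_start, fl_end] interval back into an NFSv4 (offset, length) pair. A standalone sketch of just that conversion, using uint64_t in place of the kernel's types:

#include <stdint.h>
#include <stdio.h>

#define NFS4_MAX_UINT64 (~(uint64_t)0)

static void to_nfs4_range(uint64_t fl_start, uint64_t fl_end,
			  uint64_t *offset, uint64_t *length)
{
	*offset = fl_start;
	*length = NFS4_MAX_UINT64;		/* default: "to end of file" */
	if (fl_end != NFS4_MAX_UINT64)
		*length = fl_end - fl_start + 1; /* inclusive end -> byte count */
}

int main(void)
{
	uint64_t off, len;

	to_nfs4_range(4096, 4195, &off, &len);
	printf("offset=%llu length=%llu\n",
	       (unsigned long long)off, (unsigned long long)len); /* 4096, 100 */

	to_nfs4_range(0, NFS4_MAX_UINT64, &off, &len);
	printf("length=%llu (to EOF)\n", (unsigned long long)len);
	return 0;
}
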
3903 3904
3904 static bool same_lockowner_ino(struct nfs4_lockowner *lo, struct inode *inode, clientid_t *clid, struct xdr_netobj *owner) 3905 static bool same_lockowner_ino(struct nfs4_lockowner *lo, struct inode *inode, clientid_t *clid, struct xdr_netobj *owner)
3905 { 3906 {
3906 struct nfs4_ol_stateid *lst; 3907 struct nfs4_ol_stateid *lst;
3907 3908
3908 if (!same_owner_str(&lo->lo_owner, owner, clid)) 3909 if (!same_owner_str(&lo->lo_owner, owner, clid))
3909 return false; 3910 return false;
3910 lst = list_first_entry(&lo->lo_owner.so_stateids, 3911 lst = list_first_entry(&lo->lo_owner.so_stateids,
3911 struct nfs4_ol_stateid, st_perstateowner); 3912 struct nfs4_ol_stateid, st_perstateowner);
3912 return lst->st_file->fi_inode == inode; 3913 return lst->st_file->fi_inode == inode;
3913 } 3914 }
3914 3915
3915 static struct nfs4_lockowner * 3916 static struct nfs4_lockowner *
3916 find_lockowner_str(struct inode *inode, clientid_t *clid, 3917 find_lockowner_str(struct inode *inode, clientid_t *clid,
3917 struct xdr_netobj *owner) 3918 struct xdr_netobj *owner)
3918 { 3919 {
3919 unsigned int hashval = lockowner_ino_hashval(inode, clid->cl_id, owner); 3920 unsigned int hashval = lockowner_ino_hashval(inode, clid->cl_id, owner);
3920 struct nfs4_lockowner *lo; 3921 struct nfs4_lockowner *lo;
3921 3922
3922 list_for_each_entry(lo, &lockowner_ino_hashtbl[hashval], lo_owner_ino_hash) { 3923 list_for_each_entry(lo, &lockowner_ino_hashtbl[hashval], lo_owner_ino_hash) {
3923 if (same_lockowner_ino(lo, inode, clid, owner)) 3924 if (same_lockowner_ino(lo, inode, clid, owner))
3924 return lo; 3925 return lo;
3925 } 3926 }
3926 return NULL; 3927 return NULL;
3927 } 3928 }
3928 3929
3929 static void hash_lockowner(struct nfs4_lockowner *lo, unsigned int strhashval, struct nfs4_client *clp, struct nfs4_ol_stateid *open_stp) 3930 static void hash_lockowner(struct nfs4_lockowner *lo, unsigned int strhashval, struct nfs4_client *clp, struct nfs4_ol_stateid *open_stp)
3930 { 3931 {
3931 struct inode *inode = open_stp->st_file->fi_inode; 3932 struct inode *inode = open_stp->st_file->fi_inode;
3932 unsigned int inohash = lockowner_ino_hashval(inode, 3933 unsigned int inohash = lockowner_ino_hashval(inode,
3933 clp->cl_clientid.cl_id, &lo->lo_owner.so_owner); 3934 clp->cl_clientid.cl_id, &lo->lo_owner.so_owner);
3934 3935
3935 list_add(&lo->lo_owner.so_strhash, &ownerstr_hashtbl[strhashval]); 3936 list_add(&lo->lo_owner.so_strhash, &ownerstr_hashtbl[strhashval]);
3936 list_add(&lo->lo_owner_ino_hash, &lockowner_ino_hashtbl[inohash]); 3937 list_add(&lo->lo_owner_ino_hash, &lockowner_ino_hashtbl[inohash]);
3937 list_add(&lo->lo_perstateid, &open_stp->st_lockowners); 3938 list_add(&lo->lo_perstateid, &open_stp->st_lockowners);
3938 } 3939 }
3939 3940
3940 /* 3941 /*
3941 * Alloc a lock owner structure. 3942 * Alloc a lock owner structure.
3942 * Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) have 3943 * Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) have
3943 * occurred. 3944 * occurred.
3944 * 3945 *
3945 * strhashval = ownerstr_hashval 3946 * strhashval = ownerstr_hashval
3946 */ 3947 */
3947 3948
3948 static struct nfs4_lockowner * 3949 static struct nfs4_lockowner *
3949 alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp, struct nfs4_ol_stateid *open_stp, struct nfsd4_lock *lock) { 3950 alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp, struct nfs4_ol_stateid *open_stp, struct nfsd4_lock *lock) {
3950 struct nfs4_lockowner *lo; 3951 struct nfs4_lockowner *lo;
3951 3952
3952 lo = alloc_stateowner(lockowner_slab, &lock->lk_new_owner, clp); 3953 lo = alloc_stateowner(lockowner_slab, &lock->lk_new_owner, clp);
3953 if (!lo) 3954 if (!lo)
3954 return NULL; 3955 return NULL;
3955 INIT_LIST_HEAD(&lo->lo_owner.so_stateids); 3956 INIT_LIST_HEAD(&lo->lo_owner.so_stateids);
3956 lo->lo_owner.so_is_open_owner = 0; 3957 lo->lo_owner.so_is_open_owner = 0;
3957 /* It is the openowner seqid that will be incremented in encode in the 3958 /* It is the openowner seqid that will be incremented in encode in the
3958 * case of new lockowners; so increment the lock seqid manually: */ 3959 * case of new lockowners; so increment the lock seqid manually: */
3959 lo->lo_owner.so_seqid = lock->lk_new_lock_seqid + 1; 3960 lo->lo_owner.so_seqid = lock->lk_new_lock_seqid + 1;
3960 hash_lockowner(lo, strhashval, clp, open_stp); 3961 hash_lockowner(lo, strhashval, clp, open_stp);
3961 return lo; 3962 return lo;
3962 } 3963 }
3963 3964
3964 static struct nfs4_ol_stateid * 3965 static struct nfs4_ol_stateid *
3965 alloc_init_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fp, struct nfs4_ol_stateid *open_stp) 3966 alloc_init_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fp, struct nfs4_ol_stateid *open_stp)
3966 { 3967 {
3967 struct nfs4_ol_stateid *stp; 3968 struct nfs4_ol_stateid *stp;
3968 struct nfs4_client *clp = lo->lo_owner.so_client; 3969 struct nfs4_client *clp = lo->lo_owner.so_client;
3969 3970
3970 stp = nfs4_alloc_stateid(clp); 3971 stp = nfs4_alloc_stateid(clp);
3971 if (stp == NULL) 3972 if (stp == NULL)
3972 return NULL; 3973 return NULL;
3973 init_stid(&stp->st_stid, clp, NFS4_LOCK_STID); 3974 init_stid(&stp->st_stid, clp, NFS4_LOCK_STID);
3974 list_add(&stp->st_perfile, &fp->fi_stateids); 3975 list_add(&stp->st_perfile, &fp->fi_stateids);
3975 list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids); 3976 list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
3976 stp->st_stateowner = &lo->lo_owner; 3977 stp->st_stateowner = &lo->lo_owner;
3977 get_nfs4_file(fp); 3978 get_nfs4_file(fp);
3978 stp->st_file = fp; 3979 stp->st_file = fp;
3979 stp->st_access_bmap = 0; 3980 stp->st_access_bmap = 0;
3980 stp->st_deny_bmap = open_stp->st_deny_bmap; 3981 stp->st_deny_bmap = open_stp->st_deny_bmap;
3981 stp->st_openstp = open_stp; 3982 stp->st_openstp = open_stp;
3982 return stp; 3983 return stp;
3983 } 3984 }
3984 3985
3985 static int 3986 static int
3986 check_lock_length(u64 offset, u64 length) 3987 check_lock_length(u64 offset, u64 length)
3987 { 3988 {
3988 return ((length == 0) || ((length != NFS4_MAX_UINT64) && 3989 return ((length == 0) || ((length != NFS4_MAX_UINT64) &&
3989 LOFF_OVERFLOW(offset, length))); 3990 LOFF_OVERFLOW(offset, length)));
3990 } 3991 }
3991 3992
3992 static void get_lock_access(struct nfs4_ol_stateid *lock_stp, u32 access) 3993 static void get_lock_access(struct nfs4_ol_stateid *lock_stp, u32 access)
3993 { 3994 {
3994 struct nfs4_file *fp = lock_stp->st_file; 3995 struct nfs4_file *fp = lock_stp->st_file;
3995 int oflag = nfs4_access_to_omode(access); 3996 int oflag = nfs4_access_to_omode(access);
3996 3997
3997 if (test_access(access, lock_stp)) 3998 if (test_access(access, lock_stp))
3998 return; 3999 return;
3999 nfs4_file_get_access(fp, oflag); 4000 nfs4_file_get_access(fp, oflag);
4000 set_access(access, lock_stp); 4001 set_access(access, lock_stp);
4001 } 4002 }
4002 4003
4003 static __be32 lookup_or_create_lock_state(struct nfsd4_compound_state *cstate, struct nfs4_ol_stateid *ost, struct nfsd4_lock *lock, struct nfs4_ol_stateid **lst, bool *new) 4004 static __be32 lookup_or_create_lock_state(struct nfsd4_compound_state *cstate, struct nfs4_ol_stateid *ost, struct nfsd4_lock *lock, struct nfs4_ol_stateid **lst, bool *new)
4004 { 4005 {
4005 struct nfs4_file *fi = ost->st_file; 4006 struct nfs4_file *fi = ost->st_file;
4006 struct nfs4_openowner *oo = openowner(ost->st_stateowner); 4007 struct nfs4_openowner *oo = openowner(ost->st_stateowner);
4007 struct nfs4_client *cl = oo->oo_owner.so_client; 4008 struct nfs4_client *cl = oo->oo_owner.so_client;
4008 struct nfs4_lockowner *lo; 4009 struct nfs4_lockowner *lo;
4009 unsigned int strhashval; 4010 unsigned int strhashval;
4010 4011
4011 lo = find_lockowner_str(fi->fi_inode, &cl->cl_clientid, &lock->v.new.owner); 4012 lo = find_lockowner_str(fi->fi_inode, &cl->cl_clientid, &lock->v.new.owner);
4012 if (lo) { 4013 if (lo) {
4013 if (!cstate->minorversion) 4014 if (!cstate->minorversion)
4014 return nfserr_bad_seqid; 4015 return nfserr_bad_seqid;
4015 /* XXX: a lockowner always has exactly one stateid: */ 4016 /* XXX: a lockowner always has exactly one stateid: */
4016 *lst = list_first_entry(&lo->lo_owner.so_stateids, 4017 *lst = list_first_entry(&lo->lo_owner.so_stateids,
4017 struct nfs4_ol_stateid, st_perstateowner); 4018 struct nfs4_ol_stateid, st_perstateowner);
4018 return nfs_ok; 4019 return nfs_ok;
4019 } 4020 }
4020 strhashval = ownerstr_hashval(cl->cl_clientid.cl_id, 4021 strhashval = ownerstr_hashval(cl->cl_clientid.cl_id,
4021 &lock->v.new.owner); 4022 &lock->v.new.owner);
4022 lo = alloc_init_lock_stateowner(strhashval, cl, ost, lock); 4023 lo = alloc_init_lock_stateowner(strhashval, cl, ost, lock);
4023 if (lo == NULL) 4024 if (lo == NULL)
4024 return nfserr_jukebox; 4025 return nfserr_jukebox;
4025 *lst = alloc_init_lock_stateid(lo, fi, ost); 4026 *lst = alloc_init_lock_stateid(lo, fi, ost);
4026 if (*lst == NULL) { 4027 if (*lst == NULL) {
4027 release_lockowner(lo); 4028 release_lockowner(lo);
4028 return nfserr_jukebox; 4029 return nfserr_jukebox;
4029 } 4030 }
4030 *new = true; 4031 *new = true;
4031 return nfs_ok; 4032 return nfs_ok;
4032 } 4033 }
4033 4034
4034 /* 4035 /*
4035 * LOCK operation 4036 * LOCK operation
4036 */ 4037 */
4037 __be32 4038 __be32
4038 nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 4039 nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4039 struct nfsd4_lock *lock) 4040 struct nfsd4_lock *lock)
4040 { 4041 {
4041 struct nfs4_openowner *open_sop = NULL; 4042 struct nfs4_openowner *open_sop = NULL;
4042 struct nfs4_lockowner *lock_sop = NULL; 4043 struct nfs4_lockowner *lock_sop = NULL;
4043 struct nfs4_ol_stateid *lock_stp; 4044 struct nfs4_ol_stateid *lock_stp;
4044 struct file *filp = NULL; 4045 struct file *filp = NULL;
4045 struct file_lock file_lock; 4046 struct file_lock file_lock;
4046 struct file_lock conflock; 4047 struct file_lock conflock;
4047 __be32 status = 0; 4048 __be32 status = 0;
4048 bool new_state = false; 4049 bool new_state = false;
4049 int lkflg; 4050 int lkflg;
4050 int err; 4051 int err;
4051 4052
4052 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n", 4053 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
4053 (long long) lock->lk_offset, 4054 (long long) lock->lk_offset,
4054 (long long) lock->lk_length); 4055 (long long) lock->lk_length);
4055 4056
4056 if (check_lock_length(lock->lk_offset, lock->lk_length)) 4057 if (check_lock_length(lock->lk_offset, lock->lk_length))
4057 return nfserr_inval; 4058 return nfserr_inval;
4058 4059
4059 if ((status = fh_verify(rqstp, &cstate->current_fh, 4060 if ((status = fh_verify(rqstp, &cstate->current_fh,
4060 S_IFREG, NFSD_MAY_LOCK))) { 4061 S_IFREG, NFSD_MAY_LOCK))) {
4061 dprintk("NFSD: nfsd4_lock: permission denied!\n"); 4062 dprintk("NFSD: nfsd4_lock: permission denied!\n");
4062 return status; 4063 return status;
4063 } 4064 }
4064 4065
4065 nfs4_lock_state(); 4066 nfs4_lock_state();
4066 4067
4067 if (lock->lk_is_new) { 4068 if (lock->lk_is_new) {
4068 struct nfs4_ol_stateid *open_stp = NULL; 4069 struct nfs4_ol_stateid *open_stp = NULL;
4069 4070
4070 if (nfsd4_has_session(cstate)) 4071 if (nfsd4_has_session(cstate))
4071 /* See rfc 5661 18.10.3: given clientid is ignored: */ 4072 /* See rfc 5661 18.10.3: given clientid is ignored: */
4072 memcpy(&lock->v.new.clientid, 4073 memcpy(&lock->v.new.clientid,
4073 &cstate->session->se_client->cl_clientid, 4074 &cstate->session->se_client->cl_clientid,
4074 sizeof(clientid_t)); 4075 sizeof(clientid_t));
4075 4076
4076 status = nfserr_stale_clientid; 4077 status = nfserr_stale_clientid;
4077 if (STALE_CLIENTID(&lock->lk_new_clientid)) 4078 if (STALE_CLIENTID(&lock->lk_new_clientid))
4078 goto out; 4079 goto out;
4079 4080
4080 /* validate and update open stateid and open seqid */ 4081 /* validate and update open stateid and open seqid */
4081 status = nfs4_preprocess_confirmed_seqid_op(cstate, 4082 status = nfs4_preprocess_confirmed_seqid_op(cstate,
4082 lock->lk_new_open_seqid, 4083 lock->lk_new_open_seqid,
4083 &lock->lk_new_open_stateid, 4084 &lock->lk_new_open_stateid,
4084 &open_stp); 4085 &open_stp);
4085 if (status) 4086 if (status)
4086 goto out; 4087 goto out;
4087 open_sop = openowner(open_stp->st_stateowner); 4088 open_sop = openowner(open_stp->st_stateowner);
4088 status = nfserr_bad_stateid; 4089 status = nfserr_bad_stateid;
4089 if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid, 4090 if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid,
4090 &lock->v.new.clientid)) 4091 &lock->v.new.clientid))
4091 goto out; 4092 goto out;
4092 status = lookup_or_create_lock_state(cstate, open_stp, lock, 4093 status = lookup_or_create_lock_state(cstate, open_stp, lock,
4093 &lock_stp, &new_state); 4094 &lock_stp, &new_state);
4094 } else 4095 } else
4095 status = nfs4_preprocess_seqid_op(cstate, 4096 status = nfs4_preprocess_seqid_op(cstate,
4096 lock->lk_old_lock_seqid, 4097 lock->lk_old_lock_seqid,
4097 &lock->lk_old_lock_stateid, 4098 &lock->lk_old_lock_stateid,
4098 NFS4_LOCK_STID, &lock_stp); 4099 NFS4_LOCK_STID, &lock_stp);
4099 if (status) 4100 if (status)
4100 goto out; 4101 goto out;
4101 lock_sop = lockowner(lock_stp->st_stateowner); 4102 lock_sop = lockowner(lock_stp->st_stateowner);
4102 4103
4103 lkflg = setlkflg(lock->lk_type); 4104 lkflg = setlkflg(lock->lk_type);
4104 status = nfs4_check_openmode(lock_stp, lkflg); 4105 status = nfs4_check_openmode(lock_stp, lkflg);
4105 if (status) 4106 if (status)
4106 goto out; 4107 goto out;
4107 4108
4108 status = nfserr_grace; 4109 status = nfserr_grace;
4109 if (locks_in_grace() && !lock->lk_reclaim) 4110 if (locks_in_grace() && !lock->lk_reclaim)
4110 goto out; 4111 goto out;
4111 status = nfserr_no_grace; 4112 status = nfserr_no_grace;
4112 if (!locks_in_grace() && lock->lk_reclaim) 4113 if (!locks_in_grace() && lock->lk_reclaim)
4113 goto out; 4114 goto out;
4114 4115
4115 locks_init_lock(&file_lock); 4116 locks_init_lock(&file_lock);
4116 switch (lock->lk_type) { 4117 switch (lock->lk_type) {
4117 case NFS4_READ_LT: 4118 case NFS4_READ_LT:
4118 case NFS4_READW_LT: 4119 case NFS4_READW_LT:
4119 filp = find_readable_file(lock_stp->st_file); 4120 filp = find_readable_file(lock_stp->st_file);
4120 if (filp) 4121 if (filp)
4121 get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ); 4122 get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ);
4122 file_lock.fl_type = F_RDLCK; 4123 file_lock.fl_type = F_RDLCK;
4123 break; 4124 break;
4124 case NFS4_WRITE_LT: 4125 case NFS4_WRITE_LT:
4125 case NFS4_WRITEW_LT: 4126 case NFS4_WRITEW_LT:
4126 filp = find_writeable_file(lock_stp->st_file); 4127 filp = find_writeable_file(lock_stp->st_file);
4127 if (filp) 4128 if (filp)
4128 get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE); 4129 get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE);
4129 file_lock.fl_type = F_WRLCK; 4130 file_lock.fl_type = F_WRLCK;
4130 break; 4131 break;
4131 default: 4132 default:
4132 status = nfserr_inval; 4133 status = nfserr_inval;
4133 goto out; 4134 goto out;
4134 } 4135 }
4135 if (!filp) { 4136 if (!filp) {
4136 status = nfserr_openmode; 4137 status = nfserr_openmode;
4137 goto out; 4138 goto out;
4138 } 4139 }
4139 file_lock.fl_owner = (fl_owner_t)lock_sop; 4140 file_lock.fl_owner = (fl_owner_t)lock_sop;
4140 file_lock.fl_pid = current->tgid; 4141 file_lock.fl_pid = current->tgid;
4141 file_lock.fl_file = filp; 4142 file_lock.fl_file = filp;
4142 file_lock.fl_flags = FL_POSIX; 4143 file_lock.fl_flags = FL_POSIX;
4143 file_lock.fl_lmops = &nfsd_posix_mng_ops; 4144 file_lock.fl_lmops = &nfsd_posix_mng_ops;
4144 4145
4145 file_lock.fl_start = lock->lk_offset; 4146 file_lock.fl_start = lock->lk_offset;
4146 file_lock.fl_end = last_byte_offset(lock->lk_offset, lock->lk_length); 4147 file_lock.fl_end = last_byte_offset(lock->lk_offset, lock->lk_length);
4147 nfs4_transform_lock_offset(&file_lock); 4148 nfs4_transform_lock_offset(&file_lock);
4148 4149
4149 /* 4150 /*
4150 * Try to lock the file in the VFS. 4151 * Try to lock the file in the VFS.
4151 * Note: locks.c uses the BKL to protect the inode's lock list. 4152 * Note: locks.c uses the BKL to protect the inode's lock list.
4152 */ 4153 */
4153 4154
4154 err = vfs_lock_file(filp, F_SETLK, &file_lock, &conflock); 4155 err = vfs_lock_file(filp, F_SETLK, &file_lock, &conflock);
4155 switch (-err) { 4156 switch (-err) {
4156 case 0: /* success! */ 4157 case 0: /* success! */
4157 update_stateid(&lock_stp->st_stid.sc_stateid); 4158 update_stateid(&lock_stp->st_stid.sc_stateid);
4158 memcpy(&lock->lk_resp_stateid, &lock_stp->st_stid.sc_stateid, 4159 memcpy(&lock->lk_resp_stateid, &lock_stp->st_stid.sc_stateid,
4159 sizeof(stateid_t)); 4160 sizeof(stateid_t));
4160 status = 0; 4161 status = 0;
4161 break; 4162 break;
4162 case (EAGAIN): /* conflock holds conflicting lock */ 4163 case (EAGAIN): /* conflock holds conflicting lock */
4163 status = nfserr_denied; 4164 status = nfserr_denied;
4164 dprintk("NFSD: nfsd4_lock: conflicting lock found!\n"); 4165 dprintk("NFSD: nfsd4_lock: conflicting lock found!\n");
4165 nfs4_set_lock_denied(&conflock, &lock->lk_denied); 4166 nfs4_set_lock_denied(&conflock, &lock->lk_denied);
4166 break; 4167 break;
4167 case (EDEADLK): 4168 case (EDEADLK):
4168 status = nfserr_deadlock; 4169 status = nfserr_deadlock;
4169 break; 4170 break;
4170 default: 4171 default:
4171 dprintk("NFSD: nfsd4_lock: vfs_lock_file() failed! status %d\n",err); 4172 dprintk("NFSD: nfsd4_lock: vfs_lock_file() failed! status %d\n",err);
4172 status = nfserrno(err); 4173 status = nfserrno(err);
4173 break; 4174 break;
4174 } 4175 }
4175 out: 4176 out:
4176 if (status && new_state) 4177 if (status && new_state)
4177 release_lockowner(lock_sop); 4178 release_lockowner(lock_sop);
4178 if (!cstate->replay_owner) 4179 if (!cstate->replay_owner)
4179 nfs4_unlock_state(); 4180 nfs4_unlock_state();
4180 return status; 4181 return status;
4181 } 4182 }
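
The switch on -err above translates vfs_lock_file()'s negative errno into an NFSv4 status. A condensed user-space model of that mapping; the status values are illustrative stand-ins for the kernel's __be32 nfserr_* constants, and the default arm stands in for nfserrno():

#include <errno.h>
#include <stdio.h>

enum { NFS_OK = 0, NFSERR_DENIED = 10010, NFSERR_DEADLOCK = 10045,
       NFSERR_SERVERFAULT = 10006 };

static int map_lock_error(int err)	/* err is 0 or a negative errno */
{
	switch (-err) {
	case 0:
		return NFS_OK;		/* lock granted */
	case EAGAIN:
		return NFSERR_DENIED;	/* a conflicting lock is held */
	case EDEADLK:
		return NFSERR_DEADLOCK;	/* blocking would deadlock */
	default:
		return NFSERR_SERVERFAULT; /* generic errno translation */
	}
}

int main(void)
{
	printf("%d %d %d\n", map_lock_error(0), map_lock_error(-EAGAIN),
	       map_lock_error(-EDEADLK));
	return 0;
}
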
4182 4183
4183 /* 4184 /*
4184 * The NFSv4 spec allows a client to do a LOCKT without holding an OPEN, 4185 * The NFSv4 spec allows a client to do a LOCKT without holding an OPEN,
4185 * so we do a temporary open here just to get an open file to pass to 4186 * so we do a temporary open here just to get an open file to pass to
4186 * vfs_test_lock. (Arguably perhaps test_lock should be done with an 4187 * vfs_test_lock. (Arguably perhaps test_lock should be done with an
4187 * inode operation.) 4188 * inode operation.)
4188 */ 4189 */
4189 static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock) 4190 static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock)
4190 { 4191 {
4191 struct file *file; 4192 struct file *file;
4192 __be32 err = nfsd_open(rqstp, fhp, S_IFREG, NFSD_MAY_READ, &file); 4193 __be32 err = nfsd_open(rqstp, fhp, S_IFREG, NFSD_MAY_READ, &file);
4193 if (!err) { 4194 if (!err) {
4194 err = nfserrno(vfs_test_lock(file, lock)); 4195 err = nfserrno(vfs_test_lock(file, lock));
4195 nfsd_close(file); 4196 nfsd_close(file);
4196 } 4197 }
4197 return err; 4198 return err;
4198 } 4199 }
4199 4200
4200 /* 4201 /*
4201 * LOCKT operation 4202 * LOCKT operation
4202 */ 4203 */
4203 __be32 4204 __be32
4204 nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 4205 nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4205 struct nfsd4_lockt *lockt) 4206 struct nfsd4_lockt *lockt)
4206 { 4207 {
4207 struct inode *inode; 4208 struct inode *inode;
4208 struct file_lock file_lock; 4209 struct file_lock file_lock;
4209 struct nfs4_lockowner *lo; 4210 struct nfs4_lockowner *lo;
4210 __be32 status; 4211 __be32 status;
4211 4212
4212 if (locks_in_grace()) 4213 if (locks_in_grace())
4213 return nfserr_grace; 4214 return nfserr_grace;
4214 4215
4215 if (check_lock_length(lockt->lt_offset, lockt->lt_length)) 4216 if (check_lock_length(lockt->lt_offset, lockt->lt_length))
4216 return nfserr_inval; 4217 return nfserr_inval;
4217 4218
4218 nfs4_lock_state(); 4219 nfs4_lock_state();
4219 4220
4220 status = nfserr_stale_clientid; 4221 status = nfserr_stale_clientid;
4221 if (!nfsd4_has_session(cstate) && STALE_CLIENTID(&lockt->lt_clientid)) 4222 if (!nfsd4_has_session(cstate) && STALE_CLIENTID(&lockt->lt_clientid))
4222 goto out; 4223 goto out;
4223 4224
4224 if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0))) 4225 if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
4225 goto out; 4226 goto out;
4226 4227
4227 inode = cstate->current_fh.fh_dentry->d_inode; 4228 inode = cstate->current_fh.fh_dentry->d_inode;
4228 locks_init_lock(&file_lock); 4229 locks_init_lock(&file_lock);
4229 switch (lockt->lt_type) { 4230 switch (lockt->lt_type) {
4230 case NFS4_READ_LT: 4231 case NFS4_READ_LT:
4231 case NFS4_READW_LT: 4232 case NFS4_READW_LT:
4232 file_lock.fl_type = F_RDLCK; 4233 file_lock.fl_type = F_RDLCK;
4233 break; 4234 break;
4234 case NFS4_WRITE_LT: 4235 case NFS4_WRITE_LT:
4235 case NFS4_WRITEW_LT: 4236 case NFS4_WRITEW_LT:
4236 file_lock.fl_type = F_WRLCK; 4237 file_lock.fl_type = F_WRLCK;
4237 break; 4238 break;
4238 default: 4239 default:
4239 dprintk("NFSD: nfs4_lockt: bad lock type!\n"); 4240 dprintk("NFSD: nfs4_lockt: bad lock type!\n");
4240 status = nfserr_inval; 4241 status = nfserr_inval;
4241 goto out; 4242 goto out;
4242 } 4243 }
4243 4244
4244 lo = find_lockowner_str(inode, &lockt->lt_clientid, &lockt->lt_owner); 4245 lo = find_lockowner_str(inode, &lockt->lt_clientid, &lockt->lt_owner);
4245 if (lo) 4246 if (lo)
4246 file_lock.fl_owner = (fl_owner_t)lo; 4247 file_lock.fl_owner = (fl_owner_t)lo;
4247 file_lock.fl_pid = current->tgid; 4248 file_lock.fl_pid = current->tgid;
4248 file_lock.fl_flags = FL_POSIX; 4249 file_lock.fl_flags = FL_POSIX;
4249 4250
4250 file_lock.fl_start = lockt->lt_offset; 4251 file_lock.fl_start = lockt->lt_offset;
4251 file_lock.fl_end = last_byte_offset(lockt->lt_offset, lockt->lt_length); 4252 file_lock.fl_end = last_byte_offset(lockt->lt_offset, lockt->lt_length);
4252 4253
4253 nfs4_transform_lock_offset(&file_lock); 4254 nfs4_transform_lock_offset(&file_lock);
4254 4255
4255 status = nfsd_test_lock(rqstp, &cstate->current_fh, &file_lock); 4256 status = nfsd_test_lock(rqstp, &cstate->current_fh, &file_lock);
4256 if (status) 4257 if (status)
4257 goto out; 4258 goto out;
4258 4259
4259 if (file_lock.fl_type != F_UNLCK) { 4260 if (file_lock.fl_type != F_UNLCK) {
4260 status = nfserr_denied; 4261 status = nfserr_denied;
4261 nfs4_set_lock_denied(&file_lock, &lockt->lt_denied); 4262 nfs4_set_lock_denied(&file_lock, &lockt->lt_denied);
4262 } 4263 }
4263 out: 4264 out:
4264 nfs4_unlock_state(); 4265 nfs4_unlock_state();
4265 return status; 4266 return status;
4266 } 4267 }
4267 4268
4268 __be32 4269 __be32
4269 nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 4270 nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4270 struct nfsd4_locku *locku) 4271 struct nfsd4_locku *locku)
4271 { 4272 {
4272 struct nfs4_ol_stateid *stp; 4273 struct nfs4_ol_stateid *stp;
4273 struct file *filp = NULL; 4274 struct file *filp = NULL;
4274 struct file_lock file_lock; 4275 struct file_lock file_lock;
4275 __be32 status; 4276 __be32 status;
4276 int err; 4277 int err;
4277 4278
4278 dprintk("NFSD: nfsd4_locku: start=%Ld length=%Ld\n", 4279 dprintk("NFSD: nfsd4_locku: start=%Ld length=%Ld\n",
4279 (long long) locku->lu_offset, 4280 (long long) locku->lu_offset,
4280 (long long) locku->lu_length); 4281 (long long) locku->lu_length);
4281 4282
4282 if (check_lock_length(locku->lu_offset, locku->lu_length)) 4283 if (check_lock_length(locku->lu_offset, locku->lu_length))
4283 return nfserr_inval; 4284 return nfserr_inval;
4284 4285
4285 nfs4_lock_state(); 4286 nfs4_lock_state();
4286 4287
4287 status = nfs4_preprocess_seqid_op(cstate, locku->lu_seqid, 4288 status = nfs4_preprocess_seqid_op(cstate, locku->lu_seqid,
4288 &locku->lu_stateid, NFS4_LOCK_STID, &stp); 4289 &locku->lu_stateid, NFS4_LOCK_STID, &stp);
4289 if (status) 4290 if (status)
4290 goto out; 4291 goto out;
4291 filp = find_any_file(stp->st_file); 4292 filp = find_any_file(stp->st_file);
4292 if (!filp) { 4293 if (!filp) {
4293 status = nfserr_lock_range; 4294 status = nfserr_lock_range;
4294 goto out; 4295 goto out;
4295 } 4296 }
4296 BUG_ON(!filp); 4297 BUG_ON(!filp);
4297 locks_init_lock(&file_lock); 4298 locks_init_lock(&file_lock);
4298 file_lock.fl_type = F_UNLCK; 4299 file_lock.fl_type = F_UNLCK;
4299 file_lock.fl_owner = (fl_owner_t)lockowner(stp->st_stateowner); 4300 file_lock.fl_owner = (fl_owner_t)lockowner(stp->st_stateowner);
4300 file_lock.fl_pid = current->tgid; 4301 file_lock.fl_pid = current->tgid;
4301 file_lock.fl_file = filp; 4302 file_lock.fl_file = filp;
4302 file_lock.fl_flags = FL_POSIX; 4303 file_lock.fl_flags = FL_POSIX;
4303 file_lock.fl_lmops = &nfsd_posix_mng_ops; 4304 file_lock.fl_lmops = &nfsd_posix_mng_ops;
4304 file_lock.fl_start = locku->lu_offset; 4305 file_lock.fl_start = locku->lu_offset;
4305 4306
4306 file_lock.fl_end = last_byte_offset(locku->lu_offset, locku->lu_length); 4307 file_lock.fl_end = last_byte_offset(locku->lu_offset, locku->lu_length);
4307 nfs4_transform_lock_offset(&file_lock); 4308 nfs4_transform_lock_offset(&file_lock);
4308 4309
4309 /* 4310 /*
4310 * Try to unlock the file in the VFS. 4311 * Try to unlock the file in the VFS.
4311 */ 4312 */
4312 err = vfs_lock_file(filp, F_SETLK, &file_lock, NULL); 4313 err = vfs_lock_file(filp, F_SETLK, &file_lock, NULL);
4313 if (err) { 4314 if (err) {
4314 dprintk("NFSD: nfs4_locku: vfs_lock_file failed!\n"); 4315 dprintk("NFSD: nfs4_locku: vfs_lock_file failed!\n");
4315 goto out_nfserr; 4316 goto out_nfserr;
4316 } 4317 }
4317 /* 4318 /*
4318 * OK, unlock succeeded; the only thing left to do is update the stateid. 4319 * OK, unlock succeeded; the only thing left to do is update the stateid.
4319 */ 4320 */
4320 update_stateid(&stp->st_stid.sc_stateid); 4321 update_stateid(&stp->st_stid.sc_stateid);
4321 memcpy(&locku->lu_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t)); 4322 memcpy(&locku->lu_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
4322 4323
4323 out: 4324 out:
4324 if (!cstate->replay_owner) 4325 if (!cstate->replay_owner)
4325 nfs4_unlock_state(); 4326 nfs4_unlock_state();
4326 return status; 4327 return status;
4327 4328
4328 out_nfserr: 4329 out_nfserr:
4329 status = nfserrno(err); 4330 status = nfserrno(err);
4330 goto out; 4331 goto out;
4331 } 4332 }
4332 4333
4333 /* 4334 /*
4334 * returns 4335 * returns
4335 * 1: locks held by lockowner 4336 * 1: locks held by lockowner
4336 * 0: no locks held by lockowner 4337 * 0: no locks held by lockowner
4337 */ 4338 */
4338 static int 4339 static int
4339 check_for_locks(struct nfs4_file *filp, struct nfs4_lockowner *lowner) 4340 check_for_locks(struct nfs4_file *filp, struct nfs4_lockowner *lowner)
4340 { 4341 {
4341 struct file_lock **flpp; 4342 struct file_lock **flpp;
4342 struct inode *inode = filp->fi_inode; 4343 struct inode *inode = filp->fi_inode;
4343 int status = 0; 4344 int status = 0;
4344 4345
4345 lock_flocks(); 4346 lock_flocks();
4346 for (flpp = &inode->i_flock; *flpp != NULL; flpp = &(*flpp)->fl_next) { 4347 for (flpp = &inode->i_flock; *flpp != NULL; flpp = &(*flpp)->fl_next) {
4347 if ((*flpp)->fl_owner == (fl_owner_t)lowner) { 4348 if ((*flpp)->fl_owner == (fl_owner_t)lowner) {
4348 status = 1; 4349 status = 1;
4349 goto out; 4350 goto out;
4350 } 4351 }
4351 } 4352 }
4352 out: 4353 out:
4353 unlock_flocks(); 4354 unlock_flocks();
4354 return status; 4355 return status;
4355 } 4356 }
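
check_for_locks() walks the inode's lock list with a pointer-to-pointer (struct file_lock **), the usual singly-linked-list idiom. The same pattern on a toy node type, purely for illustration:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct node {
	const void *owner;
	struct node *next;
};

/* Walk the list through a pointer-to-pointer, as the kernel code does with
 * &inode->i_flock, and report whether any entry belongs to <owner>. */
static bool owner_holds_lock(struct node *head, const void *owner)
{
	struct node **pp;

	for (pp = &head; *pp != NULL; pp = &(*pp)->next)
		if ((*pp)->owner == owner)
			return true;
	return false;
}

int main(void)
{
	int a, b, c;
	struct node n2 = { &b, NULL }, n1 = { &a, &n2 };

	printf("%d %d\n", owner_holds_lock(&n1, &b),	/* 1: found */
	       owner_holds_lock(&n1, &c));		/* 0: not on the list */
	return 0;
}
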
4356 4357
4357 __be32 4358 __be32
4358 nfsd4_release_lockowner(struct svc_rqst *rqstp, 4359 nfsd4_release_lockowner(struct svc_rqst *rqstp,
4359 struct nfsd4_compound_state *cstate, 4360 struct nfsd4_compound_state *cstate,
4360 struct nfsd4_release_lockowner *rlockowner) 4361 struct nfsd4_release_lockowner *rlockowner)
4361 { 4362 {
4362 clientid_t *clid = &rlockowner->rl_clientid; 4363 clientid_t *clid = &rlockowner->rl_clientid;
4363 struct nfs4_stateowner *sop; 4364 struct nfs4_stateowner *sop;
4364 struct nfs4_lockowner *lo; 4365 struct nfs4_lockowner *lo;
4365 struct nfs4_ol_stateid *stp; 4366 struct nfs4_ol_stateid *stp;
4366 struct xdr_netobj *owner = &rlockowner->rl_owner; 4367 struct xdr_netobj *owner = &rlockowner->rl_owner;
4367 struct list_head matches; 4368 struct list_head matches;
4368 unsigned int hashval = ownerstr_hashval(clid->cl_id, owner); 4369 unsigned int hashval = ownerstr_hashval(clid->cl_id, owner);
4369 __be32 status; 4370 __be32 status;
4370 4371
4371 dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n", 4372 dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n",
4372 clid->cl_boot, clid->cl_id); 4373 clid->cl_boot, clid->cl_id);
4373 4374
4374 /* XXX check for lease expiration */ 4375 /* XXX check for lease expiration */
4375 4376
4376 status = nfserr_stale_clientid; 4377 status = nfserr_stale_clientid;
4377 if (STALE_CLIENTID(clid)) 4378 if (STALE_CLIENTID(clid))
4378 return status; 4379 return status;
4379 4380
4380 nfs4_lock_state(); 4381 nfs4_lock_state();
4381 4382
4382 status = nfserr_locks_held; 4383 status = nfserr_locks_held;
4383 INIT_LIST_HEAD(&matches); 4384 INIT_LIST_HEAD(&matches);
4384 4385
4385 list_for_each_entry(sop, &ownerstr_hashtbl[hashval], so_strhash) { 4386 list_for_each_entry(sop, &ownerstr_hashtbl[hashval], so_strhash) {
4386 if (sop->so_is_open_owner) 4387 if (sop->so_is_open_owner)
4387 continue; 4388 continue;
4388 if (!same_owner_str(sop, owner, clid)) 4389 if (!same_owner_str(sop, owner, clid))
4389 continue; 4390 continue;
4390 list_for_each_entry(stp, &sop->so_stateids, 4391 list_for_each_entry(stp, &sop->so_stateids,
4391 st_perstateowner) { 4392 st_perstateowner) {
4392 lo = lockowner(sop); 4393 lo = lockowner(sop);
4393 if (check_for_locks(stp->st_file, lo)) 4394 if (check_for_locks(stp->st_file, lo))
4394 goto out; 4395 goto out;
4395 list_add(&lo->lo_list, &matches); 4396 list_add(&lo->lo_list, &matches);
4396 } 4397 }
4397 } 4398 }
4398 /* Clients probably won't expect us to return with some (but not all) 4399 /* Clients probably won't expect us to return with some (but not all)
4399 * of the lockowner state released; so don't release any until all 4400 * of the lockowner state released; so don't release any until all
4400 * have been checked. */ 4401 * have been checked. */
4401 status = nfs_ok; 4402 status = nfs_ok;
4402 while (!list_empty(&matches)) { 4403 while (!list_empty(&matches)) {
4403 lo = list_entry(matches.next, struct nfs4_lockowner, 4404 lo = list_entry(matches.next, struct nfs4_lockowner,
4404 lo_list); 4405 lo_list);
4405 /* unhash_stateowner deletes so_perclient only 4406 /* unhash_stateowner deletes so_perclient only
4406 * for openowners. */ 4407 * for openowners. */
4407 list_del(&lo->lo_list); 4408 list_del(&lo->lo_list);
4408 release_lockowner(lo); 4409 release_lockowner(lo);
4409 } 4410 }
4410 out: 4411 out:
4411 nfs4_unlock_state(); 4412 nfs4_unlock_state();
4412 return status; 4413 return status;
4413 } 4414 }
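
Per the comment above, nothing is released until every matching lockowner has been checked for live locks. A condensed model of that all-or-nothing, two-pass shape (the types and names here are invented for the sketch):

#include <stdbool.h>
#include <stdio.h>

struct owner { bool holds_lock; bool released; };

static bool release_all_or_nothing(struct owner *owners, int n)
{
	int i;

	for (i = 0; i < n; i++)		/* pass 1: verify every candidate */
		if (owners[i].holds_lock)
			return false;	/* nfserr_locks_held: release nothing */
	for (i = 0; i < n; i++)		/* pass 2: commit */
		owners[i].released = true;
	return true;
}

int main(void)
{
	struct owner set[2] = { { false, false }, { true, false } };

	printf("%d\n", release_all_or_nothing(set, 2)); /* 0: second owner holds a lock */
	set[1].holds_lock = false;
	printf("%d\n", release_all_or_nothing(set, 2)); /* 1: both released */
	return 0;
}
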
4414 4415
4415 static inline struct nfs4_client_reclaim * 4416 static inline struct nfs4_client_reclaim *
4416 alloc_reclaim(void) 4417 alloc_reclaim(void)
4417 { 4418 {
4418 return kmalloc(sizeof(struct nfs4_client_reclaim), GFP_KERNEL); 4419 return kmalloc(sizeof(struct nfs4_client_reclaim), GFP_KERNEL);
4419 } 4420 }
4420 4421
4421 int 4422 int
4422 nfs4_has_reclaimed_state(const char *name, bool use_exchange_id) 4423 nfs4_has_reclaimed_state(const char *name, bool use_exchange_id)
4423 { 4424 {
4424 unsigned int strhashval = clientstr_hashval(name); 4425 unsigned int strhashval = clientstr_hashval(name);
4425 struct nfs4_client *clp; 4426 struct nfs4_client *clp;
4426 4427
4427 clp = find_confirmed_client_by_str(name, strhashval); 4428 clp = find_confirmed_client_by_str(name, strhashval);
4428 if (!clp) 4429 if (!clp)
4429 return 0; 4430 return 0;
4430 return test_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags); 4431 return test_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags);
4431 } 4432 }
4432 4433
4433 /* 4434 /*
4434 * failure => all reset bets are off, nfserr_no_grace... 4435 * failure => all reset bets are off, nfserr_no_grace...
4435 */ 4436 */
4436 int 4437 int
4437 nfs4_client_to_reclaim(const char *name) 4438 nfs4_client_to_reclaim(const char *name)
4438 { 4439 {
4439 unsigned int strhashval; 4440 unsigned int strhashval;
4440 struct nfs4_client_reclaim *crp = NULL; 4441 struct nfs4_client_reclaim *crp = NULL;
4441 4442
4442 dprintk("NFSD nfs4_client_to_reclaim NAME: %.*s\n", HEXDIR_LEN, name); 4443 dprintk("NFSD nfs4_client_to_reclaim NAME: %.*s\n", HEXDIR_LEN, name);
4443 crp = alloc_reclaim(); 4444 crp = alloc_reclaim();
4444 if (!crp) 4445 if (!crp)
4445 return 0; 4446 return 0;
4446 strhashval = clientstr_hashval(name); 4447 strhashval = clientstr_hashval(name);
4447 INIT_LIST_HEAD(&crp->cr_strhash); 4448 INIT_LIST_HEAD(&crp->cr_strhash);
4448 list_add(&crp->cr_strhash, &reclaim_str_hashtbl[strhashval]); 4449 list_add(&crp->cr_strhash, &reclaim_str_hashtbl[strhashval]);
4449 memcpy(crp->cr_recdir, name, HEXDIR_LEN); 4450 memcpy(crp->cr_recdir, name, HEXDIR_LEN);
4450 reclaim_str_hashtbl_size++; 4451 reclaim_str_hashtbl_size++;
4451 return 1; 4452 return 1;
4452 } 4453 }
4453 4454
4454 void 4455 void
4455 nfs4_release_reclaim(void) 4456 nfs4_release_reclaim(void)
4456 { 4457 {
4457 struct nfs4_client_reclaim *crp = NULL; 4458 struct nfs4_client_reclaim *crp = NULL;
4458 int i; 4459 int i;
4459 4460
4460 for (i = 0; i < CLIENT_HASH_SIZE; i++) { 4461 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
4461 while (!list_empty(&reclaim_str_hashtbl[i])) { 4462 while (!list_empty(&reclaim_str_hashtbl[i])) {
4462 crp = list_entry(reclaim_str_hashtbl[i].next, 4463 crp = list_entry(reclaim_str_hashtbl[i].next,
4463 struct nfs4_client_reclaim, cr_strhash); 4464 struct nfs4_client_reclaim, cr_strhash);
4464 list_del(&crp->cr_strhash); 4465 list_del(&crp->cr_strhash);
4465 kfree(crp); 4466 kfree(crp);
4466 reclaim_str_hashtbl_size--; 4467 reclaim_str_hashtbl_size--;
4467 } 4468 }
4468 } 4469 }
4469 BUG_ON(reclaim_str_hashtbl_size); 4470 BUG_ON(reclaim_str_hashtbl_size);
4470 } 4471 }
4471 4472
4472 /* 4473 /*
4473 * called from OPEN, CLAIM_PREVIOUS with a new clientid. */ 4474 * called from OPEN, CLAIM_PREVIOUS with a new clientid. */
4474 struct nfs4_client_reclaim * 4475 struct nfs4_client_reclaim *
4475 nfsd4_find_reclaim_client(struct nfs4_client *clp) 4476 nfsd4_find_reclaim_client(struct nfs4_client *clp)
4476 { 4477 {
4477 unsigned int strhashval; 4478 unsigned int strhashval;
4478 struct nfs4_client_reclaim *crp = NULL; 4479 struct nfs4_client_reclaim *crp = NULL;
4479 4480
4480 dprintk("NFSD: nfs4_find_reclaim_client for %.*s with recdir %s\n", 4481 dprintk("NFSD: nfs4_find_reclaim_client for %.*s with recdir %s\n",
4481 clp->cl_name.len, clp->cl_name.data, 4482 clp->cl_name.len, clp->cl_name.data,
4482 clp->cl_recdir); 4483 clp->cl_recdir);
4483 4484
4484 /* find clp->cl_name in reclaim_str_hashtbl */ 4485 /* find clp->cl_name in reclaim_str_hashtbl */
4485 strhashval = clientstr_hashval(clp->cl_recdir); 4486 strhashval = clientstr_hashval(clp->cl_recdir);
4486 list_for_each_entry(crp, &reclaim_str_hashtbl[strhashval], cr_strhash) { 4487 list_for_each_entry(crp, &reclaim_str_hashtbl[strhashval], cr_strhash) {
4487 if (same_name(crp->cr_recdir, clp->cl_recdir)) { 4488 if (same_name(crp->cr_recdir, clp->cl_recdir)) {
4488 return crp; 4489 return crp;
4489 } 4490 }
4490 } 4491 }
4491 return NULL; 4492 return NULL;
4492 } 4493 }
4493 4494
4494 /* 4495 /*
4495 * Called from OPEN. Look for clientid in reclaim list. 4496 * Called from OPEN. Look for clientid in reclaim list.
4496 */ 4497 */
4497 __be32 4498 __be32
4498 nfs4_check_open_reclaim(clientid_t *clid) 4499 nfs4_check_open_reclaim(clientid_t *clid)
4499 { 4500 {
4500 struct nfs4_client *clp; 4501 struct nfs4_client *clp;
4501 4502
4502 /* find clientid in conf_id_hashtbl */ 4503 /* find clientid in conf_id_hashtbl */
4503 clp = find_confirmed_client(clid); 4504 clp = find_confirmed_client(clid);
4504 if (clp == NULL) 4505 if (clp == NULL)
4505 return nfserr_reclaim_bad; 4506 return nfserr_reclaim_bad;
4506 4507
4507 return nfsd4_client_record_check(clp) ? nfserr_reclaim_bad : nfs_ok; 4508 return nfsd4_client_record_check(clp) ? nfserr_reclaim_bad : nfs_ok;
4508 } 4509 }
4509 4510
4510 #ifdef CONFIG_NFSD_FAULT_INJECTION 4511 #ifdef CONFIG_NFSD_FAULT_INJECTION
4511 4512
4512 void nfsd_forget_clients(u64 num) 4513 void nfsd_forget_clients(u64 num)
4513 { 4514 {
4514 struct nfs4_client *clp, *next; 4515 struct nfs4_client *clp, *next;
4515 int count = 0; 4516 int count = 0;
4516 4517
4517 nfs4_lock_state(); 4518 nfs4_lock_state();
4518 list_for_each_entry_safe(clp, next, &client_lru, cl_lru) { 4519 list_for_each_entry_safe(clp, next, &client_lru, cl_lru) {
4519 nfsd4_client_record_remove(clp); 4520 nfsd4_client_record_remove(clp);
4520 expire_client(clp); 4521 expire_client(clp);
4521 if (++count == num) 4522 if (++count == num)
4522 break; 4523 break;
4523 } 4524 }
4524 nfs4_unlock_state(); 4525 nfs4_unlock_state();
4525 4526
4526 printk(KERN_INFO "NFSD: Forgot %d clients", count); 4527 printk(KERN_INFO "NFSD: Forgot %d clients", count);
4527 } 4528 }
4528 4529
4529 static void release_lockowner_sop(struct nfs4_stateowner *sop) 4530 static void release_lockowner_sop(struct nfs4_stateowner *sop)
4530 { 4531 {
4531 release_lockowner(lockowner(sop)); 4532 release_lockowner(lockowner(sop));
4532 } 4533 }
4533 4534
4534 static void release_openowner_sop(struct nfs4_stateowner *sop) 4535 static void release_openowner_sop(struct nfs4_stateowner *sop)
4535 { 4536 {
4536 release_openowner(openowner(sop)); 4537 release_openowner(openowner(sop));
4537 } 4538 }
4538 4539
4539 static int nfsd_release_n_owners(u64 num, bool is_open_owner, 4540 static int nfsd_release_n_owners(u64 num, bool is_open_owner,
4540 void (*release_sop)(struct nfs4_stateowner *)) 4541 void (*release_sop)(struct nfs4_stateowner *))
4541 { 4542 {
4542 int i, count = 0; 4543 int i, count = 0;
4543 struct nfs4_stateowner *sop, *next; 4544 struct nfs4_stateowner *sop, *next;
4544 4545
4545 for (i = 0; i < OWNER_HASH_SIZE; i++) { 4546 for (i = 0; i < OWNER_HASH_SIZE; i++) {
4546 list_for_each_entry_safe(sop, next, &ownerstr_hashtbl[i], so_strhash) { 4547 list_for_each_entry_safe(sop, next, &ownerstr_hashtbl[i], so_strhash) {
4547 if (sop->so_is_open_owner != is_open_owner) 4548 if (sop->so_is_open_owner != is_open_owner)
4548 continue; 4549 continue;
4549 release_sop(sop); 4550 release_sop(sop);
4550 if (++count == num) 4551 if (++count == num)
4551 return count; 4552 return count;
4552 } 4553 }
4553 } 4554 }
4554 return count; 4555 return count;
4555 } 4556 }
4556 4557
4557 void nfsd_forget_locks(u64 num) 4558 void nfsd_forget_locks(u64 num)
4558 { 4559 {
4559 int count; 4560 int count;
4560 4561
4561 nfs4_lock_state(); 4562 nfs4_lock_state();
4562 count = nfsd_release_n_owners(num, false, release_lockowner_sop); 4563 count = nfsd_release_n_owners(num, false, release_lockowner_sop);
4563 nfs4_unlock_state(); 4564 nfs4_unlock_state();
4564 4565
4565 printk(KERN_INFO "NFSD: Forgot %d locks", count); 4566 printk(KERN_INFO "NFSD: Forgot %d locks", count);
4566 } 4567 }
4567 4568
4568 void nfsd_forget_openowners(u64 num) 4569 void nfsd_forget_openowners(u64 num)
4569 { 4570 {
4570 int count; 4571 int count;
4571 4572
4572 nfs4_lock_state(); 4573 nfs4_lock_state();
4573 count = nfsd_release_n_owners(num, true, release_openowner_sop); 4574 count = nfsd_release_n_owners(num, true, release_openowner_sop);
4574 nfs4_unlock_state(); 4575 nfs4_unlock_state();
4575 4576
4576 printk(KERN_INFO "NFSD: Forgot %d open owners", count); 4577 printk(KERN_INFO "NFSD: Forgot %d open owners", count);
4577 } 4578 }
4578 4579
4579 int nfsd_process_n_delegations(u64 num, struct list_head *list) 4580 int nfsd_process_n_delegations(u64 num, struct list_head *list)
4580 { 4581 {
4581 int i, count = 0; 4582 int i, count = 0;
4582 struct nfs4_file *fp, *fnext; 4583 struct nfs4_file *fp, *fnext;
4583 struct nfs4_delegation *dp, *dnext; 4584 struct nfs4_delegation *dp, *dnext;
4584 4585
4585 for (i = 0; i < FILE_HASH_SIZE; i++) { 4586 for (i = 0; i < FILE_HASH_SIZE; i++) {
4586 list_for_each_entry_safe(fp, fnext, &file_hashtbl[i], fi_hash) { 4587 list_for_each_entry_safe(fp, fnext, &file_hashtbl[i], fi_hash) {
4587 list_for_each_entry_safe(dp, dnext, &fp->fi_delegations, dl_perfile) { 4588 list_for_each_entry_safe(dp, dnext, &fp->fi_delegations, dl_perfile) {
4588 list_move(&dp->dl_recall_lru, list); 4589 list_move(&dp->dl_recall_lru, list);
4589 if (++count == num) 4590 if (++count == num)
4590 return count; 4591 return count;
4591 } 4592 }
4592 } 4593 }
4593 } 4594 }
4594 4595
4595 return count; 4596 return count;
4596 } 4597 }
4597 4598
4598 void nfsd_forget_delegations(u64 num) 4599 void nfsd_forget_delegations(u64 num)
4599 { 4600 {
4600 unsigned int count; 4601 unsigned int count;
4601 LIST_HEAD(victims); 4602 LIST_HEAD(victims);
4602 struct nfs4_delegation *dp, *dnext; 4603 struct nfs4_delegation *dp, *dnext;
4603 4604
4604 spin_lock(&recall_lock); 4605 spin_lock(&recall_lock);
4605 count = nfsd_process_n_delegations(num, &victims); 4606 count = nfsd_process_n_delegations(num, &victims);
4606 spin_unlock(&recall_lock); 4607 spin_unlock(&recall_lock);
4607 4608
4608 nfs4_lock_state(); 4609 nfs4_lock_state();
4609 list_for_each_entry_safe(dp, dnext, &victims, dl_recall_lru) 4610 list_for_each_entry_safe(dp, dnext, &victims, dl_recall_lru)
4610 unhash_delegation(dp); 4611 unhash_delegation(dp);
4611 nfs4_unlock_state(); 4612 nfs4_unlock_state();
4612 4613
4613 printk(KERN_INFO "NFSD: Forgot %d delegations", count); 4614 printk(KERN_INFO "NFSD: Forgot %d delegations", count);
4614 } 4615 }
4615 4616
4616 void nfsd_recall_delegations(u64 num) 4617 void nfsd_recall_delegations(u64 num)
4617 { 4618 {
4618 unsigned int count; 4619 unsigned int count;
4619 LIST_HEAD(victims); 4620 LIST_HEAD(victims);
4620 struct nfs4_delegation *dp, *dnext; 4621 struct nfs4_delegation *dp, *dnext;
4621 4622
4622 spin_lock(&recall_lock); 4623 spin_lock(&recall_lock);
4623 count = nfsd_process_n_delegations(num, &victims); 4624 count = nfsd_process_n_delegations(num, &victims);
4624 list_for_each_entry_safe(dp, dnext, &victims, dl_recall_lru) { 4625 list_for_each_entry_safe(dp, dnext, &victims, dl_recall_lru) {
4625 list_del(&dp->dl_recall_lru); 4626 list_del(&dp->dl_recall_lru);
4626 nfsd_break_one_deleg(dp); 4627 nfsd_break_one_deleg(dp);
4627 } 4628 }
4628 spin_unlock(&recall_lock); 4629 spin_unlock(&recall_lock);
4629 4630
4630 printk(KERN_INFO "NFSD: Recalled %d delegations", count); 4631 printk(KERN_INFO "NFSD: Recalled %d delegations", count);
4631 } 4632 }
4632 4633
4633 #endif /* CONFIG_NFSD_FAULT_INJECTION */ 4634 #endif /* CONFIG_NFSD_FAULT_INJECTION */
4634 4635
4635 /* initialization to perform at module load time: */ 4636 /* initialization to perform at module load time: */
4636 4637
4637 void 4638 void
4638 nfs4_state_init(void) 4639 nfs4_state_init(void)
4639 { 4640 {
4640 int i; 4641 int i;
4641 4642
4642 for (i = 0; i < CLIENT_HASH_SIZE; i++) { 4643 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
4643 INIT_LIST_HEAD(&conf_id_hashtbl[i]); 4644 INIT_LIST_HEAD(&conf_id_hashtbl[i]);
4644 INIT_LIST_HEAD(&conf_str_hashtbl[i]); 4645 INIT_LIST_HEAD(&conf_str_hashtbl[i]);
4645 INIT_LIST_HEAD(&unconf_str_hashtbl[i]); 4646 INIT_LIST_HEAD(&unconf_str_hashtbl[i]);
4646 INIT_LIST_HEAD(&unconf_id_hashtbl[i]); 4647 INIT_LIST_HEAD(&unconf_id_hashtbl[i]);
4647 INIT_LIST_HEAD(&reclaim_str_hashtbl[i]); 4648 INIT_LIST_HEAD(&reclaim_str_hashtbl[i]);
4648 } 4649 }
4649 for (i = 0; i < SESSION_HASH_SIZE; i++) 4650 for (i = 0; i < SESSION_HASH_SIZE; i++)
4650 INIT_LIST_HEAD(&sessionid_hashtbl[i]); 4651 INIT_LIST_HEAD(&sessionid_hashtbl[i]);
4651 for (i = 0; i < FILE_HASH_SIZE; i++) { 4652 for (i = 0; i < FILE_HASH_SIZE; i++) {
4652 INIT_LIST_HEAD(&file_hashtbl[i]); 4653 INIT_LIST_HEAD(&file_hashtbl[i]);
4653 } 4654 }
4654 for (i = 0; i < OWNER_HASH_SIZE; i++) { 4655 for (i = 0; i < OWNER_HASH_SIZE; i++) {
4655 INIT_LIST_HEAD(&ownerstr_hashtbl[i]); 4656 INIT_LIST_HEAD(&ownerstr_hashtbl[i]);
4656 } 4657 }
4657 for (i = 0; i < LOCKOWNER_INO_HASH_SIZE; i++) 4658 for (i = 0; i < LOCKOWNER_INO_HASH_SIZE; i++)
4658 INIT_LIST_HEAD(&lockowner_ino_hashtbl[i]); 4659 INIT_LIST_HEAD(&lockowner_ino_hashtbl[i]);
4659 INIT_LIST_HEAD(&close_lru); 4660 INIT_LIST_HEAD(&close_lru);
4660 INIT_LIST_HEAD(&client_lru); 4661 INIT_LIST_HEAD(&client_lru);
4661 INIT_LIST_HEAD(&del_recall_lru); 4662 INIT_LIST_HEAD(&del_recall_lru);
4662 reclaim_str_hashtbl_size = 0; 4663 reclaim_str_hashtbl_size = 0;
4663 } 4664 }
4664 4665
4665 /* 4666 /*
4666 * Since the lifetime of a delegation isn't limited to that of an open, a 4667 * Since the lifetime of a delegation isn't limited to that of an open, a
4667 * client may quite reasonably hang on to a delegation as long as it has 4668 * client may quite reasonably hang on to a delegation as long as it has
4668 * the inode cached. This becomes an obvious problem the first time a 4669 * the inode cached. This becomes an obvious problem the first time a
4669 * client's inode cache approaches the size of the server's total memory. 4670 * client's inode cache approaches the size of the server's total memory.
4670 * 4671 *
4671 * For now we avoid this problem by imposing a hard limit on the number 4672 * For now we avoid this problem by imposing a hard limit on the number
4672 * of delegations, which varies according to the server's memory size. 4673 * of delegations, which varies according to the server's memory size.
4673 */ 4674 */
4674 static void 4675 static void
4675 set_max_delegations(void) 4676 set_max_delegations(void)
4676 { 4677 {
4677 /* 4678 /*
4678 * Allow at most 4 delegations per megabyte of RAM. Quick 4679 * Allow at most 4 delegations per megabyte of RAM. Quick
4679 * estimates suggest that in the worst case (where every delegation 4680 * estimates suggest that in the worst case (where every delegation
4680 * is for a different inode), a delegation could take about 1.5K, 4681 * is for a different inode), a delegation could take about 1.5K,
4681 * giving a worst case usage of about 6% of memory. 4682 * giving a worst case usage of about 6% of memory.
4682 */ 4683 */
4683 max_delegations = nr_free_buffer_pages() >> (20 - 2 - PAGE_SHIFT); 4684 max_delegations = nr_free_buffer_pages() >> (20 - 2 - PAGE_SHIFT);
4684 } 4685 }
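
The shift in set_max_delegations() encodes the "4 per megabyte" rule in one expression: 1 MB is 2^(20 - PAGE_SHIFT) pages, so multiplying by 4 and dividing by that is a single right shift by (20 - 2 - PAGE_SHIFT). A minimal userspace sketch of the arithmetic, assuming 4 KiB pages (PAGE_SHIFT == 12) and a made-up value in place of nr_free_buffer_pages():

#include <stdio.h>

#define PAGE_SHIFT 12                   /* assumption: 4 KiB pages; varies by arch */

int main(void)
{
        /* pretend nr_free_buffer_pages() reported 1 GiB worth of pages */
        unsigned long free_pages = 262144;
        unsigned long max_delegations = free_pages >> (20 - 2 - PAGE_SHIFT);

        /* 262144 >> 6 == 4096, i.e. 4 delegations for each of the 1024 MiB */
        printf("max_delegations = %lu\n", max_delegations);
        return 0;
}
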
4685 4686
4686 /* initialization to perform when the nfsd service is started: */ 4687 /* initialization to perform when the nfsd service is started: */
4687 4688
4688 int 4689 int
4689 nfs4_state_start(void) 4690 nfs4_state_start(void)
4690 { 4691 {
4692 struct net *net = &init_net;
4693 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
4691 int ret; 4694 int ret;
4692 4695
4693 /* 4696 /*
4694 * FIXME: For now, we hang most of the pernet global stuff off of 4697 * FIXME: For now, we hang most of the pernet global stuff off of
4695 * init_net until nfsd is fully containerized. Eventually, we'll 4698 * init_net until nfsd is fully containerized. Eventually, we'll
4696 * need to pass a net pointer into this function, take a reference 4699 * need to pass a net pointer into this function, take a reference
4697 * to that instead and then do most of the rest of this on a per-net 4700 * to that instead and then do most of the rest of this on a per-net
4698 * basis. 4701 * basis.
4699 */ 4702 */
4700 get_net(&init_net); 4703 get_net(net);
4701 nfsd4_client_tracking_init(&init_net); 4704 nfsd4_client_tracking_init(net);
4702 boot_time = get_seconds(); 4705 boot_time = get_seconds();
4703 locks_start_grace(&nfsd4_manager); 4706 locks_start_grace(&nn->nfsd4_manager);
4704 grace_ended = false; 4707 grace_ended = false;
4705 printk(KERN_INFO "NFSD: starting %ld-second grace period\n", 4708 printk(KERN_INFO "NFSD: starting %ld-second grace period\n",
4706 nfsd4_grace); 4709 nfsd4_grace);
4707 ret = set_callback_cred(); 4710 ret = set_callback_cred();
4708 if (ret) { 4711 if (ret) {
4709 ret = -ENOMEM; 4712 ret = -ENOMEM;
4710 goto out_recovery; 4713 goto out_recovery;
4711 } 4714 }
4712 laundry_wq = create_singlethread_workqueue("nfsd4"); 4715 laundry_wq = create_singlethread_workqueue("nfsd4");
4713 if (laundry_wq == NULL) { 4716 if (laundry_wq == NULL) {
4714 ret = -ENOMEM; 4717 ret = -ENOMEM;
4715 goto out_recovery; 4718 goto out_recovery;
4716 } 4719 }
4717 ret = nfsd4_create_callback_queue(); 4720 ret = nfsd4_create_callback_queue();
4718 if (ret) 4721 if (ret)
4719 goto out_free_laundry; 4722 goto out_free_laundry;
4720 queue_delayed_work(laundry_wq, &laundromat_work, nfsd4_grace * HZ); 4723 queue_delayed_work(laundry_wq, &laundromat_work, nfsd4_grace * HZ);
4721 set_max_delegations(); 4724 set_max_delegations();
4722 return 0; 4725 return 0;
4723 out_free_laundry: 4726 out_free_laundry:
4724 destroy_workqueue(laundry_wq); 4727 destroy_workqueue(laundry_wq);
4725 out_recovery: 4728 out_recovery:
4726 nfsd4_client_tracking_exit(&init_net); 4729 nfsd4_client_tracking_exit(net);
4727 put_net(&init_net); 4730 put_net(net);
4728 return ret; 4731 return ret;
4729 } 4732 }
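
As the FIXME above notes, the longer-term plan is to key this state off a real net pointer; what this commit already does is move nfsd4_manager into struct nfsd_net and reach it through net_generic(). A rough sketch of that lookup pattern (nfsd4_start_grace_for is a hypothetical helper name, and the snippet only makes sense inside the nfsd tree with its netns.h):

#include <linux/fs.h>                   /* struct lock_manager, locks_start_grace() */
#include <net/net_namespace.h>
#include <net/netns/generic.h>

#include "netns.h"                      /* struct nfsd_net, nfsd_net_id */

/* hypothetical helper: start the grace period for one namespace's nfsd */
static void nfsd4_start_grace_for(struct net *net)
{
        struct nfsd_net *nn = net_generic(net, nfsd_net_id);

        /* nfsd4_manager is now per-namespace state, not a file-scope global */
        locks_start_grace(&nn->nfsd4_manager);
}

The shutdown path mirrors this with locks_end_grace(&nn->nfsd4_manager), which is exactly the change visible in nfs4_state_shutdown() below.
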
4730 4733
4731 static void 4734 static void
4732 __nfs4_state_shutdown(void) 4735 __nfs4_state_shutdown(void)
4733 { 4736 {
4734 int i; 4737 int i;
4735 struct nfs4_client *clp = NULL; 4738 struct nfs4_client *clp = NULL;
4736 struct nfs4_delegation *dp = NULL; 4739 struct nfs4_delegation *dp = NULL;
4737 struct list_head *pos, *next, reaplist; 4740 struct list_head *pos, *next, reaplist;
4738 4741
4739 for (i = 0; i < CLIENT_HASH_SIZE; i++) { 4742 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
4740 while (!list_empty(&conf_id_hashtbl[i])) { 4743 while (!list_empty(&conf_id_hashtbl[i])) {
4741 clp = list_entry(conf_id_hashtbl[i].next, struct nfs4_client, cl_idhash); 4744 clp = list_entry(conf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
4742 expire_client(clp); 4745 expire_client(clp);
4743 } 4746 }
4744 while (!list_empty(&unconf_str_hashtbl[i])) { 4747 while (!list_empty(&unconf_str_hashtbl[i])) {
4745 clp = list_entry(unconf_str_hashtbl[i].next, struct nfs4_client, cl_strhash); 4748 clp = list_entry(unconf_str_hashtbl[i].next, struct nfs4_client, cl_strhash);
4746 expire_client(clp); 4749 expire_client(clp);
4747 } 4750 }
4748 } 4751 }
4749 INIT_LIST_HEAD(&reaplist); 4752 INIT_LIST_HEAD(&reaplist);
4750 spin_lock(&recall_lock); 4753 spin_lock(&recall_lock);
4751 list_for_each_safe(pos, next, &del_recall_lru) { 4754 list_for_each_safe(pos, next, &del_recall_lru) {
4752 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru); 4755 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
4753 list_move(&dp->dl_recall_lru, &reaplist); 4756 list_move(&dp->dl_recall_lru, &reaplist);
4754 } 4757 }
4755 spin_unlock(&recall_lock); 4758 spin_unlock(&recall_lock);
4756 list_for_each_safe(pos, next, &reaplist) { 4759 list_for_each_safe(pos, next, &reaplist) {
4757 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru); 4760 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
4758 unhash_delegation(dp); 4761 unhash_delegation(dp);
4759 } 4762 }
4760 4763
4761 nfsd4_client_tracking_exit(&init_net); 4764 nfsd4_client_tracking_exit(&init_net);
4762 put_net(&init_net); 4765 put_net(&init_net);
4763 } 4766 }
4764 4767
4765 void 4768 void
4766 nfs4_state_shutdown(void) 4769 nfs4_state_shutdown(void)
4767 { 4770 {
4771 struct net *net = &init_net;
4772 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
4773
4768 cancel_delayed_work_sync(&laundromat_work); 4774 cancel_delayed_work_sync(&laundromat_work);
4769 destroy_workqueue(laundry_wq); 4775 destroy_workqueue(laundry_wq);
4770 locks_end_grace(&nfsd4_manager); 4776 locks_end_grace(&nn->nfsd4_manager);
4771 nfs4_lock_state(); 4777 nfs4_lock_state();
4772 __nfs4_state_shutdown(); 4778 __nfs4_state_shutdown();
4773 nfs4_unlock_state(); 4779 nfs4_unlock_state();
4774 nfsd4_destroy_callback_queue(); 4780 nfsd4_destroy_callback_queue();
4775 } 4781 }
4776 4782
4777 static void 4783 static void
4778 get_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid) 4784 get_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
4779 { 4785 {
4780 if (HAS_STATE_ID(cstate, CURRENT_STATE_ID_FLAG) && CURRENT_STATEID(stateid)) 4786 if (HAS_STATE_ID(cstate, CURRENT_STATE_ID_FLAG) && CURRENT_STATEID(stateid))
4781 memcpy(stateid, &cstate->current_stateid, sizeof(stateid_t)); 4787 memcpy(stateid, &cstate->current_stateid, sizeof(stateid_t));
4782 } 4788 }
4783 4789
4784 static void 4790 static void
4785 put_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid) 4791 put_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
4786 { 4792 {
4787 if (cstate->minorversion) { 4793 if (cstate->minorversion) {
4788 memcpy(&cstate->current_stateid, stateid, sizeof(stateid_t)); 4794 memcpy(&cstate->current_stateid, stateid, sizeof(stateid_t));
4789 SET_STATE_ID(cstate, CURRENT_STATE_ID_FLAG); 4795 SET_STATE_ID(cstate, CURRENT_STATE_ID_FLAG);
4790 } 4796 }
4791 } 4797 }
4792 4798
4793 void 4799 void
4794 clear_current_stateid(struct nfsd4_compound_state *cstate) 4800 clear_current_stateid(struct nfsd4_compound_state *cstate)
4795 { 4801 {
4796 CLEAR_STATE_ID(cstate, CURRENT_STATE_ID_FLAG); 4802 CLEAR_STATE_ID(cstate, CURRENT_STATE_ID_FLAG);
4797 } 4803 }
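
put_stateid() and get_stateid() implement NFSv4.1's "current stateid" shortcut: on a minorversion >= 1 compound, an operation that returns a stateid caches it in cstate->current_stateid, and a later operation that supplies the special placeholder stateid has the cached value substituted back in. The CURRENT_STATEID() test used above checks for that placeholder; as a rough illustration of its assumed shape (per RFC 5661 the special stateid has a seqid of 1 and an all-zero "other" field; field names follow nfsd's stateid_t, and the names below are invented for the example):

/* illustration only: the special "current stateid" placeholder per RFC 5661 */
static const stateid_t example_current_stateid = {
        .si_generation = 1,
        /* .si_opaque left all zero */
};

#define EXAMPLE_CURRENT_STATEID(stateid) \
        (!memcmp((stateid), &example_current_stateid, sizeof(stateid_t)))

The per-operation wrappers that follow simply route each operation's stateid field through this pair of helpers.
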
4798 4804
4799 /* 4805 /*
4800 * functions to set current state id 4806 * functions to set current state id
4801 */ 4807 */
4802 void 4808 void
4803 nfsd4_set_opendowngradestateid(struct nfsd4_compound_state *cstate, struct nfsd4_open_downgrade *odp) 4809 nfsd4_set_opendowngradestateid(struct nfsd4_compound_state *cstate, struct nfsd4_open_downgrade *odp)
4804 { 4810 {
4805 put_stateid(cstate, &odp->od_stateid); 4811 put_stateid(cstate, &odp->od_stateid);
4806 } 4812 }
4807 4813
4808 void 4814 void
4809 nfsd4_set_openstateid(struct nfsd4_compound_state *cstate, struct nfsd4_open *open) 4815 nfsd4_set_openstateid(struct nfsd4_compound_state *cstate, struct nfsd4_open *open)
4810 { 4816 {
4811 put_stateid(cstate, &open->op_stateid); 4817 put_stateid(cstate, &open->op_stateid);
4812 } 4818 }
4813 4819
4814 void 4820 void
4815 nfsd4_set_closestateid(struct nfsd4_compound_state *cstate, struct nfsd4_close *close) 4821 nfsd4_set_closestateid(struct nfsd4_compound_state *cstate, struct nfsd4_close *close)
4816 { 4822 {
4817 put_stateid(cstate, &close->cl_stateid); 4823 put_stateid(cstate, &close->cl_stateid);
4818 } 4824 }
4819 4825
4820 void 4826 void
4821 nfsd4_set_lockstateid(struct nfsd4_compound_state *cstate, struct nfsd4_lock *lock) 4827 nfsd4_set_lockstateid(struct nfsd4_compound_state *cstate, struct nfsd4_lock *lock)
4822 { 4828 {
4823 put_stateid(cstate, &lock->lk_resp_stateid); 4829 put_stateid(cstate, &lock->lk_resp_stateid);
4824 } 4830 }
4825 4831
4826 /* 4832 /*
4827 * functions to consume current state id 4833 * functions to consume current state id
4828 */ 4834 */
4829 4835
4830 void 4836 void
4831 nfsd4_get_opendowngradestateid(struct nfsd4_compound_state *cstate, struct nfsd4_open_downgrade *odp) 4837 nfsd4_get_opendowngradestateid(struct nfsd4_compound_state *cstate, struct nfsd4_open_downgrade *odp)
4832 { 4838 {
4833 get_stateid(cstate, &odp->od_stateid); 4839 get_stateid(cstate, &odp->od_stateid);
4834 } 4840 }
4835 4841
4836 void 4842 void
4837 nfsd4_get_delegreturnstateid(struct nfsd4_compound_state *cstate, struct nfsd4_delegreturn *drp) 4843 nfsd4_get_delegreturnstateid(struct nfsd4_compound_state *cstate, struct nfsd4_delegreturn *drp)
4838 { 4844 {
4839 get_stateid(cstate, &drp->dr_stateid); 4845 get_stateid(cstate, &drp->dr_stateid);
4840 } 4846 }
4841 4847
4842 void 4848 void
4843 nfsd4_get_freestateid(struct nfsd4_compound_state *cstate, struct nfsd4_free_stateid *fsp) 4849 nfsd4_get_freestateid(struct nfsd4_compound_state *cstate, struct nfsd4_free_stateid *fsp)
4844 { 4850 {
4845 get_stateid(cstate, &fsp->fr_stateid); 4851 get_stateid(cstate, &fsp->fr_stateid);
4846 } 4852 }
4847 4853
4848 void 4854 void
4849 nfsd4_get_setattrstateid(struct nfsd4_compound_state *cstate, struct nfsd4_setattr *setattr) 4855 nfsd4_get_setattrstateid(struct nfsd4_compound_state *cstate, struct nfsd4_setattr *setattr)
4850 { 4856 {
4851 get_stateid(cstate, &setattr->sa_stateid); 4857 get_stateid(cstate, &setattr->sa_stateid);
4852 } 4858 }
4853 4859
4854 void 4860 void
4855 nfsd4_get_closestateid(struct nfsd4_compound_state *cstate, struct nfsd4_close *close) 4861 nfsd4_get_closestateid(struct nfsd4_compound_state *cstate, struct nfsd4_close *close)
4856 { 4862 {
4857 get_stateid(cstate, &close->cl_stateid); 4863 get_stateid(cstate, &close->cl_stateid);
4858 } 4864 }
4859 4865
4860 void 4866 void
4861 nfsd4_get_lockustateid(struct nfsd4_compound_state *cstate, struct nfsd4_locku *locku) 4867 nfsd4_get_lockustateid(struct nfsd4_compound_state *cstate, struct nfsd4_locku *locku)
4862 { 4868 {
4863 get_stateid(cstate, &locku->lu_stateid); 4869 get_stateid(cstate, &locku->lu_stateid);
4864 } 4870 }
4865 4871
4866 void 4872 void
4867 nfsd4_get_readstateid(struct nfsd4_compound_state *cstate, struct nfsd4_read *read) 4873 nfsd4_get_readstateid(struct nfsd4_compound_state *cstate, struct nfsd4_read *read)
4868 { 4874 {
4869 get_stateid(cstate, &read->rd_stateid); 4875 get_stateid(cstate, &read->rd_stateid);
4870 } 4876 }
4871 4877
4872 void 4878 void
4873 nfsd4_get_writestateid(struct nfsd4_compound_state *cstate, struct nfsd4_write *write) 4879 nfsd4_get_writestateid(struct nfsd4_compound_state *cstate, struct nfsd4_write *write)
4874 { 4880 {