Commit 0a5c33e23c4d781ecc815002c54f1f91012c703d

Authored by Bryan Schumaker
Committed by J. Bruce Fields
1 parent 88c4766617

NFSD: Pass correct buffer size to rpc_ntop

I honestly have no idea where I got 129 from, but it's a much bigger
value than the actual buffer size (INET6_ADDRSTRLEN).

Signed-off-by: Bryan Schumaker <bjschuma@netapp.com>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>

Showing 2 changed files with 3 additions and 3 deletions Inline Diff

fs/nfsd/fault_inject.c
1 /* 1 /*
2 * Copyright (c) 2011 Bryan Schumaker <bjschuma@netapp.com> 2 * Copyright (c) 2011 Bryan Schumaker <bjschuma@netapp.com>
3 * 3 *
4 * Uses debugfs to create fault injection points for client testing 4 * Uses debugfs to create fault injection points for client testing
5 */ 5 */
6 6
7 #include <linux/types.h> 7 #include <linux/types.h>
8 #include <linux/fs.h> 8 #include <linux/fs.h>
9 #include <linux/debugfs.h> 9 #include <linux/debugfs.h>
10 #include <linux/module.h> 10 #include <linux/module.h>
11 #include <linux/nsproxy.h> 11 #include <linux/nsproxy.h>
12 #include <linux/sunrpc/clnt.h> 12 #include <linux/sunrpc/clnt.h>
13 #include <asm/uaccess.h> 13 #include <asm/uaccess.h>
14 14
15 #include "state.h" 15 #include "state.h"
16 #include "netns.h" 16 #include "netns.h"
17 17
18 struct nfsd_fault_inject_op { 18 struct nfsd_fault_inject_op {
19 char *file; 19 char *file;
20 u64 (*forget)(struct nfs4_client *, u64); 20 u64 (*forget)(struct nfs4_client *, u64);
21 u64 (*print)(struct nfs4_client *, u64); 21 u64 (*print)(struct nfs4_client *, u64);
22 }; 22 };
23 23
24 static struct nfsd_fault_inject_op inject_ops[] = { 24 static struct nfsd_fault_inject_op inject_ops[] = {
25 { 25 {
26 .file = "forget_clients", 26 .file = "forget_clients",
27 .forget = nfsd_forget_client, 27 .forget = nfsd_forget_client,
28 .print = nfsd_print_client, 28 .print = nfsd_print_client,
29 }, 29 },
30 { 30 {
31 .file = "forget_locks", 31 .file = "forget_locks",
32 .forget = nfsd_forget_client_locks, 32 .forget = nfsd_forget_client_locks,
33 .print = nfsd_print_client_locks, 33 .print = nfsd_print_client_locks,
34 }, 34 },
35 { 35 {
36 .file = "forget_openowners", 36 .file = "forget_openowners",
37 .forget = nfsd_forget_client_openowners, 37 .forget = nfsd_forget_client_openowners,
38 .print = nfsd_print_client_openowners, 38 .print = nfsd_print_client_openowners,
39 }, 39 },
40 { 40 {
41 .file = "forget_delegations", 41 .file = "forget_delegations",
42 .forget = nfsd_forget_client_delegations, 42 .forget = nfsd_forget_client_delegations,
43 .print = nfsd_print_client_delegations, 43 .print = nfsd_print_client_delegations,
44 }, 44 },
45 { 45 {
46 .file = "recall_delegations", 46 .file = "recall_delegations",
47 .forget = nfsd_recall_client_delegations, 47 .forget = nfsd_recall_client_delegations,
48 .print = nfsd_print_client_delegations, 48 .print = nfsd_print_client_delegations,
49 }, 49 },
50 }; 50 };
51 51
52 static long int NUM_INJECT_OPS = sizeof(inject_ops) / sizeof(struct nfsd_fault_inject_op); 52 static long int NUM_INJECT_OPS = sizeof(inject_ops) / sizeof(struct nfsd_fault_inject_op);
53 static struct dentry *debug_dir; 53 static struct dentry *debug_dir;
54 54
55 static void nfsd_inject_set(struct nfsd_fault_inject_op *op, u64 val) 55 static void nfsd_inject_set(struct nfsd_fault_inject_op *op, u64 val)
56 { 56 {
57 u64 count = 0; 57 u64 count = 0;
58 58
59 if (val == 0) 59 if (val == 0)
60 printk(KERN_INFO "NFSD Fault Injection: %s (all)", op->file); 60 printk(KERN_INFO "NFSD Fault Injection: %s (all)", op->file);
61 else 61 else
62 printk(KERN_INFO "NFSD Fault Injection: %s (n = %llu)", op->file, val); 62 printk(KERN_INFO "NFSD Fault Injection: %s (n = %llu)", op->file, val);
63 63
64 nfs4_lock_state(); 64 nfs4_lock_state();
65 count = nfsd_for_n_state(val, op->forget); 65 count = nfsd_for_n_state(val, op->forget);
66 nfs4_unlock_state(); 66 nfs4_unlock_state();
67 printk(KERN_INFO "NFSD: %s: found %llu", op->file, count); 67 printk(KERN_INFO "NFSD: %s: found %llu", op->file, count);
68 } 68 }
69 69
70 static void nfsd_inject_set_client(struct nfsd_fault_inject_op *op, 70 static void nfsd_inject_set_client(struct nfsd_fault_inject_op *op,
71 struct sockaddr_storage *addr, 71 struct sockaddr_storage *addr,
72 size_t addr_size) 72 size_t addr_size)
73 { 73 {
74 char buf[INET6_ADDRSTRLEN]; 74 char buf[INET6_ADDRSTRLEN];
75 struct nfs4_client *clp; 75 struct nfs4_client *clp;
76 u64 count; 76 u64 count;
77 77
78 nfs4_lock_state(); 78 nfs4_lock_state();
79 clp = nfsd_find_client(addr, addr_size); 79 clp = nfsd_find_client(addr, addr_size);
80 if (clp) { 80 if (clp) {
81 count = op->forget(clp, 0); 81 count = op->forget(clp, 0);
82 rpc_ntop((struct sockaddr *)&clp->cl_addr, buf, 129); 82 rpc_ntop((struct sockaddr *)&clp->cl_addr, buf, sizeof(buf));
83 printk(KERN_INFO "NFSD [%s]: Client %s had %llu state object(s)\n", op->file, buf, count); 83 printk(KERN_INFO "NFSD [%s]: Client %s had %llu state object(s)\n", op->file, buf, count);
84 } 84 }
85 nfs4_unlock_state(); 85 nfs4_unlock_state();
86 } 86 }
87 87
88 static void nfsd_inject_get(struct nfsd_fault_inject_op *op, u64 *val) 88 static void nfsd_inject_get(struct nfsd_fault_inject_op *op, u64 *val)
89 { 89 {
90 nfs4_lock_state(); 90 nfs4_lock_state();
91 *val = nfsd_for_n_state(0, op->print); 91 *val = nfsd_for_n_state(0, op->print);
92 nfs4_unlock_state(); 92 nfs4_unlock_state();
93 } 93 }
94 94
95 static ssize_t fault_inject_read(struct file *file, char __user *buf, 95 static ssize_t fault_inject_read(struct file *file, char __user *buf,
96 size_t len, loff_t *ppos) 96 size_t len, loff_t *ppos)
97 { 97 {
98 static u64 val; 98 static u64 val;
99 char read_buf[25]; 99 char read_buf[25];
100 size_t size, ret; 100 size_t size, ret;
101 loff_t pos = *ppos; 101 loff_t pos = *ppos;
102 102
103 if (!pos) 103 if (!pos)
104 nfsd_inject_get(file->f_dentry->d_inode->i_private, &val); 104 nfsd_inject_get(file->f_dentry->d_inode->i_private, &val);
105 size = scnprintf(read_buf, sizeof(read_buf), "%llu\n", val); 105 size = scnprintf(read_buf, sizeof(read_buf), "%llu\n", val);
106 106
107 if (pos < 0) 107 if (pos < 0)
108 return -EINVAL; 108 return -EINVAL;
109 if (pos >= size || !len) 109 if (pos >= size || !len)
110 return 0; 110 return 0;
111 if (len > size - pos) 111 if (len > size - pos)
112 len = size - pos; 112 len = size - pos;
113 ret = copy_to_user(buf, read_buf + pos, len); 113 ret = copy_to_user(buf, read_buf + pos, len);
114 if (ret == len) 114 if (ret == len)
115 return -EFAULT; 115 return -EFAULT;
116 len -= ret; 116 len -= ret;
117 *ppos = pos + len; 117 *ppos = pos + len;
118 return len; 118 return len;
119 } 119 }
120 120
121 static ssize_t fault_inject_write(struct file *file, const char __user *buf, 121 static ssize_t fault_inject_write(struct file *file, const char __user *buf,
122 size_t len, loff_t *ppos) 122 size_t len, loff_t *ppos)
123 { 123 {
124 char write_buf[INET6_ADDRSTRLEN]; 124 char write_buf[INET6_ADDRSTRLEN];
125 size_t size = min(sizeof(write_buf), len) - 1; 125 size_t size = min(sizeof(write_buf), len) - 1;
126 struct net *net = current->nsproxy->net_ns; 126 struct net *net = current->nsproxy->net_ns;
127 struct sockaddr_storage sa; 127 struct sockaddr_storage sa;
128 u64 val; 128 u64 val;
129 129
130 if (copy_from_user(write_buf, buf, size)) 130 if (copy_from_user(write_buf, buf, size))
131 return -EFAULT; 131 return -EFAULT;
132 write_buf[size] = '\0'; 132 write_buf[size] = '\0';
133 133
134 size = rpc_pton(net, write_buf, size, (struct sockaddr *)&sa, sizeof(sa)); 134 size = rpc_pton(net, write_buf, size, (struct sockaddr *)&sa, sizeof(sa));
135 if (size > 0) 135 if (size > 0)
136 nfsd_inject_set_client(file->f_dentry->d_inode->i_private, &sa, size); 136 nfsd_inject_set_client(file->f_dentry->d_inode->i_private, &sa, size);
137 else { 137 else {
138 val = simple_strtoll(write_buf, NULL, 0); 138 val = simple_strtoll(write_buf, NULL, 0);
139 nfsd_inject_set(file->f_dentry->d_inode->i_private, val); 139 nfsd_inject_set(file->f_dentry->d_inode->i_private, val);
140 } 140 }
141 return len; /* on success, claim we got the whole input */ 141 return len; /* on success, claim we got the whole input */
142 } 142 }
143 143
144 static const struct file_operations fops_nfsd = { 144 static const struct file_operations fops_nfsd = {
145 .owner = THIS_MODULE, 145 .owner = THIS_MODULE,
146 .read = fault_inject_read, 146 .read = fault_inject_read,
147 .write = fault_inject_write, 147 .write = fault_inject_write,
148 }; 148 };
149 149
150 void nfsd_fault_inject_cleanup(void) 150 void nfsd_fault_inject_cleanup(void)
151 { 151 {
152 debugfs_remove_recursive(debug_dir); 152 debugfs_remove_recursive(debug_dir);
153 } 153 }
154 154
155 int nfsd_fault_inject_init(void) 155 int nfsd_fault_inject_init(void)
156 { 156 {
157 unsigned int i; 157 unsigned int i;
158 struct nfsd_fault_inject_op *op; 158 struct nfsd_fault_inject_op *op;
159 umode_t mode = S_IFREG | S_IRUSR | S_IWUSR; 159 umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
160 160
161 debug_dir = debugfs_create_dir("nfsd", NULL); 161 debug_dir = debugfs_create_dir("nfsd", NULL);
162 if (!debug_dir) 162 if (!debug_dir)
163 goto fail; 163 goto fail;
164 164
165 for (i = 0; i < NUM_INJECT_OPS; i++) { 165 for (i = 0; i < NUM_INJECT_OPS; i++) {
166 op = &inject_ops[i]; 166 op = &inject_ops[i];
167 if (!debugfs_create_file(op->file, mode, debug_dir, op, &fops_nfsd)) 167 if (!debugfs_create_file(op->file, mode, debug_dir, op, &fops_nfsd))
168 goto fail; 168 goto fail;
169 } 169 }
170 return 0; 170 return 0;
171 171
172 fail: 172 fail:
173 nfsd_fault_inject_cleanup(); 173 nfsd_fault_inject_cleanup();
174 return -ENOMEM; 174 return -ENOMEM;
175 } 175 }
176 176
1 /* 1 /*
2 * Copyright (c) 2001 The Regents of the University of Michigan. 2 * Copyright (c) 2001 The Regents of the University of Michigan.
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * Kendrick Smith <kmsmith@umich.edu> 5 * Kendrick Smith <kmsmith@umich.edu>
6 * Andy Adamson <kandros@umich.edu> 6 * Andy Adamson <kandros@umich.edu>
7 * 7 *
8 * Redistribution and use in source and binary forms, with or without 8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions 9 * modification, are permitted provided that the following conditions
10 * are met: 10 * are met:
11 * 11 *
12 * 1. Redistributions of source code must retain the above copyright 12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer. 13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright 14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the 15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution. 16 * documentation and/or other materials provided with the distribution.
17 * 3. Neither the name of the University nor the names of its 17 * 3. Neither the name of the University nor the names of its
18 * contributors may be used to endorse or promote products derived 18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission. 19 * from this software without specific prior written permission.
20 * 20 *
21 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED 21 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
22 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 22 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
23 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 23 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
24 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 24 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR 27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
28 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 28 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
29 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 29 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
30 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 30 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
31 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 31 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 * 32 *
33 */ 33 */
34 34
35 #include <linux/file.h> 35 #include <linux/file.h>
36 #include <linux/fs.h> 36 #include <linux/fs.h>
37 #include <linux/slab.h> 37 #include <linux/slab.h>
38 #include <linux/namei.h> 38 #include <linux/namei.h>
39 #include <linux/swap.h> 39 #include <linux/swap.h>
40 #include <linux/pagemap.h> 40 #include <linux/pagemap.h>
41 #include <linux/ratelimit.h> 41 #include <linux/ratelimit.h>
42 #include <linux/sunrpc/svcauth_gss.h> 42 #include <linux/sunrpc/svcauth_gss.h>
43 #include <linux/sunrpc/clnt.h> 43 #include <linux/sunrpc/clnt.h>
44 #include "xdr4.h" 44 #include "xdr4.h"
45 #include "vfs.h" 45 #include "vfs.h"
46 #include "current_stateid.h" 46 #include "current_stateid.h"
47 47
48 #include "netns.h" 48 #include "netns.h"
49 49
50 #define NFSDDBG_FACILITY NFSDDBG_PROC 50 #define NFSDDBG_FACILITY NFSDDBG_PROC
51 51
52 #define all_ones {{~0,~0},~0} 52 #define all_ones {{~0,~0},~0}
53 static const stateid_t one_stateid = { 53 static const stateid_t one_stateid = {
54 .si_generation = ~0, 54 .si_generation = ~0,
55 .si_opaque = all_ones, 55 .si_opaque = all_ones,
56 }; 56 };
57 static const stateid_t zero_stateid = { 57 static const stateid_t zero_stateid = {
58 /* all fields zero */ 58 /* all fields zero */
59 }; 59 };
60 static const stateid_t currentstateid = { 60 static const stateid_t currentstateid = {
61 .si_generation = 1, 61 .si_generation = 1,
62 }; 62 };
63 63
64 static u64 current_sessionid = 1; 64 static u64 current_sessionid = 1;
65 65
66 #define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t))) 66 #define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t)))
67 #define ONE_STATEID(stateid) (!memcmp((stateid), &one_stateid, sizeof(stateid_t))) 67 #define ONE_STATEID(stateid) (!memcmp((stateid), &one_stateid, sizeof(stateid_t)))
68 #define CURRENT_STATEID(stateid) (!memcmp((stateid), &currentstateid, sizeof(stateid_t))) 68 #define CURRENT_STATEID(stateid) (!memcmp((stateid), &currentstateid, sizeof(stateid_t)))
69 69
70 /* forward declarations */ 70 /* forward declarations */
71 static int check_for_locks(struct nfs4_file *filp, struct nfs4_lockowner *lowner); 71 static int check_for_locks(struct nfs4_file *filp, struct nfs4_lockowner *lowner);
72 72
73 /* Locking: */ 73 /* Locking: */
74 74
75 /* Currently used for almost all code touching nfsv4 state: */ 75 /* Currently used for almost all code touching nfsv4 state: */
76 static DEFINE_MUTEX(client_mutex); 76 static DEFINE_MUTEX(client_mutex);
77 77
78 /* 78 /*
79 * Currently used for the del_recall_lru and file hash table. In an 79 * Currently used for the del_recall_lru and file hash table. In an
80 * effort to decrease the scope of the client_mutex, this spinlock may 80 * effort to decrease the scope of the client_mutex, this spinlock may
81 * eventually cover more: 81 * eventually cover more:
82 */ 82 */
83 static DEFINE_SPINLOCK(recall_lock); 83 static DEFINE_SPINLOCK(recall_lock);
84 84
85 static struct kmem_cache *openowner_slab = NULL; 85 static struct kmem_cache *openowner_slab = NULL;
86 static struct kmem_cache *lockowner_slab = NULL; 86 static struct kmem_cache *lockowner_slab = NULL;
87 static struct kmem_cache *file_slab = NULL; 87 static struct kmem_cache *file_slab = NULL;
88 static struct kmem_cache *stateid_slab = NULL; 88 static struct kmem_cache *stateid_slab = NULL;
89 static struct kmem_cache *deleg_slab = NULL; 89 static struct kmem_cache *deleg_slab = NULL;
90 90
91 void 91 void
92 nfs4_lock_state(void) 92 nfs4_lock_state(void)
93 { 93 {
94 mutex_lock(&client_mutex); 94 mutex_lock(&client_mutex);
95 } 95 }
96 96
97 static void free_session(struct kref *); 97 static void free_session(struct kref *);
98 98
99 /* Must be called under the client_lock */ 99 /* Must be called under the client_lock */
100 static void nfsd4_put_session_locked(struct nfsd4_session *ses) 100 static void nfsd4_put_session_locked(struct nfsd4_session *ses)
101 { 101 {
102 kref_put(&ses->se_ref, free_session); 102 kref_put(&ses->se_ref, free_session);
103 } 103 }
104 104
105 static void nfsd4_get_session(struct nfsd4_session *ses) 105 static void nfsd4_get_session(struct nfsd4_session *ses)
106 { 106 {
107 kref_get(&ses->se_ref); 107 kref_get(&ses->se_ref);
108 } 108 }
109 109
110 void 110 void
111 nfs4_unlock_state(void) 111 nfs4_unlock_state(void)
112 { 112 {
113 mutex_unlock(&client_mutex); 113 mutex_unlock(&client_mutex);
114 } 114 }
115 115
116 static inline u32 116 static inline u32
117 opaque_hashval(const void *ptr, int nbytes) 117 opaque_hashval(const void *ptr, int nbytes)
118 { 118 {
119 unsigned char *cptr = (unsigned char *) ptr; 119 unsigned char *cptr = (unsigned char *) ptr;
120 120
121 u32 x = 0; 121 u32 x = 0;
122 while (nbytes--) { 122 while (nbytes--) {
123 x *= 37; 123 x *= 37;
124 x += *cptr++; 124 x += *cptr++;
125 } 125 }
126 return x; 126 return x;
127 } 127 }
128 128
129 static struct list_head del_recall_lru; 129 static struct list_head del_recall_lru;
130 130
131 static void nfsd4_free_file(struct nfs4_file *f) 131 static void nfsd4_free_file(struct nfs4_file *f)
132 { 132 {
133 kmem_cache_free(file_slab, f); 133 kmem_cache_free(file_slab, f);
134 } 134 }
135 135
136 static inline void 136 static inline void
137 put_nfs4_file(struct nfs4_file *fi) 137 put_nfs4_file(struct nfs4_file *fi)
138 { 138 {
139 if (atomic_dec_and_lock(&fi->fi_ref, &recall_lock)) { 139 if (atomic_dec_and_lock(&fi->fi_ref, &recall_lock)) {
140 list_del(&fi->fi_hash); 140 list_del(&fi->fi_hash);
141 spin_unlock(&recall_lock); 141 spin_unlock(&recall_lock);
142 iput(fi->fi_inode); 142 iput(fi->fi_inode);
143 nfsd4_free_file(fi); 143 nfsd4_free_file(fi);
144 } 144 }
145 } 145 }
146 146
147 static inline void 147 static inline void
148 get_nfs4_file(struct nfs4_file *fi) 148 get_nfs4_file(struct nfs4_file *fi)
149 { 149 {
150 atomic_inc(&fi->fi_ref); 150 atomic_inc(&fi->fi_ref);
151 } 151 }
152 152
153 static int num_delegations; 153 static int num_delegations;
154 unsigned int max_delegations; 154 unsigned int max_delegations;
155 155
156 /* 156 /*
157 * Open owner state (share locks) 157 * Open owner state (share locks)
158 */ 158 */
159 159
160 /* hash tables for lock and open owners */ 160 /* hash tables for lock and open owners */
161 #define OWNER_HASH_BITS 8 161 #define OWNER_HASH_BITS 8
162 #define OWNER_HASH_SIZE (1 << OWNER_HASH_BITS) 162 #define OWNER_HASH_SIZE (1 << OWNER_HASH_BITS)
163 #define OWNER_HASH_MASK (OWNER_HASH_SIZE - 1) 163 #define OWNER_HASH_MASK (OWNER_HASH_SIZE - 1)
164 164
165 static unsigned int ownerstr_hashval(u32 clientid, struct xdr_netobj *ownername) 165 static unsigned int ownerstr_hashval(u32 clientid, struct xdr_netobj *ownername)
166 { 166 {
167 unsigned int ret; 167 unsigned int ret;
168 168
169 ret = opaque_hashval(ownername->data, ownername->len); 169 ret = opaque_hashval(ownername->data, ownername->len);
170 ret += clientid; 170 ret += clientid;
171 return ret & OWNER_HASH_MASK; 171 return ret & OWNER_HASH_MASK;
172 } 172 }
173 173
174 /* hash table for nfs4_file */ 174 /* hash table for nfs4_file */
175 #define FILE_HASH_BITS 8 175 #define FILE_HASH_BITS 8
176 #define FILE_HASH_SIZE (1 << FILE_HASH_BITS) 176 #define FILE_HASH_SIZE (1 << FILE_HASH_BITS)
177 177
178 static unsigned int file_hashval(struct inode *ino) 178 static unsigned int file_hashval(struct inode *ino)
179 { 179 {
180 /* XXX: why are we hashing on inode pointer, anyway? */ 180 /* XXX: why are we hashing on inode pointer, anyway? */
181 return hash_ptr(ino, FILE_HASH_BITS); 181 return hash_ptr(ino, FILE_HASH_BITS);
182 } 182 }
183 183
184 static struct list_head file_hashtbl[FILE_HASH_SIZE]; 184 static struct list_head file_hashtbl[FILE_HASH_SIZE];
185 185
186 static void __nfs4_file_get_access(struct nfs4_file *fp, int oflag) 186 static void __nfs4_file_get_access(struct nfs4_file *fp, int oflag)
187 { 187 {
188 WARN_ON_ONCE(!(fp->fi_fds[oflag] || fp->fi_fds[O_RDWR])); 188 WARN_ON_ONCE(!(fp->fi_fds[oflag] || fp->fi_fds[O_RDWR]));
189 atomic_inc(&fp->fi_access[oflag]); 189 atomic_inc(&fp->fi_access[oflag]);
190 } 190 }
191 191
192 static void nfs4_file_get_access(struct nfs4_file *fp, int oflag) 192 static void nfs4_file_get_access(struct nfs4_file *fp, int oflag)
193 { 193 {
194 if (oflag == O_RDWR) { 194 if (oflag == O_RDWR) {
195 __nfs4_file_get_access(fp, O_RDONLY); 195 __nfs4_file_get_access(fp, O_RDONLY);
196 __nfs4_file_get_access(fp, O_WRONLY); 196 __nfs4_file_get_access(fp, O_WRONLY);
197 } else 197 } else
198 __nfs4_file_get_access(fp, oflag); 198 __nfs4_file_get_access(fp, oflag);
199 } 199 }
200 200
201 static void nfs4_file_put_fd(struct nfs4_file *fp, int oflag) 201 static void nfs4_file_put_fd(struct nfs4_file *fp, int oflag)
202 { 202 {
203 if (fp->fi_fds[oflag]) { 203 if (fp->fi_fds[oflag]) {
204 fput(fp->fi_fds[oflag]); 204 fput(fp->fi_fds[oflag]);
205 fp->fi_fds[oflag] = NULL; 205 fp->fi_fds[oflag] = NULL;
206 } 206 }
207 } 207 }
208 208
209 static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag) 209 static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag)
210 { 210 {
211 if (atomic_dec_and_test(&fp->fi_access[oflag])) { 211 if (atomic_dec_and_test(&fp->fi_access[oflag])) {
212 nfs4_file_put_fd(fp, oflag); 212 nfs4_file_put_fd(fp, oflag);
213 /* 213 /*
214 * It's also safe to get rid of the RDWR open *if* 214 * It's also safe to get rid of the RDWR open *if*
215 * we no longer have need of the other kind of access 215 * we no longer have need of the other kind of access
216 * or if we already have the other kind of open: 216 * or if we already have the other kind of open:
217 */ 217 */
218 if (fp->fi_fds[1-oflag] 218 if (fp->fi_fds[1-oflag]
219 || atomic_read(&fp->fi_access[1 - oflag]) == 0) 219 || atomic_read(&fp->fi_access[1 - oflag]) == 0)
220 nfs4_file_put_fd(fp, O_RDWR); 220 nfs4_file_put_fd(fp, O_RDWR);
221 } 221 }
222 } 222 }
223 223
224 static void nfs4_file_put_access(struct nfs4_file *fp, int oflag) 224 static void nfs4_file_put_access(struct nfs4_file *fp, int oflag)
225 { 225 {
226 if (oflag == O_RDWR) { 226 if (oflag == O_RDWR) {
227 __nfs4_file_put_access(fp, O_RDONLY); 227 __nfs4_file_put_access(fp, O_RDONLY);
228 __nfs4_file_put_access(fp, O_WRONLY); 228 __nfs4_file_put_access(fp, O_WRONLY);
229 } else 229 } else
230 __nfs4_file_put_access(fp, oflag); 230 __nfs4_file_put_access(fp, oflag);
231 } 231 }
232 232
233 static inline int get_new_stid(struct nfs4_stid *stid) 233 static inline int get_new_stid(struct nfs4_stid *stid)
234 { 234 {
235 static int min_stateid = 0; 235 static int min_stateid = 0;
236 struct idr *stateids = &stid->sc_client->cl_stateids; 236 struct idr *stateids = &stid->sc_client->cl_stateids;
237 int new_stid; 237 int new_stid;
238 int error; 238 int error;
239 239
240 error = idr_get_new_above(stateids, stid, min_stateid, &new_stid); 240 error = idr_get_new_above(stateids, stid, min_stateid, &new_stid);
241 /* 241 /*
242 * Note: the necessary preallocation was done in 242 * Note: the necessary preallocation was done in
243 * nfs4_alloc_stateid(). The idr code caps the number of 243 * nfs4_alloc_stateid(). The idr code caps the number of
244 * preallocations that can exist at a time, but the state lock 244 * preallocations that can exist at a time, but the state lock
245 * prevents anyone from using ours before we get here: 245 * prevents anyone from using ours before we get here:
246 */ 246 */
247 WARN_ON_ONCE(error); 247 WARN_ON_ONCE(error);
248 /* 248 /*
249 * It shouldn't be a problem to reuse an opaque stateid value. 249 * It shouldn't be a problem to reuse an opaque stateid value.
250 * I don't think it is for 4.1. But with 4.0 I worry that, for 250 * I don't think it is for 4.1. But with 4.0 I worry that, for
251 * example, a stray write retransmission could be accepted by 251 * example, a stray write retransmission could be accepted by
252 * the server when it should have been rejected. Therefore, 252 * the server when it should have been rejected. Therefore,
253 * adopt a trick from the sctp code to attempt to maximize the 253 * adopt a trick from the sctp code to attempt to maximize the
254 * amount of time until an id is reused, by ensuring they always 254 * amount of time until an id is reused, by ensuring they always
255 * "increase" (mod INT_MAX): 255 * "increase" (mod INT_MAX):
256 */ 256 */
257 257
258 min_stateid = new_stid+1; 258 min_stateid = new_stid+1;
259 if (min_stateid == INT_MAX) 259 if (min_stateid == INT_MAX)
260 min_stateid = 0; 260 min_stateid = 0;
261 return new_stid; 261 return new_stid;
262 } 262 }
263 263
264 static void init_stid(struct nfs4_stid *stid, struct nfs4_client *cl, unsigned char type) 264 static void init_stid(struct nfs4_stid *stid, struct nfs4_client *cl, unsigned char type)
265 { 265 {
266 stateid_t *s = &stid->sc_stateid; 266 stateid_t *s = &stid->sc_stateid;
267 int new_id; 267 int new_id;
268 268
269 stid->sc_type = type; 269 stid->sc_type = type;
270 stid->sc_client = cl; 270 stid->sc_client = cl;
271 s->si_opaque.so_clid = cl->cl_clientid; 271 s->si_opaque.so_clid = cl->cl_clientid;
272 new_id = get_new_stid(stid); 272 new_id = get_new_stid(stid);
273 s->si_opaque.so_id = (u32)new_id; 273 s->si_opaque.so_id = (u32)new_id;
274 /* Will be incremented before return to client: */ 274 /* Will be incremented before return to client: */
275 s->si_generation = 0; 275 s->si_generation = 0;
276 } 276 }
277 277
278 static struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab) 278 static struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab)
279 { 279 {
280 struct idr *stateids = &cl->cl_stateids; 280 struct idr *stateids = &cl->cl_stateids;
281 281
282 if (!idr_pre_get(stateids, GFP_KERNEL)) 282 if (!idr_pre_get(stateids, GFP_KERNEL))
283 return NULL; 283 return NULL;
284 /* 284 /*
285 * Note: if we fail here (or any time between now and the time 285 * Note: if we fail here (or any time between now and the time
286 * we actually get the new idr), we won't need to undo the idr 286 * we actually get the new idr), we won't need to undo the idr
287 * preallocation, since the idr code caps the number of 287 * preallocation, since the idr code caps the number of
288 * preallocated entries. 288 * preallocated entries.
289 */ 289 */
290 return kmem_cache_alloc(slab, GFP_KERNEL); 290 return kmem_cache_alloc(slab, GFP_KERNEL);
291 } 291 }
292 292
293 static struct nfs4_ol_stateid * nfs4_alloc_stateid(struct nfs4_client *clp) 293 static struct nfs4_ol_stateid * nfs4_alloc_stateid(struct nfs4_client *clp)
294 { 294 {
295 return openlockstateid(nfs4_alloc_stid(clp, stateid_slab)); 295 return openlockstateid(nfs4_alloc_stid(clp, stateid_slab));
296 } 296 }
297 297
298 static struct nfs4_delegation * 298 static struct nfs4_delegation *
299 alloc_init_deleg(struct nfs4_client *clp, struct nfs4_ol_stateid *stp, struct svc_fh *current_fh, u32 type) 299 alloc_init_deleg(struct nfs4_client *clp, struct nfs4_ol_stateid *stp, struct svc_fh *current_fh, u32 type)
300 { 300 {
301 struct nfs4_delegation *dp; 301 struct nfs4_delegation *dp;
302 struct nfs4_file *fp = stp->st_file; 302 struct nfs4_file *fp = stp->st_file;
303 303
304 dprintk("NFSD alloc_init_deleg\n"); 304 dprintk("NFSD alloc_init_deleg\n");
305 /* 305 /*
306 * Major work on the lease subsystem (for example, to support 306 * Major work on the lease subsystem (for example, to support
307 * callbacks on stat) will be required before we can support 307 * callbacks on stat) will be required before we can support
308 * write delegations properly. 308 * write delegations properly.
309 */ 309 */
310 if (type != NFS4_OPEN_DELEGATE_READ) 310 if (type != NFS4_OPEN_DELEGATE_READ)
311 return NULL; 311 return NULL;
312 if (fp->fi_had_conflict) 312 if (fp->fi_had_conflict)
313 return NULL; 313 return NULL;
314 if (num_delegations > max_delegations) 314 if (num_delegations > max_delegations)
315 return NULL; 315 return NULL;
316 dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab)); 316 dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab));
317 if (dp == NULL) 317 if (dp == NULL)
318 return dp; 318 return dp;
319 init_stid(&dp->dl_stid, clp, NFS4_DELEG_STID); 319 init_stid(&dp->dl_stid, clp, NFS4_DELEG_STID);
320 /* 320 /*
321 * delegation seqid's are never incremented. The 4.1 special 321 * delegation seqid's are never incremented. The 4.1 special
322 * meaning of seqid 0 isn't meaningful, really, but let's avoid 322 * meaning of seqid 0 isn't meaningful, really, but let's avoid
323 * 0 anyway just for consistency and use 1: 323 * 0 anyway just for consistency and use 1:
324 */ 324 */
325 dp->dl_stid.sc_stateid.si_generation = 1; 325 dp->dl_stid.sc_stateid.si_generation = 1;
326 num_delegations++; 326 num_delegations++;
327 INIT_LIST_HEAD(&dp->dl_perfile); 327 INIT_LIST_HEAD(&dp->dl_perfile);
328 INIT_LIST_HEAD(&dp->dl_perclnt); 328 INIT_LIST_HEAD(&dp->dl_perclnt);
329 INIT_LIST_HEAD(&dp->dl_recall_lru); 329 INIT_LIST_HEAD(&dp->dl_recall_lru);
330 get_nfs4_file(fp); 330 get_nfs4_file(fp);
331 dp->dl_file = fp; 331 dp->dl_file = fp;
332 dp->dl_type = type; 332 dp->dl_type = type;
333 fh_copy_shallow(&dp->dl_fh, &current_fh->fh_handle); 333 fh_copy_shallow(&dp->dl_fh, &current_fh->fh_handle);
334 dp->dl_time = 0; 334 dp->dl_time = 0;
335 atomic_set(&dp->dl_count, 1); 335 atomic_set(&dp->dl_count, 1);
336 nfsd4_init_callback(&dp->dl_recall); 336 nfsd4_init_callback(&dp->dl_recall);
337 return dp; 337 return dp;
338 } 338 }
339 339
340 void 340 void
341 nfs4_put_delegation(struct nfs4_delegation *dp) 341 nfs4_put_delegation(struct nfs4_delegation *dp)
342 { 342 {
343 if (atomic_dec_and_test(&dp->dl_count)) { 343 if (atomic_dec_and_test(&dp->dl_count)) {
344 dprintk("NFSD: freeing dp %p\n",dp); 344 dprintk("NFSD: freeing dp %p\n",dp);
345 put_nfs4_file(dp->dl_file); 345 put_nfs4_file(dp->dl_file);
346 kmem_cache_free(deleg_slab, dp); 346 kmem_cache_free(deleg_slab, dp);
347 num_delegations--; 347 num_delegations--;
348 } 348 }
349 } 349 }
350 350
351 static void nfs4_put_deleg_lease(struct nfs4_file *fp) 351 static void nfs4_put_deleg_lease(struct nfs4_file *fp)
352 { 352 {
353 if (atomic_dec_and_test(&fp->fi_delegees)) { 353 if (atomic_dec_and_test(&fp->fi_delegees)) {
354 vfs_setlease(fp->fi_deleg_file, F_UNLCK, &fp->fi_lease); 354 vfs_setlease(fp->fi_deleg_file, F_UNLCK, &fp->fi_lease);
355 fp->fi_lease = NULL; 355 fp->fi_lease = NULL;
356 fput(fp->fi_deleg_file); 356 fput(fp->fi_deleg_file);
357 fp->fi_deleg_file = NULL; 357 fp->fi_deleg_file = NULL;
358 } 358 }
359 } 359 }
360 360
361 static void unhash_stid(struct nfs4_stid *s) 361 static void unhash_stid(struct nfs4_stid *s)
362 { 362 {
363 struct idr *stateids = &s->sc_client->cl_stateids; 363 struct idr *stateids = &s->sc_client->cl_stateids;
364 364
365 idr_remove(stateids, s->sc_stateid.si_opaque.so_id); 365 idr_remove(stateids, s->sc_stateid.si_opaque.so_id);
366 } 366 }
367 367
/* Called under the state lock. */
static void
unhash_delegation(struct nfs4_delegation *dp)
{
	/* Drop the stateid and the per-client linkage first... */
	unhash_stid(&dp->dl_stid);
	list_del_init(&dp->dl_perclnt);
	/* ...then the per-file and recall-lru linkage, which are
	 * protected by recall_lock rather than the state lock. */
	spin_lock(&recall_lock);
	list_del_init(&dp->dl_perfile);
	list_del_init(&dp->dl_recall_lru);
	spin_unlock(&recall_lock);
	nfs4_put_deleg_lease(dp->dl_file);
	/* Drop the reference held by the hash; this may free dp. */
	nfs4_put_delegation(dp);
}
381 381
382 /* 382 /*
383 * SETCLIENTID state 383 * SETCLIENTID state
384 */ 384 */
385 385
/* Bucket a clientid's id field into the CLIENT_HASH-sized table. */
static unsigned int clientid_hashval(u32 id)
{
	return id & CLIENT_HASH_MASK;
}
390 390
/* Bucket an opaque client-identifier string into the CLIENT_HASH table. */
static unsigned int clientstr_hashval(const char *name)
{
	/* NOTE(review): only the first 8 bytes of the name feed the hash;
	 * presumably lookups do a full comparison on collision -- confirm. */
	return opaque_hashval(name, 8) & CLIENT_HASH_MASK;
}
395 395
396 /* 396 /*
397 * We store the NONE, READ, WRITE, and BOTH bits separately in the 397 * We store the NONE, READ, WRITE, and BOTH bits separately in the
398 * st_{access,deny}_bmap field of the stateid, in order to track not 398 * st_{access,deny}_bmap field of the stateid, in order to track not
399 * only what share bits are currently in force, but also what 399 * only what share bits are currently in force, but also what
400 * combinations of share bits previous opens have used. This allows us 400 * combinations of share bits previous opens have used. This allows us
401 * to enforce the recommendation of rfc 3530 14.2.19 that the server 401 * to enforce the recommendation of rfc 3530 14.2.19 that the server
 * return an error if the client attempts to downgrade to a combination
403 * of share bits not explicable by closing some of its previous opens. 403 * of share bits not explicable by closing some of its previous opens.
404 * 404 *
405 * XXX: This enforcement is actually incomplete, since we don't keep 405 * XXX: This enforcement is actually incomplete, since we don't keep
406 * track of access/deny bit combinations; so, e.g., we allow: 406 * track of access/deny bit combinations; so, e.g., we allow:
407 * 407 *
408 * OPEN allow read, deny write 408 * OPEN allow read, deny write
409 * OPEN allow both, deny none 409 * OPEN allow both, deny none
410 * DOWNGRADE allow read, deny none 410 * DOWNGRADE allow read, deny none
411 * 411 *
412 * which we should reject. 412 * which we should reject.
413 */ 413 */
/*
 * Collapse an st_{access,deny}_bmap into a share-mode mask: every bit
 * combination (1..3) ever recorded in the bitmap contributes its value.
 */
static unsigned int
bmap_to_share_mode(unsigned long bmap) {
	unsigned int mode = 0;
	int bit;

	for (bit = 1; bit < 4; bit++)
		if (test_bit(bit, &bmap))
			mode |= bit;
	return mode;
}
425 425
426 static bool 426 static bool
427 test_share(struct nfs4_ol_stateid *stp, struct nfsd4_open *open) { 427 test_share(struct nfs4_ol_stateid *stp, struct nfsd4_open *open) {
428 unsigned int access, deny; 428 unsigned int access, deny;
429 429
430 access = bmap_to_share_mode(stp->st_access_bmap); 430 access = bmap_to_share_mode(stp->st_access_bmap);
431 deny = bmap_to_share_mode(stp->st_deny_bmap); 431 deny = bmap_to_share_mode(stp->st_deny_bmap);
432 if ((access & open->op_share_deny) || (deny & open->op_share_access)) 432 if ((access & open->op_share_deny) || (deny & open->op_share_access))
433 return false; 433 return false;
434 return true; 434 return true;
435 } 435 }
436 436
/* set share access for a given stateid */
static inline void
set_access(u32 access, struct nfs4_ol_stateid *stp)
{
	/* __set_bit is the non-atomic bitop; presumably serialized by
	 * the state lock -- confirm at call sites. */
	__set_bit(access, &stp->st_access_bmap);
}
443 443
/* clear share access for a given stateid (non-atomic bitop) */
static inline void
clear_access(u32 access, struct nfs4_ol_stateid *stp)
{
	__clear_bit(access, &stp->st_access_bmap);
}
450 450
/* test whether a given stateid has the given share access recorded */
static inline bool
test_access(u32 access, struct nfs4_ol_stateid *stp)
{
	return test_bit(access, &stp->st_access_bmap);
}
457 457
/* set share deny for a given stateid (non-atomic bitop) */
static inline void
set_deny(u32 access, struct nfs4_ol_stateid *stp)
{
	__set_bit(access, &stp->st_deny_bmap);
}
464 464
/* clear share deny for a given stateid (non-atomic bitop) */
static inline void
clear_deny(u32 access, struct nfs4_ol_stateid *stp)
{
	__clear_bit(access, &stp->st_deny_bmap);
}
471 471
/* test whether a given stateid is denying specific access */
static inline bool
test_deny(u32 access, struct nfs4_ol_stateid *stp)
{
	return test_bit(access, &stp->st_deny_bmap);
}
478 478
/* Map NFSv4 share-access bits to an open(2)-style access mode. */
static int nfs4_access_to_omode(u32 access)
{
	switch (access & NFS4_SHARE_ACCESS_BOTH) {
	case NFS4_SHARE_ACCESS_READ:
		return O_RDONLY;
	case NFS4_SHARE_ACCESS_WRITE:
		return O_WRONLY;
	case NFS4_SHARE_ACCESS_BOTH:
		return O_RDWR;
	}
	/* No access bits at all: unexpected; warn once, fall back to
	 * read-only. */
	WARN_ON_ONCE(1);
	return O_RDONLY;
}
492 492
493 /* release all access and file references for a given stateid */ 493 /* release all access and file references for a given stateid */
494 static void 494 static void
495 release_all_access(struct nfs4_ol_stateid *stp) 495 release_all_access(struct nfs4_ol_stateid *stp)
496 { 496 {
497 int i; 497 int i;
498 498
499 for (i = 1; i < 4; i++) { 499 for (i = 1; i < 4; i++) {
500 if (test_access(i, stp)) 500 if (test_access(i, stp))
501 nfs4_file_put_access(stp->st_file, 501 nfs4_file_put_access(stp->st_file,
502 nfs4_access_to_omode(i)); 502 nfs4_access_to_omode(i));
503 clear_access(i, stp); 503 clear_access(i, stp);
504 } 504 }
505 } 505 }
506 506
/* Unlink an open/lock stateid from its file's and stateowner's lists. */
static void unhash_generic_stateid(struct nfs4_ol_stateid *stp)
{
	list_del(&stp->st_perfile);
	list_del(&stp->st_perstateowner);
}
512 512
/* Drop all access modes held by a stateid, then its file reference. */
static void close_generic_stateid(struct nfs4_ol_stateid *stp)
{
	release_all_access(stp);
	put_nfs4_file(stp->st_file);
	stp->st_file = NULL;	/* guard against use after the put */
}
519 519
/* Return a stateid to the slab cache; callers unhash/close it first. */
static void free_generic_stateid(struct nfs4_ol_stateid *stp)
{
	kmem_cache_free(stateid_slab, stp);
}
524 524
/* Completely tear down a lock stateid, dropping any posix locks it holds. */
static void release_lock_stateid(struct nfs4_ol_stateid *stp)
{
	struct file *file;

	unhash_generic_stateid(stp);
	unhash_stid(&stp->st_stid);
	/* Remove this lockowner's posix locks before close_generic_stateid()
	 * drops the file reference below. */
	file = find_any_file(stp->st_file);
	if (file)
		locks_remove_posix(file, (fl_owner_t)lockowner(stp->st_stateowner));
	close_generic_stateid(stp);
	free_generic_stateid(stp);
}
537 537
/* Unlink a lockowner from all hash chains and release its lock stateids. */
static void unhash_lockowner(struct nfs4_lockowner *lo)
{
	struct nfs4_ol_stateid *stp;

	list_del(&lo->lo_owner.so_strhash);
	list_del(&lo->lo_perstateid);
	list_del(&lo->lo_owner_ino_hash);
	/* release_lock_stateid() unlinks each entry from so_stateids,
	 * so this loop terminates. */
	while (!list_empty(&lo->lo_owner.so_stateids)) {
		stp = list_first_entry(&lo->lo_owner.so_stateids,
				struct nfs4_ol_stateid, st_perstateowner);
		release_lock_stateid(stp);
	}
}
551 551
/* Tear down a lockowner: unhash it (and its stateids), then free it. */
static void release_lockowner(struct nfs4_lockowner *lo)
{
	unhash_lockowner(lo);
	nfs4_free_lockowner(lo);
}
557 557
558 static void 558 static void
559 release_stateid_lockowners(struct nfs4_ol_stateid *open_stp) 559 release_stateid_lockowners(struct nfs4_ol_stateid *open_stp)
560 { 560 {
561 struct nfs4_lockowner *lo; 561 struct nfs4_lockowner *lo;
562 562
563 while (!list_empty(&open_stp->st_lockowners)) { 563 while (!list_empty(&open_stp->st_lockowners)) {
564 lo = list_entry(open_stp->st_lockowners.next, 564 lo = list_entry(open_stp->st_lockowners.next,
565 struct nfs4_lockowner, lo_perstateid); 565 struct nfs4_lockowner, lo_perstateid);
566 release_lockowner(lo); 566 release_lockowner(lo);
567 } 567 }
568 } 568 }
569 569
/* Unlink an open stateid and release its lockowners and file access. */
static void unhash_open_stateid(struct nfs4_ol_stateid *stp)
{
	unhash_generic_stateid(stp);
	release_stateid_lockowners(stp);
	close_generic_stateid(stp);
}
576 576
/* Completely tear down and free an open stateid. */
static void release_open_stateid(struct nfs4_ol_stateid *stp)
{
	unhash_open_stateid(stp);
	unhash_stid(&stp->st_stid);
	free_generic_stateid(stp);
}
583 583
/* Unlink an openowner from its hash chains and release its open stateids. */
static void unhash_openowner(struct nfs4_openowner *oo)
{
	struct nfs4_ol_stateid *stp;

	list_del(&oo->oo_owner.so_strhash);
	list_del(&oo->oo_perclient);
	/* release_open_stateid() unlinks each entry from so_stateids,
	 * so this loop terminates. */
	while (!list_empty(&oo->oo_owner.so_stateids)) {
		stp = list_first_entry(&oo->oo_owner.so_stateids,
				struct nfs4_ol_stateid, st_perstateowner);
		release_open_stateid(stp);
	}
}
596 596
597 static void release_last_closed_stateid(struct nfs4_openowner *oo) 597 static void release_last_closed_stateid(struct nfs4_openowner *oo)
598 { 598 {
599 struct nfs4_ol_stateid *s = oo->oo_last_closed_stid; 599 struct nfs4_ol_stateid *s = oo->oo_last_closed_stid;
600 600
601 if (s) { 601 if (s) {
602 unhash_stid(&s->st_stid); 602 unhash_stid(&s->st_stid);
603 free_generic_stateid(s); 603 free_generic_stateid(s);
604 oo->oo_last_closed_stid = NULL; 604 oo->oo_last_closed_stid = NULL;
605 } 605 }
606 } 606 }
607 607
/* Tear down an openowner: unhash it, drop it from the close lru,
 * free any cached last-closed stateid, then free the owner itself. */
static void release_openowner(struct nfs4_openowner *oo)
{
	unhash_openowner(oo);
	list_del(&oo->oo_close_lru);
	release_last_closed_stateid(oo);
	nfs4_free_openowner(oo);
}
615 615
/* Bucket a sessionid by the sequence number embedded in it. */
static inline int
hash_sessionid(struct nfs4_sessionid *sessionid)
{
	struct nfsd4_sessionid *sid = (struct nfsd4_sessionid *)sessionid;

	return sid->sequence % SESSION_HASH_SIZE;
}
623 623
#ifdef NFSD_DEBUG
/* Debug helper: print a sessionid as four 32-bit words. */
static inline void
dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
{
	u32 *ptr = (u32 *)(&sessionid->data[0]);
	dprintk("%s: %u:%u:%u:%u\n", fn, ptr[0], ptr[1], ptr[2], ptr[3]);
}
#else
/* No-op when debugging is compiled out. */
static inline void
dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
{
}
#endif
637 637
638 638
/*
 * Fill in a new sessionid: the owning client's clientid plus a global
 * sequence counter, with the reserved word zeroed.
 */
static void
gen_sessionid(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd4_sessionid *sid;

	sid = (struct nfsd4_sessionid *)ses->se_sessionid.data;
	sid->clientid = clp->cl_clientid;
	/* NOTE(review): current_sessionid++ has no visible locking here;
	 * presumably serialized by the caller -- confirm. */
	sid->sequence = current_sessionid++;
	sid->reserved = 0;
}
650 650
651 /* 651 /*
 * The protocol defines ca_maxresponsesize_cached to include the size of
653 * the rpc header, but all we need to cache is the data starting after 653 * the rpc header, but all we need to cache is the data starting after
654 * the end of the initial SEQUENCE operation--the rest we regenerate 654 * the end of the initial SEQUENCE operation--the rest we regenerate
 * each time. Therefore we can advertise a ca_maxresponsesize_cached
656 * value that is the number of bytes in our cache plus a few additional 656 * value that is the number of bytes in our cache plus a few additional
657 * bytes. In order to stay on the safe side, and not promise more than 657 * bytes. In order to stay on the safe side, and not promise more than
658 * we can cache, those additional bytes must be the minimum possible: 24 658 * we can cache, those additional bytes must be the minimum possible: 24
659 * bytes of rpc header (xid through accept state, with AUTH_NULL 659 * bytes of rpc header (xid through accept state, with AUTH_NULL
660 * verifier), 12 for the compound header (with zero-length tag), and 44 660 * verifier), 12 for the compound header (with zero-length tag), and 44
661 * for the SEQUENCE op response: 661 * for the SEQUENCE op response:
662 */ 662 */
663 #define NFSD_MIN_HDR_SEQ_SZ (24 + 12 + 44) 663 #define NFSD_MIN_HDR_SEQ_SZ (24 + 12 + 44)
664 664
665 static void 665 static void
666 free_session_slots(struct nfsd4_session *ses) 666 free_session_slots(struct nfsd4_session *ses)
667 { 667 {
668 int i; 668 int i;
669 669
670 for (i = 0; i < ses->se_fchannel.maxreqs; i++) 670 for (i = 0; i < ses->se_fchannel.maxreqs; i++)
671 kfree(ses->se_slots[i]); 671 kfree(ses->se_slots[i]);
672 } 672 }
673 673
/*
 * We don't actually need to cache the rpc and session headers, so we
 * can allocate a little less for each slot:
 */
static inline int slot_bytes(struct nfsd4_channel_attrs *ca)
{
	/* NOTE(review): assumes maxresp_cached >= NFSD_MIN_HDR_SEQ_SZ;
	 * would go negative otherwise -- confirm against negotiation. */
	return ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ;
}
682 682
/* Clamp a client-requested cached-response size to what we will cache. */
static int nfsd4_sanitize_slot_size(u32 size)
{
	/* NOTE(review): unsigned subtraction wraps if the client sent
	 * size < NFSD_MIN_HDR_SEQ_SZ -- confirm callers validate this. */
	size -= NFSD_MIN_HDR_SEQ_SZ; /* We don't cache the rpc header */
	size = min_t(u32, size, NFSD_SLOT_CACHE_SIZE);

	return size;
}
690 690
/*
 * XXX: If we run out of reserved DRC memory we could (up to a point)
 * re-negotiate active sessions and reduce their slot usage to make
 * room for new connections. For now we just fail the create session.
 */
static int nfsd4_get_drc_mem(int slotsize, u32 num)
{
	int avail;

	/* Never grant more slots than a single session may have. */
	num = min_t(u32, num, NFSD_MAX_SLOTS_PER_SESSION);

	spin_lock(&nfsd_drc_lock);
	/* How much of the global DRC budget may this session consume? */
	avail = min_t(int, NFSD_MAX_MEM_PER_SESSION,
			nfsd_drc_max_mem - nfsd_drc_mem_used);
	num = min_t(int, num, avail / slotsize);
	nfsd_drc_mem_used += num * slotsize;
	spin_unlock(&nfsd_drc_lock);

	/* Number of slots actually reserved; may be 0. */
	return num;
}
711 711
/* Return a session's DRC reservation (num slots of slotsize bytes)
 * to the global pool. */
static void nfsd4_put_drc_mem(int slotsize, int num)
{
	spin_lock(&nfsd_drc_lock);
	nfsd_drc_mem_used -= slotsize * num;
	spin_unlock(&nfsd_drc_lock);
}
718 718
/*
 * Allocate a session together with its slot table.  The session struct
 * plus its trailing array of slot pointers must fit in one page; each
 * slot and its reply-cache data are then allocated as a single piece.
 * Returns NULL on allocation failure, freeing any partial allocation.
 */
static struct nfsd4_session *__alloc_session(int slotsize, int numslots)
{
	struct nfsd4_session *new;
	int mem, i;

	BUILD_BUG_ON(NFSD_MAX_SLOTS_PER_SESSION * sizeof(struct nfsd4_slot *)
			+ sizeof(struct nfsd4_session) > PAGE_SIZE);
	mem = numslots * sizeof(struct nfsd4_slot *);

	new = kzalloc(sizeof(*new) + mem, GFP_KERNEL);
	if (!new)
		return NULL;
	/* allocate each struct nfsd4_slot and data cache in one piece */
	for (i = 0; i < numslots; i++) {
		mem = sizeof(struct nfsd4_slot) + slotsize;
		new->se_slots[i] = kzalloc(mem, GFP_KERNEL);
		if (!new->se_slots[i])
			goto out_free;
	}
	return new;
out_free:
	/* Free only the slots allocated so far, then the session. */
	while (i--)
		kfree(new->se_slots[i]);
	kfree(new);
	return NULL;
}
745 745
/*
 * Negotiate the fore-channel attributes: grant the client's request,
 * clamped to what we actually allocated (numslots/slotsize) and to our
 * transport message size and per-compound operation limits.
 */
static void init_forechannel_attrs(struct nfsd4_channel_attrs *new,
				   struct nfsd4_channel_attrs *req,
				   int numslots, int slotsize,
				   struct nfsd_net *nn)
{
	u32 maxrpc = nn->nfsd_serv->sv_max_mesg;

	new->maxreqs = numslots;
	/* Advertise only as much cached-response space as each slot has
	 * (plus the header bytes we never cache). */
	new->maxresp_cached = min_t(u32, req->maxresp_cached,
					slotsize + NFSD_MIN_HDR_SEQ_SZ);
	new->maxreq_sz = min_t(u32, req->maxreq_sz, maxrpc);
	new->maxresp_sz = min_t(u32, req->maxresp_sz, maxrpc);
	new->maxops = min_t(u32, req->maxops, NFSD_MAX_OPS_PER_COMPOUND);
}
760 760
/* Drop the transport reference taken in alloc_conn, then free. */
static void free_conn(struct nfsd4_conn *c)
{
	svc_xprt_put(c->cn_xprt);
	kfree(c);
}
766 766
/*
 * Transport-gone callback: drop the connection from its session (if it
 * is still linked) and re-probe the client's callback channel, which
 * may have been using this connection.
 */
static void nfsd4_conn_lost(struct svc_xpt_user *u)
{
	struct nfsd4_conn *c = container_of(u, struct nfsd4_conn, cn_xpt_user);
	struct nfs4_client *clp = c->cn_session->se_client;

	spin_lock(&clp->cl_lock);
	/* An empty cn_persession means someone else already unlinked it. */
	if (!list_empty(&c->cn_persession)) {
		list_del(&c->cn_persession);
		free_conn(c);
	}
	spin_unlock(&clp->cl_lock);
	nfsd4_probe_callback(clp);
}
780 780
781 static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags) 781 static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags)
782 { 782 {
783 struct nfsd4_conn *conn; 783 struct nfsd4_conn *conn;
784 784
785 conn = kmalloc(sizeof(struct nfsd4_conn), GFP_KERNEL); 785 conn = kmalloc(sizeof(struct nfsd4_conn), GFP_KERNEL);
786 if (!conn) 786 if (!conn)
787 return NULL; 787 return NULL;
788 svc_xprt_get(rqstp->rq_xprt); 788 svc_xprt_get(rqstp->rq_xprt);
789 conn->cn_xprt = rqstp->rq_xprt; 789 conn->cn_xprt = rqstp->rq_xprt;
790 conn->cn_flags = flags; 790 conn->cn_flags = flags;
791 INIT_LIST_HEAD(&conn->cn_xpt_user.list); 791 INIT_LIST_HEAD(&conn->cn_xpt_user.list);
792 return conn; 792 return conn;
793 } 793 }
794 794
/* Link a connection into a session; caller holds clp->cl_lock
 * (see nfsd4_hash_conn). */
static void __nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
	conn->cn_session = ses;
	list_add(&conn->cn_persession, &ses->se_conns);
}
800 800
/* Locked wrapper: link a connection into a session under cl_lock. */
static void nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;

	spin_lock(&clp->cl_lock);
	__nfsd4_hash_conn(conn, ses);
	spin_unlock(&clp->cl_lock);
}
809 809
/* Arrange for nfsd4_conn_lost() to run when the transport goes away.
 * Returns non-zero when registration fails (transport already down --
 * see the caller's handling in nfsd4_init_conn). */
static int nfsd4_register_conn(struct nfsd4_conn *conn)
{
	conn->cn_xpt_user.callback = nfsd4_conn_lost;
	return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user);
}
815 815
/* Hash a new connection into its session and watch for transport loss. */
static void nfsd4_init_conn(struct svc_rqst *rqstp, struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
	int ret;

	nfsd4_hash_conn(conn, ses);
	ret = nfsd4_register_conn(conn);
	if (ret)
		/* oops; xprt is already down: */
		nfsd4_conn_lost(&conn->cn_xpt_user);
	if (conn->cn_flags & NFS4_CDFC4_BACK) {
		/* callback channel may be back up */
		nfsd4_probe_callback(ses->se_client);
	}
}
830 830
831 static struct nfsd4_conn *alloc_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_create_session *cses) 831 static struct nfsd4_conn *alloc_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_create_session *cses)
832 { 832 {
833 u32 dir = NFS4_CDFC4_FORE; 833 u32 dir = NFS4_CDFC4_FORE;
834 834
835 if (cses->flags & SESSION4_BACK_CHAN) 835 if (cses->flags & SESSION4_BACK_CHAN)
836 dir |= NFS4_CDFC4_BACK; 836 dir |= NFS4_CDFC4_BACK;
837 return alloc_conn(rqstp, dir); 837 return alloc_conn(rqstp, dir);
838 } 838 }
839 839
/* must be called under client_lock */
static void nfsd4_del_conns(struct nfsd4_session *s)
{
	struct nfs4_client *clp = s->se_client;
	struct nfsd4_conn *c;

	spin_lock(&clp->cl_lock);
	while (!list_empty(&s->se_conns)) {
		c = list_first_entry(&s->se_conns, struct nfsd4_conn, cn_persession);
		list_del_init(&c->cn_persession);
		/* Drop cl_lock for the teardown calls -- presumably
		 * unregister_xpt_user can block (confirm); the entry is
		 * already off the list, so nobody else can reach it. */
		spin_unlock(&clp->cl_lock);

		unregister_xpt_user(c->cn_xprt, &c->cn_xpt_user);
		free_conn(c);

		spin_lock(&clp->cl_lock);
	}
	spin_unlock(&clp->cl_lock);
}
859 859
/* Release a session's DRC reservation, its slots, and the session itself. */
static void __free_session(struct nfsd4_session *ses)
{
	nfsd4_put_drc_mem(slot_bytes(&ses->se_fchannel), ses->se_fchannel.maxreqs);
	free_session_slots(ses);
	kfree(ses);
}
866 866
/* kref release function: tear down a session's connections and free it.
 * Must run with nn->client_lock held (asserted below). */
static void free_session(struct kref *kref)
{
	struct nfsd4_session *ses;
	struct nfsd_net *nn;

	ses = container_of(kref, struct nfsd4_session, se_ref);
	nn = net_generic(ses->se_client->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);
	nfsd4_del_conns(ses);
	__free_session(ses);
}
879 879
/* Drop a session reference, taking client_lock around the locked variant. */
void nfsd4_put_session(struct nfsd4_session *ses)
{
	struct nfsd_net *nn = net_generic(ses->se_client->net, nfsd_net_id);

	spin_lock(&nn->client_lock);
	nfsd4_put_session_locked(ses);
	spin_unlock(&nn->client_lock);
}
888 888
889 static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *fchan, 889 static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *fchan,
890 struct nfsd_net *nn) 890 struct nfsd_net *nn)
891 { 891 {
892 struct nfsd4_session *new; 892 struct nfsd4_session *new;
893 int numslots, slotsize; 893 int numslots, slotsize;
894 /* 894 /*
895 * Note decreasing slot size below client's request may 895 * Note decreasing slot size below client's request may
896 * make it difficult for client to function correctly, whereas 896 * make it difficult for client to function correctly, whereas
897 * decreasing the number of slots will (just?) affect 897 * decreasing the number of slots will (just?) affect
898 * performance. When short on memory we therefore prefer to 898 * performance. When short on memory we therefore prefer to
899 * decrease number of slots instead of their size. 899 * decrease number of slots instead of their size.
900 */ 900 */
901 slotsize = nfsd4_sanitize_slot_size(fchan->maxresp_cached); 901 slotsize = nfsd4_sanitize_slot_size(fchan->maxresp_cached);
902 numslots = nfsd4_get_drc_mem(slotsize, fchan->maxreqs); 902 numslots = nfsd4_get_drc_mem(slotsize, fchan->maxreqs);
903 if (numslots < 1) 903 if (numslots < 1)
904 return NULL; 904 return NULL;
905 905
906 new = __alloc_session(slotsize, numslots); 906 new = __alloc_session(slotsize, numslots);
907 if (!new) { 907 if (!new) {
908 nfsd4_put_drc_mem(slotsize, fchan->maxreqs); 908 nfsd4_put_drc_mem(slotsize, fchan->maxreqs);
909 return NULL; 909 return NULL;
910 } 910 }
911 init_forechannel_attrs(&new->se_fchannel, fchan, numslots, slotsize, nn); 911 init_forechannel_attrs(&new->se_fchannel, fchan, numslots, slotsize, nn);
912 return new; 912 return new;
913 } 913 }
914 914
/*
 * Initialize a freshly allocated session for a client and hash it into
 * the per-net sessionid table and the client's own session list.
 */
static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, struct nfs4_client *clp, struct nfsd4_create_session *cses)
{
	int idx;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	new->se_client = clp;
	gen_sessionid(new);

	INIT_LIST_HEAD(&new->se_conns);

	/* Callback slot sequencing starts at 1. */
	new->se_cb_seq_nr = 1;
	new->se_flags = cses->flags;
	new->se_cb_prog = cses->callback_prog;
	new->se_cb_sec = cses->cb_sec;
	kref_init(&new->se_ref);
	idx = hash_sessionid(&new->se_sessionid);
	/* nn->client_lock protects the sessionid hash table; cl_lock
	 * protects the client's session list. */
	spin_lock(&nn->client_lock);
	list_add(&new->se_hash, &nn->sessionid_hashtbl[idx]);
	spin_lock(&clp->cl_lock);
	list_add(&new->se_perclnt, &clp->cl_sessions);
	spin_unlock(&clp->cl_lock);
	spin_unlock(&nn->client_lock);

	if (cses->flags & SESSION4_BACK_CHAN) {
		struct sockaddr *sa = svc_addr(rqstp);
		/*
		 * This is a little silly; with sessions there's no real
		 * use for the callback address. Use the peer address
		 * as a reasonable default for now, but consider fixing
		 * the rpc client not to require an address in the
		 * future:
		 */
		rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa);
		clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa);
	}
}
951 951
952 /* caller must hold client_lock */ 952 /* caller must hold client_lock */
953 static struct nfsd4_session * 953 static struct nfsd4_session *
954 find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net) 954 find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net)
955 { 955 {
956 struct nfsd4_session *elem; 956 struct nfsd4_session *elem;
957 int idx; 957 int idx;
958 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 958 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
959 959
960 dump_sessionid(__func__, sessionid); 960 dump_sessionid(__func__, sessionid);
961 idx = hash_sessionid(sessionid); 961 idx = hash_sessionid(sessionid);
962 /* Search in the appropriate list */ 962 /* Search in the appropriate list */
963 list_for_each_entry(elem, &nn->sessionid_hashtbl[idx], se_hash) { 963 list_for_each_entry(elem, &nn->sessionid_hashtbl[idx], se_hash) {
964 if (!memcmp(elem->se_sessionid.data, sessionid->data, 964 if (!memcmp(elem->se_sessionid.data, sessionid->data,
965 NFS4_MAX_SESSIONID_LEN)) { 965 NFS4_MAX_SESSIONID_LEN)) {
966 return elem; 966 return elem;
967 } 967 }
968 } 968 }
969 969
970 dprintk("%s: session not found\n", __func__); 970 dprintk("%s: session not found\n", __func__);
971 return NULL; 971 return NULL;
972 } 972 }
973 973
974 /* caller must hold client_lock */ 974 /* caller must hold client_lock */
975 static void 975 static void
976 unhash_session(struct nfsd4_session *ses) 976 unhash_session(struct nfsd4_session *ses)
977 { 977 {
978 list_del(&ses->se_hash); 978 list_del(&ses->se_hash);
979 spin_lock(&ses->se_client->cl_lock); 979 spin_lock(&ses->se_client->cl_lock);
980 list_del(&ses->se_perclnt); 980 list_del(&ses->se_perclnt);
981 spin_unlock(&ses->se_client->cl_lock); 981 spin_unlock(&ses->se_client->cl_lock);
982 } 982 }
983 983
984 /* must be called under the client_lock */ 984 /* must be called under the client_lock */
985 static inline void 985 static inline void
986 renew_client_locked(struct nfs4_client *clp) 986 renew_client_locked(struct nfs4_client *clp)
987 { 987 {
988 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 988 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
989 989
990 if (is_client_expired(clp)) { 990 if (is_client_expired(clp)) {
991 WARN_ON(1); 991 WARN_ON(1);
992 printk("%s: client (clientid %08x/%08x) already expired\n", 992 printk("%s: client (clientid %08x/%08x) already expired\n",
993 __func__, 993 __func__,
994 clp->cl_clientid.cl_boot, 994 clp->cl_clientid.cl_boot,
995 clp->cl_clientid.cl_id); 995 clp->cl_clientid.cl_id);
996 return; 996 return;
997 } 997 }
998 998
999 dprintk("renewing client (clientid %08x/%08x)\n", 999 dprintk("renewing client (clientid %08x/%08x)\n",
1000 clp->cl_clientid.cl_boot, 1000 clp->cl_clientid.cl_boot,
1001 clp->cl_clientid.cl_id); 1001 clp->cl_clientid.cl_id);
1002 list_move_tail(&clp->cl_lru, &nn->client_lru); 1002 list_move_tail(&clp->cl_lru, &nn->client_lru);
1003 clp->cl_time = get_seconds(); 1003 clp->cl_time = get_seconds();
1004 } 1004 }
1005 1005
1006 static inline void 1006 static inline void
1007 renew_client(struct nfs4_client *clp) 1007 renew_client(struct nfs4_client *clp)
1008 { 1008 {
1009 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 1009 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
1010 1010
1011 spin_lock(&nn->client_lock); 1011 spin_lock(&nn->client_lock);
1012 renew_client_locked(clp); 1012 renew_client_locked(clp);
1013 spin_unlock(&nn->client_lock); 1013 spin_unlock(&nn->client_lock);
1014 } 1014 }
1015 1015
1016 /* SETCLIENTID and SETCLIENTID_CONFIRM Helper functions */ 1016 /* SETCLIENTID and SETCLIENTID_CONFIRM Helper functions */
1017 static int 1017 static int
1018 STALE_CLIENTID(clientid_t *clid, struct nfsd_net *nn) 1018 STALE_CLIENTID(clientid_t *clid, struct nfsd_net *nn)
1019 { 1019 {
1020 if (clid->cl_boot == nn->boot_time) 1020 if (clid->cl_boot == nn->boot_time)
1021 return 0; 1021 return 0;
1022 dprintk("NFSD stale clientid (%08x/%08x) boot_time %08lx\n", 1022 dprintk("NFSD stale clientid (%08x/%08x) boot_time %08lx\n",
1023 clid->cl_boot, clid->cl_id, nn->boot_time); 1023 clid->cl_boot, clid->cl_id, nn->boot_time);
1024 return 1; 1024 return 1;
1025 } 1025 }
1026 1026
1027 /* 1027 /*
1028 * XXX Should we use a slab cache ? 1028 * XXX Should we use a slab cache ?
1029 * This type of memory management is somewhat inefficient, but we use it 1029 * This type of memory management is somewhat inefficient, but we use it
1030 * anyway since SETCLIENTID is not a common operation. 1030 * anyway since SETCLIENTID is not a common operation.
1031 */ 1031 */
1032 static struct nfs4_client *alloc_client(struct xdr_netobj name) 1032 static struct nfs4_client *alloc_client(struct xdr_netobj name)
1033 { 1033 {
1034 struct nfs4_client *clp; 1034 struct nfs4_client *clp;
1035 1035
1036 clp = kzalloc(sizeof(struct nfs4_client), GFP_KERNEL); 1036 clp = kzalloc(sizeof(struct nfs4_client), GFP_KERNEL);
1037 if (clp == NULL) 1037 if (clp == NULL)
1038 return NULL; 1038 return NULL;
1039 clp->cl_name.data = kmemdup(name.data, name.len, GFP_KERNEL); 1039 clp->cl_name.data = kmemdup(name.data, name.len, GFP_KERNEL);
1040 if (clp->cl_name.data == NULL) { 1040 if (clp->cl_name.data == NULL) {
1041 kfree(clp); 1041 kfree(clp);
1042 return NULL; 1042 return NULL;
1043 } 1043 }
1044 clp->cl_name.len = name.len; 1044 clp->cl_name.len = name.len;
1045 return clp; 1045 return clp;
1046 } 1046 }
1047 1047
1048 static inline void 1048 static inline void
1049 free_client(struct nfs4_client *clp) 1049 free_client(struct nfs4_client *clp)
1050 { 1050 {
1051 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 1051 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
1052 1052
1053 lockdep_assert_held(&nn->client_lock); 1053 lockdep_assert_held(&nn->client_lock);
1054 while (!list_empty(&clp->cl_sessions)) { 1054 while (!list_empty(&clp->cl_sessions)) {
1055 struct nfsd4_session *ses; 1055 struct nfsd4_session *ses;
1056 ses = list_entry(clp->cl_sessions.next, struct nfsd4_session, 1056 ses = list_entry(clp->cl_sessions.next, struct nfsd4_session,
1057 se_perclnt); 1057 se_perclnt);
1058 list_del(&ses->se_perclnt); 1058 list_del(&ses->se_perclnt);
1059 nfsd4_put_session_locked(ses); 1059 nfsd4_put_session_locked(ses);
1060 } 1060 }
1061 free_svc_cred(&clp->cl_cred); 1061 free_svc_cred(&clp->cl_cred);
1062 kfree(clp->cl_name.data); 1062 kfree(clp->cl_name.data);
1063 kfree(clp); 1063 kfree(clp);
1064 } 1064 }
1065 1065
1066 void 1066 void
1067 release_session_client(struct nfsd4_session *session) 1067 release_session_client(struct nfsd4_session *session)
1068 { 1068 {
1069 struct nfs4_client *clp = session->se_client; 1069 struct nfs4_client *clp = session->se_client;
1070 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 1070 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
1071 1071
1072 if (!atomic_dec_and_lock(&clp->cl_refcount, &nn->client_lock)) 1072 if (!atomic_dec_and_lock(&clp->cl_refcount, &nn->client_lock))
1073 return; 1073 return;
1074 if (is_client_expired(clp)) { 1074 if (is_client_expired(clp)) {
1075 free_client(clp); 1075 free_client(clp);
1076 session->se_client = NULL; 1076 session->se_client = NULL;
1077 } else 1077 } else
1078 renew_client_locked(clp); 1078 renew_client_locked(clp);
1079 spin_unlock(&nn->client_lock); 1079 spin_unlock(&nn->client_lock);
1080 } 1080 }
1081 1081
1082 /* must be called under the client_lock */ 1082 /* must be called under the client_lock */
1083 static inline void 1083 static inline void
1084 unhash_client_locked(struct nfs4_client *clp) 1084 unhash_client_locked(struct nfs4_client *clp)
1085 { 1085 {
1086 struct nfsd4_session *ses; 1086 struct nfsd4_session *ses;
1087 1087
1088 mark_client_expired(clp); 1088 mark_client_expired(clp);
1089 list_del(&clp->cl_lru); 1089 list_del(&clp->cl_lru);
1090 spin_lock(&clp->cl_lock); 1090 spin_lock(&clp->cl_lock);
1091 list_for_each_entry(ses, &clp->cl_sessions, se_perclnt) 1091 list_for_each_entry(ses, &clp->cl_sessions, se_perclnt)
1092 list_del_init(&ses->se_hash); 1092 list_del_init(&ses->se_hash);
1093 spin_unlock(&clp->cl_lock); 1093 spin_unlock(&clp->cl_lock);
1094 } 1094 }
1095 1095
1096 static void 1096 static void
1097 destroy_client(struct nfs4_client *clp) 1097 destroy_client(struct nfs4_client *clp)
1098 { 1098 {
1099 struct nfs4_openowner *oo; 1099 struct nfs4_openowner *oo;
1100 struct nfs4_delegation *dp; 1100 struct nfs4_delegation *dp;
1101 struct list_head reaplist; 1101 struct list_head reaplist;
1102 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 1102 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
1103 1103
1104 INIT_LIST_HEAD(&reaplist); 1104 INIT_LIST_HEAD(&reaplist);
1105 spin_lock(&recall_lock); 1105 spin_lock(&recall_lock);
1106 while (!list_empty(&clp->cl_delegations)) { 1106 while (!list_empty(&clp->cl_delegations)) {
1107 dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt); 1107 dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt);
1108 list_del_init(&dp->dl_perclnt); 1108 list_del_init(&dp->dl_perclnt);
1109 list_move(&dp->dl_recall_lru, &reaplist); 1109 list_move(&dp->dl_recall_lru, &reaplist);
1110 } 1110 }
1111 spin_unlock(&recall_lock); 1111 spin_unlock(&recall_lock);
1112 while (!list_empty(&reaplist)) { 1112 while (!list_empty(&reaplist)) {
1113 dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru); 1113 dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
1114 unhash_delegation(dp); 1114 unhash_delegation(dp);
1115 } 1115 }
1116 while (!list_empty(&clp->cl_openowners)) { 1116 while (!list_empty(&clp->cl_openowners)) {
1117 oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient); 1117 oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient);
1118 release_openowner(oo); 1118 release_openowner(oo);
1119 } 1119 }
1120 nfsd4_shutdown_callback(clp); 1120 nfsd4_shutdown_callback(clp);
1121 if (clp->cl_cb_conn.cb_xprt) 1121 if (clp->cl_cb_conn.cb_xprt)
1122 svc_xprt_put(clp->cl_cb_conn.cb_xprt); 1122 svc_xprt_put(clp->cl_cb_conn.cb_xprt);
1123 list_del(&clp->cl_idhash); 1123 list_del(&clp->cl_idhash);
1124 if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags)) 1124 if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags))
1125 rb_erase(&clp->cl_namenode, &nn->conf_name_tree); 1125 rb_erase(&clp->cl_namenode, &nn->conf_name_tree);
1126 else 1126 else
1127 rb_erase(&clp->cl_namenode, &nn->unconf_name_tree); 1127 rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
1128 spin_lock(&nn->client_lock); 1128 spin_lock(&nn->client_lock);
1129 unhash_client_locked(clp); 1129 unhash_client_locked(clp);
1130 if (atomic_read(&clp->cl_refcount) == 0) 1130 if (atomic_read(&clp->cl_refcount) == 0)
1131 free_client(clp); 1131 free_client(clp);
1132 spin_unlock(&nn->client_lock); 1132 spin_unlock(&nn->client_lock);
1133 } 1133 }
1134 1134
/* Forget a client: drop its stable-storage record, then all its state. */
static void expire_client(struct nfs4_client *clp)
{
	nfsd4_client_record_remove(clp);
	destroy_client(clp);
}
1140 1140
1141 static void copy_verf(struct nfs4_client *target, nfs4_verifier *source) 1141 static void copy_verf(struct nfs4_client *target, nfs4_verifier *source)
1142 { 1142 {
1143 memcpy(target->cl_verifier.data, source->data, 1143 memcpy(target->cl_verifier.data, source->data,
1144 sizeof(target->cl_verifier.data)); 1144 sizeof(target->cl_verifier.data));
1145 } 1145 }
1146 1146
1147 static void copy_clid(struct nfs4_client *target, struct nfs4_client *source) 1147 static void copy_clid(struct nfs4_client *target, struct nfs4_client *source)
1148 { 1148 {
1149 target->cl_clientid.cl_boot = source->cl_clientid.cl_boot; 1149 target->cl_clientid.cl_boot = source->cl_clientid.cl_boot;
1150 target->cl_clientid.cl_id = source->cl_clientid.cl_id; 1150 target->cl_clientid.cl_id = source->cl_clientid.cl_id;
1151 } 1151 }
1152 1152
1153 static int copy_cred(struct svc_cred *target, struct svc_cred *source) 1153 static int copy_cred(struct svc_cred *target, struct svc_cred *source)
1154 { 1154 {
1155 if (source->cr_principal) { 1155 if (source->cr_principal) {
1156 target->cr_principal = 1156 target->cr_principal =
1157 kstrdup(source->cr_principal, GFP_KERNEL); 1157 kstrdup(source->cr_principal, GFP_KERNEL);
1158 if (target->cr_principal == NULL) 1158 if (target->cr_principal == NULL)
1159 return -ENOMEM; 1159 return -ENOMEM;
1160 } else 1160 } else
1161 target->cr_principal = NULL; 1161 target->cr_principal = NULL;
1162 target->cr_flavor = source->cr_flavor; 1162 target->cr_flavor = source->cr_flavor;
1163 target->cr_uid = source->cr_uid; 1163 target->cr_uid = source->cr_uid;
1164 target->cr_gid = source->cr_gid; 1164 target->cr_gid = source->cr_gid;
1165 target->cr_group_info = source->cr_group_info; 1165 target->cr_group_info = source->cr_group_info;
1166 get_group_info(target->cr_group_info); 1166 get_group_info(target->cr_group_info);
1167 return 0; 1167 return 0;
1168 } 1168 }
1169 1169
1170 static long long 1170 static long long
1171 compare_blob(const struct xdr_netobj *o1, const struct xdr_netobj *o2) 1171 compare_blob(const struct xdr_netobj *o1, const struct xdr_netobj *o2)
1172 { 1172 {
1173 long long res; 1173 long long res;
1174 1174
1175 res = o1->len - o2->len; 1175 res = o1->len - o2->len;
1176 if (res) 1176 if (res)
1177 return res; 1177 return res;
1178 return (long long)memcmp(o1->data, o2->data, o1->len); 1178 return (long long)memcmp(o1->data, o2->data, o1->len);
1179 } 1179 }
1180 1180
1181 static int same_name(const char *n1, const char *n2) 1181 static int same_name(const char *n1, const char *n2)
1182 { 1182 {
1183 return 0 == memcmp(n1, n2, HEXDIR_LEN); 1183 return 0 == memcmp(n1, n2, HEXDIR_LEN);
1184 } 1184 }
1185 1185
1186 static int 1186 static int
1187 same_verf(nfs4_verifier *v1, nfs4_verifier *v2) 1187 same_verf(nfs4_verifier *v1, nfs4_verifier *v2)
1188 { 1188 {
1189 return 0 == memcmp(v1->data, v2->data, sizeof(v1->data)); 1189 return 0 == memcmp(v1->data, v2->data, sizeof(v1->data));
1190 } 1190 }
1191 1191
1192 static int 1192 static int
1193 same_clid(clientid_t *cl1, clientid_t *cl2) 1193 same_clid(clientid_t *cl1, clientid_t *cl2)
1194 { 1194 {
1195 return (cl1->cl_boot == cl2->cl_boot) && (cl1->cl_id == cl2->cl_id); 1195 return (cl1->cl_boot == cl2->cl_boot) && (cl1->cl_id == cl2->cl_id);
1196 } 1196 }
1197 1197
1198 static bool groups_equal(struct group_info *g1, struct group_info *g2) 1198 static bool groups_equal(struct group_info *g1, struct group_info *g2)
1199 { 1199 {
1200 int i; 1200 int i;
1201 1201
1202 if (g1->ngroups != g2->ngroups) 1202 if (g1->ngroups != g2->ngroups)
1203 return false; 1203 return false;
1204 for (i=0; i<g1->ngroups; i++) 1204 for (i=0; i<g1->ngroups; i++)
1205 if (GROUP_AT(g1, i) != GROUP_AT(g2, i)) 1205 if (GROUP_AT(g1, i) != GROUP_AT(g2, i))
1206 return false; 1206 return false;
1207 return true; 1207 return true;
1208 } 1208 }
1209 1209
1210 /* 1210 /*
1211 * RFC 3530 language requires clid_inuse be returned when the 1211 * RFC 3530 language requires clid_inuse be returned when the
1212 * "principal" associated with a requests differs from that previously 1212 * "principal" associated with a requests differs from that previously
1213 * used. We use uid, gid's, and gss principal string as our best 1213 * used. We use uid, gid's, and gss principal string as our best
1214 * approximation. We also don't want to allow non-gss use of a client 1214 * approximation. We also don't want to allow non-gss use of a client
1215 * established using gss: in theory cr_principal should catch that 1215 * established using gss: in theory cr_principal should catch that
1216 * change, but in practice cr_principal can be null even in the gss case 1216 * change, but in practice cr_principal can be null even in the gss case
1217 * since gssd doesn't always pass down a principal string. 1217 * since gssd doesn't always pass down a principal string.
1218 */ 1218 */
1219 static bool is_gss_cred(struct svc_cred *cr) 1219 static bool is_gss_cred(struct svc_cred *cr)
1220 { 1220 {
1221 /* Is cr_flavor one of the gss "pseudoflavors"?: */ 1221 /* Is cr_flavor one of the gss "pseudoflavors"?: */
1222 return (cr->cr_flavor > RPC_AUTH_MAXFLAVOR); 1222 return (cr->cr_flavor > RPC_AUTH_MAXFLAVOR);
1223 } 1223 }
1224 1224
1225 1225
1226 static bool 1226 static bool
1227 same_creds(struct svc_cred *cr1, struct svc_cred *cr2) 1227 same_creds(struct svc_cred *cr1, struct svc_cred *cr2)
1228 { 1228 {
1229 if ((is_gss_cred(cr1) != is_gss_cred(cr2)) 1229 if ((is_gss_cred(cr1) != is_gss_cred(cr2))
1230 || (cr1->cr_uid != cr2->cr_uid) 1230 || (cr1->cr_uid != cr2->cr_uid)
1231 || (cr1->cr_gid != cr2->cr_gid) 1231 || (cr1->cr_gid != cr2->cr_gid)
1232 || !groups_equal(cr1->cr_group_info, cr2->cr_group_info)) 1232 || !groups_equal(cr1->cr_group_info, cr2->cr_group_info))
1233 return false; 1233 return false;
1234 if (cr1->cr_principal == cr2->cr_principal) 1234 if (cr1->cr_principal == cr2->cr_principal)
1235 return true; 1235 return true;
1236 if (!cr1->cr_principal || !cr2->cr_principal) 1236 if (!cr1->cr_principal || !cr2->cr_principal)
1237 return false; 1237 return false;
1238 return 0 == strcmp(cr1->cr_principal, cr2->cr_principal); 1238 return 0 == strcmp(cr1->cr_principal, cr2->cr_principal);
1239 } 1239 }
1240 1240
1241 static void gen_clid(struct nfs4_client *clp, struct nfsd_net *nn) 1241 static void gen_clid(struct nfs4_client *clp, struct nfsd_net *nn)
1242 { 1242 {
1243 static u32 current_clientid = 1; 1243 static u32 current_clientid = 1;
1244 1244
1245 clp->cl_clientid.cl_boot = nn->boot_time; 1245 clp->cl_clientid.cl_boot = nn->boot_time;
1246 clp->cl_clientid.cl_id = current_clientid++; 1246 clp->cl_clientid.cl_id = current_clientid++;
1247 } 1247 }
1248 1248
1249 static void gen_confirm(struct nfs4_client *clp) 1249 static void gen_confirm(struct nfs4_client *clp)
1250 { 1250 {
1251 __be32 verf[2]; 1251 __be32 verf[2];
1252 static u32 i; 1252 static u32 i;
1253 1253
1254 verf[0] = (__be32)get_seconds(); 1254 verf[0] = (__be32)get_seconds();
1255 verf[1] = (__be32)i++; 1255 verf[1] = (__be32)i++;
1256 memcpy(clp->cl_confirm.data, verf, sizeof(clp->cl_confirm.data)); 1256 memcpy(clp->cl_confirm.data, verf, sizeof(clp->cl_confirm.data));
1257 } 1257 }
1258 1258
1259 static struct nfs4_stid *find_stateid(struct nfs4_client *cl, stateid_t *t) 1259 static struct nfs4_stid *find_stateid(struct nfs4_client *cl, stateid_t *t)
1260 { 1260 {
1261 return idr_find(&cl->cl_stateids, t->si_opaque.so_id); 1261 return idr_find(&cl->cl_stateids, t->si_opaque.so_id);
1262 } 1262 }
1263 1263
1264 static struct nfs4_stid *find_stateid_by_type(struct nfs4_client *cl, stateid_t *t, char typemask) 1264 static struct nfs4_stid *find_stateid_by_type(struct nfs4_client *cl, stateid_t *t, char typemask)
1265 { 1265 {
1266 struct nfs4_stid *s; 1266 struct nfs4_stid *s;
1267 1267
1268 s = find_stateid(cl, t); 1268 s = find_stateid(cl, t);
1269 if (!s) 1269 if (!s)
1270 return NULL; 1270 return NULL;
1271 if (typemask & s->sc_type) 1271 if (typemask & s->sc_type)
1272 return s; 1272 return s;
1273 return NULL; 1273 return NULL;
1274 } 1274 }
1275 1275
1276 static struct nfs4_client *create_client(struct xdr_netobj name, 1276 static struct nfs4_client *create_client(struct xdr_netobj name,
1277 struct svc_rqst *rqstp, nfs4_verifier *verf) 1277 struct svc_rqst *rqstp, nfs4_verifier *verf)
1278 { 1278 {
1279 struct nfs4_client *clp; 1279 struct nfs4_client *clp;
1280 struct sockaddr *sa = svc_addr(rqstp); 1280 struct sockaddr *sa = svc_addr(rqstp);
1281 int ret; 1281 int ret;
1282 struct net *net = SVC_NET(rqstp); 1282 struct net *net = SVC_NET(rqstp);
1283 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 1283 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
1284 1284
1285 clp = alloc_client(name); 1285 clp = alloc_client(name);
1286 if (clp == NULL) 1286 if (clp == NULL)
1287 return NULL; 1287 return NULL;
1288 1288
1289 INIT_LIST_HEAD(&clp->cl_sessions); 1289 INIT_LIST_HEAD(&clp->cl_sessions);
1290 ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred); 1290 ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred);
1291 if (ret) { 1291 if (ret) {
1292 spin_lock(&nn->client_lock); 1292 spin_lock(&nn->client_lock);
1293 free_client(clp); 1293 free_client(clp);
1294 spin_unlock(&nn->client_lock); 1294 spin_unlock(&nn->client_lock);
1295 return NULL; 1295 return NULL;
1296 } 1296 }
1297 idr_init(&clp->cl_stateids); 1297 idr_init(&clp->cl_stateids);
1298 atomic_set(&clp->cl_refcount, 0); 1298 atomic_set(&clp->cl_refcount, 0);
1299 clp->cl_cb_state = NFSD4_CB_UNKNOWN; 1299 clp->cl_cb_state = NFSD4_CB_UNKNOWN;
1300 INIT_LIST_HEAD(&clp->cl_idhash); 1300 INIT_LIST_HEAD(&clp->cl_idhash);
1301 INIT_LIST_HEAD(&clp->cl_openowners); 1301 INIT_LIST_HEAD(&clp->cl_openowners);
1302 INIT_LIST_HEAD(&clp->cl_delegations); 1302 INIT_LIST_HEAD(&clp->cl_delegations);
1303 INIT_LIST_HEAD(&clp->cl_lru); 1303 INIT_LIST_HEAD(&clp->cl_lru);
1304 INIT_LIST_HEAD(&clp->cl_callbacks); 1304 INIT_LIST_HEAD(&clp->cl_callbacks);
1305 spin_lock_init(&clp->cl_lock); 1305 spin_lock_init(&clp->cl_lock);
1306 nfsd4_init_callback(&clp->cl_cb_null); 1306 nfsd4_init_callback(&clp->cl_cb_null);
1307 clp->cl_time = get_seconds(); 1307 clp->cl_time = get_seconds();
1308 clear_bit(0, &clp->cl_cb_slot_busy); 1308 clear_bit(0, &clp->cl_cb_slot_busy);
1309 rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table"); 1309 rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
1310 copy_verf(clp, verf); 1310 copy_verf(clp, verf);
1311 rpc_copy_addr((struct sockaddr *) &clp->cl_addr, sa); 1311 rpc_copy_addr((struct sockaddr *) &clp->cl_addr, sa);
1312 gen_confirm(clp); 1312 gen_confirm(clp);
1313 clp->cl_cb_session = NULL; 1313 clp->cl_cb_session = NULL;
1314 clp->net = net; 1314 clp->net = net;
1315 return clp; 1315 return clp;
1316 } 1316 }
1317 1317
1318 static void 1318 static void
1319 add_clp_to_name_tree(struct nfs4_client *new_clp, struct rb_root *root) 1319 add_clp_to_name_tree(struct nfs4_client *new_clp, struct rb_root *root)
1320 { 1320 {
1321 struct rb_node **new = &(root->rb_node), *parent = NULL; 1321 struct rb_node **new = &(root->rb_node), *parent = NULL;
1322 struct nfs4_client *clp; 1322 struct nfs4_client *clp;
1323 1323
1324 while (*new) { 1324 while (*new) {
1325 clp = rb_entry(*new, struct nfs4_client, cl_namenode); 1325 clp = rb_entry(*new, struct nfs4_client, cl_namenode);
1326 parent = *new; 1326 parent = *new;
1327 1327
1328 if (compare_blob(&clp->cl_name, &new_clp->cl_name) > 0) 1328 if (compare_blob(&clp->cl_name, &new_clp->cl_name) > 0)
1329 new = &((*new)->rb_left); 1329 new = &((*new)->rb_left);
1330 else 1330 else
1331 new = &((*new)->rb_right); 1331 new = &((*new)->rb_right);
1332 } 1332 }
1333 1333
1334 rb_link_node(&new_clp->cl_namenode, parent, new); 1334 rb_link_node(&new_clp->cl_namenode, parent, new);
1335 rb_insert_color(&new_clp->cl_namenode, root); 1335 rb_insert_color(&new_clp->cl_namenode, root);
1336 } 1336 }
1337 1337
1338 static struct nfs4_client * 1338 static struct nfs4_client *
1339 find_clp_in_name_tree(struct xdr_netobj *name, struct rb_root *root) 1339 find_clp_in_name_tree(struct xdr_netobj *name, struct rb_root *root)
1340 { 1340 {
1341 long long cmp; 1341 long long cmp;
1342 struct rb_node *node = root->rb_node; 1342 struct rb_node *node = root->rb_node;
1343 struct nfs4_client *clp; 1343 struct nfs4_client *clp;
1344 1344
1345 while (node) { 1345 while (node) {
1346 clp = rb_entry(node, struct nfs4_client, cl_namenode); 1346 clp = rb_entry(node, struct nfs4_client, cl_namenode);
1347 cmp = compare_blob(&clp->cl_name, name); 1347 cmp = compare_blob(&clp->cl_name, name);
1348 if (cmp > 0) 1348 if (cmp > 0)
1349 node = node->rb_left; 1349 node = node->rb_left;
1350 else if (cmp < 0) 1350 else if (cmp < 0)
1351 node = node->rb_right; 1351 node = node->rb_right;
1352 else 1352 else
1353 return clp; 1353 return clp;
1354 } 1354 }
1355 return NULL; 1355 return NULL;
1356 } 1356 }
1357 1357
1358 static void 1358 static void
1359 add_to_unconfirmed(struct nfs4_client *clp) 1359 add_to_unconfirmed(struct nfs4_client *clp)
1360 { 1360 {
1361 unsigned int idhashval; 1361 unsigned int idhashval;
1362 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 1362 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
1363 1363
1364 clear_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags); 1364 clear_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
1365 add_clp_to_name_tree(clp, &nn->unconf_name_tree); 1365 add_clp_to_name_tree(clp, &nn->unconf_name_tree);
1366 idhashval = clientid_hashval(clp->cl_clientid.cl_id); 1366 idhashval = clientid_hashval(clp->cl_clientid.cl_id);
1367 list_add(&clp->cl_idhash, &nn->unconf_id_hashtbl[idhashval]); 1367 list_add(&clp->cl_idhash, &nn->unconf_id_hashtbl[idhashval]);
1368 renew_client(clp); 1368 renew_client(clp);
1369 } 1369 }
1370 1370
1371 static void 1371 static void
1372 move_to_confirmed(struct nfs4_client *clp) 1372 move_to_confirmed(struct nfs4_client *clp)
1373 { 1373 {
1374 unsigned int idhashval = clientid_hashval(clp->cl_clientid.cl_id); 1374 unsigned int idhashval = clientid_hashval(clp->cl_clientid.cl_id);
1375 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 1375 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
1376 1376
1377 dprintk("NFSD: move_to_confirm nfs4_client %p\n", clp); 1377 dprintk("NFSD: move_to_confirm nfs4_client %p\n", clp);
1378 list_move(&clp->cl_idhash, &nn->conf_id_hashtbl[idhashval]); 1378 list_move(&clp->cl_idhash, &nn->conf_id_hashtbl[idhashval]);
1379 rb_erase(&clp->cl_namenode, &nn->unconf_name_tree); 1379 rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
1380 add_clp_to_name_tree(clp, &nn->conf_name_tree); 1380 add_clp_to_name_tree(clp, &nn->conf_name_tree);
1381 set_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags); 1381 set_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
1382 renew_client(clp); 1382 renew_client(clp);
1383 } 1383 }
1384 1384
1385 static struct nfs4_client * 1385 static struct nfs4_client *
1386 find_confirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn) 1386 find_confirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
1387 { 1387 {
1388 struct nfs4_client *clp; 1388 struct nfs4_client *clp;
1389 unsigned int idhashval = clientid_hashval(clid->cl_id); 1389 unsigned int idhashval = clientid_hashval(clid->cl_id);
1390 1390
1391 list_for_each_entry(clp, &nn->conf_id_hashtbl[idhashval], cl_idhash) { 1391 list_for_each_entry(clp, &nn->conf_id_hashtbl[idhashval], cl_idhash) {
1392 if (same_clid(&clp->cl_clientid, clid)) { 1392 if (same_clid(&clp->cl_clientid, clid)) {
1393 if ((bool)clp->cl_minorversion != sessions) 1393 if ((bool)clp->cl_minorversion != sessions)
1394 return NULL; 1394 return NULL;
1395 renew_client(clp); 1395 renew_client(clp);
1396 return clp; 1396 return clp;
1397 } 1397 }
1398 } 1398 }
1399 return NULL; 1399 return NULL;
1400 } 1400 }
1401 1401
1402 static struct nfs4_client * 1402 static struct nfs4_client *
1403 find_unconfirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn) 1403 find_unconfirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
1404 { 1404 {
1405 struct nfs4_client *clp; 1405 struct nfs4_client *clp;
1406 unsigned int idhashval = clientid_hashval(clid->cl_id); 1406 unsigned int idhashval = clientid_hashval(clid->cl_id);
1407 1407
1408 list_for_each_entry(clp, &nn->unconf_id_hashtbl[idhashval], cl_idhash) { 1408 list_for_each_entry(clp, &nn->unconf_id_hashtbl[idhashval], cl_idhash) {
1409 if (same_clid(&clp->cl_clientid, clid)) { 1409 if (same_clid(&clp->cl_clientid, clid)) {
1410 if ((bool)clp->cl_minorversion != sessions) 1410 if ((bool)clp->cl_minorversion != sessions)
1411 return NULL; 1411 return NULL;
1412 return clp; 1412 return clp;
1413 } 1413 }
1414 } 1414 }
1415 return NULL; 1415 return NULL;
1416 } 1416 }
1417 1417
1418 static bool clp_used_exchangeid(struct nfs4_client *clp) 1418 static bool clp_used_exchangeid(struct nfs4_client *clp)
1419 { 1419 {
1420 return clp->cl_exchange_flags != 0; 1420 return clp->cl_exchange_flags != 0;
1421 } 1421 }
1422 1422
1423 static struct nfs4_client * 1423 static struct nfs4_client *
1424 find_confirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn) 1424 find_confirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
1425 { 1425 {
1426 return find_clp_in_name_tree(name, &nn->conf_name_tree); 1426 return find_clp_in_name_tree(name, &nn->conf_name_tree);
1427 } 1427 }
1428 1428
1429 static struct nfs4_client * 1429 static struct nfs4_client *
1430 find_unconfirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn) 1430 find_unconfirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
1431 { 1431 {
1432 return find_clp_in_name_tree(name, &nn->unconf_name_tree); 1432 return find_clp_in_name_tree(name, &nn->unconf_name_tree);
1433 } 1433 }
1434 1434
/*
 * Record the callback target the client asked for in clp->cl_cb_conn.
 *
 * Parses the netid + universal address sent in SETCLIENTID and, on
 * success, fills in the callback address, program and ident.  On any
 * parse or validation failure the callback address is cleared
 * (AF_UNSPEC, zero length), which means this client will receive no
 * delegations.
 */
static void
gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_rqst *rqstp)
{
	struct nfs4_cb_conn *conn = &clp->cl_cb_conn;
	struct sockaddr *sa = svc_addr(rqstp);
	u32 scopeid = rpc_get_scope_id(sa);
	unsigned short expected_family;

	/* Currently, we only support tcp and tcp6 for the callback channel */
	if (se->se_callback_netid_len == 3 &&
	    !memcmp(se->se_callback_netid_val, "tcp", 3))
		expected_family = AF_INET;
	else if (se->se_callback_netid_len == 4 &&
		 !memcmp(se->se_callback_netid_val, "tcp6", 4))
		expected_family = AF_INET6;
	else
		goto out_err;

	conn->cb_addrlen = rpc_uaddr2sockaddr(clp->net, se->se_callback_addr_val,
					    se->se_callback_addr_len,
					    (struct sockaddr *)&conn->cb_addr,
					    sizeof(conn->cb_addr));

	/* Reject unparseable uaddrs and netid/address-family mismatches. */
	if (!conn->cb_addrlen || conn->cb_addr.ss_family != expected_family)
		goto out_err;

	/* A link-local IPv6 callback address needs the request's scope id. */
	if (conn->cb_addr.ss_family == AF_INET6)
		((struct sockaddr_in6 *)&conn->cb_addr)->sin6_scope_id = scopeid;

	conn->cb_prog = se->se_callback_prog;
	conn->cb_ident = se->se_callback_ident;
	/* Source callbacks from the address the client reached us on. */
	memcpy(&conn->cb_saddr, &rqstp->rq_daddr, rqstp->rq_daddrlen);
	return;
out_err:
	conn->cb_addr.ss_family = AF_UNSPEC;
	conn->cb_addrlen = 0;
	dprintk(KERN_INFO "NFSD: this client (clientid %08x/%08x) "
		"will not receive delegations\n",
		clp->cl_clientid.cl_boot, clp->cl_clientid.cl_id);

	return;
}
1477 1477
1478 /* 1478 /*
1479 * Cache a reply. nfsd4_check_drc_limit() has bounded the cache size. 1479 * Cache a reply. nfsd4_check_drc_limit() has bounded the cache size.
1480 */ 1480 */
1481 void 1481 void
1482 nfsd4_store_cache_entry(struct nfsd4_compoundres *resp) 1482 nfsd4_store_cache_entry(struct nfsd4_compoundres *resp)
1483 { 1483 {
1484 struct nfsd4_slot *slot = resp->cstate.slot; 1484 struct nfsd4_slot *slot = resp->cstate.slot;
1485 unsigned int base; 1485 unsigned int base;
1486 1486
1487 dprintk("--> %s slot %p\n", __func__, slot); 1487 dprintk("--> %s slot %p\n", __func__, slot);
1488 1488
1489 slot->sl_opcnt = resp->opcnt; 1489 slot->sl_opcnt = resp->opcnt;
1490 slot->sl_status = resp->cstate.status; 1490 slot->sl_status = resp->cstate.status;
1491 1491
1492 slot->sl_flags |= NFSD4_SLOT_INITIALIZED; 1492 slot->sl_flags |= NFSD4_SLOT_INITIALIZED;
1493 if (nfsd4_not_cached(resp)) { 1493 if (nfsd4_not_cached(resp)) {
1494 slot->sl_datalen = 0; 1494 slot->sl_datalen = 0;
1495 return; 1495 return;
1496 } 1496 }
1497 slot->sl_datalen = (char *)resp->p - (char *)resp->cstate.datap; 1497 slot->sl_datalen = (char *)resp->p - (char *)resp->cstate.datap;
1498 base = (char *)resp->cstate.datap - 1498 base = (char *)resp->cstate.datap -
1499 (char *)resp->xbuf->head[0].iov_base; 1499 (char *)resp->xbuf->head[0].iov_base;
1500 if (read_bytes_from_xdr_buf(resp->xbuf, base, slot->sl_data, 1500 if (read_bytes_from_xdr_buf(resp->xbuf, base, slot->sl_data,
1501 slot->sl_datalen)) 1501 slot->sl_datalen))
1502 WARN("%s: sessions DRC could not cache compound\n", __func__); 1502 WARN("%s: sessions DRC could not cache compound\n", __func__);
1503 return; 1503 return;
1504 } 1504 }
1505 1505
1506 /* 1506 /*
1507 * Encode the replay sequence operation from the slot values. 1507 * Encode the replay sequence operation from the slot values.
1508 * If cachethis is FALSE encode the uncached rep error on the next 1508 * If cachethis is FALSE encode the uncached rep error on the next
1509 * operation which sets resp->p and increments resp->opcnt for 1509 * operation which sets resp->p and increments resp->opcnt for
1510 * nfs4svc_encode_compoundres. 1510 * nfs4svc_encode_compoundres.
1511 * 1511 *
1512 */ 1512 */
1513 static __be32 1513 static __be32
1514 nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args, 1514 nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args,
1515 struct nfsd4_compoundres *resp) 1515 struct nfsd4_compoundres *resp)
1516 { 1516 {
1517 struct nfsd4_op *op; 1517 struct nfsd4_op *op;
1518 struct nfsd4_slot *slot = resp->cstate.slot; 1518 struct nfsd4_slot *slot = resp->cstate.slot;
1519 1519
1520 /* Encode the replayed sequence operation */ 1520 /* Encode the replayed sequence operation */
1521 op = &args->ops[resp->opcnt - 1]; 1521 op = &args->ops[resp->opcnt - 1];
1522 nfsd4_encode_operation(resp, op); 1522 nfsd4_encode_operation(resp, op);
1523 1523
1524 /* Return nfserr_retry_uncached_rep in next operation. */ 1524 /* Return nfserr_retry_uncached_rep in next operation. */
1525 if (args->opcnt > 1 && !(slot->sl_flags & NFSD4_SLOT_CACHETHIS)) { 1525 if (args->opcnt > 1 && !(slot->sl_flags & NFSD4_SLOT_CACHETHIS)) {
1526 op = &args->ops[resp->opcnt++]; 1526 op = &args->ops[resp->opcnt++];
1527 op->status = nfserr_retry_uncached_rep; 1527 op->status = nfserr_retry_uncached_rep;
1528 nfsd4_encode_operation(resp, op); 1528 nfsd4_encode_operation(resp, op);
1529 } 1529 }
1530 return op->status; 1530 return op->status;
1531 } 1531 }
1532 1532
/*
 * The sequence operation is not cached because we can use the slot and
 * session values.
 *
 * Re-encodes the sequence op, then splices the cached reply data saved
 * by nfsd4_store_cache_entry() back into the response buffer.
 */
__be32
nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp,
			 struct nfsd4_sequence *seq)
{
	struct nfsd4_slot *slot = resp->cstate.slot;
	__be32 status;

	dprintk("--> %s slot %p\n", __func__, slot);

	/* Either returns 0 or nfserr_retry_uncached */
	status = nfsd4_enc_sequence_replay(resp->rqstp->rq_argp, resp);
	if (status == nfserr_retry_uncached_rep)
		return status;

	/* The sequence operation has been encoded, cstate->datap set. */
	memcpy(resp->cstate.datap, slot->sl_data, slot->sl_datalen);

	/* Restore op count and advance the encode pointer past the data. */
	resp->opcnt = slot->sl_opcnt;
	resp->p = resp->cstate.datap + XDR_QUADLEN(slot->sl_datalen);
	status = slot->sl_status;

	return status;
}
1560 1560
1561 /* 1561 /*
1562 * Set the exchange_id flags returned by the server. 1562 * Set the exchange_id flags returned by the server.
1563 */ 1563 */
1564 static void 1564 static void
1565 nfsd4_set_ex_flags(struct nfs4_client *new, struct nfsd4_exchange_id *clid) 1565 nfsd4_set_ex_flags(struct nfs4_client *new, struct nfsd4_exchange_id *clid)
1566 { 1566 {
1567 /* pNFS is not supported */ 1567 /* pNFS is not supported */
1568 new->cl_exchange_flags |= EXCHGID4_FLAG_USE_NON_PNFS; 1568 new->cl_exchange_flags |= EXCHGID4_FLAG_USE_NON_PNFS;
1569 1569
1570 /* Referrals are supported, Migration is not. */ 1570 /* Referrals are supported, Migration is not. */
1571 new->cl_exchange_flags |= EXCHGID4_FLAG_SUPP_MOVED_REFER; 1571 new->cl_exchange_flags |= EXCHGID4_FLAG_SUPP_MOVED_REFER;
1572 1572
1573 /* set the wire flags to return to client. */ 1573 /* set the wire flags to return to client. */
1574 clid->flags = new->cl_exchange_flags; 1574 clid->flags = new->cl_exchange_flags;
1575 } 1575 }
1576 1576
1577 static bool client_has_state(struct nfs4_client *clp) 1577 static bool client_has_state(struct nfs4_client *clp)
1578 { 1578 {
1579 /* 1579 /*
1580 * Note clp->cl_openowners check isn't quite right: there's no 1580 * Note clp->cl_openowners check isn't quite right: there's no
1581 * need to count owners without stateid's. 1581 * need to count owners without stateid's.
1582 * 1582 *
1583 * Also note we should probably be using this in 4.0 case too. 1583 * Also note we should probably be using this in 4.0 case too.
1584 */ 1584 */
1585 return !list_empty(&clp->cl_openowners) 1585 return !list_empty(&clp->cl_openowners)
1586 || !list_empty(&clp->cl_delegations) 1586 || !list_empty(&clp->cl_delegations)
1587 || !list_empty(&clp->cl_sessions); 1587 || !list_empty(&clp->cl_sessions);
1588 } 1588 }
1589 1589
/*
 * EXCHANGE_ID (RFC 5661 section 18.35): identify the client to the
 * server and negotiate NFSv4.1 features.  Only SP4_NONE state
 * protection is supported.  The "case N" comments below refer to the
 * client-matching table in RFC 5661 section 18.35.4.
 */
__be32
nfsd4_exchange_id(struct svc_rqst *rqstp,
		  struct nfsd4_compound_state *cstate,
		  struct nfsd4_exchange_id *exid)
{
	struct nfs4_client *unconf, *conf, *new;
	__be32 status;
	char addr_str[INET6_ADDRSTRLEN];
	nfs4_verifier verf = exid->verifier;
	struct sockaddr *sa = svc_addr(rqstp);
	bool update = exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	rpc_ntop(sa, addr_str, sizeof(addr_str));
	dprintk("%s rqstp=%p exid=%p clname.len=%u clname.data=%p "
		"ip_addr=%s flags %x, spa_how %d\n",
		__func__, rqstp, exid, exid->clname.len, exid->clname.data,
		addr_str, exid->flags, exid->spa_how);

	if (exid->flags & ~EXCHGID4_FLAG_MASK_A)
		return nfserr_inval;

	/* Currently only support SP4_NONE */
	switch (exid->spa_how) {
	case SP4_NONE:
		break;
	default:				/* checked by xdr code */
		WARN_ON_ONCE(1);
		/* fall through */
	case SP4_SSV:
	case SP4_MACH_CRED:
		return nfserr_serverfault;	/* no excuse :-/ */
	}

	/* Cases below refer to rfc 5661 section 18.35.4: */
	nfs4_lock_state();
	conf = find_confirmed_client_by_name(&exid->clname, nn);
	if (conf) {
		bool creds_match = same_creds(&conf->cl_cred, &rqstp->rq_cred);
		bool verfs_match = same_verf(&verf, &conf->cl_verifier);

		if (update) {
			if (!clp_used_exchangeid(conf)) { /* buggy client */
				status = nfserr_inval;
				goto out;
			}
			if (!creds_match) { /* case 9 */
				status = nfserr_perm;
				goto out;
			}
			if (!verfs_match) { /* case 8 */
				status = nfserr_not_same;
				goto out;
			}
			/* case 6 */
			exid->flags |= EXCHGID4_FLAG_CONFIRMED_R;
			new = conf;
			goto out_copy;
		}
		if (!creds_match) { /* case 3 */
			if (client_has_state(conf)) {
				status = nfserr_clid_inuse;
				goto out;
			}
			expire_client(conf);
			goto out_new;
		}
		if (verfs_match) { /* case 2 */
			conf->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
			new = conf;
			goto out_copy;
		}
		/* case 5, client reboot */
		goto out_new;
	}

	if (update) { /* case 7 */
		status = nfserr_noent;
		goto out;
	}

	unconf  = find_unconfirmed_client_by_name(&exid->clname, nn);
	if (unconf) /* case 4, possible retry or client restart */
		expire_client(unconf);

	/* case 1 (normal case) */
out_new:
	new = create_client(exid->clname, rqstp, &verf);
	if (new == NULL) {
		status = nfserr_jukebox;
		goto out;
	}
	new->cl_minorversion = 1;

	gen_clid(new, nn);
	add_to_unconfirmed(new);
out_copy:
	/* Return the clientid and next expected create_session seqid. */
	exid->clientid.cl_boot = new->cl_clientid.cl_boot;
	exid->clientid.cl_id = new->cl_clientid.cl_id;

	exid->seqid = new->cl_cs_slot.sl_seqid + 1;
	nfsd4_set_ex_flags(new, exid);

	dprintk("nfsd4_exchange_id seqid %d flags %x\n",
		new->cl_cs_slot.sl_seqid, new->cl_exchange_flags);
	status = nfs_ok;

out:
	nfs4_unlock_state();
	return status;
}
1700 1700
1701 static __be32 1701 static __be32
1702 check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse) 1702 check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse)
1703 { 1703 {
1704 dprintk("%s enter. seqid %d slot_seqid %d\n", __func__, seqid, 1704 dprintk("%s enter. seqid %d slot_seqid %d\n", __func__, seqid,
1705 slot_seqid); 1705 slot_seqid);
1706 1706
1707 /* The slot is in use, and no response has been sent. */ 1707 /* The slot is in use, and no response has been sent. */
1708 if (slot_inuse) { 1708 if (slot_inuse) {
1709 if (seqid == slot_seqid) 1709 if (seqid == slot_seqid)
1710 return nfserr_jukebox; 1710 return nfserr_jukebox;
1711 else 1711 else
1712 return nfserr_seq_misordered; 1712 return nfserr_seq_misordered;
1713 } 1713 }
1714 /* Note unsigned 32-bit arithmetic handles wraparound: */ 1714 /* Note unsigned 32-bit arithmetic handles wraparound: */
1715 if (likely(seqid == slot_seqid + 1)) 1715 if (likely(seqid == slot_seqid + 1))
1716 return nfs_ok; 1716 return nfs_ok;
1717 if (seqid == slot_seqid) 1717 if (seqid == slot_seqid)
1718 return nfserr_replay_cache; 1718 return nfserr_replay_cache;
1719 return nfserr_seq_misordered; 1719 return nfserr_seq_misordered;
1720 } 1720 }
1721 1721
1722 /* 1722 /*
1723 * Cache the create session result into the create session single DRC 1723 * Cache the create session result into the create session single DRC
1724 * slot cache by saving the xdr structure. sl_seqid has been set. 1724 * slot cache by saving the xdr structure. sl_seqid has been set.
1725 * Do this for solo or embedded create session operations. 1725 * Do this for solo or embedded create session operations.
1726 */ 1726 */
1727 static void 1727 static void
1728 nfsd4_cache_create_session(struct nfsd4_create_session *cr_ses, 1728 nfsd4_cache_create_session(struct nfsd4_create_session *cr_ses,
1729 struct nfsd4_clid_slot *slot, __be32 nfserr) 1729 struct nfsd4_clid_slot *slot, __be32 nfserr)
1730 { 1730 {
1731 slot->sl_status = nfserr; 1731 slot->sl_status = nfserr;
1732 memcpy(&slot->sl_cr_ses, cr_ses, sizeof(*cr_ses)); 1732 memcpy(&slot->sl_cr_ses, cr_ses, sizeof(*cr_ses));
1733 } 1733 }
1734 1734
1735 static __be32 1735 static __be32
1736 nfsd4_replay_create_session(struct nfsd4_create_session *cr_ses, 1736 nfsd4_replay_create_session(struct nfsd4_create_session *cr_ses,
1737 struct nfsd4_clid_slot *slot) 1737 struct nfsd4_clid_slot *slot)
1738 { 1738 {
1739 memcpy(cr_ses, &slot->sl_cr_ses, sizeof(*cr_ses)); 1739 memcpy(cr_ses, &slot->sl_cr_ses, sizeof(*cr_ses));
1740 return slot->sl_status; 1740 return slot->sl_status;
1741 } 1741 }
1742 1742
/* Smallest request/reply the fore channel must accommodate: a compound
 * holding a single SEQUENCE op with AUTH_NULL credentials. */
#define NFSD_MIN_REQ_HDR_SEQ_SZ	((\
			2 * 2 + /* credential,verifier: AUTH_NULL, length 0 */ \
			1 +	/* MIN tag is length with zero, only length */ \
			3 +	/* version, opcount, opcode */ \
			XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
				/* seqid, slotID, slotID, cache */ \
			4 ) * sizeof(__be32))

#define NFSD_MIN_RESP_HDR_SEQ_SZ ((\
			2 +	/* verifier: AUTH_NULL, length 0 */\
			1 +	/* status */ \
			1 +	/* MIN tag is length with zero, only length */ \
			3 +	/* opcount, opcode, opstatus*/ \
			XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
				/* seqid, slotID, slotID, slotID, status */ \
			5 ) * sizeof(__be32))

/* True iff the client's requested channel sizes are too small to carry
 * even a minimal SEQUENCE compound (caller returns nfserr_toosmall). */
static bool check_forechannel_attrs(struct nfsd4_channel_attrs fchannel)
{
	return fchannel.maxreq_sz < NFSD_MIN_REQ_HDR_SEQ_SZ
		|| fchannel.maxresp_sz < NFSD_MIN_RESP_HDR_SEQ_SZ;
}
1765 1765
/*
 * CREATE_SESSION (RFC 5661 section 18.36): create a session for a
 * clientid previously handed out by EXCHANGE_ID, confirming the client
 * if it was still unconfirmed.  The result is cached in the client's
 * single slot (cl_cs_slot) so a retransmission can be replayed.
 */
__be32
nfsd4_create_session(struct svc_rqst *rqstp,
		     struct nfsd4_compound_state *cstate,
		     struct nfsd4_create_session *cr_ses)
{
	struct sockaddr *sa = svc_addr(rqstp);
	struct nfs4_client *conf, *unconf;
	struct nfsd4_session *new;
	struct nfsd4_conn *conn;
	struct nfsd4_clid_slot *cs_slot = NULL;
	__be32 status = 0;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	if (cr_ses->flags & ~SESSION4_FLAG_MASK_A)
		return nfserr_inval;
	if (check_forechannel_attrs(cr_ses->fore_channel))
		return nfserr_toosmall;
	/* Allocate session and connection before taking the state lock. */
	new = alloc_session(&cr_ses->fore_channel, nn);
	if (!new)
		return nfserr_jukebox;
	status = nfserr_jukebox;
	conn = alloc_conn_from_crses(rqstp, cr_ses);
	if (!conn)
		goto out_free_session;

	nfs4_lock_state();
	unconf = find_unconfirmed_client(&cr_ses->clientid, true, nn);
	conf = find_confirmed_client(&cr_ses->clientid, true, nn);

	if (conf) {
		/* Confirmed client: check the slot for a replay first. */
		cs_slot = &conf->cl_cs_slot;
		status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
		if (status == nfserr_replay_cache) {
			status = nfsd4_replay_create_session(cr_ses, cs_slot);
			goto out_free_conn;
		} else if (cr_ses->seqid != cs_slot->sl_seqid + 1) {
			status = nfserr_seq_misordered;
			goto out_free_conn;
		}
	} else if (unconf) {
		struct nfs4_client *old;
		/* Creating a session confirms the client, so the principal
		 * and address must match the EXCHANGE_ID sender. */
		if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) ||
		    !rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) {
			status = nfserr_clid_inuse;
			goto out_free_conn;
		}
		cs_slot = &unconf->cl_cs_slot;
		status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
		if (status) {
			/* an unconfirmed replay returns misordered */
			status = nfserr_seq_misordered;
			goto out_free_conn;
		}
		/* Confirming this client replaces any same-named one. */
		old = find_confirmed_client_by_name(&unconf->cl_name, nn);
		if (old)
			expire_client(old);
		move_to_confirmed(unconf);
		conf = unconf;
	} else {
		status = nfserr_stale_clientid;
		goto out_free_conn;
	}
	status = nfs_ok;
	/*
	 * We do not support RDMA or persistent sessions
	 */
	cr_ses->flags &= ~SESSION4_PERSIST;
	cr_ses->flags &= ~SESSION4_RDMA;

	init_session(rqstp, new, conf, cr_ses);
	nfsd4_init_conn(rqstp, conn, new);

	/* Copy the negotiated session id and channel attrs to the reply. */
	memcpy(cr_ses->sessionid.data, new->se_sessionid.data,
	       NFS4_MAX_SESSIONID_LEN);
	memcpy(&cr_ses->fore_channel, &new->se_fchannel,
		sizeof(struct nfsd4_channel_attrs));
	cs_slot->sl_seqid++;
	cr_ses->seqid = cs_slot->sl_seqid;

	/* cache solo and embedded create sessions under the state lock */
	nfsd4_cache_create_session(cr_ses, cs_slot, status);
out:
	nfs4_unlock_state();
	dprintk("%s returns %d\n", __func__, ntohl(status));
	return status;
out_free_conn:
	free_conn(conn);
out_free_session:
	__free_session(new);
	goto out;
}
1857 1857
1858 static bool nfsd4_last_compound_op(struct svc_rqst *rqstp) 1858 static bool nfsd4_last_compound_op(struct svc_rqst *rqstp)
1859 { 1859 {
1860 struct nfsd4_compoundres *resp = rqstp->rq_resp; 1860 struct nfsd4_compoundres *resp = rqstp->rq_resp;
1861 struct nfsd4_compoundargs *argp = rqstp->rq_argp; 1861 struct nfsd4_compoundargs *argp = rqstp->rq_argp;
1862 1862
1863 return argp->opcnt == resp->opcnt; 1863 return argp->opcnt == resp->opcnt;
1864 } 1864 }
1865 1865
1866 static __be32 nfsd4_map_bcts_dir(u32 *dir) 1866 static __be32 nfsd4_map_bcts_dir(u32 *dir)
1867 { 1867 {
1868 switch (*dir) { 1868 switch (*dir) {
1869 case NFS4_CDFC4_FORE: 1869 case NFS4_CDFC4_FORE:
1870 case NFS4_CDFC4_BACK: 1870 case NFS4_CDFC4_BACK:
1871 return nfs_ok; 1871 return nfs_ok;
1872 case NFS4_CDFC4_FORE_OR_BOTH: 1872 case NFS4_CDFC4_FORE_OR_BOTH:
1873 case NFS4_CDFC4_BACK_OR_BOTH: 1873 case NFS4_CDFC4_BACK_OR_BOTH:
1874 *dir = NFS4_CDFC4_BOTH; 1874 *dir = NFS4_CDFC4_BOTH;
1875 return nfs_ok; 1875 return nfs_ok;
1876 }; 1876 };
1877 return nfserr_inval; 1877 return nfserr_inval;
1878 } 1878 }
1879 1879
/*
 * BACKCHANNEL_CTL: update the backchannel RPC program number and
 * security parameters of the current session, then reprobe the
 * callback path so the new settings take effect.
 */
__be32 nfsd4_backchannel_ctl(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_backchannel_ctl *bc)
{
	struct nfsd4_session *session = cstate->session;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	/* client_lock guards the session's callback parameters. */
	spin_lock(&nn->client_lock);
	session->se_cb_prog = bc->bc_cb_program;
	session->se_cb_sec = bc->bc_cb_sec;
	spin_unlock(&nn->client_lock);

	nfsd4_probe_callback(session->se_client);

	return nfs_ok;
}
1894 1894
/*
 * BIND_CONN_TO_SESSION: associate the connection this request arrived
 * on with the named session, in the requested direction(s).  Must be
 * the only op in its compound.
 */
__be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
		     struct nfsd4_compound_state *cstate,
		     struct nfsd4_bind_conn_to_session *bcts)
{
	__be32 status;
	struct nfsd4_conn *conn;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	if (!nfsd4_last_compound_op(rqstp))
		return nfserr_not_only_op;
	spin_lock(&nn->client_lock);
	cstate->session = find_in_sessionid_hashtbl(&bcts->sessionid, SVC_NET(rqstp));
	/* Sorta weird: we only need the refcnt'ing because new_conn acquires
	 * client_lock iself: */
	if (cstate->session) {
		nfsd4_get_session(cstate->session);
		atomic_inc(&cstate->session->se_client->cl_refcount);
	}
	spin_unlock(&nn->client_lock);
	if (!cstate->session)
		return nfserr_badsession;

	/* NOTE(review): the session/client references taken above are not
	 * released on the error returns below — presumably compound
	 * teardown drops whatever cstate->session holds; verify against
	 * the nfsd4_proc_compound cleanup path. */
	status = nfsd4_map_bcts_dir(&bcts->dir);
	if (status)
		return status;
	conn = alloc_conn(rqstp, bcts->dir);
	if (!conn)
		return nfserr_jukebox;
	nfsd4_init_conn(rqstp, conn, cstate->session);
	return nfs_ok;
}
1926 1926
1927 static bool nfsd4_compound_in_session(struct nfsd4_session *session, struct nfs4_sessionid *sid) 1927 static bool nfsd4_compound_in_session(struct nfsd4_session *session, struct nfs4_sessionid *sid)
1928 { 1928 {
1929 if (!session) 1929 if (!session)
1930 return 0; 1930 return 0;
1931 return !memcmp(sid, &session->se_sessionid, sizeof(*sid)); 1931 return !memcmp(sid, &session->se_sessionid, sizeof(*sid));
1932 } 1932 }
1933 1933
/*
 * DESTROY_SESSION: unhash the named session, quiesce its callback
 * channel, then drop its connections and final reference.
 */
__be32
nfsd4_destroy_session(struct svc_rqst *r,
		      struct nfsd4_compound_state *cstate,
		      struct nfsd4_destroy_session *sessionid)
{
	struct nfsd4_session *ses;
	__be32 status = nfserr_badsession;
	struct nfsd_net *nn = net_generic(SVC_NET(r), nfsd_net_id);

	/* Notes:
	 * - The confirmed nfs4_client->cl_sessionid holds destroyed sessinid
	 * - Should we return nfserr_back_chan_busy if waiting for
	 *   callbacks on to-be-destroyed session?
	 * - Do we need to clear any callback info from previous session?
	 */

	/* A session may destroy itself only as the compound's sole op. */
	if (nfsd4_compound_in_session(cstate->session, &sessionid->sessionid)) {
		if (!nfsd4_last_compound_op(r))
			return nfserr_not_only_op;
	}
	dump_sessionid(__func__, &sessionid->sessionid);
	spin_lock(&nn->client_lock);
	ses = find_in_sessionid_hashtbl(&sessionid->sessionid, SVC_NET(r));
	if (!ses) {
		spin_unlock(&nn->client_lock);
		goto out;
	}

	unhash_session(ses);
	spin_unlock(&nn->client_lock);

	/* Flush in-flight callbacks outside client_lock. */
	nfs4_lock_state();
	nfsd4_probe_callback_sync(ses->se_client);
	nfs4_unlock_state();

	spin_lock(&nn->client_lock);
	nfsd4_del_conns(ses);
	nfsd4_put_session_locked(ses);
	spin_unlock(&nn->client_lock);
	status = nfs_ok;
out:
	dprintk("%s returns %d\n", __func__, ntohl(status));
	return status;
}
1978 1978
1979 static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s) 1979 static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s)
1980 { 1980 {
1981 struct nfsd4_conn *c; 1981 struct nfsd4_conn *c;
1982 1982
1983 list_for_each_entry(c, &s->se_conns, cn_persession) { 1983 list_for_each_entry(c, &s->se_conns, cn_persession) {
1984 if (c->cn_xprt == xpt) { 1984 if (c->cn_xprt == xpt) {
1985 return c; 1985 return c;
1986 } 1986 }
1987 } 1987 }
1988 return NULL; 1988 return NULL;
1989 } 1989 }
1990 1990
/*
 * Bind the pre-allocated connection @new to session @ses unless the session
 * already has a connection for the same transport.  Takes ownership of @new
 * in either case: it is hashed into the session or freed.
 */
static void nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd4_conn *c;
	int ret;

	spin_lock(&clp->cl_lock);
	c = __nfsd4_find_conn(new->cn_xprt, ses);
	if (c) {
		/* Transport already bound to this session; the spare conn
		 * allocated by the caller is not needed. */
		spin_unlock(&clp->cl_lock);
		free_conn(new);
		return;
	}
	__nfsd4_hash_conn(new, ses);
	spin_unlock(&clp->cl_lock);
	/* Register for transport-down notification outside cl_lock. */
	ret = nfsd4_register_conn(new);
	if (ret)
		/* oops; xprt is already down: */
		nfsd4_conn_lost(&new->cn_xpt_user);
	return;
}
2012 2012
2013 static bool nfsd4_session_too_many_ops(struct svc_rqst *rqstp, struct nfsd4_session *session) 2013 static bool nfsd4_session_too_many_ops(struct svc_rqst *rqstp, struct nfsd4_session *session)
2014 { 2014 {
2015 struct nfsd4_compoundargs *args = rqstp->rq_argp; 2015 struct nfsd4_compoundargs *args = rqstp->rq_argp;
2016 2016
2017 return args->opcnt > session->se_fchannel.maxops; 2017 return args->opcnt > session->se_fchannel.maxops;
2018 } 2018 }
2019 2019
2020 static bool nfsd4_request_too_big(struct svc_rqst *rqstp, 2020 static bool nfsd4_request_too_big(struct svc_rqst *rqstp,
2021 struct nfsd4_session *session) 2021 struct nfsd4_session *session)
2022 { 2022 {
2023 struct xdr_buf *xb = &rqstp->rq_arg; 2023 struct xdr_buf *xb = &rqstp->rq_arg;
2024 2024
2025 return xb->len > session->se_fchannel.maxreq_sz; 2025 return xb->len > session->se_fchannel.maxreq_sz;
2026 } 2026 }
2027 2027
/*
 * SEQUENCE (NFSv4.1, RFC 5661): validate the session/slot/seqid of an
 * incoming compound, serve replays from the slot's reply cache, and bind
 * the request's connection to the session.
 *
 * On success, cstate->slot and cstate->session are set and a session
 * reference is held until the compound finishes.  On a cache hit,
 * cstate->status is set to nfserr_replay_cache so nfsd4_proc_compound can
 * return the cached reply.
 */
__be32
nfsd4_sequence(struct svc_rqst *rqstp,
	       struct nfsd4_compound_state *cstate,
	       struct nfsd4_sequence *seq)
{
	struct nfsd4_compoundres *resp = rqstp->rq_resp;
	struct nfsd4_session *session;
	struct nfsd4_slot *slot;
	struct nfsd4_conn *conn;
	__be32 status;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	/* SEQUENCE must be the first operation of the compound. */
	if (resp->opcnt != 1)
		return nfserr_sequence_pos;

	/*
	 * Will be either used or freed by nfsd4_sequence_check_conn
	 * below.
	 */
	conn = alloc_conn(rqstp, NFS4_CDFC4_FORE);
	if (!conn)
		return nfserr_jukebox;

	spin_lock(&nn->client_lock);
	status = nfserr_badsession;
	session = find_in_sessionid_hashtbl(&seq->sessionid, SVC_NET(rqstp));
	if (!session)
		goto out;

	status = nfserr_too_many_ops;
	if (nfsd4_session_too_many_ops(rqstp, session))
		goto out;

	status = nfserr_req_too_big;
	if (nfsd4_request_too_big(rqstp, session))
		goto out;

	status = nfserr_badslot;
	if (seq->slotid >= session->se_fchannel.maxreqs)
		goto out;

	slot = session->se_slots[seq->slotid];
	dprintk("%s: slotid %d\n", __func__, seq->slotid);

	/* We do not negotiate the number of slots yet, so set the
	 * maxslots to the session maxreqs which is used to encode
	 * sr_highest_slotid and the sr_target_slot id to maxslots */
	seq->maxslots = session->se_fchannel.maxreqs;

	status = check_slot_seqid(seq->seqid, slot->sl_seqid,
					slot->sl_flags & NFSD4_SLOT_INUSE);
	if (status == nfserr_replay_cache) {
		/* Retransmission of a request we've seen on this slot:
		 * replay the cached reply, if one was actually cached. */
		status = nfserr_seq_misordered;
		if (!(slot->sl_flags & NFSD4_SLOT_INITIALIZED))
			goto out;
		cstate->slot = slot;
		cstate->session = session;
		/* Return the cached reply status and set cstate->status
		 * for nfsd4_proc_compound processing */
		status = nfsd4_replay_cache_entry(resp, seq);
		cstate->status = nfserr_replay_cache;
		goto out;
	}
	if (status)
		goto out;

	/* Hand conn off; it is consumed (hashed or freed) by the callee. */
	nfsd4_sequence_check_conn(conn, session);
	conn = NULL;

	/* Success! bump slot seqid */
	slot->sl_seqid = seq->seqid;
	slot->sl_flags |= NFSD4_SLOT_INUSE;
	if (seq->cachethis)
		slot->sl_flags |= NFSD4_SLOT_CACHETHIS;
	else
		slot->sl_flags &= ~NFSD4_SLOT_CACHETHIS;

	cstate->slot = slot;
	cstate->session = session;

out:
	/* Hold a session reference until done processing the compound. */
	if (cstate->session) {
		struct nfs4_client *clp = session->se_client;

		nfsd4_get_session(cstate->session);
		atomic_inc(&clp->cl_refcount);
		/* Tell the client about backchannel trouble via the
		 * SEQUENCE status flags. */
		switch (clp->cl_cb_state) {
		case NFSD4_CB_DOWN:
			seq->status_flags = SEQ4_STATUS_CB_PATH_DOWN;
			break;
		case NFSD4_CB_FAULT:
			seq->status_flags = SEQ4_STATUS_BACKCHANNEL_FAULT;
			break;
		default:
			seq->status_flags = 0;
		}
	}
	/* kfree(NULL) is a no-op: conn is NULL if it was handed off above. */
	kfree(conn);
	spin_unlock(&nn->client_lock);
	dprintk("%s: return %d\n", __func__, ntohl(status));
	return status;
}
2131 2131
/*
 * DESTROY_CLIENTID (NFSv4.1, RFC 5661 section 18.50): expire the client
 * identified by @dc->clientid, whether confirmed or not.
 *
 * Returns nfserr_clientid_busy if the client still has state or is the
 * client of the session carrying this request, nfserr_stale_clientid if
 * no such client exists, nfs_ok otherwise.
 */
__be32
nfsd4_destroy_clientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_destroy_clientid *dc)
{
	struct nfs4_client *conf, *unconf, *clp;
	__be32 status = 0;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	nfs4_lock_state();
	unconf = find_unconfirmed_client(&dc->clientid, true, nn);
	conf = find_confirmed_client(&dc->clientid, true, nn);

	if (conf) {
		clp = conf;

		/* A live client that still holds state cannot be destroyed. */
		if (!is_client_expired(conf) && client_has_state(conf)) {
			status = nfserr_clientid_busy;
			goto out;
		}

		/* rfc5661 18.50.3 */
		if (cstate->session && conf == cstate->session->se_client) {
			status = nfserr_clientid_busy;
			goto out;
		}
	} else if (unconf)
		clp = unconf;
	else {
		status = nfserr_stale_clientid;
		goto out;
	}

	expire_client(clp);
out:
	nfs4_unlock_state();
	dprintk("%s return %d\n", __func__, ntohl(status));
	return status;
}
2169 2169
/*
 * RECLAIM_COMPLETE (NFSv4.1, RFC 5661): the client declares it has finished
 * reclaiming state after a server restart; record that fact stably.
 *
 * Returns nfserr_complete_already on a repeat call for the same client,
 * nfserr_nofilehandle if rca_one_fs is set without a current filehandle,
 * nfserr_stale_clientid if the client has already expired, nfs_ok otherwise.
 */
__be32
nfsd4_reclaim_complete(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_reclaim_complete *rc)
{
	__be32 status = 0;

	if (rc->rca_one_fs) {
		if (!cstate->current_fh.fh_dentry)
			return nfserr_nofilehandle;
		/*
		 * We don't take advantage of the rca_one_fs case.
		 * That's OK, it's optional, we can safely ignore it.
		 */
		return nfs_ok;
	}

	nfs4_lock_state();
	status = nfserr_complete_already;
	/* test_and_set_bit makes the "already done" check and the marking
	 * a single atomic step. */
	if (test_and_set_bit(NFSD4_CLIENT_RECLAIM_COMPLETE,
			     &cstate->session->se_client->cl_flags))
		goto out;

	status = nfserr_stale_clientid;
	if (is_client_expired(cstate->session->se_client))
		/*
		 * The following error isn't really legal.
		 * But we only get here if the client just explicitly
		 * destroyed the client. Surely it no longer cares what
		 * error it gets back on an operation for the dead
		 * client.
		 */
		goto out;

	status = nfs_ok;
	nfsd4_client_record_create(cstate->session->se_client);
out:
	nfs4_unlock_state();
	return status;
}
2208 2208
/*
 * SETCLIENTID (NFSv4.0, RFC 3530 section 14.2.33): establish (or update) a
 * client record for @setclid->se_name and hand back a clientid + confirm
 * verifier for the subsequent SETCLIENTID_CONFIRM.
 *
 * Returns nfserr_clid_inuse if the name is held by a confirmed client with
 * different credentials (or one that used EXCHANGE_ID), nfserr_jukebox on
 * allocation failure, nfs_ok otherwise.
 */
__be32
nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		  struct nfsd4_setclientid *setclid)
{
	struct xdr_netobj 		clname = setclid->se_name;
	nfs4_verifier			clverifier = setclid->se_verf;
	struct nfs4_client	*conf, *unconf, *new;
	__be32 			status;
	struct nfsd_net		*nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	/* Cases below refer to rfc 3530 section 14.2.33: */
	nfs4_lock_state();
	conf = find_confirmed_client_by_name(&clname, nn);
	if (conf) {
		/* case 0: */
		status = nfserr_clid_inuse;
		if (clp_used_exchangeid(conf))
			goto out;
		if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
			char addr_str[INET6_ADDRSTRLEN];
			rpc_ntop((struct sockaddr *) &conf->cl_addr, addr_str,
				 sizeof(addr_str));
			dprintk("NFSD: setclientid: string in use by client "
				"at %s\n", addr_str);
			goto out;
		}
	}
	/* Any pending unconfirmed record for this name is superseded. */
	unconf = find_unconfirmed_client_by_name(&clname, nn);
	if (unconf)
		expire_client(unconf);
	status = nfserr_jukebox;
	new = create_client(clname, rqstp, &clverifier);
	if (new == NULL)
		goto out;
	if (conf && same_verf(&conf->cl_verifier, &clverifier))
		/* case 1: probable callback update */
		copy_clid(new, conf);
	else /* case 4 (new client) or cases 2, 3 (client reboot): */
		gen_clid(new, nn);
	new->cl_minorversion = 0;
	gen_callback(new, setclid, rqstp);
	add_to_unconfirmed(new);
	setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot;
	setclid->se_clientid.cl_id = new->cl_clientid.cl_id;
	memcpy(setclid->se_confirm.data, new->cl_confirm.data, sizeof(setclid->se_confirm.data));
	status = nfs_ok;
out:
	nfs4_unlock_state();
	return status;
}
2259 2259
2260 2260
/*
 * SETCLIENTID_CONFIRM (NFSv4.0, RFC 3530 section 14.2.34): confirm a
 * clientid previously handed out by SETCLIENTID, promoting the unconfirmed
 * record (and retiring any older confirmed record with the same name).
 */
__be32
nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
			 struct nfsd4_compound_state *cstate,
			 struct nfsd4_setclientid_confirm *setclientid_confirm)
{
	struct nfs4_client *conf, *unconf;
	nfs4_verifier confirm = setclientid_confirm->sc_confirm;
	clientid_t * clid = &setclientid_confirm->sc_clientid;
	__be32 status;
	struct nfsd_net	*nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	if (STALE_CLIENTID(clid, nn))
		return nfserr_stale_clientid;
	nfs4_lock_state();

	conf = find_confirmed_client(clid, false, nn);
	unconf = find_unconfirmed_client(clid, false, nn);
	/*
	 * We try hard to give out unique clientid's, so if we get an
	 * attempt to confirm the same clientid with a different cred,
	 * there's a bug somewhere.  Let's charitably assume it's our
	 * bug.
	 */
	status = nfserr_serverfault;
	if (unconf && !same_creds(&unconf->cl_cred, &rqstp->rq_cred))
		goto out;
	if (conf && !same_creds(&conf->cl_cred, &rqstp->rq_cred))
		goto out;
	/* cases below refer to rfc 3530 section 14.2.34: */
	if (!unconf || !same_verf(&confirm, &unconf->cl_confirm)) {
		if (conf && !unconf) /* case 2: probable retransmit */
			status = nfs_ok;
		else /* case 4: client hasn't noticed we rebooted yet? */
			status = nfserr_stale_clientid;
		goto out;
	}
	status = nfs_ok;
	if (conf) { /* case 1: callback update */
		nfsd4_change_callback(conf, &unconf->cl_cb_conn);
		nfsd4_probe_callback(conf);
		expire_client(unconf);
	} else { /* case 3: normal case; new or rebooted client */
		/* An older confirmed client with the same name (but a new
		 * clientid) is replaced by the one being confirmed. */
		conf = find_confirmed_client_by_name(&unconf->cl_name, nn);
		if (conf)
			expire_client(conf);
		move_to_confirmed(unconf);
		nfsd4_probe_callback(unconf);
	}
out:
	nfs4_unlock_state();
	return status;
}
2313 2313
2314 static struct nfs4_file *nfsd4_alloc_file(void) 2314 static struct nfs4_file *nfsd4_alloc_file(void)
2315 { 2315 {
2316 return kmem_cache_alloc(file_slab, GFP_KERNEL); 2316 return kmem_cache_alloc(file_slab, GFP_KERNEL);
2317 } 2317 }
2318 2318
2319 /* OPEN Share state helper functions */ 2319 /* OPEN Share state helper functions */
/*
 * Initialize a freshly allocated nfs4_file for inode @ino and publish it
 * in file_hashtbl.  Takes an inode reference via igrab().
 */
static void nfsd4_init_file(struct nfs4_file *fp, struct inode *ino)
{
	unsigned int hashval = file_hashval(ino);

	/* One reference for the hash table / caller. */
	atomic_set(&fp->fi_ref, 1);
	INIT_LIST_HEAD(&fp->fi_hash);
	INIT_LIST_HEAD(&fp->fi_stateids);
	INIT_LIST_HEAD(&fp->fi_delegations);
	fp->fi_inode = igrab(ino);
	fp->fi_had_conflict = false;
	fp->fi_lease = NULL;
	memset(fp->fi_fds, 0, sizeof(fp->fi_fds));
	memset(fp->fi_access, 0, sizeof(fp->fi_access));
	/* Publish only after the struct is fully initialized. */
	spin_lock(&recall_lock);
	list_add(&fp->fi_hash, &file_hashtbl[hashval]);
	spin_unlock(&recall_lock);
}
2337 2337
2338 static void 2338 static void
2339 nfsd4_free_slab(struct kmem_cache **slab) 2339 nfsd4_free_slab(struct kmem_cache **slab)
2340 { 2340 {
2341 if (*slab == NULL) 2341 if (*slab == NULL)
2342 return; 2342 return;
2343 kmem_cache_destroy(*slab); 2343 kmem_cache_destroy(*slab);
2344 *slab = NULL; 2344 *slab = NULL;
2345 } 2345 }
2346 2346
2347 void 2347 void
2348 nfsd4_free_slabs(void) 2348 nfsd4_free_slabs(void)
2349 { 2349 {
2350 nfsd4_free_slab(&openowner_slab); 2350 nfsd4_free_slab(&openowner_slab);
2351 nfsd4_free_slab(&lockowner_slab); 2351 nfsd4_free_slab(&lockowner_slab);
2352 nfsd4_free_slab(&file_slab); 2352 nfsd4_free_slab(&file_slab);
2353 nfsd4_free_slab(&stateid_slab); 2353 nfsd4_free_slab(&stateid_slab);
2354 nfsd4_free_slab(&deleg_slab); 2354 nfsd4_free_slab(&deleg_slab);
2355 } 2355 }
2356 2356
2357 int 2357 int
2358 nfsd4_init_slabs(void) 2358 nfsd4_init_slabs(void)
2359 { 2359 {
2360 openowner_slab = kmem_cache_create("nfsd4_openowners", 2360 openowner_slab = kmem_cache_create("nfsd4_openowners",
2361 sizeof(struct nfs4_openowner), 0, 0, NULL); 2361 sizeof(struct nfs4_openowner), 0, 0, NULL);
2362 if (openowner_slab == NULL) 2362 if (openowner_slab == NULL)
2363 goto out_nomem; 2363 goto out_nomem;
2364 lockowner_slab = kmem_cache_create("nfsd4_lockowners", 2364 lockowner_slab = kmem_cache_create("nfsd4_lockowners",
2365 sizeof(struct nfs4_lockowner), 0, 0, NULL); 2365 sizeof(struct nfs4_lockowner), 0, 0, NULL);
2366 if (lockowner_slab == NULL) 2366 if (lockowner_slab == NULL)
2367 goto out_nomem; 2367 goto out_nomem;
2368 file_slab = kmem_cache_create("nfsd4_files", 2368 file_slab = kmem_cache_create("nfsd4_files",
2369 sizeof(struct nfs4_file), 0, 0, NULL); 2369 sizeof(struct nfs4_file), 0, 0, NULL);
2370 if (file_slab == NULL) 2370 if (file_slab == NULL)
2371 goto out_nomem; 2371 goto out_nomem;
2372 stateid_slab = kmem_cache_create("nfsd4_stateids", 2372 stateid_slab = kmem_cache_create("nfsd4_stateids",
2373 sizeof(struct nfs4_ol_stateid), 0, 0, NULL); 2373 sizeof(struct nfs4_ol_stateid), 0, 0, NULL);
2374 if (stateid_slab == NULL) 2374 if (stateid_slab == NULL)
2375 goto out_nomem; 2375 goto out_nomem;
2376 deleg_slab = kmem_cache_create("nfsd4_delegations", 2376 deleg_slab = kmem_cache_create("nfsd4_delegations",
2377 sizeof(struct nfs4_delegation), 0, 0, NULL); 2377 sizeof(struct nfs4_delegation), 0, 0, NULL);
2378 if (deleg_slab == NULL) 2378 if (deleg_slab == NULL)
2379 goto out_nomem; 2379 goto out_nomem;
2380 return 0; 2380 return 0;
2381 out_nomem: 2381 out_nomem:
2382 nfsd4_free_slabs(); 2382 nfsd4_free_slabs();
2383 dprintk("nfsd4: out of memory while initializing nfsv4\n"); 2383 dprintk("nfsd4: out of memory while initializing nfsv4\n");
2384 return -ENOMEM; 2384 return -ENOMEM;
2385 } 2385 }
2386 2386
2387 void nfs4_free_openowner(struct nfs4_openowner *oo) 2387 void nfs4_free_openowner(struct nfs4_openowner *oo)
2388 { 2388 {
2389 kfree(oo->oo_owner.so_owner.data); 2389 kfree(oo->oo_owner.so_owner.data);
2390 kmem_cache_free(openowner_slab, oo); 2390 kmem_cache_free(openowner_slab, oo);
2391 } 2391 }
2392 2392
2393 void nfs4_free_lockowner(struct nfs4_lockowner *lo) 2393 void nfs4_free_lockowner(struct nfs4_lockowner *lo)
2394 { 2394 {
2395 kfree(lo->lo_owner.so_owner.data); 2395 kfree(lo->lo_owner.so_owner.data);
2396 kmem_cache_free(lockowner_slab, lo); 2396 kmem_cache_free(lockowner_slab, lo);
2397 } 2397 }
2398 2398
2399 static void init_nfs4_replay(struct nfs4_replay *rp) 2399 static void init_nfs4_replay(struct nfs4_replay *rp)
2400 { 2400 {
2401 rp->rp_status = nfserr_serverfault; 2401 rp->rp_status = nfserr_serverfault;
2402 rp->rp_buflen = 0; 2402 rp->rp_buflen = 0;
2403 rp->rp_buf = rp->rp_ibuf; 2403 rp->rp_buf = rp->rp_ibuf;
2404 } 2404 }
2405 2405
2406 static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj *owner, struct nfs4_client *clp) 2406 static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj *owner, struct nfs4_client *clp)
2407 { 2407 {
2408 struct nfs4_stateowner *sop; 2408 struct nfs4_stateowner *sop;
2409 2409
2410 sop = kmem_cache_alloc(slab, GFP_KERNEL); 2410 sop = kmem_cache_alloc(slab, GFP_KERNEL);
2411 if (!sop) 2411 if (!sop)
2412 return NULL; 2412 return NULL;
2413 2413
2414 sop->so_owner.data = kmemdup(owner->data, owner->len, GFP_KERNEL); 2414 sop->so_owner.data = kmemdup(owner->data, owner->len, GFP_KERNEL);
2415 if (!sop->so_owner.data) { 2415 if (!sop->so_owner.data) {
2416 kmem_cache_free(slab, sop); 2416 kmem_cache_free(slab, sop);
2417 return NULL; 2417 return NULL;
2418 } 2418 }
2419 sop->so_owner.len = owner->len; 2419 sop->so_owner.len = owner->len;
2420 2420
2421 INIT_LIST_HEAD(&sop->so_stateids); 2421 INIT_LIST_HEAD(&sop->so_stateids);
2422 sop->so_client = clp; 2422 sop->so_client = clp;
2423 init_nfs4_replay(&sop->so_replay); 2423 init_nfs4_replay(&sop->so_replay);
2424 return sop; 2424 return sop;
2425 } 2425 }
2426 2426
2427 static void hash_openowner(struct nfs4_openowner *oo, struct nfs4_client *clp, unsigned int strhashval) 2427 static void hash_openowner(struct nfs4_openowner *oo, struct nfs4_client *clp, unsigned int strhashval)
2428 { 2428 {
2429 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 2429 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2430 2430
2431 list_add(&oo->oo_owner.so_strhash, &nn->ownerstr_hashtbl[strhashval]); 2431 list_add(&oo->oo_owner.so_strhash, &nn->ownerstr_hashtbl[strhashval]);
2432 list_add(&oo->oo_perclient, &clp->cl_openowners); 2432 list_add(&oo->oo_perclient, &clp->cl_openowners);
2433 } 2433 }
2434 2434
2435 static struct nfs4_openowner * 2435 static struct nfs4_openowner *
2436 alloc_init_open_stateowner(unsigned int strhashval, struct nfs4_client *clp, struct nfsd4_open *open) { 2436 alloc_init_open_stateowner(unsigned int strhashval, struct nfs4_client *clp, struct nfsd4_open *open) {
2437 struct nfs4_openowner *oo; 2437 struct nfs4_openowner *oo;
2438 2438
2439 oo = alloc_stateowner(openowner_slab, &open->op_owner, clp); 2439 oo = alloc_stateowner(openowner_slab, &open->op_owner, clp);
2440 if (!oo) 2440 if (!oo)
2441 return NULL; 2441 return NULL;
2442 oo->oo_owner.so_is_open_owner = 1; 2442 oo->oo_owner.so_is_open_owner = 1;
2443 oo->oo_owner.so_seqid = open->op_seqid; 2443 oo->oo_owner.so_seqid = open->op_seqid;
2444 oo->oo_flags = NFS4_OO_NEW; 2444 oo->oo_flags = NFS4_OO_NEW;
2445 oo->oo_time = 0; 2445 oo->oo_time = 0;
2446 oo->oo_last_closed_stid = NULL; 2446 oo->oo_last_closed_stid = NULL;
2447 INIT_LIST_HEAD(&oo->oo_close_lru); 2447 INIT_LIST_HEAD(&oo->oo_close_lru);
2448 hash_openowner(oo, clp, strhashval); 2448 hash_openowner(oo, clp, strhashval);
2449 return oo; 2449 return oo;
2450 } 2450 }
2451 2451
/*
 * Initialize an open stateid for @open against file @fp, linking it to the
 * open owner and to the file's stateid list.  Takes a reference on @fp.
 */
static void init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp, struct nfsd4_open *open) {
	struct nfs4_openowner *oo = open->op_openowner;
	struct nfs4_client *clp = oo->oo_owner.so_client;

	init_stid(&stp->st_stid, clp, NFS4_OPEN_STID);
	INIT_LIST_HEAD(&stp->st_lockowners);
	list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids);
	list_add(&stp->st_perfile, &fp->fi_stateids);
	stp->st_stateowner = &oo->oo_owner;
	get_nfs4_file(fp);
	stp->st_file = fp;
	/* Clear the bitmaps before recording the requested modes. */
	stp->st_access_bmap = 0;
	stp->st_deny_bmap = 0;
	set_access(open->op_share_access, stp);
	set_deny(open->op_share_deny, stp);
	stp->st_openstp = NULL;
}
2469 2469
/*
 * Put an open owner on the per-net close LRU and timestamp it, so it can
 * be reaped later rather than freed immediately on CLOSE.
 */
static void
move_to_close_lru(struct nfs4_openowner *oo, struct net *net)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	dprintk("NFSD: move_to_close_lru nfs4_openowner %p\n", oo);

	list_move_tail(&oo->oo_close_lru, &nn->close_lru);
	oo->oo_time = get_seconds();
}
2480 2480
2481 static int 2481 static int
2482 same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner, 2482 same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner,
2483 clientid_t *clid) 2483 clientid_t *clid)
2484 { 2484 {
2485 return (sop->so_owner.len == owner->len) && 2485 return (sop->so_owner.len == owner->len) &&
2486 0 == memcmp(sop->so_owner.data, owner->data, owner->len) && 2486 0 == memcmp(sop->so_owner.data, owner->data, owner->len) &&
2487 (sop->so_client->cl_clientid.cl_id == clid->cl_id); 2487 (sop->so_client->cl_clientid.cl_id == clid->cl_id);
2488 } 2488 }
2489 2489
2490 static struct nfs4_openowner * 2490 static struct nfs4_openowner *
2491 find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open, 2491 find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open,
2492 bool sessions, struct nfsd_net *nn) 2492 bool sessions, struct nfsd_net *nn)
2493 { 2493 {
2494 struct nfs4_stateowner *so; 2494 struct nfs4_stateowner *so;
2495 struct nfs4_openowner *oo; 2495 struct nfs4_openowner *oo;
2496 struct nfs4_client *clp; 2496 struct nfs4_client *clp;
2497 2497
2498 list_for_each_entry(so, &nn->ownerstr_hashtbl[hashval], so_strhash) { 2498 list_for_each_entry(so, &nn->ownerstr_hashtbl[hashval], so_strhash) {
2499 if (!so->so_is_open_owner) 2499 if (!so->so_is_open_owner)
2500 continue; 2500 continue;
2501 if (same_owner_str(so, &open->op_owner, &open->op_clientid)) { 2501 if (same_owner_str(so, &open->op_owner, &open->op_clientid)) {
2502 oo = openowner(so); 2502 oo = openowner(so);
2503 clp = oo->oo_owner.so_client; 2503 clp = oo->oo_owner.so_client;
2504 if ((bool)clp->cl_minorversion != sessions) 2504 if ((bool)clp->cl_minorversion != sessions)
2505 return NULL; 2505 return NULL;
2506 renew_client(oo->oo_owner.so_client); 2506 renew_client(oo->oo_owner.so_client);
2507 return oo; 2507 return oo;
2508 } 2508 }
2509 } 2509 }
2510 return NULL; 2510 return NULL;
2511 } 2511 }
2512 2512
2513 /* search file_hashtbl[] for file */ 2513 /* search file_hashtbl[] for file */
2514 static struct nfs4_file * 2514 static struct nfs4_file *
2515 find_file(struct inode *ino) 2515 find_file(struct inode *ino)
2516 { 2516 {
2517 unsigned int hashval = file_hashval(ino); 2517 unsigned int hashval = file_hashval(ino);
2518 struct nfs4_file *fp; 2518 struct nfs4_file *fp;
2519 2519
2520 spin_lock(&recall_lock); 2520 spin_lock(&recall_lock);
2521 list_for_each_entry(fp, &file_hashtbl[hashval], fi_hash) { 2521 list_for_each_entry(fp, &file_hashtbl[hashval], fi_hash) {
2522 if (fp->fi_inode == ino) { 2522 if (fp->fi_inode == ino) {
2523 get_nfs4_file(fp); 2523 get_nfs4_file(fp);
2524 spin_unlock(&recall_lock); 2524 spin_unlock(&recall_lock);
2525 return fp; 2525 return fp;
2526 } 2526 }
2527 } 2527 }
2528 spin_unlock(&recall_lock); 2528 spin_unlock(&recall_lock);
2529 return NULL; 2529 return NULL;
2530 } 2530 }
2531 2531
2532 /* 2532 /*
2533 * Called to check deny when READ with all zero stateid or 2533 * Called to check deny when READ with all zero stateid or
2534 * WRITE with all zero or all one stateid 2534 * WRITE with all zero or all one stateid
2535 */ 2535 */
2536 static __be32 2536 static __be32
2537 nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type) 2537 nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type)
2538 { 2538 {
2539 struct inode *ino = current_fh->fh_dentry->d_inode; 2539 struct inode *ino = current_fh->fh_dentry->d_inode;
2540 struct nfs4_file *fp; 2540 struct nfs4_file *fp;
2541 struct nfs4_ol_stateid *stp; 2541 struct nfs4_ol_stateid *stp;
2542 __be32 ret; 2542 __be32 ret;
2543 2543
2544 dprintk("NFSD: nfs4_share_conflict\n"); 2544 dprintk("NFSD: nfs4_share_conflict\n");
2545 2545
2546 fp = find_file(ino); 2546 fp = find_file(ino);
2547 if (!fp) 2547 if (!fp)
2548 return nfs_ok; 2548 return nfs_ok;
2549 ret = nfserr_locked; 2549 ret = nfserr_locked;
2550 /* Search for conflicting share reservations */ 2550 /* Search for conflicting share reservations */
2551 list_for_each_entry(stp, &fp->fi_stateids, st_perfile) { 2551 list_for_each_entry(stp, &fp->fi_stateids, st_perfile) {
2552 if (test_deny(deny_type, stp) || 2552 if (test_deny(deny_type, stp) ||
2553 test_deny(NFS4_SHARE_DENY_BOTH, stp)) 2553 test_deny(NFS4_SHARE_DENY_BOTH, stp))
2554 goto out; 2554 goto out;
2555 } 2555 }
2556 ret = nfs_ok; 2556 ret = nfs_ok;
2557 out: 2557 out:
2558 put_nfs4_file(fp); 2558 put_nfs4_file(fp);
2559 return ret; 2559 return ret;
2560 } 2560 }
2561 2561
/*
 * Queue one delegation for recall: reference it, put it on the global
 * recall LRU, and kick off the CB_RECALL callback.  Runs from the lease
 * break path with lock_flocks() held.
 */
static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
{
	/* We're assuming the state code never drops its reference
	 * without first removing the lease. Since we're in this lease
	 * callback (and since the lease code is serialized by the kernel
	 * lock) we know the server hasn't removed the lease yet, we know
	 * it's safe to take a reference: */
	atomic_inc(&dp->dl_count);

	list_add_tail(&dp->dl_recall_lru, &del_recall_lru);

	/* only place dl_time is set. protected by lock_flocks*/
	dp->dl_time = get_seconds();

	nfsd4_cb_recall(dp);
}
2578 2578
/* Called from break_lease() with lock_flocks() held. */
static void nfsd_break_deleg_cb(struct file_lock *fl)
{
	/* fl_owner was stashed as the nfs4_file when the lease was set up
	 * (see nfs4_alloc_init_lease()). */
	struct nfs4_file *fp = (struct nfs4_file *)fl->fl_owner;
	struct nfs4_delegation *dp;

	if (!fp) {
		WARN(1, "(%p)->fl_owner NULL\n", fl);
		return;
	}
	if (fp->fi_had_conflict) {
		/* A break for this file is already in progress. */
		WARN(1, "duplicate break on %p\n", fp);
		return;
	}
	/*
	 * We don't want the locks code to timeout the lease for us;
	 * we'll remove it ourself if a delegation isn't returned
	 * in time:
	 */
	fl->fl_break_time = 0;

	/* Recall every delegation currently held on this file. */
	spin_lock(&recall_lock);
	fp->fi_had_conflict = true;
	list_for_each_entry(dp, &fp->fi_delegations, dl_perfile)
		nfsd_break_one_deleg(dp);
	spin_unlock(&recall_lock);
}
2606 2606
2607 static 2607 static
2608 int nfsd_change_deleg_cb(struct file_lock **onlist, int arg) 2608 int nfsd_change_deleg_cb(struct file_lock **onlist, int arg)
2609 { 2609 {
2610 if (arg & F_UNLCK) 2610 if (arg & F_UNLCK)
2611 return lease_modify(onlist, arg); 2611 return lease_modify(onlist, arg);
2612 else 2612 else
2613 return -EAGAIN; 2613 return -EAGAIN;
2614 } 2614 }
2615 2615
/* Lease (delegation) callbacks invoked by the generic file-locking code. */
static const struct lock_manager_operations nfsd_lease_mng_ops = {
	.lm_break = nfsd_break_deleg_cb,
	.lm_change = nfsd_change_deleg_cb,
};
2620 2620
/*
 * Validate an operation's owner seqid against the owner's current seqid.
 * Sessions (v4.1+) do not use owner seqids, so anything is accepted there.
 * Returns nfserr_replay_me for a retransmission of the previous request
 * (seqid one behind), nfs_ok for the expected seqid, nfserr_bad_seqid
 * otherwise.
 */
static __be32 nfsd4_check_seqid(struct nfsd4_compound_state *cstate, struct nfs4_stateowner *so, u32 seqid)
{
	if (nfsd4_has_session(cstate))
		return nfs_ok;
	if (seqid == so->so_seqid - 1)
		return nfserr_replay_me;
	if (seqid == so->so_seqid)
		return nfs_ok;
	return nfserr_bad_seqid;
}
2631 2631
/*
 * First phase of OPEN processing: validate the clientid, find or create
 * the open owner, and pre-allocate the nfs4_file and stateid that
 * nfsd4_process_open2() may need, so later stages cannot fail on
 * allocation after the file has been created.  Called with
 * nfs4_lock_state() held (as for the rest of open processing).
 */
__be32
nfsd4_process_open1(struct nfsd4_compound_state *cstate,
		    struct nfsd4_open *open, struct nfsd_net *nn)
{
	clientid_t *clientid = &open->op_clientid;
	struct nfs4_client *clp = NULL;
	unsigned int strhashval;
	struct nfs4_openowner *oo = NULL;
	__be32 status;

	if (STALE_CLIENTID(&open->op_clientid, nn))
		return nfserr_stale_clientid;
	/*
	 * In case we need it later, after we've already created the
	 * file and don't want to risk a further failure:
	 */
	open->op_file = nfsd4_alloc_file();
	if (open->op_file == NULL)
		return nfserr_jukebox;

	strhashval = ownerstr_hashval(clientid->cl_id, &open->op_owner);
	oo = find_openstateowner_str(strhashval, open, cstate->minorversion, nn);
	open->op_openowner = oo;
	if (!oo) {
		/* No owner yet: the client itself must be confirmed. */
		clp = find_confirmed_client(clientid, cstate->minorversion,
					    nn);
		if (clp == NULL)
			return nfserr_expired;
		goto new_owner;
	}
	if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
		/* Replace unconfirmed owners without checking for replay. */
		clp = oo->oo_owner.so_client;
		release_openowner(oo);
		open->op_openowner = NULL;
		goto new_owner;
	}
	/* Confirmed owner: enforce seqid ordering (no-op with sessions). */
	status = nfsd4_check_seqid(cstate, &oo->oo_owner, open->op_seqid);
	if (status)
		return status;
	clp = oo->oo_owner.so_client;
	goto alloc_stateid;
new_owner:
	oo = alloc_init_open_stateowner(strhashval, clp, open);
	if (oo == NULL)
		return nfserr_jukebox;
	open->op_openowner = oo;
alloc_stateid:
	/* Pre-allocate the open stateid for nfsd4_process_open2(). */
	open->op_stp = nfs4_alloc_stateid(clp);
	if (!open->op_stp)
		return nfserr_jukebox;
	return nfs_ok;
}
2685 2685
2686 static inline __be32 2686 static inline __be32
2687 nfs4_check_delegmode(struct nfs4_delegation *dp, int flags) 2687 nfs4_check_delegmode(struct nfs4_delegation *dp, int flags)
2688 { 2688 {
2689 if ((flags & WR_STATE) && (dp->dl_type == NFS4_OPEN_DELEGATE_READ)) 2689 if ((flags & WR_STATE) && (dp->dl_type == NFS4_OPEN_DELEGATE_READ))
2690 return nfserr_openmode; 2690 return nfserr_openmode;
2691 else 2691 else
2692 return nfs_ok; 2692 return nfs_ok;
2693 } 2693 }
2694 2694
2695 static int share_access_to_flags(u32 share_access) 2695 static int share_access_to_flags(u32 share_access)
2696 { 2696 {
2697 return share_access == NFS4_SHARE_ACCESS_READ ? RD_STATE : WR_STATE; 2697 return share_access == NFS4_SHARE_ACCESS_READ ? RD_STATE : WR_STATE;
2698 } 2698 }
2699 2699
2700 static struct nfs4_delegation *find_deleg_stateid(struct nfs4_client *cl, stateid_t *s) 2700 static struct nfs4_delegation *find_deleg_stateid(struct nfs4_client *cl, stateid_t *s)
2701 { 2701 {
2702 struct nfs4_stid *ret; 2702 struct nfs4_stid *ret;
2703 2703
2704 ret = find_stateid_by_type(cl, s, NFS4_DELEG_STID); 2704 ret = find_stateid_by_type(cl, s, NFS4_DELEG_STID);
2705 if (!ret) 2705 if (!ret)
2706 return NULL; 2706 return NULL;
2707 return delegstateid(ret); 2707 return delegstateid(ret);
2708 } 2708 }
2709 2709
2710 static bool nfsd4_is_deleg_cur(struct nfsd4_open *open) 2710 static bool nfsd4_is_deleg_cur(struct nfsd4_open *open)
2711 { 2711 {
2712 return open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR || 2712 return open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR ||
2713 open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH; 2713 open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH;
2714 } 2714 }
2715 2715
/*
 * Look up the delegation stateid presented in @open and check that its
 * mode covers the requested access; *dp is set on success, NULL otherwise.
 * For non-DELEGATE_CUR claims the result is advisory (always nfs_ok); for
 * DELEGATE_CUR claims a bad or mismatched stateid is an error, and a good
 * one implicitly confirms the open owner.
 */
static __be32
nfs4_check_deleg(struct nfs4_client *cl, struct nfs4_file *fp, struct nfsd4_open *open,
		struct nfs4_delegation **dp)
{
	int flags;
	__be32 status = nfserr_bad_stateid;

	*dp = find_deleg_stateid(cl, &open->op_delegate_stateid);
	if (*dp == NULL)
		goto out;
	flags = share_access_to_flags(open->op_share_access);
	status = nfs4_check_delegmode(*dp, flags);
	if (status)
		*dp = NULL;	/* mode mismatch: don't hand back the deleg */
out:
	if (!nfsd4_is_deleg_cur(open))
		return nfs_ok;
	if (status)
		return status;
	/* Valid CLAIM_DELEGATE_CUR stateid confirms the owner. */
	open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
	return nfs_ok;
}
2738 2738
2739 static __be32 2739 static __be32
2740 nfs4_check_open(struct nfs4_file *fp, struct nfsd4_open *open, struct nfs4_ol_stateid **stpp) 2740 nfs4_check_open(struct nfs4_file *fp, struct nfsd4_open *open, struct nfs4_ol_stateid **stpp)
2741 { 2741 {
2742 struct nfs4_ol_stateid *local; 2742 struct nfs4_ol_stateid *local;
2743 struct nfs4_openowner *oo = open->op_openowner; 2743 struct nfs4_openowner *oo = open->op_openowner;
2744 2744
2745 list_for_each_entry(local, &fp->fi_stateids, st_perfile) { 2745 list_for_each_entry(local, &fp->fi_stateids, st_perfile) {
2746 /* ignore lock owners */ 2746 /* ignore lock owners */
2747 if (local->st_stateowner->so_is_open_owner == 0) 2747 if (local->st_stateowner->so_is_open_owner == 0)
2748 continue; 2748 continue;
2749 /* remember if we have seen this open owner */ 2749 /* remember if we have seen this open owner */
2750 if (local->st_stateowner == &oo->oo_owner) 2750 if (local->st_stateowner == &oo->oo_owner)
2751 *stpp = local; 2751 *stpp = local;
2752 /* check for conflicting share reservations */ 2752 /* check for conflicting share reservations */
2753 if (!test_share(local, open)) 2753 if (!test_share(local, open))
2754 return nfserr_share_denied; 2754 return nfserr_share_denied;
2755 } 2755 }
2756 return nfs_ok; 2756 return nfs_ok;
2757 } 2757 }
2758 2758
2759 static inline int nfs4_access_to_access(u32 nfs4_access) 2759 static inline int nfs4_access_to_access(u32 nfs4_access)
2760 { 2760 {
2761 int flags = 0; 2761 int flags = 0;
2762 2762
2763 if (nfs4_access & NFS4_SHARE_ACCESS_READ) 2763 if (nfs4_access & NFS4_SHARE_ACCESS_READ)
2764 flags |= NFSD_MAY_READ; 2764 flags |= NFSD_MAY_READ;
2765 if (nfs4_access & NFS4_SHARE_ACCESS_WRITE) 2765 if (nfs4_access & NFS4_SHARE_ACCESS_WRITE)
2766 flags |= NFSD_MAY_WRITE; 2766 flags |= NFSD_MAY_WRITE;
2767 return flags; 2767 return flags;
2768 } 2768 }
2769 2769
2770 static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp, 2770 static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp,
2771 struct svc_fh *cur_fh, struct nfsd4_open *open) 2771 struct svc_fh *cur_fh, struct nfsd4_open *open)
2772 { 2772 {
2773 __be32 status; 2773 __be32 status;
2774 int oflag = nfs4_access_to_omode(open->op_share_access); 2774 int oflag = nfs4_access_to_omode(open->op_share_access);
2775 int access = nfs4_access_to_access(open->op_share_access); 2775 int access = nfs4_access_to_access(open->op_share_access);
2776 2776
2777 if (!fp->fi_fds[oflag]) { 2777 if (!fp->fi_fds[oflag]) {
2778 status = nfsd_open(rqstp, cur_fh, S_IFREG, access, 2778 status = nfsd_open(rqstp, cur_fh, S_IFREG, access,
2779 &fp->fi_fds[oflag]); 2779 &fp->fi_fds[oflag]);
2780 if (status) 2780 if (status)
2781 return status; 2781 return status;
2782 } 2782 }
2783 nfs4_file_get_access(fp, oflag); 2783 nfs4_file_get_access(fp, oflag);
2784 2784
2785 return nfs_ok; 2785 return nfs_ok;
2786 } 2786 }
2787 2787
2788 static inline __be32 2788 static inline __be32
2789 nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh, 2789 nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh,
2790 struct nfsd4_open *open) 2790 struct nfsd4_open *open)
2791 { 2791 {
2792 struct iattr iattr = { 2792 struct iattr iattr = {
2793 .ia_valid = ATTR_SIZE, 2793 .ia_valid = ATTR_SIZE,
2794 .ia_size = 0, 2794 .ia_size = 0,
2795 }; 2795 };
2796 if (!open->op_truncate) 2796 if (!open->op_truncate)
2797 return 0; 2797 return 0;
2798 if (!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE)) 2798 if (!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE))
2799 return nfserr_inval; 2799 return nfserr_inval;
2800 return nfsd_setattr(rqstp, fh, &iattr, 0, (time_t)0); 2800 return nfsd_setattr(rqstp, fh, &iattr, 0, (time_t)0);
2801 } 2801 }
2802 2802
/*
 * Upgrade an existing open stateid with any additional access/deny bits
 * this OPEN requests.  New vfs access is acquired before the truncate and
 * rolled back if the truncate fails; the share bitmaps are only updated
 * once everything has succeeded.
 */
static __be32
nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp, struct nfsd4_open *open)
{
	u32 op_share_access = open->op_share_access;
	bool new_access;
	__be32 status;

	new_access = !test_access(op_share_access, stp);
	if (new_access) {
		status = nfs4_get_vfs_file(rqstp, fp, cur_fh, open);
		if (status)
			return status;
	}
	status = nfsd4_truncate(rqstp, cur_fh, open);
	if (status) {
		/* Roll back the access we just acquired. */
		if (new_access) {
			int oflag = nfs4_access_to_omode(op_share_access);
			nfs4_file_put_access(fp, oflag);
		}
		return status;
	}
	/* remember the open */
	set_access(op_share_access, stp);
	set_deny(open->op_share_deny, stp);

	return nfs_ok;
}
2830 2830
2831 2831
/*
 * A successful CLAIM_PREVIOUS reclaim implicitly confirms the open owner.
 * (@has_session is currently unused here.)
 */
static void
nfs4_set_claim_prev(struct nfsd4_open *open, bool has_session)
{
	open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
}
2837 2837
2838 /* Should we give out recallable state?: */ 2838 /* Should we give out recallable state?: */
2839 static bool nfsd4_cb_channel_good(struct nfs4_client *clp) 2839 static bool nfsd4_cb_channel_good(struct nfs4_client *clp)
2840 { 2840 {
2841 if (clp->cl_cb_state == NFSD4_CB_UP) 2841 if (clp->cl_cb_state == NFSD4_CB_UP)
2842 return true; 2842 return true;
2843 /* 2843 /*
2844 * In the sessions case, since we don't have to establish a 2844 * In the sessions case, since we don't have to establish a
2845 * separate connection for callbacks, we assume it's OK 2845 * separate connection for callbacks, we assume it's OK
2846 * until we hear otherwise: 2846 * until we hear otherwise:
2847 */ 2847 */
2848 return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN; 2848 return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN;
2849 } 2849 }
2850 2850
2851 static struct file_lock *nfs4_alloc_init_lease(struct nfs4_delegation *dp, int flag) 2851 static struct file_lock *nfs4_alloc_init_lease(struct nfs4_delegation *dp, int flag)
2852 { 2852 {
2853 struct file_lock *fl; 2853 struct file_lock *fl;
2854 2854
2855 fl = locks_alloc_lock(); 2855 fl = locks_alloc_lock();
2856 if (!fl) 2856 if (!fl)
2857 return NULL; 2857 return NULL;
2858 locks_init_lock(fl); 2858 locks_init_lock(fl);
2859 fl->fl_lmops = &nfsd_lease_mng_ops; 2859 fl->fl_lmops = &nfsd_lease_mng_ops;
2860 fl->fl_flags = FL_LEASE; 2860 fl->fl_flags = FL_LEASE;
2861 fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ? F_RDLCK: F_WRLCK; 2861 fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ? F_RDLCK: F_WRLCK;
2862 fl->fl_end = OFFSET_MAX; 2862 fl->fl_end = OFFSET_MAX;
2863 fl->fl_owner = (fl_owner_t)(dp->dl_file); 2863 fl->fl_owner = (fl_owner_t)(dp->dl_file);
2864 fl->fl_pid = current->tgid; 2864 fl->fl_pid = current->tgid;
2865 return fl; 2865 return fl;
2866 } 2866 }
2867 2867
2868 static int nfs4_setlease(struct nfs4_delegation *dp, int flag) 2868 static int nfs4_setlease(struct nfs4_delegation *dp, int flag)
2869 { 2869 {
2870 struct nfs4_file *fp = dp->dl_file; 2870 struct nfs4_file *fp = dp->dl_file;
2871 struct file_lock *fl; 2871 struct file_lock *fl;
2872 int status; 2872 int status;
2873 2873
2874 fl = nfs4_alloc_init_lease(dp, flag); 2874 fl = nfs4_alloc_init_lease(dp, flag);
2875 if (!fl) 2875 if (!fl)
2876 return -ENOMEM; 2876 return -ENOMEM;
2877 fl->fl_file = find_readable_file(fp); 2877 fl->fl_file = find_readable_file(fp);
2878 list_add(&dp->dl_perclnt, &dp->dl_stid.sc_client->cl_delegations); 2878 list_add(&dp->dl_perclnt, &dp->dl_stid.sc_client->cl_delegations);
2879 status = vfs_setlease(fl->fl_file, fl->fl_type, &fl); 2879 status = vfs_setlease(fl->fl_file, fl->fl_type, &fl);
2880 if (status) { 2880 if (status) {
2881 list_del_init(&dp->dl_perclnt); 2881 list_del_init(&dp->dl_perclnt);
2882 locks_free_lock(fl); 2882 locks_free_lock(fl);
2883 return -ENOMEM; 2883 return -ENOMEM;
2884 } 2884 }
2885 fp->fi_lease = fl; 2885 fp->fi_lease = fl;
2886 fp->fi_deleg_file = get_file(fl->fl_file); 2886 fp->fi_deleg_file = get_file(fl->fl_file);
2887 atomic_set(&fp->fi_delegees, 1); 2887 atomic_set(&fp->fi_delegees, 1);
2888 list_add(&dp->dl_perfile, &fp->fi_delegations); 2888 list_add(&dp->dl_perfile, &fp->fi_delegations);
2889 return 0; 2889 return 0;
2890 } 2890 }
2891 2891
/*
 * Hand out a delegation on @dp's file.  The first delegation goes through
 * nfs4_setlease(); subsequent ones just bump fi_delegees under the
 * existing lease.  The recall_lock re-check of fi_had_conflict closes the
 * race with nfsd_break_deleg_cb() marking the file conflicted.
 * Returns 0 or a negative errno (-EAGAIN if a break is in progress).
 */
static int nfs4_set_delegation(struct nfs4_delegation *dp, int flag)
{
	struct nfs4_file *fp = dp->dl_file;

	if (!fp->fi_lease)
		return nfs4_setlease(dp, flag);
	spin_lock(&recall_lock);
	if (fp->fi_had_conflict) {
		spin_unlock(&recall_lock);
		return -EAGAIN;
	}
	atomic_inc(&fp->fi_delegees);
	list_add(&dp->dl_perfile, &fp->fi_delegations);
	spin_unlock(&recall_lock);
	list_add(&dp->dl_perclnt, &dp->dl_stid.sc_client->cl_delegations);
	return 0;
}
2909 2909
2910 static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status) 2910 static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status)
2911 { 2911 {
2912 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT; 2912 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
2913 if (status == -EAGAIN) 2913 if (status == -EAGAIN)
2914 open->op_why_no_deleg = WND4_CONTENTION; 2914 open->op_why_no_deleg = WND4_CONTENTION;
2915 else { 2915 else {
2916 open->op_why_no_deleg = WND4_RESOURCE; 2916 open->op_why_no_deleg = WND4_RESOURCE;
2917 switch (open->op_deleg_want) { 2917 switch (open->op_deleg_want) {
2918 case NFS4_SHARE_WANT_READ_DELEG: 2918 case NFS4_SHARE_WANT_READ_DELEG:
2919 case NFS4_SHARE_WANT_WRITE_DELEG: 2919 case NFS4_SHARE_WANT_WRITE_DELEG:
2920 case NFS4_SHARE_WANT_ANY_DELEG: 2920 case NFS4_SHARE_WANT_ANY_DELEG:
2921 break; 2921 break;
2922 case NFS4_SHARE_WANT_CANCEL: 2922 case NFS4_SHARE_WANT_CANCEL:
2923 open->op_why_no_deleg = WND4_CANCELLED; 2923 open->op_why_no_deleg = WND4_CANCELLED;
2924 break; 2924 break;
2925 case NFS4_SHARE_WANT_NO_DELEG: 2925 case NFS4_SHARE_WANT_NO_DELEG:
2926 WARN_ON_ONCE(1); 2926 WARN_ON_ONCE(1);
2927 } 2927 }
2928 } 2928 }
2929 } 2929 }
2930 2930
/*
 * Attempt to hand out a delegation.
 *
 * CLAIM_PREVIOUS reclaims whatever delegation type the client reports;
 * CLAIM_NULL opens may get a new read or write delegation once the grace
 * period is over, the owner is confirmed, and the callback channel looks
 * usable.  On any failure we fall back to DELEGATE_NONE (with a v4.1
 * why_no_deleg explanation if the client expressed a "want").
 */
static void
nfs4_open_delegation(struct net *net, struct svc_fh *fh,
		     struct nfsd4_open *open, struct nfs4_ol_stateid *stp)
{
	struct nfs4_delegation *dp;
	struct nfs4_openowner *oo = container_of(stp->st_stateowner, struct nfs4_openowner, oo_owner);
	int cb_up;
	int status = 0, flag = 0;

	cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client);
	flag = NFS4_OPEN_DELEGATE_NONE;
	open->op_recall = 0;
	switch (open->op_claim_type) {
		case NFS4_OPEN_CLAIM_PREVIOUS:
			/* No callback channel: grant the reclaim but ask
			 * for the delegation back immediately. */
			if (!cb_up)
				open->op_recall = 1;
			flag = open->op_delegate_type;
			if (flag == NFS4_OPEN_DELEGATE_NONE)
				goto out;
			break;
		case NFS4_OPEN_CLAIM_NULL:
			/* Let's not give out any delegations till everyone's
			 * had the chance to reclaim theirs.... */
			if (locks_in_grace(net))
				goto out;
			if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED))
				goto out;
			if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE)
				flag = NFS4_OPEN_DELEGATE_WRITE;
			else
				flag = NFS4_OPEN_DELEGATE_READ;
			break;
		default:
			goto out;
	}

	dp = alloc_init_deleg(oo->oo_owner.so_client, stp, fh, flag);
	if (dp == NULL)
		goto out_no_deleg;
	status = nfs4_set_delegation(dp, flag);
	if (status)
		goto out_free;

	memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid));

	dprintk("NFSD: delegation stateid=" STATEID_FMT "\n",
		STATEID_VAL(&dp->dl_stid.sc_stateid));
out:
	open->op_delegate_type = flag;
	if (flag == NFS4_OPEN_DELEGATE_NONE) {
		if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS &&
		    open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE)
			dprintk("NFSD: WARNING: refusing delegation reclaim\n");

		/* 4.1 client asking for a delegation? */
		if (open->op_deleg_want)
			nfsd4_open_deleg_none_ext(open, status);
	}
	return;
out_free:
	nfs4_put_delegation(dp);
out_no_deleg:
	flag = NFS4_OPEN_DELEGATE_NONE;
	goto out;
}
2999 2999
3000 static void nfsd4_deleg_xgrade_none_ext(struct nfsd4_open *open, 3000 static void nfsd4_deleg_xgrade_none_ext(struct nfsd4_open *open,
3001 struct nfs4_delegation *dp) 3001 struct nfs4_delegation *dp)
3002 { 3002 {
3003 if (open->op_deleg_want == NFS4_SHARE_WANT_READ_DELEG && 3003 if (open->op_deleg_want == NFS4_SHARE_WANT_READ_DELEG &&
3004 dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) { 3004 dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
3005 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT; 3005 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
3006 open->op_why_no_deleg = WND4_NOT_SUPP_DOWNGRADE; 3006 open->op_why_no_deleg = WND4_NOT_SUPP_DOWNGRADE;
3007 } else if (open->op_deleg_want == NFS4_SHARE_WANT_WRITE_DELEG && 3007 } else if (open->op_deleg_want == NFS4_SHARE_WANT_WRITE_DELEG &&
3008 dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) { 3008 dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
3009 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT; 3009 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
3010 open->op_why_no_deleg = WND4_NOT_SUPP_UPGRADE; 3010 open->op_why_no_deleg = WND4_NOT_SUPP_UPGRADE;
3011 } 3011 }
3012 /* Otherwise the client must be confused wanting a delegation 3012 /* Otherwise the client must be confused wanting a delegation
3013 * it already has, therefore we don't return 3013 * it already has, therefore we don't return
3014 * NFS4_OPEN_DELEGATE_NONE_EXT and reason. 3014 * NFS4_OPEN_DELEGATE_NONE_EXT and reason.
3015 */ 3015 */
3016 } 3016 }
3017 3017
/*
 * called with nfs4_lock_state() held.
 *
 * Core of the OPEN operation: locate or create the nfs4_file for the
 * current filehandle, perform the VFS open (or upgrade an existing open
 * stateid), optionally hand out a delegation, and fill in the stateid
 * and result flags in @open.  On success the updated stateid is copied
 * into open->op_stateid for the reply.
 */
__be32
nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
{
	struct nfsd4_compoundres *resp = rqstp->rq_resp;
	struct nfs4_client *cl = open->op_openowner->oo_owner.so_client;
	struct nfs4_file *fp = NULL;
	struct inode *ino = current_fh->fh_dentry->d_inode;
	struct nfs4_ol_stateid *stp = NULL;
	struct nfs4_delegation *dp = NULL;
	__be32 status;

	/*
	 * Lookup file; if found, lookup stateid and check open request,
	 * and check for delegations in the process of being recalled.
	 * If not found, create the nfs4_file struct
	 */
	fp = find_file(ino);
	if (fp) {
		if ((status = nfs4_check_open(fp, open, &stp)))
			goto out;
		status = nfs4_check_deleg(cl, fp, open, &dp);
		if (status)
			goto out;
	} else {
		/* No nfs4_file exists yet; an open that claims a current
		 * delegation (nfsd4_is_deleg_cur) presumably cannot be the
		 * first open of the file, so fail it with bad_stateid. */
		status = nfserr_bad_stateid;
		if (nfsd4_is_deleg_cur(open))
			goto out;
		/* Consume the nfs4_file pre-allocated in the open args
		 * (ownership transfers here; op_file is cleared so the
		 * cleanup path won't free it) and hash it for this inode. */
		status = nfserr_jukebox;
		fp = open->op_file;
		open->op_file = NULL;
		nfsd4_init_file(fp, ino);
	}

	/*
	 * OPEN the file, or upgrade an existing OPEN.
	 * If truncate fails, the OPEN fails.
	 */
	if (stp) {
		/* Stateid was found, this is an OPEN upgrade */
		status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
		if (status)
			goto out;
	} else {
		status = nfs4_get_vfs_file(rqstp, fp, current_fh, open);
		if (status)
			goto out;
		status = nfsd4_truncate(rqstp, current_fh, open);
		if (status)
			goto out;
		/* Consume the pre-allocated stateid from the open args,
		 * same ownership-transfer pattern as op_file above. */
		stp = open->op_stp;
		open->op_stp = NULL;
		init_open_stateid(stp, fp, open);
	}
	update_stateid(&stp->st_stid.sc_stateid);
	memcpy(&open->op_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));

	if (nfsd4_has_session(&resp->cstate)) {
		/* With sessions (4.1+) no OPEN_CONFIRM round trip is
		 * needed, so the openowner is confirmed immediately. */
		open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;

		if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) {
			open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
			open->op_why_no_deleg = WND4_NOT_WANTED;
			goto nodeleg;
		}
	}

	/*
	 * Attempt to hand out a delegation. No error return, because the
	 * OPEN succeeds even if we fail.
	 */
	nfs4_open_delegation(SVC_NET(rqstp), current_fh, open, stp);
nodeleg:
	status = nfs_ok;

	dprintk("%s: stateid=" STATEID_FMT "\n", __func__,
		STATEID_VAL(&stp->st_stid.sc_stateid));
out:
	/* 4.1 client trying to upgrade/downgrade delegation? */
	if (open->op_delegate_type == NFS4_OPEN_DELEGATE_NONE && dp &&
	    open->op_deleg_want)
		nfsd4_deleg_xgrade_none_ext(open, dp);

	if (fp)
		put_nfs4_file(fp);
	if (status == 0 && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS)
		nfs4_set_claim_prev(open, nfsd4_has_session(&resp->cstate));
	/*
	 * To finish the open response, we just need to set the rflags.
	 */
	open->op_rflags = NFS4_OPEN_RESULT_LOCKTYPE_POSIX;
	if (!(open->op_openowner->oo_flags & NFS4_OO_CONFIRMED) &&
	    !nfsd4_has_session(&resp->cstate))
		open->op_rflags |= NFS4_OPEN_RESULT_CONFIRM;

	return status;
}
3117 3117
3118 void nfsd4_cleanup_open_state(struct nfsd4_open *open, __be32 status) 3118 void nfsd4_cleanup_open_state(struct nfsd4_open *open, __be32 status)
3119 { 3119 {
3120 if (open->op_openowner) { 3120 if (open->op_openowner) {
3121 struct nfs4_openowner *oo = open->op_openowner; 3121 struct nfs4_openowner *oo = open->op_openowner;
3122 3122
3123 if (!list_empty(&oo->oo_owner.so_stateids)) 3123 if (!list_empty(&oo->oo_owner.so_stateids))
3124 list_del_init(&oo->oo_close_lru); 3124 list_del_init(&oo->oo_close_lru);
3125 if (oo->oo_flags & NFS4_OO_NEW) { 3125 if (oo->oo_flags & NFS4_OO_NEW) {
3126 if (status) { 3126 if (status) {
3127 release_openowner(oo); 3127 release_openowner(oo);
3128 open->op_openowner = NULL; 3128 open->op_openowner = NULL;
3129 } else 3129 } else
3130 oo->oo_flags &= ~NFS4_OO_NEW; 3130 oo->oo_flags &= ~NFS4_OO_NEW;
3131 } 3131 }
3132 } 3132 }
3133 if (open->op_file) 3133 if (open->op_file)
3134 nfsd4_free_file(open->op_file); 3134 nfsd4_free_file(open->op_file);
3135 if (open->op_stp) 3135 if (open->op_stp)
3136 free_generic_stateid(open->op_stp); 3136 free_generic_stateid(open->op_stp);
3137 } 3137 }
3138 3138
3139 static __be32 lookup_clientid(clientid_t *clid, bool session, struct nfsd_net *nn, struct nfs4_client **clp) 3139 static __be32 lookup_clientid(clientid_t *clid, bool session, struct nfsd_net *nn, struct nfs4_client **clp)
3140 { 3140 {
3141 struct nfs4_client *found; 3141 struct nfs4_client *found;
3142 3142
3143 if (STALE_CLIENTID(clid, nn)) 3143 if (STALE_CLIENTID(clid, nn))
3144 return nfserr_stale_clientid; 3144 return nfserr_stale_clientid;
3145 found = find_confirmed_client(clid, session, nn); 3145 found = find_confirmed_client(clid, session, nn);
3146 if (clp) 3146 if (clp)
3147 *clp = found; 3147 *clp = found;
3148 return found ? nfs_ok : nfserr_expired; 3148 return found ? nfs_ok : nfserr_expired;
3149 } 3149 }
3150 3150
3151 __be32 3151 __be32
3152 nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 3152 nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3153 clientid_t *clid) 3153 clientid_t *clid)
3154 { 3154 {
3155 struct nfs4_client *clp; 3155 struct nfs4_client *clp;
3156 __be32 status; 3156 __be32 status;
3157 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 3157 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3158 3158
3159 nfs4_lock_state(); 3159 nfs4_lock_state();
3160 dprintk("process_renew(%08x/%08x): starting\n", 3160 dprintk("process_renew(%08x/%08x): starting\n",
3161 clid->cl_boot, clid->cl_id); 3161 clid->cl_boot, clid->cl_id);
3162 status = lookup_clientid(clid, cstate->minorversion, nn, &clp); 3162 status = lookup_clientid(clid, cstate->minorversion, nn, &clp);
3163 if (status) 3163 if (status)
3164 goto out; 3164 goto out;
3165 status = nfserr_cb_path_down; 3165 status = nfserr_cb_path_down;
3166 if (!list_empty(&clp->cl_delegations) 3166 if (!list_empty(&clp->cl_delegations)
3167 && clp->cl_cb_state != NFSD4_CB_UP) 3167 && clp->cl_cb_state != NFSD4_CB_UP)
3168 goto out; 3168 goto out;
3169 status = nfs_ok; 3169 status = nfs_ok;
3170 out: 3170 out:
3171 nfs4_unlock_state(); 3171 nfs4_unlock_state();
3172 return status; 3172 return status;
3173 } 3173 }
3174 3174
3175 static void 3175 static void
3176 nfsd4_end_grace(struct nfsd_net *nn) 3176 nfsd4_end_grace(struct nfsd_net *nn)
3177 { 3177 {
3178 /* do nothing if grace period already ended */ 3178 /* do nothing if grace period already ended */
3179 if (nn->grace_ended) 3179 if (nn->grace_ended)
3180 return; 3180 return;
3181 3181
3182 dprintk("NFSD: end of grace period\n"); 3182 dprintk("NFSD: end of grace period\n");
3183 nn->grace_ended = true; 3183 nn->grace_ended = true;
3184 nfsd4_record_grace_done(nn, nn->boot_time); 3184 nfsd4_record_grace_done(nn, nn->boot_time);
3185 locks_end_grace(&nn->nfsd4_manager); 3185 locks_end_grace(&nn->nfsd4_manager);
3186 /* 3186 /*
3187 * Now that every NFSv4 client has had the chance to recover and 3187 * Now that every NFSv4 client has had the chance to recover and
3188 * to see the (possibly new, possibly shorter) lease time, we 3188 * to see the (possibly new, possibly shorter) lease time, we
3189 * can safely set the next grace time to the current lease time: 3189 * can safely set the next grace time to the current lease time:
3190 */ 3190 */
3191 nn->nfsd4_grace = nn->nfsd4_lease; 3191 nn->nfsd4_grace = nn->nfsd4_lease;
3192 } 3192 }
3193 3193
/*
 * Periodic reaper for expired NFSv4 state in one net namespace:
 * expires clients idle past the lease cutoff, unhashes delegations
 * whose entries on the recall LRU are older than the cutoff, and
 * releases openowners parked on the close LRU.  Returns the number
 * of seconds until the laundromat should run again.
 */
static time_t
nfs4_laundromat(struct nfsd_net *nn)
{
	struct nfs4_client *clp;
	struct nfs4_openowner *oo;
	struct nfs4_delegation *dp;
	struct list_head *pos, *next, reaplist;
	time_t cutoff = get_seconds() - nn->nfsd4_lease;
	time_t t, clientid_val = nn->nfsd4_lease;
	time_t u, test_val = nn->nfsd4_lease;

	nfs4_lock_state();

	dprintk("NFSD: laundromat service - starting\n");
	nfsd4_end_grace(nn);
	INIT_LIST_HEAD(&reaplist);
	spin_lock(&nn->client_lock);
	/*
	 * Pass 1: clients.  client_lru is evidently kept oldest-first,
	 * since the loop stops at the first entry newer than the cutoff
	 * and records how long until that entry would expire.
	 */
	list_for_each_safe(pos, next, &nn->client_lru) {
		clp = list_entry(pos, struct nfs4_client, cl_lru);
		if (time_after((unsigned long)clp->cl_time, (unsigned long)cutoff)) {
			t = clp->cl_time - cutoff;
			if (clientid_val > t)
				clientid_val = t;
			break;
		}
		/* Still referenced elsewhere: leave it for a later run. */
		if (atomic_read(&clp->cl_refcount)) {
			dprintk("NFSD: client in use (clientid %08x)\n",
				clp->cl_clientid.cl_id);
			continue;
		}
		unhash_client_locked(clp);
		list_add(&clp->cl_lru, &reaplist);
	}
	spin_unlock(&nn->client_lock);
	/* Expire the collected clients outside client_lock. */
	list_for_each_safe(pos, next, &reaplist) {
		clp = list_entry(pos, struct nfs4_client, cl_lru);
		dprintk("NFSD: purging unused client (clientid %08x)\n",
			clp->cl_clientid.cl_id);
		expire_client(clp);
	}
	spin_lock(&recall_lock);
	/*
	 * Pass 2: delegations on the (global) recall LRU; only entries
	 * belonging to this net namespace are considered.  reaplist is
	 * empty again here, since pass 1 moved every entry off it.
	 */
	list_for_each_safe(pos, next, &del_recall_lru) {
		dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
		if (net_generic(dp->dl_stid.sc_client->net, nfsd_net_id) != nn)
			continue;
		if (time_after((unsigned long)dp->dl_time, (unsigned long)cutoff)) {
			u = dp->dl_time - cutoff;
			if (test_val > u)
				test_val = u;
			break;
		}
		list_move(&dp->dl_recall_lru, &reaplist);
	}
	spin_unlock(&recall_lock);
	/* Unhash the collected delegations outside recall_lock. */
	list_for_each_safe(pos, next, &reaplist) {
		dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
		unhash_delegation(dp);
	}
	/*
	 * Pass 3: openowners on the close LRU.  Note test_val is reset,
	 * so the delegation pass's remaining time is discarded and only
	 * clientid_val determines the return value below.
	 */
	test_val = nn->nfsd4_lease;
	list_for_each_safe(pos, next, &nn->close_lru) {
		oo = container_of(pos, struct nfs4_openowner, oo_close_lru);
		if (time_after((unsigned long)oo->oo_time, (unsigned long)cutoff)) {
			u = oo->oo_time - cutoff;
			if (test_val > u)
				test_val = u;
			break;
		}
		release_openowner(oo);
	}
	/* Never reschedule sooner than the minimum timeout. */
	if (clientid_val < NFSD_LAUNDROMAT_MINTIMEOUT)
		clientid_val = NFSD_LAUNDROMAT_MINTIMEOUT;
	nfs4_unlock_state();
	return clientid_val;
}
3268 3268
/* Workqueue on which the periodic laundromat job (laundromat_main) runs. */
static struct workqueue_struct *laundry_wq;
static void laundromat_main(struct work_struct *);
3271 3271
3272 static void 3272 static void
3273 laundromat_main(struct work_struct *laundry) 3273 laundromat_main(struct work_struct *laundry)
3274 { 3274 {
3275 time_t t; 3275 time_t t;
3276 struct delayed_work *dwork = container_of(laundry, struct delayed_work, 3276 struct delayed_work *dwork = container_of(laundry, struct delayed_work,
3277 work); 3277 work);
3278 struct nfsd_net *nn = container_of(dwork, struct nfsd_net, 3278 struct nfsd_net *nn = container_of(dwork, struct nfsd_net,
3279 laundromat_work); 3279 laundromat_work);
3280 3280
3281 t = nfs4_laundromat(nn); 3281 t = nfs4_laundromat(nn);
3282 dprintk("NFSD: laundromat_main - sleeping for %ld seconds\n", t); 3282 dprintk("NFSD: laundromat_main - sleeping for %ld seconds\n", t);
3283 queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ); 3283 queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ);
3284 } 3284 }
3285 3285
3286 static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_ol_stateid *stp) 3286 static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_ol_stateid *stp)
3287 { 3287 {
3288 if (fhp->fh_dentry->d_inode != stp->st_file->fi_inode) 3288 if (fhp->fh_dentry->d_inode != stp->st_file->fi_inode)
3289 return nfserr_bad_stateid; 3289 return nfserr_bad_stateid;
3290 return nfs_ok; 3290 return nfs_ok;
3291 } 3291 }
3292 3292
3293 static int 3293 static int
3294 STALE_STATEID(stateid_t *stateid, struct nfsd_net *nn) 3294 STALE_STATEID(stateid_t *stateid, struct nfsd_net *nn)
3295 { 3295 {
3296 if (stateid->si_opaque.so_clid.cl_boot == nn->boot_time) 3296 if (stateid->si_opaque.so_clid.cl_boot == nn->boot_time)
3297 return 0; 3297 return 0;
3298 dprintk("NFSD: stale stateid " STATEID_FMT "!\n", 3298 dprintk("NFSD: stale stateid " STATEID_FMT "!\n",
3299 STATEID_VAL(stateid)); 3299 STATEID_VAL(stateid));
3300 return 1; 3300 return 1;
3301 } 3301 }
3302 3302
3303 static inline int 3303 static inline int
3304 access_permit_read(struct nfs4_ol_stateid *stp) 3304 access_permit_read(struct nfs4_ol_stateid *stp)
3305 { 3305 {
3306 return test_access(NFS4_SHARE_ACCESS_READ, stp) || 3306 return test_access(NFS4_SHARE_ACCESS_READ, stp) ||
3307 test_access(NFS4_SHARE_ACCESS_BOTH, stp) || 3307 test_access(NFS4_SHARE_ACCESS_BOTH, stp) ||
3308 test_access(NFS4_SHARE_ACCESS_WRITE, stp); 3308 test_access(NFS4_SHARE_ACCESS_WRITE, stp);
3309 } 3309 }
3310 3310
3311 static inline int 3311 static inline int
3312 access_permit_write(struct nfs4_ol_stateid *stp) 3312 access_permit_write(struct nfs4_ol_stateid *stp)
3313 { 3313 {
3314 return test_access(NFS4_SHARE_ACCESS_WRITE, stp) || 3314 return test_access(NFS4_SHARE_ACCESS_WRITE, stp) ||
3315 test_access(NFS4_SHARE_ACCESS_BOTH, stp); 3315 test_access(NFS4_SHARE_ACCESS_BOTH, stp);
3316 } 3316 }
3317 3317
3318 static 3318 static
3319 __be32 nfs4_check_openmode(struct nfs4_ol_stateid *stp, int flags) 3319 __be32 nfs4_check_openmode(struct nfs4_ol_stateid *stp, int flags)
3320 { 3320 {
3321 __be32 status = nfserr_openmode; 3321 __be32 status = nfserr_openmode;
3322 3322
3323 /* For lock stateid's, we test the parent open, not the lock: */ 3323 /* For lock stateid's, we test the parent open, not the lock: */
3324 if (stp->st_openstp) 3324 if (stp->st_openstp)
3325 stp = stp->st_openstp; 3325 stp = stp->st_openstp;
3326 if ((flags & WR_STATE) && !access_permit_write(stp)) 3326 if ((flags & WR_STATE) && !access_permit_write(stp))
3327 goto out; 3327 goto out;
3328 if ((flags & RD_STATE) && !access_permit_read(stp)) 3328 if ((flags & RD_STATE) && !access_permit_read(stp))
3329 goto out; 3329 goto out;
3330 status = nfs_ok; 3330 status = nfs_ok;
3331 out: 3331 out:
3332 return status; 3332 return status;
3333 } 3333 }
3334 3334
3335 static inline __be32 3335 static inline __be32
3336 check_special_stateids(struct net *net, svc_fh *current_fh, stateid_t *stateid, int flags) 3336 check_special_stateids(struct net *net, svc_fh *current_fh, stateid_t *stateid, int flags)
3337 { 3337 {
3338 if (ONE_STATEID(stateid) && (flags & RD_STATE)) 3338 if (ONE_STATEID(stateid) && (flags & RD_STATE))
3339 return nfs_ok; 3339 return nfs_ok;
3340 else if (locks_in_grace(net)) { 3340 else if (locks_in_grace(net)) {
3341 /* Answer in remaining cases depends on existence of 3341 /* Answer in remaining cases depends on existence of
3342 * conflicting state; so we must wait out the grace period. */ 3342 * conflicting state; so we must wait out the grace period. */
3343 return nfserr_grace; 3343 return nfserr_grace;
3344 } else if (flags & WR_STATE) 3344 } else if (flags & WR_STATE)
3345 return nfs4_share_conflict(current_fh, 3345 return nfs4_share_conflict(current_fh,
3346 NFS4_SHARE_DENY_WRITE); 3346 NFS4_SHARE_DENY_WRITE);
3347 else /* (flags & RD_STATE) && ZERO_STATEID(stateid) */ 3347 else /* (flags & RD_STATE) && ZERO_STATEID(stateid) */
3348 return nfs4_share_conflict(current_fh, 3348 return nfs4_share_conflict(current_fh,
3349 NFS4_SHARE_DENY_READ); 3349 NFS4_SHARE_DENY_READ);
3350 } 3350 }
3351 3351
/*
 * Allow READ/WRITE during grace period on recovered state only for files
 * that are not able to provide mandatory locking.
 */
static inline int
grace_disallows_io(struct net *net, struct inode *inode)
{
	if (!locks_in_grace(net))
		return 0;
	return mandatory_lock(inode) != 0;
}
3361 3361
3362 /* Returns true iff a is later than b: */ 3362 /* Returns true iff a is later than b: */
3363 static bool stateid_generation_after(stateid_t *a, stateid_t *b) 3363 static bool stateid_generation_after(stateid_t *a, stateid_t *b)
3364 { 3364 {
3365 return (s32)a->si_generation - (s32)b->si_generation > 0; 3365 return (s32)a->si_generation - (s32)b->si_generation > 0;
3366 } 3366 }
3367 3367
3368 static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session) 3368 static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session)
3369 { 3369 {
3370 /* 3370 /*
3371 * When sessions are used the stateid generation number is ignored 3371 * When sessions are used the stateid generation number is ignored
3372 * when it is zero. 3372 * when it is zero.
3373 */ 3373 */
3374 if (has_session && in->si_generation == 0) 3374 if (has_session && in->si_generation == 0)
3375 return nfs_ok; 3375 return nfs_ok;
3376 3376
3377 if (in->si_generation == ref->si_generation) 3377 if (in->si_generation == ref->si_generation)
3378 return nfs_ok; 3378 return nfs_ok;
3379 3379
3380 /* If the client sends us a stateid from the future, it's buggy: */ 3380 /* If the client sends us a stateid from the future, it's buggy: */
3381 if (stateid_generation_after(in, ref)) 3381 if (stateid_generation_after(in, ref))
3382 return nfserr_bad_stateid; 3382 return nfserr_bad_stateid;
3383 /* 3383 /*
3384 * However, we could see a stateid from the past, even from a 3384 * However, we could see a stateid from the past, even from a
3385 * non-buggy client. For example, if the client sends a lock 3385 * non-buggy client. For example, if the client sends a lock
3386 * while some IO is outstanding, the lock may bump si_generation 3386 * while some IO is outstanding, the lock may bump si_generation
3387 * while the IO is still in flight. The client could avoid that 3387 * while the IO is still in flight. The client could avoid that
3388 * situation by waiting for responses on all the IO requests, 3388 * situation by waiting for responses on all the IO requests,
3389 * but better performance may result in retrying IO that 3389 * but better performance may result in retrying IO that
3390 * receives an old_stateid error if requests are rarely 3390 * receives an old_stateid error if requests are rarely
3391 * reordered in flight: 3391 * reordered in flight:
3392 */ 3392 */
3393 return nfserr_old_stateid; 3393 return nfserr_old_stateid;
3394 } 3394 }
3395 3395
/*
 * Validate a single stateid on behalf of TEST_STATEID-style checks:
 * verify it belongs to @cl, still exists, has a current generation,
 * and (for open/lock stateids) that its openowner has been confirmed.
 */
static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
{
	struct nfs4_stid *s;
	struct nfs4_ol_stateid *ols;
	__be32 status;

	/* The special all-zeros/all-ones stateids are never valid here. */
	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
		return nfserr_bad_stateid;
	/* Client debugging aid. */
	if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid)) {
		char addr_str[INET6_ADDRSTRLEN];
		/* Pass the real buffer size (INET6_ADDRSTRLEN via sizeof),
		 * not a larger constant, so rpc_ntop can't overrun. */
		rpc_ntop((struct sockaddr *)&cl->cl_addr, addr_str,
				sizeof(addr_str));
		pr_warn_ratelimited("NFSD: client %s testing state ID "
					"with incorrect client ID\n", addr_str);
		return nfserr_bad_stateid;
	}
	s = find_stateid(cl, stateid);
	if (!s)
		return nfserr_bad_stateid;
	/* has_session=1: a zero generation is accepted unchecked. */
	status = check_stateid_generation(stateid, &s->sc_stateid, 1);
	if (status)
		return status;
	/* Only open/lock stateids need the owner-confirmation check. */
	if (!(s->sc_type & (NFS4_OPEN_STID | NFS4_LOCK_STID)))
		return nfs_ok;
	ols = openlockstateid(s);
	/* An open stateid from a still-unconfirmed openowner is not
	 * usable yet. */
	if (ols->st_stateowner->so_is_open_owner
	    && !(openowner(ols->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED))
		return nfserr_bad_stateid;
	return nfs_ok;
}
3427 3427
3428 static __be32 nfsd4_lookup_stateid(stateid_t *stateid, unsigned char typemask, 3428 static __be32 nfsd4_lookup_stateid(stateid_t *stateid, unsigned char typemask,
3429 struct nfs4_stid **s, bool sessions, 3429 struct nfs4_stid **s, bool sessions,
3430 struct nfsd_net *nn) 3430 struct nfsd_net *nn)
3431 { 3431 {
3432 struct nfs4_client *cl; 3432 struct nfs4_client *cl;
3433 3433
3434 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) 3434 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
3435 return nfserr_bad_stateid; 3435 return nfserr_bad_stateid;
3436 if (STALE_STATEID(stateid, nn)) 3436 if (STALE_STATEID(stateid, nn))
3437 return nfserr_stale_stateid; 3437 return nfserr_stale_stateid;
3438 cl = find_confirmed_client(&stateid->si_opaque.so_clid, sessions, nn); 3438 cl = find_confirmed_client(&stateid->si_opaque.so_clid, sessions, nn);
3439 if (!cl) 3439 if (!cl)
3440 return nfserr_expired; 3440 return nfserr_expired;
3441 *s = find_stateid_by_type(cl, stateid, typemask); 3441 *s = find_stateid_by_type(cl, stateid, typemask);
3442 if (!*s) 3442 if (!*s)
3443 return nfserr_bad_stateid; 3443 return nfserr_bad_stateid;
3444 return nfs_ok; 3444 return nfs_ok;
3445 3445
3446 } 3446 }
3447 3447
/*
 * Checks for stateid operations
 *
 * Validate the stateid supplied with a READ/WRITE-class operation
 * (@flags selects RD_STATE/WR_STATE) against the current filehandle.
 * When @filpp is non-NULL and the stateid is good, *filpp is set to a
 * struct file appropriate for the access mode.
 */
__be32
nfs4_preprocess_stateid_op(struct net *net, struct nfsd4_compound_state *cstate,
			   stateid_t *stateid, int flags, struct file **filpp)
{
	struct nfs4_stid *s;
	struct nfs4_ol_stateid *stp = NULL;
	struct nfs4_delegation *dp = NULL;
	struct svc_fh *current_fh = &cstate->current_fh;
	struct inode *ino = current_fh->fh_dentry->d_inode;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	__be32 status;

	if (filpp)
		*filpp = NULL;

	/* During grace, IO on files with mandatory locking is refused. */
	if (grace_disallows_io(net, ino))
		return nfserr_grace;

	/* The all-zeros/all-ones stateids follow their own rules. */
	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
		return check_special_stateids(net, current_fh, stateid, flags);

	status = nfsd4_lookup_stateid(stateid, NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID,
				      &s, cstate->minorversion, nn);
	if (status)
		return status;
	status = check_stateid_generation(stateid, &s->sc_stateid, nfsd4_has_session(cstate));
	if (status)
		goto out;
	switch (s->sc_type) {
	case NFS4_DELEG_STID:
		dp = delegstateid(s);
		status = nfs4_check_delegmode(dp, flags);
		if (status)
			goto out;
		if (filpp) {
			*filpp = dp->dl_file->fi_deleg_file;
			/* A delegation should always have its file; a NULL
			 * here indicates server-side state corruption. */
			if (!*filpp) {
				WARN_ON_ONCE(1);
				status = nfserr_serverfault;
				goto out;
			}
		}
		break;
	case NFS4_OPEN_STID:
	case NFS4_LOCK_STID:
		stp = openlockstateid(s);
		status = nfs4_check_fh(current_fh, stp);
		if (status)
			goto out;
		/* An unconfirmed openowner's stateids are not usable yet;
		 * note this path returns the generation-check status. */
		if (stp->st_stateowner->so_is_open_owner
		    && !(openowner(stp->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED))
			goto out;
		status = nfs4_check_openmode(stp, flags);
		if (status)
			goto out;
		if (filpp) {
			if (flags & RD_STATE)
				*filpp = find_readable_file(stp->st_file);
			else
				*filpp = find_writeable_file(stp->st_file);
		}
		break;
	default:
		return nfserr_bad_stateid;
	}
	status = nfs_ok;
out:
	return status;
}
3520 3520
3521 static __be32 3521 static __be32
3522 nfsd4_free_lock_stateid(struct nfs4_ol_stateid *stp) 3522 nfsd4_free_lock_stateid(struct nfs4_ol_stateid *stp)
3523 { 3523 {
3524 if (check_for_locks(stp->st_file, lockowner(stp->st_stateowner))) 3524 if (check_for_locks(stp->st_file, lockowner(stp->st_stateowner)))
3525 return nfserr_locks_held; 3525 return nfserr_locks_held;
3526 release_lock_stateid(stp); 3526 release_lock_stateid(stp);
3527 return nfs_ok; 3527 return nfs_ok;
3528 } 3528 }
3529 3529
/*
 * Test if the stateid is valid
 *
 * Walks every stateid in the TEST_STATEID request and records a per-id
 * status; the operation itself always succeeds (per-id results are
 * returned in ts_id_status, not as the op's status).
 */
__be32
nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		   struct nfsd4_test_stateid *test_stateid)
{
	struct nfsd4_test_stateid_id *stateid;
	struct nfs4_client *cl = cstate->session->se_client;

	nfs4_lock_state();
	list_for_each_entry(stateid, &test_stateid->ts_stateid_list, ts_id_list)
		stateid->ts_id_status =
			nfsd4_validate_stateid(cl, &stateid->ts_id_stateid);
	nfs4_unlock_state();

	return nfs_ok;
}
3548 3548
/*
 * Handle FREE_STATEID: only a lock stateid with no locks still held may
 * actually be freed; delegation and open stateids report locks_held so
 * the client returns/closes them through the proper operations instead.
 */
__be32
nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		   struct nfsd4_free_stateid *free_stateid)
{
	stateid_t *stateid = &free_stateid->fr_stateid;
	struct nfs4_stid *s;
	struct nfs4_client *cl = cstate->session->se_client;
	__be32 ret = nfserr_bad_stateid;

	nfs4_lock_state();
	s = find_stateid(cl, stateid);
	if (!s)
		goto out;
	switch (s->sc_type) {
	case NFS4_DELEG_STID:
		/* Delegations are returned via DELEGRETURN, not freed here. */
		ret = nfserr_locks_held;
		goto out;
	case NFS4_OPEN_STID:
	case NFS4_LOCK_STID:
		/* "1" => treat as a sessions (4.1) generation check here. */
		ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
		if (ret)
			goto out;
		if (s->sc_type == NFS4_LOCK_STID)
			ret = nfsd4_free_lock_stateid(openlockstateid(s));
		else
			ret = nfserr_locks_held;
		break;
	default:
		ret = nfserr_bad_stateid;
	}
out:
	nfs4_unlock_state();
	return ret;
}
3583 3583
3584 static inline int 3584 static inline int
3585 setlkflg (int type) 3585 setlkflg (int type)
3586 { 3586 {
3587 return (type == NFS4_READW_LT || type == NFS4_READ_LT) ? 3587 return (type == NFS4_READW_LT || type == NFS4_READ_LT) ?
3588 RD_STATE : WR_STATE; 3588 RD_STATE : WR_STATE;
3589 } 3589 }
3590 3590
/*
 * Common validation for seqid-mutating operations: seqid replay check,
 * closed-stateid rejection, generation check, then filehandle match.
 * The order matters: nfsd4_check_seqid() must run first so a replay is
 * detected before any other error is returned.
 */
static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_t *stateid, u32 seqid, struct nfs4_ol_stateid *stp)
{
	struct svc_fh *current_fh = &cstate->current_fh;
	struct nfs4_stateowner *sop = stp->st_stateowner;
	__be32 status;

	status = nfsd4_check_seqid(cstate, sop, seqid);
	if (status)
		return status;
	if (stp->st_stid.sc_type == NFS4_CLOSED_STID)
		/*
		 * "Closed" stateid's exist *only* to return
		 * nfserr_replay_me from the previous step.
		 */
		return nfserr_bad_stateid;
	status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
	if (status)
		return status;
	return nfs4_check_fh(current_fh, stp);
}
3611 3611
/*
 * Checks for sequence id mutating operations.
 *
 * Looks up the stateid (restricted to @typemask types), stashes the
 * open/lock stateid in *@stpp, records the stateowner in
 * cstate->replay_owner for possible seqid replay handling, and then runs
 * the common seqid checks.
 */
static __be32
nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
			 stateid_t *stateid, char typemask,
			 struct nfs4_ol_stateid **stpp,
			 struct nfsd_net *nn)
{
	__be32 status;
	struct nfs4_stid *s;

	dprintk("NFSD: %s: seqid=%d stateid = " STATEID_FMT "\n", __func__,
		seqid, STATEID_VAL(stateid));

	*stpp = NULL;
	status = nfsd4_lookup_stateid(stateid, typemask, &s,
				      cstate->minorversion, nn);
	if (status)
		return status;
	*stpp = openlockstateid(s);
	/* Needed so a 4.0 replay can find the owner after we drop out. */
	cstate->replay_owner = (*stpp)->st_stateowner;

	return nfs4_seqid_op_checks(cstate, stateid, seqid, *stpp);
}
3637 3637
/*
 * Like nfs4_preprocess_seqid_op(), but only for open stateids, and
 * additionally requires that the openowner has already been confirmed.
 */
static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
			stateid_t *stateid, struct nfs4_ol_stateid **stpp, struct nfsd_net *nn)
{
	__be32 status;
	struct nfs4_openowner *oo;

	status = nfs4_preprocess_seqid_op(cstate, seqid, stateid,
					  NFS4_OPEN_STID, stpp, nn);
	if (status)
		return status;
	oo = openowner((*stpp)->st_stateowner);
	if (!(oo->oo_flags & NFS4_OO_CONFIRMED))
		return nfserr_bad_stateid;
	return nfs_ok;
}
3653 3653
/*
 * Handle OPEN_CONFIRM: mark the openowner NFS4_OO_CONFIRMED, bump the
 * stateid, and create the client's stable-storage record.  Confirming an
 * already-confirmed owner is an error.  Note the state lock is kept held
 * on the replay path (replay_owner set) and released later after encode.
 */
__be32
nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		   struct nfsd4_open_confirm *oc)
{
	__be32 status;
	struct nfs4_openowner *oo;
	struct nfs4_ol_stateid *stp;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	dprintk("NFSD: nfsd4_open_confirm on file %.*s\n",
			(int)cstate->current_fh.fh_dentry->d_name.len,
			cstate->current_fh.fh_dentry->d_name.name);

	status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0);
	if (status)
		return status;

	nfs4_lock_state();

	status = nfs4_preprocess_seqid_op(cstate,
					oc->oc_seqid, &oc->oc_req_stateid,
					NFS4_OPEN_STID, &stp, nn);
	if (status)
		goto out;
	oo = openowner(stp->st_stateowner);
	status = nfserr_bad_stateid;
	/* A second OPEN_CONFIRM for the same owner is invalid. */
	if (oo->oo_flags & NFS4_OO_CONFIRMED)
		goto out;
	oo->oo_flags |= NFS4_OO_CONFIRMED;
	update_stateid(&stp->st_stid.sc_stateid);
	memcpy(&oc->oc_resp_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
	dprintk("NFSD: %s: success, seqid=%d stateid=" STATEID_FMT "\n",
		__func__, oc->oc_seqid, STATEID_VAL(&stp->st_stid.sc_stateid));

	nfsd4_client_record_create(oo->oo_owner.so_client);
	status = nfs_ok;
out:
	if (!cstate->replay_owner)
		nfs4_unlock_state();
	return status;
}
3695 3695
/*
 * Drop one access mode from an open stateid: release the file access it
 * pinned and clear the corresponding bit.  No-op if the bit isn't set.
 */
static inline void nfs4_stateid_downgrade_bit(struct nfs4_ol_stateid *stp, u32 access)
{
	if (!test_access(access, stp))
		return;
	nfs4_file_put_access(stp->st_file, nfs4_access_to_omode(access));
	clear_access(access, stp);
}
3703 3703
3704 static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_access) 3704 static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_access)
3705 { 3705 {
3706 switch (to_access) { 3706 switch (to_access) {
3707 case NFS4_SHARE_ACCESS_READ: 3707 case NFS4_SHARE_ACCESS_READ:
3708 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_WRITE); 3708 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_WRITE);
3709 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH); 3709 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
3710 break; 3710 break;
3711 case NFS4_SHARE_ACCESS_WRITE: 3711 case NFS4_SHARE_ACCESS_WRITE:
3712 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_READ); 3712 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_READ);
3713 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH); 3713 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
3714 break; 3714 break;
3715 case NFS4_SHARE_ACCESS_BOTH: 3715 case NFS4_SHARE_ACCESS_BOTH:
3716 break; 3716 break;
3717 default: 3717 default:
3718 WARN_ON_ONCE(1); 3718 WARN_ON_ONCE(1);
3719 } 3719 }
3720 } 3720 }
3721 3721
/*
 * Clear every deny bit that is not a subset of @deny.  Iterates all four
 * possible deny-mode bit patterns (0..3) and keeps only those fully
 * contained in the new deny mask.
 */
static void
reset_union_bmap_deny(unsigned long deny, struct nfs4_ol_stateid *stp)
{
	int i;
	for (i = 0; i < 4; i++) {
		if ((i & deny) != i)
			clear_deny(i, stp);
	}
}
3731 3731
/*
 * Handle OPEN_DOWNGRADE: reduce an open stateid's access and deny modes.
 * The requested modes must each be a subset of what the stateid already
 * holds (nfserr_inval otherwise).  State lock is kept on the replay path.
 */
__be32
nfsd4_open_downgrade(struct svc_rqst *rqstp,
		     struct nfsd4_compound_state *cstate,
		     struct nfsd4_open_downgrade *od)
{
	__be32 status;
	struct nfs4_ol_stateid *stp;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	dprintk("NFSD: nfsd4_open_downgrade on file %.*s\n",
			(int)cstate->current_fh.fh_dentry->d_name.len,
			cstate->current_fh.fh_dentry->d_name.name);

	/* We don't yet support WANT bits: */
	if (od->od_deleg_want)
		dprintk("NFSD: %s: od_deleg_want=0x%x ignored\n", __func__,
			od->od_deleg_want);

	nfs4_lock_state();
	status = nfs4_preprocess_confirmed_seqid_op(cstate, od->od_seqid,
					&od->od_stateid, &stp, nn);
	if (status)
		goto out;
	status = nfserr_inval;
	if (!test_access(od->od_share_access, stp)) {
		dprintk("NFSD: access not a subset current bitmap: 0x%lx, input access=%08x\n",
			stp->st_access_bmap, od->od_share_access);
		goto out;
	}
	if (!test_deny(od->od_share_deny, stp)) {
		dprintk("NFSD:deny not a subset current bitmap: 0x%lx, input deny=%08x\n",
			stp->st_deny_bmap, od->od_share_deny);
		goto out;
	}
	nfs4_stateid_downgrade(stp, od->od_share_access);

	reset_union_bmap_deny(od->od_share_deny, stp);

	update_stateid(&stp->st_stid.sc_stateid);
	memcpy(&od->od_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
	status = nfs_ok;
out:
	if (!cstate->replay_owner)
		nfs4_unlock_state();
	return status;
}
3778 3778
/*
 * Two-step reaping of an openowner's saved last-closed stateid (kept for
 * 4.0 CLOSE replay): the first call arms NFS4_OO_PURGE_CLOSE, the second
 * actually releases the stateid.  Lockowners are ignored.
 */
void nfsd4_purge_closed_stateid(struct nfs4_stateowner *so)
{
	struct nfs4_openowner *oo;
	struct nfs4_ol_stateid *s;

	if (!so->so_is_open_owner)
		return;
	oo = openowner(so);
	s = oo->oo_last_closed_stid;
	if (!s)
		return;
	if (!(oo->oo_flags & NFS4_OO_PURGE_CLOSE)) {
		/* Release the last_closed_stid on the next seqid bump: */
		oo->oo_flags |= NFS4_OO_PURGE_CLOSE;
		return;
	}
	oo->oo_flags &= ~NFS4_OO_PURGE_CLOSE;
	release_last_closed_stateid(oo);
}
3798 3798
/*
 * Unhash an open stateid and mark it closed; closed stateids survive
 * only so a replayed CLOSE can get nfserr_replay_me.
 */
static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
{
	unhash_open_stateid(s);
	s->st_stid.sc_type = NFS4_CLOSED_STID;
}
3804 3804
3805 /* 3805 /*
3806 * nfs4_unlock_state() called after encode 3806 * nfs4_unlock_state() called after encode
3807 */ 3807 */
3808 __be32 3808 __be32
3809 nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 3809 nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3810 struct nfsd4_close *close) 3810 struct nfsd4_close *close)
3811 { 3811 {
3812 __be32 status; 3812 __be32 status;
3813 struct nfs4_openowner *oo; 3813 struct nfs4_openowner *oo;
3814 struct nfs4_ol_stateid *stp; 3814 struct nfs4_ol_stateid *stp;
3815 struct net *net = SVC_NET(rqstp); 3815 struct net *net = SVC_NET(rqstp);
3816 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 3816 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
3817 3817
3818 dprintk("NFSD: nfsd4_close on file %.*s\n", 3818 dprintk("NFSD: nfsd4_close on file %.*s\n",
3819 (int)cstate->current_fh.fh_dentry->d_name.len, 3819 (int)cstate->current_fh.fh_dentry->d_name.len,
3820 cstate->current_fh.fh_dentry->d_name.name); 3820 cstate->current_fh.fh_dentry->d_name.name);
3821 3821
3822 nfs4_lock_state(); 3822 nfs4_lock_state();
3823 status = nfs4_preprocess_seqid_op(cstate, close->cl_seqid, 3823 status = nfs4_preprocess_seqid_op(cstate, close->cl_seqid,
3824 &close->cl_stateid, 3824 &close->cl_stateid,
3825 NFS4_OPEN_STID|NFS4_CLOSED_STID, 3825 NFS4_OPEN_STID|NFS4_CLOSED_STID,
3826 &stp, nn); 3826 &stp, nn);
3827 if (status) 3827 if (status)
3828 goto out; 3828 goto out;
3829 oo = openowner(stp->st_stateowner); 3829 oo = openowner(stp->st_stateowner);
3830 status = nfs_ok; 3830 status = nfs_ok;
3831 update_stateid(&stp->st_stid.sc_stateid); 3831 update_stateid(&stp->st_stid.sc_stateid);
3832 memcpy(&close->cl_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t)); 3832 memcpy(&close->cl_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
3833 3833
3834 nfsd4_close_open_stateid(stp); 3834 nfsd4_close_open_stateid(stp);
3835 release_last_closed_stateid(oo); 3835 release_last_closed_stateid(oo);
3836 oo->oo_last_closed_stid = stp; 3836 oo->oo_last_closed_stid = stp;
3837 3837
3838 if (list_empty(&oo->oo_owner.so_stateids)) { 3838 if (list_empty(&oo->oo_owner.so_stateids)) {
3839 if (cstate->minorversion) { 3839 if (cstate->minorversion) {
3840 release_openowner(oo); 3840 release_openowner(oo);
3841 cstate->replay_owner = NULL; 3841 cstate->replay_owner = NULL;
3842 } else { 3842 } else {
3843 /* 3843 /*
3844 * In the 4.0 case we need to keep the owners around a 3844 * In the 4.0 case we need to keep the owners around a
3845 * little while to handle CLOSE replay. 3845 * little while to handle CLOSE replay.
3846 */ 3846 */
3847 if (list_empty(&oo->oo_owner.so_stateids)) 3847 if (list_empty(&oo->oo_owner.so_stateids))
3848 move_to_close_lru(oo, SVC_NET(rqstp)); 3848 move_to_close_lru(oo, SVC_NET(rqstp));
3849 } 3849 }
3850 } 3850 }
3851 out: 3851 out:
3852 if (!cstate->replay_owner) 3852 if (!cstate->replay_owner)
3853 nfs4_unlock_state(); 3853 nfs4_unlock_state();
3854 return status; 3854 return status;
3855 } 3855 }
3856 3856
3857 __be32 3857 __be32
3858 nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 3858 nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3859 struct nfsd4_delegreturn *dr) 3859 struct nfsd4_delegreturn *dr)
3860 { 3860 {
3861 struct nfs4_delegation *dp; 3861 struct nfs4_delegation *dp;
3862 stateid_t *stateid = &dr->dr_stateid; 3862 stateid_t *stateid = &dr->dr_stateid;
3863 struct nfs4_stid *s; 3863 struct nfs4_stid *s;
3864 __be32 status; 3864 __be32 status;
3865 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 3865 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3866 3866
3867 if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0))) 3867 if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
3868 return status; 3868 return status;
3869 3869
3870 nfs4_lock_state(); 3870 nfs4_lock_state();
3871 status = nfsd4_lookup_stateid(stateid, NFS4_DELEG_STID, &s, 3871 status = nfsd4_lookup_stateid(stateid, NFS4_DELEG_STID, &s,
3872 cstate->minorversion, nn); 3872 cstate->minorversion, nn);
3873 if (status) 3873 if (status)
3874 goto out; 3874 goto out;
3875 dp = delegstateid(s); 3875 dp = delegstateid(s);
3876 status = check_stateid_generation(stateid, &dp->dl_stid.sc_stateid, nfsd4_has_session(cstate)); 3876 status = check_stateid_generation(stateid, &dp->dl_stid.sc_stateid, nfsd4_has_session(cstate));
3877 if (status) 3877 if (status)
3878 goto out; 3878 goto out;
3879 3879
3880 unhash_delegation(dp); 3880 unhash_delegation(dp);
3881 out: 3881 out:
3882 nfs4_unlock_state(); 3882 nfs4_unlock_state();
3883 3883
3884 return status; 3884 return status;
3885 } 3885 }
3886 3886
3887 3887
/* True if (start + len) would overflow a u64 lock range. */
#define LOFF_OVERFLOW(start, len)      ((u64)(len) > ~(u64)(start))

/* Mask for reducing lockowner_ino_hashval() to a table index. */
#define LOCKOWNER_INO_HASH_MASK (LOCKOWNER_INO_HASH_SIZE - 1)
3891 3891
3892 static inline u64 3892 static inline u64
3893 end_offset(u64 start, u64 len) 3893 end_offset(u64 start, u64 len)
3894 { 3894 {
3895 u64 end; 3895 u64 end;
3896 3896
3897 end = start + len; 3897 end = start + len;
3898 return end >= start ? end: NFS4_MAX_UINT64; 3898 return end >= start ? end: NFS4_MAX_UINT64;
3899 } 3899 }
3900 3900
/* last octet in a range */
static inline u64
last_byte_offset(u64 start, u64 len)
{
	u64 end;

	/* A zero-length range has no last byte; callers must not pass one. */
	WARN_ON_ONCE(!len);
	end = start + len;
	/* On wraparound the range extends to the maximum offset. */
	return end > start ? end - 1: NFS4_MAX_UINT64;
}
3911 3911
/* Hash a (inode, client id, owner name) triple into the lockowner table. */
static unsigned int lockowner_ino_hashval(struct inode *inode, u32 cl_id, struct xdr_netobj *ownername)
{
	return (file_hashval(inode) + cl_id
			+ opaque_hashval(ownername->data, ownername->len))
		& LOCKOWNER_INO_HASH_MASK;
}
3918 3918
/*
 * TODO: Linux file offsets are _signed_ 64-bit quantities, which means that
 * we can't properly handle lock requests that go beyond the (2^63 - 1)-th
 * byte, because of sign extension problems.  Since NFSv4 calls for 64-bit
 * locking, this prevents us from being completely protocol-compliant.  The
 * real solution to this problem is to start using unsigned file offsets in
 * the VFS, but this is a very deep change!
 */
static inline void
nfs4_transform_lock_offset(struct file_lock *lock)
{
	/* Clamp offsets that went negative after sign conversion. */
	if (lock->fl_start < 0)
		lock->fl_start = OFFSET_MAX;
	if (lock->fl_end < 0)
		lock->fl_end = OFFSET_MAX;
}
3935 3935
/* Hack!: For now, we're defining this just so we can use a pointer to it
 * as a unique cookie to identify our (NFSv4's) posix locks. */
static const struct lock_manager_operations nfsd_posix_mng_ops  = {
};
3940 3940
/*
 * Fill a LOCK/LOCKT denied response from a conflicting file_lock.  If the
 * conflict is one of our own NFSv4 locks (identified by lm_ops pointer),
 * report the real owner and clientid; otherwise — or if the owner copy
 * fails — report an anonymous owner.
 */
static inline void
nfs4_set_lock_denied(struct file_lock *fl, struct nfsd4_lock_denied *deny)
{
	struct nfs4_lockowner *lo;

	if (fl->fl_lmops == &nfsd_posix_mng_ops) {
		lo = (struct nfs4_lockowner *) fl->fl_owner;
		deny->ld_owner.data = kmemdup(lo->lo_owner.so_owner.data,
					lo->lo_owner.so_owner.len, GFP_KERNEL);
		if (!deny->ld_owner.data)
			/* We just don't care that much */
			goto nevermind;
		deny->ld_owner.len = lo->lo_owner.so_owner.len;
		deny->ld_clientid = lo->lo_owner.so_client->cl_clientid;
	} else {
nevermind:
		deny->ld_owner.len = 0;
		deny->ld_owner.data = NULL;
		deny->ld_clientid.cl_boot = 0;
		deny->ld_clientid.cl_id = 0;
	}
	deny->ld_start = fl->fl_start;
	/* Length of NFS4_MAX_UINT64 means "to end of file". */
	deny->ld_length = NFS4_MAX_UINT64;
	if (fl->fl_end != NFS4_MAX_UINT64)
		deny->ld_length = fl->fl_end - fl->fl_start + 1;
	deny->ld_type = NFS4_READ_LT;
	if (fl->fl_type != F_RDLCK)
		deny->ld_type = NFS4_WRITE_LT;
}
3970 3970
/*
 * Match a lockowner against (owner string, clientid, inode).  The inode is
 * compared via the owner's first stateid's file; assumes the lockowner has
 * at least one stateid on its so_stateids list — TODO confirm callers
 * guarantee this.
 */
static bool same_lockowner_ino(struct nfs4_lockowner *lo, struct inode *inode, clientid_t *clid, struct xdr_netobj *owner)
{
	struct nfs4_ol_stateid *lst;

	if (!same_owner_str(&lo->lo_owner, owner, clid))
		return false;
	lst = list_first_entry(&lo->lo_owner.so_stateids,
			       struct nfs4_ol_stateid, st_perstateowner);
	return lst->st_file->fi_inode == inode;
}
3981 3981
/*
 * Look up a lockowner by (inode, clientid, owner string) in the per-net
 * lockowner_ino hash table; returns NULL when not found.
 */
static struct nfs4_lockowner *
find_lockowner_str(struct inode *inode, clientid_t *clid,
		   struct xdr_netobj *owner, struct nfsd_net *nn)
{
	unsigned int hashval = lockowner_ino_hashval(inode, clid->cl_id, owner);
	struct nfs4_lockowner *lo;

	list_for_each_entry(lo, &nn->lockowner_ino_hashtbl[hashval], lo_owner_ino_hash) {
		if (same_lockowner_ino(lo, inode, clid, owner))
			return lo;
	}
	return NULL;
}
3995 3995
/*
 * Insert a new lockowner into the three places it is tracked: the per-net
 * owner-string hash, the per-net (inode, owner) hash, and the open
 * stateid's list of lockowners.
 */
static void hash_lockowner(struct nfs4_lockowner *lo, unsigned int strhashval, struct nfs4_client *clp, struct nfs4_ol_stateid *open_stp)
{
	struct inode *inode = open_stp->st_file->fi_inode;
	unsigned int inohash = lockowner_ino_hashval(inode,
			clp->cl_clientid.cl_id, &lo->lo_owner.so_owner);
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	list_add(&lo->lo_owner.so_strhash, &nn->ownerstr_hashtbl[strhashval]);
	list_add(&lo->lo_owner_ino_hash, &nn->lockowner_ino_hashtbl[inohash]);
	list_add(&lo->lo_perstateid, &open_stp->st_lockowners);
}
4007 4007
4008 /* 4008 /*
4009 * Alloc a lock owner structure. 4009 * Alloc a lock owner structure.
4010 * Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) has 4010 * Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) has
4011 * occurred. 4011 * occurred.
4012 * 4012 *
4013 * strhashval = ownerstr_hashval 4013 * strhashval = ownerstr_hashval
4014 */ 4014 */
4015 4015
4016 static struct nfs4_lockowner * 4016 static struct nfs4_lockowner *
4017 alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp, struct nfs4_ol_stateid *open_stp, struct nfsd4_lock *lock) { 4017 alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp, struct nfs4_ol_stateid *open_stp, struct nfsd4_lock *lock) {
4018 struct nfs4_lockowner *lo; 4018 struct nfs4_lockowner *lo;
4019 4019
4020 lo = alloc_stateowner(lockowner_slab, &lock->lk_new_owner, clp); 4020 lo = alloc_stateowner(lockowner_slab, &lock->lk_new_owner, clp);
4021 if (!lo) 4021 if (!lo)
4022 return NULL; 4022 return NULL;
4023 INIT_LIST_HEAD(&lo->lo_owner.so_stateids); 4023 INIT_LIST_HEAD(&lo->lo_owner.so_stateids);
4024 lo->lo_owner.so_is_open_owner = 0; 4024 lo->lo_owner.so_is_open_owner = 0;
4025 /* It is the openowner seqid that will be incremented in encode in the 4025 /* It is the openowner seqid that will be incremented in encode in the
4026 * case of new lockowners; so increment the lock seqid manually: */ 4026 * case of new lockowners; so increment the lock seqid manually: */
4027 lo->lo_owner.so_seqid = lock->lk_new_lock_seqid + 1; 4027 lo->lo_owner.so_seqid = lock->lk_new_lock_seqid + 1;
4028 hash_lockowner(lo, strhashval, clp, open_stp); 4028 hash_lockowner(lo, strhashval, clp, open_stp);
4029 return lo; 4029 return lo;
4030 } 4030 }
4031 4031
/*
 * Allocate and initialize a lock stateid derived from an open stateid:
 * it inherits the open's deny bitmap, starts with no access bits (access
 * is acquired per-lock via get_lock_access()), and remembers its parent
 * open stateid in st_openstp.  Returns NULL on allocation failure.
 */
static struct nfs4_ol_stateid *
alloc_init_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fp, struct nfs4_ol_stateid *open_stp)
{
	struct nfs4_ol_stateid *stp;
	struct nfs4_client *clp = lo->lo_owner.so_client;

	stp = nfs4_alloc_stateid(clp);
	if (stp == NULL)
		return NULL;
	init_stid(&stp->st_stid, clp, NFS4_LOCK_STID);
	list_add(&stp->st_perfile, &fp->fi_stateids);
	list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
	stp->st_stateowner = &lo->lo_owner;
	/* The stateid holds its own reference on the file. */
	get_nfs4_file(fp);
	stp->st_file = fp;
	stp->st_access_bmap = 0;
	stp->st_deny_bmap = open_stp->st_deny_bmap;
	stp->st_openstp = open_stp;
	return stp;
}
4052 4052
4053 static int 4053 static int
4054 check_lock_length(u64 offset, u64 length) 4054 check_lock_length(u64 offset, u64 length)
4055 { 4055 {
4056 return ((length == 0) || ((length != NFS4_MAX_UINT64) && 4056 return ((length == 0) || ((length != NFS4_MAX_UINT64) &&
4057 LOFF_OVERFLOW(offset, length))); 4057 LOFF_OVERFLOW(offset, length)));
4058 } 4058 }
4059 4059
4060 static void get_lock_access(struct nfs4_ol_stateid *lock_stp, u32 access) 4060 static void get_lock_access(struct nfs4_ol_stateid *lock_stp, u32 access)
4061 { 4061 {
4062 struct nfs4_file *fp = lock_stp->st_file; 4062 struct nfs4_file *fp = lock_stp->st_file;
4063 int oflag = nfs4_access_to_omode(access); 4063 int oflag = nfs4_access_to_omode(access);
4064 4064
4065 if (test_access(access, lock_stp)) 4065 if (test_access(access, lock_stp))
4066 return; 4066 return;
4067 nfs4_file_get_access(fp, oflag); 4067 nfs4_file_get_access(fp, oflag);
4068 set_access(access, lock_stp); 4068 set_access(access, lock_stp);
4069 } 4069 }
4070 4070
/*
 * Find the lockowner matching @lock's owner for the client that owns the
 * open stateid @ost, or create a new lockowner plus lock stateid.
 *
 * On success *lst points at the lock stateid and *new is set to true only
 * when a fresh lockowner/stateid pair was created (caller uses this to
 * release them again on a later failure).
 */
static __be32 lookup_or_create_lock_state(struct nfsd4_compound_state *cstate, struct nfs4_ol_stateid *ost, struct nfsd4_lock *lock, struct nfs4_ol_stateid **lst, bool *new)
{
	struct nfs4_file *fi = ost->st_file;
	struct nfs4_openowner *oo = openowner(ost->st_stateowner);
	struct nfs4_client *cl = oo->oo_owner.so_client;
	struct nfs4_lockowner *lo;
	unsigned int strhashval;
	struct nfsd_net *nn = net_generic(cl->net, nfsd_net_id);

	lo = find_lockowner_str(fi->fi_inode, &cl->cl_clientid,
				&lock->v.new.owner, nn);
	if (lo) {
		/* An existing lockowner with lk_is_new is only legal in
		 * NFSv4.1+, where the client may retransmit; v4.0 clients
		 * must not reuse an owner with a "new" lock request. */
		if (!cstate->minorversion)
			return nfserr_bad_seqid;
		/* XXX: a lockowner always has exactly one stateid: */
		*lst = list_first_entry(&lo->lo_owner.so_stateids,
				struct nfs4_ol_stateid, st_perstateowner);
		return nfs_ok;
	}
	strhashval = ownerstr_hashval(cl->cl_clientid.cl_id,
			&lock->v.new.owner);
	lo = alloc_init_lock_stateowner(strhashval, cl, ost, lock);
	if (lo == NULL)
		return nfserr_jukebox;
	*lst = alloc_init_lock_stateid(lo, fi, ost);
	if (*lst == NULL) {
		/* Undo the owner allocation so nothing half-built is left
		 * hashed. */
		release_lockowner(lo);
		return nfserr_jukebox;
	}
	*new = true;
	return nfs_ok;
}
4103 4103
4104 /* 4104 /*
4105 * LOCK operation 4105 * LOCK operation
4106 */ 4106 */
/*
 * Handle the NFSv4 LOCK operation.
 *
 * Validates the lock range and filehandle, resolves (or creates) the lock
 * stateid — via the open stateid for a "new" lockowner, or via the existing
 * lock stateid otherwise — checks open mode and grace-period rules, then
 * asks the VFS to set the lock.  VFS results are mapped to NFS errors
 * (EAGAIN -> nfserr_denied with conflict info, EDEADLK -> nfserr_deadlock).
 *
 * Returns 0 on success with the updated stateid copied into the reply.
 */
__be32
nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
	   struct nfsd4_lock *lock)
{
	struct nfs4_openowner *open_sop = NULL;
	struct nfs4_lockowner *lock_sop = NULL;
	struct nfs4_ol_stateid *lock_stp;
	struct file *filp = NULL;
	struct file_lock *file_lock = NULL;
	struct file_lock *conflock = NULL;
	__be32 status = 0;
	bool new_state = false;
	int lkflg;
	int err;
	struct net *net = SVC_NET(rqstp);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
		(long long) lock->lk_offset,
		(long long) lock->lk_length);

	if (check_lock_length(lock->lk_offset, lock->lk_length))
		return nfserr_inval;

	if ((status = fh_verify(rqstp, &cstate->current_fh,
				S_IFREG, NFSD_MAY_LOCK))) {
		dprintk("NFSD: nfsd4_lock: permission denied!\n");
		return status;
	}

	nfs4_lock_state();

	if (lock->lk_is_new) {
		struct nfs4_ol_stateid *open_stp = NULL;

		if (nfsd4_has_session(cstate))
			/* See rfc 5661 18.10.3: given clientid is ignored: */
			memcpy(&lock->v.new.clientid,
				&cstate->session->se_client->cl_clientid,
				sizeof(clientid_t));

		status = nfserr_stale_clientid;
		if (STALE_CLIENTID(&lock->lk_new_clientid, nn))
			goto out;

		/* validate and update open stateid and open seqid */
		status = nfs4_preprocess_confirmed_seqid_op(cstate,
					lock->lk_new_open_seqid,
					&lock->lk_new_open_stateid,
					&open_stp, nn);
		if (status)
			goto out;
		open_sop = openowner(open_stp->st_stateowner);
		status = nfserr_bad_stateid;
		/* The clientid in the request must match the client that
		 * owns the open stateid it names. */
		if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid,
						&lock->v.new.clientid))
			goto out;
		status = lookup_or_create_lock_state(cstate, open_stp, lock,
							&lock_stp, &new_state);
	} else
		status = nfs4_preprocess_seqid_op(cstate,
				       lock->lk_old_lock_seqid,
				       &lock->lk_old_lock_stateid,
				       NFS4_LOCK_STID, &lock_stp, nn);
	if (status)
		goto out;
	lock_sop = lockowner(lock_stp->st_stateowner);

	/* The lock type must be compatible with the open's access mode. */
	lkflg = setlkflg(lock->lk_type);
	status = nfs4_check_openmode(lock_stp, lkflg);
	if (status)
		goto out;

	/* During grace only reclaims are allowed; after grace, reclaims
	 * are refused. */
	status = nfserr_grace;
	if (locks_in_grace(net) && !lock->lk_reclaim)
		goto out;
	status = nfserr_no_grace;
	if (!locks_in_grace(net) && lock->lk_reclaim)
		goto out;

	file_lock = locks_alloc_lock();
	if (!file_lock) {
		dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
		status = nfserr_jukebox;
		goto out;
	}

	locks_init_lock(file_lock);
	switch (lock->lk_type) {
		case NFS4_READ_LT:
		case NFS4_READW_LT:
			filp = find_readable_file(lock_stp->st_file);
			if (filp)
				get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ);
			file_lock->fl_type = F_RDLCK;
			break;
		case NFS4_WRITE_LT:
		case NFS4_WRITEW_LT:
			filp = find_writeable_file(lock_stp->st_file);
			if (filp)
				get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE);
			file_lock->fl_type = F_WRLCK;
			break;
		default:
			status = nfserr_inval;
		goto out;
	}
	if (!filp) {
		/* No struct file open with the needed access mode. */
		status = nfserr_openmode;
		goto out;
	}
	file_lock->fl_owner = (fl_owner_t)lock_sop;
	file_lock->fl_pid = current->tgid;
	file_lock->fl_file = filp;
	file_lock->fl_flags = FL_POSIX;
	file_lock->fl_lmops = &nfsd_posix_mng_ops;
	file_lock->fl_start = lock->lk_offset;
	file_lock->fl_end = last_byte_offset(lock->lk_offset, lock->lk_length);
	nfs4_transform_lock_offset(file_lock);

	/* conflock receives the conflicting lock's details on EAGAIN. */
	conflock = locks_alloc_lock();
	if (!conflock) {
		dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
		status = nfserr_jukebox;
		goto out;
	}

	err = vfs_lock_file(filp, F_SETLK, file_lock, conflock);
	switch (-err) {
	case 0: /* success! */
		update_stateid(&lock_stp->st_stid.sc_stateid);
		memcpy(&lock->lk_resp_stateid, &lock_stp->st_stid.sc_stateid,
				sizeof(stateid_t));
		status = 0;
		break;
	case (EAGAIN):		/* conflock holds conflicting lock */
		status = nfserr_denied;
		dprintk("NFSD: nfsd4_lock: conflicting lock found!\n");
		nfs4_set_lock_denied(conflock, &lock->lk_denied);
		break;
	case (EDEADLK):
		status = nfserr_deadlock;
		break;
	default:
		dprintk("NFSD: nfsd4_lock: vfs_lock_file() failed! status %d\n",err);
		status = nfserrno(err);
		break;
	}
out:
	/* Tear down a lockowner we created above if the op failed. */
	if (status && new_state)
		release_lockowner(lock_sop);
	if (!cstate->replay_owner)
		nfs4_unlock_state();
	if (file_lock)
		locks_free_lock(file_lock);
	if (conflock)
		locks_free_lock(conflock);
	return status;
}
4266 4266
4267 /* 4267 /*
4268 * The NFSv4 spec allows a client to do a LOCKT without holding an OPEN, 4268 * The NFSv4 spec allows a client to do a LOCKT without holding an OPEN,
4269 * so we do a temporary open here just to get an open file to pass to 4269 * so we do a temporary open here just to get an open file to pass to
4270 * vfs_test_lock. (Arguably perhaps test_lock should be done with an 4270 * vfs_test_lock. (Arguably perhaps test_lock should be done with an
4271 * inode operation.) 4271 * inode operation.)
4272 */ 4272 */
4273 static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock) 4273 static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock)
4274 { 4274 {
4275 struct file *file; 4275 struct file *file;
4276 __be32 err = nfsd_open(rqstp, fhp, S_IFREG, NFSD_MAY_READ, &file); 4276 __be32 err = nfsd_open(rqstp, fhp, S_IFREG, NFSD_MAY_READ, &file);
4277 if (!err) { 4277 if (!err) {
4278 err = nfserrno(vfs_test_lock(file, lock)); 4278 err = nfserrno(vfs_test_lock(file, lock));
4279 nfsd_close(file); 4279 nfsd_close(file);
4280 } 4280 }
4281 return err; 4281 return err;
4282 } 4282 }
4283 4283
4284 /* 4284 /*
4285 * LOCKT operation 4285 * LOCKT operation
4286 */ 4286 */
/*
 * Handle the NFSv4 LOCKT (test lock) operation.
 *
 * Builds a file_lock describing the requested range/type and asks the VFS
 * whether it would conflict (via nfsd_test_lock).  No lock is acquired;
 * on conflict, returns nfserr_denied with the conflicting lock's details
 * filled into lt_denied.  Refused entirely while in the grace period.
 */
__be32
nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
	    struct nfsd4_lockt *lockt)
{
	struct inode *inode;
	struct file_lock *file_lock = NULL;
	struct nfs4_lockowner *lo;
	__be32 status;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	if (locks_in_grace(SVC_NET(rqstp)))
		return nfserr_grace;

	if (check_lock_length(lockt->lt_offset, lockt->lt_length))
		return nfserr_inval;

	nfs4_lock_state();

	/* v4.0 carries a clientid in the request; sessions identify the
	 * client implicitly, so skip the lookup for v4.1+. */
	if (!nfsd4_has_session(cstate)) {
		status = lookup_clientid(&lockt->lt_clientid, false, nn, NULL);
		if (status)
			goto out;
	}

	if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
		goto out;

	inode = cstate->current_fh.fh_dentry->d_inode;
	file_lock = locks_alloc_lock();
	if (!file_lock) {
		dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
		status = nfserr_jukebox;
		goto out;
	}
	locks_init_lock(file_lock);
	switch (lockt->lt_type) {
		case NFS4_READ_LT:
		case NFS4_READW_LT:
			file_lock->fl_type = F_RDLCK;
		break;
		case NFS4_WRITE_LT:
		case NFS4_WRITEW_LT:
			file_lock->fl_type = F_WRLCK;
		break;
		default:
			dprintk("NFSD: nfs4_lockt: bad lock type!\n");
			status = nfserr_inval;
		goto out;
	}

	/* A known lockowner won't conflict with its own locks; leave
	 * fl_owner unset if none exists yet. */
	lo = find_lockowner_str(inode, &lockt->lt_clientid, &lockt->lt_owner, nn);
	if (lo)
		file_lock->fl_owner = (fl_owner_t)lo;
	file_lock->fl_pid = current->tgid;
	file_lock->fl_flags = FL_POSIX;

	file_lock->fl_start = lockt->lt_offset;
	file_lock->fl_end = last_byte_offset(lockt->lt_offset, lockt->lt_length);

	nfs4_transform_lock_offset(file_lock);

	status = nfsd_test_lock(rqstp, &cstate->current_fh, file_lock);
	if (status)
		goto out;

	/* vfs_test_lock reports F_UNLCK when no conflicting lock exists. */
	if (file_lock->fl_type != F_UNLCK) {
		status = nfserr_denied;
		nfs4_set_lock_denied(file_lock, &lockt->lt_denied);
	}
out:
	nfs4_unlock_state();
	if (file_lock)
		locks_free_lock(file_lock);
	return status;
}
4362 4362
/*
 * Handle the NFSv4 LOCKU (unlock) operation.
 *
 * Validates the range and lock stateid, then issues an F_UNLCK request
 * over the given range through the VFS.  On success the stateid is bumped
 * and copied into the reply.
 */
__be32
nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
	    struct nfsd4_locku *locku)
{
	struct nfs4_ol_stateid *stp;
	struct file *filp = NULL;
	struct file_lock *file_lock = NULL;
	__be32 status;
	int err;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	dprintk("NFSD: nfsd4_locku: start=%Ld length=%Ld\n",
		(long long) locku->lu_offset,
		(long long) locku->lu_length);

	if (check_lock_length(locku->lu_offset, locku->lu_length))
		 return nfserr_inval;

	nfs4_lock_state();

	status = nfs4_preprocess_seqid_op(cstate, locku->lu_seqid,
					&locku->lu_stateid, NFS4_LOCK_STID,
					&stp, nn);
	if (status)
		goto out;
	/* Any open mode will do for unlocking. */
	filp = find_any_file(stp->st_file);
	if (!filp) {
		status = nfserr_lock_range;
		goto out;
	}
	file_lock = locks_alloc_lock();
	if (!file_lock) {
		dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
		status = nfserr_jukebox;
		goto out;
	}
	locks_init_lock(file_lock);
	file_lock->fl_type = F_UNLCK;
	file_lock->fl_owner = (fl_owner_t)lockowner(stp->st_stateowner);
	file_lock->fl_pid = current->tgid;
	file_lock->fl_file = filp;
	file_lock->fl_flags = FL_POSIX;
	file_lock->fl_lmops = &nfsd_posix_mng_ops;
	file_lock->fl_start = locku->lu_offset;

	file_lock->fl_end = last_byte_offset(locku->lu_offset,
						locku->lu_length);
	nfs4_transform_lock_offset(file_lock);

	/*
	 * Try to unlock the file in the VFS.
	 */
	err = vfs_lock_file(filp, F_SETLK, file_lock, NULL);
	if (err) {
		dprintk("NFSD: nfs4_locku: vfs_lock_file failed!\n");
		goto out_nfserr;
	}
	/*
	 * OK, unlock succeeded; the only thing left to do is update the stateid.
	 */
	update_stateid(&stp->st_stid.sc_stateid);
	memcpy(&locku->lu_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));

out:
	if (!cstate->replay_owner)
		nfs4_unlock_state();
	if (file_lock)
		locks_free_lock(file_lock);
	return status;

out_nfserr:
	status = nfserrno(err);
	goto out;
}
4437 4437
4438 /* 4438 /*
4439 * returns 4439 * returns
4440 * 1: locks held by lockowner 4440 * 1: locks held by lockowner
4441 * 0: no locks held by lockowner 4441 * 0: no locks held by lockowner
4442 */ 4442 */
4443 static int 4443 static int
4444 check_for_locks(struct nfs4_file *filp, struct nfs4_lockowner *lowner) 4444 check_for_locks(struct nfs4_file *filp, struct nfs4_lockowner *lowner)
4445 { 4445 {
4446 struct file_lock **flpp; 4446 struct file_lock **flpp;
4447 struct inode *inode = filp->fi_inode; 4447 struct inode *inode = filp->fi_inode;
4448 int status = 0; 4448 int status = 0;
4449 4449
4450 lock_flocks(); 4450 lock_flocks();
4451 for (flpp = &inode->i_flock; *flpp != NULL; flpp = &(*flpp)->fl_next) { 4451 for (flpp = &inode->i_flock; *flpp != NULL; flpp = &(*flpp)->fl_next) {
4452 if ((*flpp)->fl_owner == (fl_owner_t)lowner) { 4452 if ((*flpp)->fl_owner == (fl_owner_t)lowner) {
4453 status = 1; 4453 status = 1;
4454 goto out; 4454 goto out;
4455 } 4455 }
4456 } 4456 }
4457 out: 4457 out:
4458 unlock_flocks(); 4458 unlock_flocks();
4459 return status; 4459 return status;
4460 } 4460 }
4461 4461
/*
 * Handle the NFSv4 RELEASE_LOCKOWNER operation.
 *
 * Finds every lockowner of the given client matching the owner string and
 * releases its state — but only if none of those lockowners still holds
 * any file lock; otherwise returns nfserr_locks_held and releases nothing.
 */
__be32
nfsd4_release_lockowner(struct svc_rqst *rqstp,
			struct nfsd4_compound_state *cstate,
			struct nfsd4_release_lockowner *rlockowner)
{
	clientid_t *clid = &rlockowner->rl_clientid;
	struct nfs4_stateowner *sop;
	struct nfs4_lockowner *lo;
	struct nfs4_ol_stateid *stp;
	struct xdr_netobj *owner = &rlockowner->rl_owner;
	struct list_head matches;
	unsigned int hashval = ownerstr_hashval(clid->cl_id, owner);
	__be32 status;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n",
		clid->cl_boot, clid->cl_id);

	nfs4_lock_state();

	status = lookup_clientid(clid, cstate->minorversion, nn, NULL);
	if (status)
		goto out;

	status = nfserr_locks_held;
	INIT_LIST_HEAD(&matches);

	/* Phase 1: collect matching lockowners, bailing out if any of
	 * them still has locks on a file. */
	list_for_each_entry(sop, &nn->ownerstr_hashtbl[hashval], so_strhash) {
		if (sop->so_is_open_owner)
			continue;
		if (!same_owner_str(sop, owner, clid))
			continue;
		list_for_each_entry(stp, &sop->so_stateids,
				st_perstateowner) {
			lo = lockowner(sop);
			if (check_for_locks(stp->st_file, lo))
				goto out;
			list_add(&lo->lo_list, &matches);
		}
	}
	/* Clients probably won't expect us to return with some (but not all)
	 * of the lockowner state released; so don't release any until all
	 * have been checked. */
	status = nfs_ok;
	/* Phase 2: all clear — release every collected lockowner. */
	while (!list_empty(&matches)) {
		lo = list_entry(matches.next, struct nfs4_lockowner,
								lo_list);
		/* unhash_stateowner deletes so_perclient only
		 * for openowners. */
		list_del(&lo->lo_list);
		release_lockowner(lo);
	}
out:
	nfs4_unlock_state();
	return status;
}
4518 4518
4519 static inline struct nfs4_client_reclaim * 4519 static inline struct nfs4_client_reclaim *
4520 alloc_reclaim(void) 4520 alloc_reclaim(void)
4521 { 4521 {
4522 return kmalloc(sizeof(struct nfs4_client_reclaim), GFP_KERNEL); 4522 return kmalloc(sizeof(struct nfs4_client_reclaim), GFP_KERNEL);
4523 } 4523 }
4524 4524
4525 bool 4525 bool
4526 nfs4_has_reclaimed_state(const char *name, struct nfsd_net *nn) 4526 nfs4_has_reclaimed_state(const char *name, struct nfsd_net *nn)
4527 { 4527 {
4528 struct nfs4_client_reclaim *crp; 4528 struct nfs4_client_reclaim *crp;
4529 4529
4530 crp = nfsd4_find_reclaim_client(name, nn); 4530 crp = nfsd4_find_reclaim_client(name, nn);
4531 return (crp && crp->cr_clp); 4531 return (crp && crp->cr_clp);
4532 } 4532 }
4533 4533
4534 /* 4534 /*
4535 * failure => all reset bets are off, nfserr_no_grace... 4535 * failure => all reset bets are off, nfserr_no_grace...
4536 */ 4536 */
4537 struct nfs4_client_reclaim * 4537 struct nfs4_client_reclaim *
4538 nfs4_client_to_reclaim(const char *name, struct nfsd_net *nn) 4538 nfs4_client_to_reclaim(const char *name, struct nfsd_net *nn)
4539 { 4539 {
4540 unsigned int strhashval; 4540 unsigned int strhashval;
4541 struct nfs4_client_reclaim *crp; 4541 struct nfs4_client_reclaim *crp;
4542 4542
4543 dprintk("NFSD nfs4_client_to_reclaim NAME: %.*s\n", HEXDIR_LEN, name); 4543 dprintk("NFSD nfs4_client_to_reclaim NAME: %.*s\n", HEXDIR_LEN, name);
4544 crp = alloc_reclaim(); 4544 crp = alloc_reclaim();
4545 if (crp) { 4545 if (crp) {
4546 strhashval = clientstr_hashval(name); 4546 strhashval = clientstr_hashval(name);
4547 INIT_LIST_HEAD(&crp->cr_strhash); 4547 INIT_LIST_HEAD(&crp->cr_strhash);
4548 list_add(&crp->cr_strhash, &nn->reclaim_str_hashtbl[strhashval]); 4548 list_add(&crp->cr_strhash, &nn->reclaim_str_hashtbl[strhashval]);
4549 memcpy(crp->cr_recdir, name, HEXDIR_LEN); 4549 memcpy(crp->cr_recdir, name, HEXDIR_LEN);
4550 crp->cr_clp = NULL; 4550 crp->cr_clp = NULL;
4551 nn->reclaim_str_hashtbl_size++; 4551 nn->reclaim_str_hashtbl_size++;
4552 } 4552 }
4553 return crp; 4553 return crp;
4554 } 4554 }
4555 4555
4556 void 4556 void
4557 nfs4_remove_reclaim_record(struct nfs4_client_reclaim *crp, struct nfsd_net *nn) 4557 nfs4_remove_reclaim_record(struct nfs4_client_reclaim *crp, struct nfsd_net *nn)
4558 { 4558 {
4559 list_del(&crp->cr_strhash); 4559 list_del(&crp->cr_strhash);
4560 kfree(crp); 4560 kfree(crp);
4561 nn->reclaim_str_hashtbl_size--; 4561 nn->reclaim_str_hashtbl_size--;
4562 } 4562 }
4563 4563
4564 void 4564 void
4565 nfs4_release_reclaim(struct nfsd_net *nn) 4565 nfs4_release_reclaim(struct nfsd_net *nn)
4566 { 4566 {
4567 struct nfs4_client_reclaim *crp = NULL; 4567 struct nfs4_client_reclaim *crp = NULL;
4568 int i; 4568 int i;
4569 4569
4570 for (i = 0; i < CLIENT_HASH_SIZE; i++) { 4570 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
4571 while (!list_empty(&nn->reclaim_str_hashtbl[i])) { 4571 while (!list_empty(&nn->reclaim_str_hashtbl[i])) {
4572 crp = list_entry(nn->reclaim_str_hashtbl[i].next, 4572 crp = list_entry(nn->reclaim_str_hashtbl[i].next,
4573 struct nfs4_client_reclaim, cr_strhash); 4573 struct nfs4_client_reclaim, cr_strhash);
4574 nfs4_remove_reclaim_record(crp, nn); 4574 nfs4_remove_reclaim_record(crp, nn);
4575 } 4575 }
4576 } 4576 }
4577 WARN_ON_ONCE(nn->reclaim_str_hashtbl_size); 4577 WARN_ON_ONCE(nn->reclaim_str_hashtbl_size);
4578 } 4578 }
4579 4579
/*
 * called from OPEN, CLAIM_PREVIOUS with a new clientid.
 *
 * Look up the reclaim record matching @recdir in the per-net hash
 * table; returns NULL if the client has no record (and so may not
 * reclaim state).
 */
struct nfs4_client_reclaim *
nfsd4_find_reclaim_client(const char *recdir, struct nfsd_net *nn)
{
	unsigned int strhashval;
	struct nfs4_client_reclaim *crp = NULL;

	dprintk("NFSD: nfs4_find_reclaim_client for recdir %s\n", recdir);

	strhashval = clientstr_hashval(recdir);
	list_for_each_entry(crp, &nn->reclaim_str_hashtbl[strhashval], cr_strhash) {
		if (same_name(crp->cr_recdir, recdir)) {
			return crp;
		}
	}
	return NULL;
}
4598 4598
/*
 * Called from OPEN. Look for clientid in reclaim list.
 *
 * Returns nfs_ok if @clid names a confirmed client that the client
 * tracking subsystem knows about, otherwise nfserr_reclaim_bad.
 */
__be32
nfs4_check_open_reclaim(clientid_t *clid, bool sessions, struct nfsd_net *nn)
{
	struct nfs4_client *clp;

	/* find clientid in conf_id_hashtbl */
	clp = find_confirmed_client(clid, sessions, nn);
	if (clp == NULL)
		return nfserr_reclaim_bad;

	/* Defer to the client-record tracker (e.g. on-disk recovery dir). */
	return nfsd4_client_record_check(clp) ? nfserr_reclaim_bad : nfs_ok;
}
4614 4614
4615 #ifdef CONFIG_NFSD_FAULT_INJECTION 4615 #ifdef CONFIG_NFSD_FAULT_INJECTION
4616 4616
/*
 * Fault-injection op: expire @clp immediately.  Always reports that
 * one client was handled; @max is unused here (a single client is
 * passed per call by the iterator).
 */
u64 nfsd_forget_client(struct nfs4_client *clp, u64 max)
{
	expire_client(clp);
	return 1;
}
4622 4622
4623 u64 nfsd_print_client(struct nfs4_client *clp, u64 num) 4623 u64 nfsd_print_client(struct nfs4_client *clp, u64 num)
4624 { 4624 {
4625 char buf[INET6_ADDRSTRLEN]; 4625 char buf[INET6_ADDRSTRLEN];
4626 rpc_ntop((struct sockaddr *)&clp->cl_addr, buf, 129); 4626 rpc_ntop((struct sockaddr *)&clp->cl_addr, buf, sizeof(buf));
4627 printk(KERN_INFO "NFS Client: %s\n", buf); 4627 printk(KERN_INFO "NFS Client: %s\n", buf);
4628 return 1; 4628 return 1;
4629 } 4629 }
4630 4630
4631 static void nfsd_print_count(struct nfs4_client *clp, unsigned int count, 4631 static void nfsd_print_count(struct nfs4_client *clp, unsigned int count,
4632 const char *type) 4632 const char *type)
4633 { 4633 {
4634 char buf[INET6_ADDRSTRLEN]; 4634 char buf[INET6_ADDRSTRLEN];
4635 rpc_ntop((struct sockaddr *)&clp->cl_addr, buf, 129); 4635 rpc_ntop((struct sockaddr *)&clp->cl_addr, buf, sizeof(buf));
4636 printk(KERN_INFO "NFS Client: %s has %u %s\n", buf, count, type); 4636 printk(KERN_INFO "NFS Client: %s has %u %s\n", buf, count, type);
4637 } 4637 }
4638 4638
/*
 * Walk every lockowner reachable from @clp (openowner -> open stateid
 * -> lockowner), applying @func (may be NULL for a pure count) to at
 * most @max of them.  The two inner loops use the _safe variant since
 * @func (e.g. release_lockowner) may unlink the current entry.
 * Returns the number visited.  max == 0 means "no limit" only in the
 * sense that ++count never equals 0 until wraparound.
 */
static u64 nfsd_foreach_client_lock(struct nfs4_client *clp, u64 max, void (*func)(struct nfs4_lockowner *))
{
	struct nfs4_openowner *oop;
	struct nfs4_lockowner *lop, *lo_next;
	struct nfs4_ol_stateid *stp, *st_next;
	u64 count = 0;

	list_for_each_entry(oop, &clp->cl_openowners, oo_perclient) {
		list_for_each_entry_safe(stp, st_next, &oop->oo_owner.so_stateids, st_perstateowner) {
			list_for_each_entry_safe(lop, lo_next, &stp->st_lockowners, lo_perstateid) {
				if (func)
					func(lop);
				if (++count == max)
					return count;
			}
		}
	}

	return count;
}
4659 4659
/* Fault-injection op: release up to @max of @clp's lockowners. */
u64 nfsd_forget_client_locks(struct nfs4_client *clp, u64 max)
{
	return nfsd_foreach_client_lock(clp, max, release_lockowner);
}

/* Fault-injection op: count (func == NULL) and log @clp's locked files. */
u64 nfsd_print_client_locks(struct nfs4_client *clp, u64 max)
{
	u64 count = nfsd_foreach_client_lock(clp, max, NULL);
	nfsd_print_count(clp, count, "locked files");
	return count;
}
4671 4671
/*
 * Apply @func (may be NULL to just count) to at most @max of @clp's
 * openowners.  _safe iteration because @func may remove the entry.
 * Returns the number visited.
 */
static u64 nfsd_foreach_client_open(struct nfs4_client *clp, u64 max, void (*func)(struct nfs4_openowner *))
{
	struct nfs4_openowner *oop, *next;
	u64 count = 0;

	list_for_each_entry_safe(oop, next, &clp->cl_openowners, oo_perclient) {
		if (func)
			func(oop);
		if (++count == max)
			break;
	}

	return count;
}
4686 4686
/* Fault-injection op: release up to @max of @clp's openowners. */
u64 nfsd_forget_client_openowners(struct nfs4_client *clp, u64 max)
{
	return nfsd_foreach_client_open(clp, max, release_openowner);
}

/* Fault-injection op: count and log @clp's open files. */
u64 nfsd_print_client_openowners(struct nfs4_client *clp, u64 max)
{
	u64 count = nfsd_foreach_client_open(clp, max, NULL);
	nfsd_print_count(clp, count, "open files");
	return count;
}
4698 4698
/*
 * Count up to @max of @clp's delegations; if @victims is non-NULL,
 * also move each one onto that list for the caller to dispose of.
 * Caller must hold recall_lock (protects dl_recall_lru movement).
 */
static u64 nfsd_find_all_delegations(struct nfs4_client *clp, u64 max,
				 struct list_head *victims)
{
	struct nfs4_delegation *dp, *next;
	u64 count = 0;

	list_for_each_entry_safe(dp, next, &clp->cl_delegations, dl_perclnt) {
		if (victims)
			list_move(&dp->dl_recall_lru, victims);
		if (++count == max)
			break;
	}
	return count;
}
4713 4713
/*
 * Fault-injection op: unhash up to @max of @clp's delegations.
 * Two-phase: collect victims under recall_lock, then unhash them
 * after dropping it (unhash_delegation must not run under the lock).
 */
u64 nfsd_forget_client_delegations(struct nfs4_client *clp, u64 max)
{
	struct nfs4_delegation *dp, *next;
	LIST_HEAD(victims);
	u64 count;

	spin_lock(&recall_lock);
	count = nfsd_find_all_delegations(clp, max, &victims);
	spin_unlock(&recall_lock);

	list_for_each_entry_safe(dp, next, &victims, dl_recall_lru)
		unhash_delegation(dp);

	return count;
}
4729 4729
/*
 * Fault-injection op: issue a delegation recall (CB_RECALL) for up to
 * @max of @clp's delegations.  Unlike the "forget" variant, the break
 * is done while still holding recall_lock.
 */
u64 nfsd_recall_client_delegations(struct nfs4_client *clp, u64 max)
{
	struct nfs4_delegation *dp, *next;
	LIST_HEAD(victims);
	u64 count;

	spin_lock(&recall_lock);
	count = nfsd_find_all_delegations(clp, max, &victims);
	list_for_each_entry_safe(dp, next, &victims, dl_recall_lru)
		nfsd_break_one_deleg(dp);
	spin_unlock(&recall_lock);

	return count;
}
4744 4744
4745 u64 nfsd_print_client_delegations(struct nfs4_client *clp, u64 max) 4745 u64 nfsd_print_client_delegations(struct nfs4_client *clp, u64 max)
4746 { 4746 {
4747 u64 count = 0; 4747 u64 count = 0;
4748 4748
4749 spin_lock(&recall_lock); 4749 spin_lock(&recall_lock);
4750 count = nfsd_find_all_delegations(clp, max, NULL); 4750 count = nfsd_find_all_delegations(clp, max, NULL);
4751 spin_unlock(&recall_lock); 4751 spin_unlock(&recall_lock);
4752 4752
4753 nfsd_print_count(clp, count, "delegations"); 4753 nfsd_print_count(clp, count, "delegations");
4754 return count; 4754 return count;
4755 } 4755 }
4756 4756
/*
 * Apply @func to clients on the current net namespace's LRU until a
 * total of @max objects have been handled (max == 0 means unlimited).
 * @func receives the remaining budget and returns how many objects it
 * consumed.  Returns the grand total.
 */
u64 nfsd_for_n_state(u64 max, u64 (*func)(struct nfs4_client *, u64))
{
	struct nfs4_client *clp, *next;
	u64 count = 0;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, nfsd_net_id);

	/* nfsd may not be running in this namespace yet. */
	if (!nfsd_netns_ready(nn))
		return 0;

	list_for_each_entry_safe(clp, next, &nn->client_lru, cl_lru) {
		count += func(clp, max - count);
		if ((max != 0) && (count >= max))
			break;
	}

	return count;
}
4774 4774
/*
 * Find the client on the current net namespace's LRU whose address
 * matches the first @addr_size bytes of @addr; NULL if none (or nfsd
 * isn't up in this namespace).
 */
struct nfs4_client *nfsd_find_client(struct sockaddr_storage *addr, size_t addr_size)
{
	struct nfs4_client *clp;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, nfsd_net_id);

	if (!nfsd_netns_ready(nn))
		return NULL;

	list_for_each_entry(clp, &nn->client_lru, cl_lru) {
		/* Caller supplies addr_size matching the address family. */
		if (memcmp(&clp->cl_addr, addr, addr_size) == 0)
			return clp;
	}
	return NULL;
}
4789 4789
4790 #endif /* CONFIG_NFSD_FAULT_INJECTION */ 4790 #endif /* CONFIG_NFSD_FAULT_INJECTION */
4791 4791
4792 /* initialization to perform at module load time: */ 4792 /* initialization to perform at module load time: */
4793 4793
4794 void 4794 void
4795 nfs4_state_init(void) 4795 nfs4_state_init(void)
4796 { 4796 {
4797 int i; 4797 int i;
4798 4798
4799 for (i = 0; i < FILE_HASH_SIZE; i++) { 4799 for (i = 0; i < FILE_HASH_SIZE; i++) {
4800 INIT_LIST_HEAD(&file_hashtbl[i]); 4800 INIT_LIST_HEAD(&file_hashtbl[i]);
4801 } 4801 }
4802 INIT_LIST_HEAD(&del_recall_lru); 4802 INIT_LIST_HEAD(&del_recall_lru);
4803 } 4803 }
4804 4804
/*
 * Since the lifetime of a delegation isn't limited to that of an open, a
 * client may quite reasonably hang on to a delegation as long as it has
 * the inode cached. This becomes an obvious problem the first time a
 * client's inode cache approaches the size of the server's total memory.
 *
 * For now we avoid this problem by imposing a hard limit on the number
 * of delegations, which varies according to the server's memory size.
 */
static void
set_max_delegations(void)
{
	/*
	 * Allow at most 4 delegations per megabyte of RAM.  Quick
	 * estimates suggest that in the worst case (where every delegation
	 * is for a different inode), a delegation could take about 1.5K,
	 * giving a worst case usage of about 6% of memory.
	 */
	/* pages >> (20 - 2 - PAGE_SHIFT) == bytes/MB * 4 */
	max_delegations = nr_free_buffer_pages() >> (20 - 2 - PAGE_SHIFT);
}
4825 4825
/*
 * Allocate and initialize the per-net-namespace nfsd4 state: five hash
 * tables, two client name rbtrees, the client/close LRUs and the
 * laundromat work item.  On failure the goto chain unwinds only the
 * allocations made so far.  Takes a reference on @net (dropped in
 * nfs4_state_destroy_net).  Returns 0 or -ENOMEM.
 */
static int nfs4_state_create_net(struct net *net)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	int i;

	nn->conf_id_hashtbl = kmalloc(sizeof(struct list_head) *
			CLIENT_HASH_SIZE, GFP_KERNEL);
	if (!nn->conf_id_hashtbl)
		goto err;
	nn->unconf_id_hashtbl = kmalloc(sizeof(struct list_head) *
			CLIENT_HASH_SIZE, GFP_KERNEL);
	if (!nn->unconf_id_hashtbl)
		goto err_unconf_id;
	nn->ownerstr_hashtbl = kmalloc(sizeof(struct list_head) *
			OWNER_HASH_SIZE, GFP_KERNEL);
	if (!nn->ownerstr_hashtbl)
		goto err_ownerstr;
	nn->lockowner_ino_hashtbl = kmalloc(sizeof(struct list_head) *
			LOCKOWNER_INO_HASH_SIZE, GFP_KERNEL);
	if (!nn->lockowner_ino_hashtbl)
		goto err_lockowner_ino;
	nn->sessionid_hashtbl = kmalloc(sizeof(struct list_head) *
			SESSION_HASH_SIZE, GFP_KERNEL);
	if (!nn->sessionid_hashtbl)
		goto err_sessionid;

	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
		INIT_LIST_HEAD(&nn->conf_id_hashtbl[i]);
		INIT_LIST_HEAD(&nn->unconf_id_hashtbl[i]);
	}
	for (i = 0; i < OWNER_HASH_SIZE; i++)
		INIT_LIST_HEAD(&nn->ownerstr_hashtbl[i]);
	for (i = 0; i < LOCKOWNER_INO_HASH_SIZE; i++)
		INIT_LIST_HEAD(&nn->lockowner_ino_hashtbl[i]);
	for (i = 0; i < SESSION_HASH_SIZE; i++)
		INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]);
	nn->conf_name_tree = RB_ROOT;
	nn->unconf_name_tree = RB_ROOT;
	INIT_LIST_HEAD(&nn->client_lru);
	INIT_LIST_HEAD(&nn->close_lru);
	spin_lock_init(&nn->client_lock);

	INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main);
	/* Pin the namespace while nfsd4 state exists in it. */
	get_net(net);

	return 0;

err_sessionid:
	kfree(nn->lockowner_ino_hashtbl);
err_lockowner_ino:
	kfree(nn->ownerstr_hashtbl);
err_ownerstr:
	kfree(nn->unconf_id_hashtbl);
err_unconf_id:
	kfree(nn->conf_id_hashtbl);
err:
	return -ENOMEM;
}
4884 4884
/*
 * Tear down per-net nfsd4 state: destroy all confirmed clients (via
 * the id hash) and all unconfirmed clients (via the name rbtree; they
 * may not be in the id hash yet), then free the tables and drop the
 * net reference taken in nfs4_state_create_net.
 */
static void
nfs4_state_destroy_net(struct net *net)
{
	int i;
	struct nfs4_client *clp = NULL;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	struct rb_node *node, *tmp;

	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
		while (!list_empty(&nn->conf_id_hashtbl[i])) {
			clp = list_entry(nn->conf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
			destroy_client(clp);
		}
	}

	/* Advance before erase: rb_next() needs the node still linked. */
	node = rb_first(&nn->unconf_name_tree);
	while (node != NULL) {
		tmp = node;
		node = rb_next(tmp);
		clp = rb_entry(tmp, struct nfs4_client, cl_namenode);
		rb_erase(tmp, &nn->unconf_name_tree);
		destroy_client(clp);
	}

	kfree(nn->sessionid_hashtbl);
	kfree(nn->lockowner_ino_hashtbl);
	kfree(nn->ownerstr_hashtbl);
	kfree(nn->unconf_id_hashtbl);
	kfree(nn->conf_id_hashtbl);
	put_net(net);
}
4916 4916
/*
 * Per-net nfsd startup: allocate state, initialize client tracking,
 * and begin the grace period (laundromat fires when it ends).
 * Returns 0 or a -errno.
 */
int
nfs4_state_start_net(struct net *net)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	int ret;

	/*
	 * FIXME: For now, we hang most of the pernet global stuff off of
	 * init_net until nfsd is fully containerized. Eventually, we'll
	 * need to pass a net pointer into this function, take a reference
	 * to that instead and then do most of the rest of this on a per-net
	 * basis.
	 */
	if (net != &init_net)
		return -EINVAL;

	ret = nfs4_state_create_net(net);
	if (ret)
		return ret;
	nfsd4_client_tracking_init(net);
	nn->boot_time = get_seconds();
	locks_start_grace(net, &nn->nfsd4_manager);
	nn->grace_ended = false;
	printk(KERN_INFO "NFSD: starting %ld-second grace period (net %p)\n",
	       nn->nfsd4_grace, net);
	queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_grace * HZ);
	return 0;
}
4945 4945
4946 /* initialization to perform when the nfsd service is started: */ 4946 /* initialization to perform when the nfsd service is started: */
4947 4947
/*
 * Global (not per-net) startup when the nfsd service starts: set up
 * the callback credential, the laundromat workqueue and the callback
 * queue, and size the delegation limit.  Unwinds on failure.
 */
int
nfs4_state_start(void)
{
	int ret;

	ret = set_callback_cred();
	if (ret)
		return -ENOMEM;
	laundry_wq = create_singlethread_workqueue("nfsd4");
	if (laundry_wq == NULL) {
		ret = -ENOMEM;
		goto out_recovery;
	}
	ret = nfsd4_create_callback_queue();
	if (ret)
		goto out_free_laundry;

	set_max_delegations();

	return 0;

out_free_laundry:
	destroy_workqueue(laundry_wq);
out_recovery:
	return ret;
}
4974 4974
/* should be called with the state lock held */
void
nfs4_state_shutdown_net(struct net *net)
{
	struct nfs4_delegation *dp = NULL;
	struct list_head *pos, *next, reaplist;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	cancel_delayed_work_sync(&nn->laundromat_work);
	locks_end_grace(&nn->nfsd4_manager);

	/*
	 * del_recall_lru is still global: pick out only this net's
	 * delegations under recall_lock, then unhash them outside it.
	 */
	INIT_LIST_HEAD(&reaplist);
	spin_lock(&recall_lock);
	list_for_each_safe(pos, next, &del_recall_lru) {
		dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
		if (dp->dl_stid.sc_client->net != net)
			continue;
		list_move(&dp->dl_recall_lru, &reaplist);
	}
	spin_unlock(&recall_lock);
	list_for_each_safe(pos, next, &reaplist) {
		dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
		unhash_delegation(dp);
	}

	nfsd4_client_tracking_exit(net);
	nfs4_state_destroy_net(net);
}
5003 5003
/* Global shutdown counterpart to nfs4_state_start(). */
void
nfs4_state_shutdown(void)
{
	destroy_workqueue(laundry_wq);
	nfsd4_destroy_callback_queue();
}
5010 5010
/*
 * If the op used the special "current stateid" value and one has been
 * saved in the compound state, substitute the saved stateid.
 */
static void
get_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
{
	if (HAS_STATE_ID(cstate, CURRENT_STATE_ID_FLAG) && CURRENT_STATEID(stateid))
		memcpy(stateid, &cstate->current_stateid, sizeof(stateid_t));
}

/*
 * Save @stateid as the compound's current stateid.  Only meaningful
 * for NFSv4.1+ (minorversion != 0), where the protocol defines it.
 */
static void
put_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
{
	if (cstate->minorversion) {
		memcpy(&cstate->current_stateid, stateid, sizeof(stateid_t));
		SET_STATE_ID(cstate, CURRENT_STATE_ID_FLAG);
	}
}

/* Invalidate any saved current stateid. */
void
clear_current_stateid(struct nfsd4_compound_state *cstate)
{
	CLEAR_STATE_ID(cstate, CURRENT_STATE_ID_FLAG);
}
5032 5032
/*
 * functions to set current state id
 *
 * One wrapper per op whose result stateid becomes the compound's
 * current stateid (called after the op succeeds).
 */
void
nfsd4_set_opendowngradestateid(struct nfsd4_compound_state *cstate, struct nfsd4_open_downgrade *odp)
{
	put_stateid(cstate, &odp->od_stateid);
}

void
nfsd4_set_openstateid(struct nfsd4_compound_state *cstate, struct nfsd4_open *open)
{
	put_stateid(cstate, &open->op_stateid);
}

void
nfsd4_set_closestateid(struct nfsd4_compound_state *cstate, struct nfsd4_close *close)
{
	put_stateid(cstate, &close->cl_stateid);
}

void
nfsd4_set_lockstateid(struct nfsd4_compound_state *cstate, struct nfsd4_lock *lock)
{
	put_stateid(cstate, &lock->lk_resp_stateid);
}
5059 5059
/*
 * functions to consume current state id
 *
 * One wrapper per op that may be given the special "current stateid";
 * each substitutes the saved value before the op is processed.
 */

void
nfsd4_get_opendowngradestateid(struct nfsd4_compound_state *cstate, struct nfsd4_open_downgrade *odp)
{
	get_stateid(cstate, &odp->od_stateid);
}

void
nfsd4_get_delegreturnstateid(struct nfsd4_compound_state *cstate, struct nfsd4_delegreturn *drp)
{
	get_stateid(cstate, &drp->dr_stateid);
}

void
nfsd4_get_freestateid(struct nfsd4_compound_state *cstate, struct nfsd4_free_stateid *fsp)
{
	get_stateid(cstate, &fsp->fr_stateid);
}

void
nfsd4_get_setattrstateid(struct nfsd4_compound_state *cstate, struct nfsd4_setattr *setattr)
{
	get_stateid(cstate, &setattr->sa_stateid);
}

void
nfsd4_get_closestateid(struct nfsd4_compound_state *cstate, struct nfsd4_close *close)
{
	get_stateid(cstate, &close->cl_stateid);
}

void
nfsd4_get_lockustateid(struct nfsd4_compound_state *cstate, struct nfsd4_locku *locku)
{
	get_stateid(cstate, &locku->lu_stateid);
}

void
nfsd4_get_readstateid(struct nfsd4_compound_state *cstate, struct nfsd4_read *read)
{
	get_stateid(cstate, &read->rd_stateid);
}

void
nfsd4_get_writestateid(struct nfsd4_compound_state *cstate, struct nfsd4_write *write)
{
	get_stateid(cstate, &write->wr_stateid);
}
5111 5111