Commit 41735818766c0ec215b9a69591e7eae642061954

Authored by Zhao Hongjiang
Committed by Al Viro
1 parent 9cc64ceaa8

fs: change return values from -EACCES to -EPERM

According to SUSv3:

[EACCES] Permission denied. An attempt was made to access a file in a way
forbidden by its file access permissions.

[EPERM] Operation not permitted. An attempt was made to perform an operation
limited to processes with appropriate privileges or to the owner of a file
or other resource.

So -EPERM should be returned when a capability check fails.

Strictly speaking this is an API change, since the error code the
user sees is altered.

Signed-off-by: Zhao Hongjiang <zhaohongjiang@huawei.com>
Acked-by: Jan Kara <jack@suse.cz>
Acked-by: Steven Whitehouse <swhiteho@redhat.com>
Acked-by: Ian Kent <raven@themaw.net>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
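
For reference, a minimal sketch of the convention the patch applies. The function name and the files_mode_ok parameter below are illustrative only and are not taken from the commit; capable() and CAP_SYS_ADMIN are the kernel interfaces actually used by the affected checks:

#include <linux/capability.h>
#include <linux/errno.h>

/* Illustrative sketch only, not part of the patch. */
static int example_privileged_op(int files_mode_ok)
{
        /* Privilege (capability) check failed: operation not permitted. */
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        /* File permission bits forbid the access: permission denied. */
        if (!files_mode_ok)
                return -EACCES;

        return 0;
}

In user-visible terms, a failed privilege check now reports "Operation not permitted" rather than "Permission denied".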

Showing 6 changed files with 16 additions and 16 deletions. Inline diff:

/* -*- c -*- --------------------------------------------------------------- *
 *
 * linux/fs/autofs/root.c
 *
 * Copyright 1997-1998 Transmeta Corporation -- All Rights Reserved
 * Copyright 1999-2000 Jeremy Fitzhardinge <jeremy@goop.org>
 * Copyright 2001-2006 Ian Kent <raven@themaw.net>
 *
 * This file is part of the Linux kernel and is made available under
 * the terms of the GNU General Public License, version 2, or at your
 * option, any later version, incorporated herein by reference.
 *
 * ------------------------------------------------------------------------- */

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/stat.h>
#include <linux/slab.h>
#include <linux/param.h>
#include <linux/time.h>
#include <linux/compat.h>
#include <linux/mutex.h>

#include "autofs_i.h"

static int autofs4_dir_symlink(struct inode *,struct dentry *,const char *);
static int autofs4_dir_unlink(struct inode *,struct dentry *);
static int autofs4_dir_rmdir(struct inode *,struct dentry *);
static int autofs4_dir_mkdir(struct inode *,struct dentry *,umode_t);
static long autofs4_root_ioctl(struct file *,unsigned int,unsigned long);
#ifdef CONFIG_COMPAT
static long autofs4_root_compat_ioctl(struct file *,unsigned int,unsigned long);
#endif
static int autofs4_dir_open(struct inode *inode, struct file *file);
static struct dentry *autofs4_lookup(struct inode *,struct dentry *, unsigned int);
static struct vfsmount *autofs4_d_automount(struct path *);
static int autofs4_d_manage(struct dentry *, bool);
static void autofs4_dentry_release(struct dentry *);

const struct file_operations autofs4_root_operations = {
        .open = dcache_dir_open,
        .release = dcache_dir_close,
        .read = generic_read_dir,
        .readdir = dcache_readdir,
        .llseek = dcache_dir_lseek,
        .unlocked_ioctl = autofs4_root_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl = autofs4_root_compat_ioctl,
#endif
};

const struct file_operations autofs4_dir_operations = {
        .open = autofs4_dir_open,
        .release = dcache_dir_close,
        .read = generic_read_dir,
        .readdir = dcache_readdir,
        .llseek = dcache_dir_lseek,
};

const struct inode_operations autofs4_dir_inode_operations = {
        .lookup = autofs4_lookup,
        .unlink = autofs4_dir_unlink,
        .symlink = autofs4_dir_symlink,
        .mkdir = autofs4_dir_mkdir,
        .rmdir = autofs4_dir_rmdir,
};

const struct dentry_operations autofs4_dentry_operations = {
        .d_automount = autofs4_d_automount,
        .d_manage = autofs4_d_manage,
        .d_release = autofs4_dentry_release,
};

static void autofs4_add_active(struct dentry *dentry)
{
        struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
        struct autofs_info *ino = autofs4_dentry_ino(dentry);
        if (ino) {
                spin_lock(&sbi->lookup_lock);
                if (!ino->active_count) {
                        if (list_empty(&ino->active))
                                list_add(&ino->active, &sbi->active_list);
                }
                ino->active_count++;
                spin_unlock(&sbi->lookup_lock);
        }
        return;
}

static void autofs4_del_active(struct dentry *dentry)
{
        struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
        struct autofs_info *ino = autofs4_dentry_ino(dentry);
        if (ino) {
                spin_lock(&sbi->lookup_lock);
                ino->active_count--;
                if (!ino->active_count) {
                        if (!list_empty(&ino->active))
                                list_del_init(&ino->active);
                }
                spin_unlock(&sbi->lookup_lock);
        }
        return;
}

static int autofs4_dir_open(struct inode *inode, struct file *file)
{
        struct dentry *dentry = file->f_path.dentry;
        struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);

        DPRINTK("file=%p dentry=%p %.*s",
                file, dentry, dentry->d_name.len, dentry->d_name.name);

        if (autofs4_oz_mode(sbi))
                goto out;

        /*
         * An empty directory in an autofs file system is always a
         * mount point. The daemon must have failed to mount this
         * during lookup so it doesn't exist. This can happen, for
         * example, if user space returns an incorrect status for a
         * mount request. Otherwise we're doing a readdir on the
         * autofs file system so just let the libfs routines handle
         * it.
         */
        spin_lock(&sbi->lookup_lock);
        if (!d_mountpoint(dentry) && simple_empty(dentry)) {
                spin_unlock(&sbi->lookup_lock);
                return -ENOENT;
        }
        spin_unlock(&sbi->lookup_lock);

out:
        return dcache_dir_open(inode, file);
}

static void autofs4_dentry_release(struct dentry *de)
{
        struct autofs_info *ino = autofs4_dentry_ino(de);
        struct autofs_sb_info *sbi = autofs4_sbi(de->d_sb);

        DPRINTK("releasing %p", de);

        if (!ino)
                return;

        if (sbi) {
                spin_lock(&sbi->lookup_lock);
                if (!list_empty(&ino->active))
                        list_del(&ino->active);
                if (!list_empty(&ino->expiring))
                        list_del(&ino->expiring);
                spin_unlock(&sbi->lookup_lock);
        }

        autofs4_free_ino(ino);
}

static struct dentry *autofs4_lookup_active(struct dentry *dentry)
{
        struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
        struct dentry *parent = dentry->d_parent;
        struct qstr *name = &dentry->d_name;
        unsigned int len = name->len;
        unsigned int hash = name->hash;
        const unsigned char *str = name->name;
        struct list_head *p, *head;

        spin_lock(&sbi->lookup_lock);
        head = &sbi->active_list;
        list_for_each(p, head) {
                struct autofs_info *ino;
                struct dentry *active;
                struct qstr *qstr;

                ino = list_entry(p, struct autofs_info, active);
                active = ino->dentry;

                spin_lock(&active->d_lock);

                /* Already gone? */
                if (active->d_count == 0)
                        goto next;

                qstr = &active->d_name;

                if (active->d_name.hash != hash)
                        goto next;
                if (active->d_parent != parent)
                        goto next;

                if (qstr->len != len)
                        goto next;
                if (memcmp(qstr->name, str, len))
                        goto next;

                if (d_unhashed(active)) {
                        dget_dlock(active);
                        spin_unlock(&active->d_lock);
                        spin_unlock(&sbi->lookup_lock);
                        return active;
                }
next:
                spin_unlock(&active->d_lock);
        }
        spin_unlock(&sbi->lookup_lock);

        return NULL;
}

static struct dentry *autofs4_lookup_expiring(struct dentry *dentry)
{
        struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
        struct dentry *parent = dentry->d_parent;
        struct qstr *name = &dentry->d_name;
        unsigned int len = name->len;
        unsigned int hash = name->hash;
        const unsigned char *str = name->name;
        struct list_head *p, *head;

        spin_lock(&sbi->lookup_lock);
        head = &sbi->expiring_list;
        list_for_each(p, head) {
                struct autofs_info *ino;
                struct dentry *expiring;
                struct qstr *qstr;

                ino = list_entry(p, struct autofs_info, expiring);
                expiring = ino->dentry;

                spin_lock(&expiring->d_lock);

                /* Bad luck, we've already been dentry_iput */
                if (!expiring->d_inode)
                        goto next;

                qstr = &expiring->d_name;

                if (expiring->d_name.hash != hash)
                        goto next;
                if (expiring->d_parent != parent)
                        goto next;

                if (qstr->len != len)
                        goto next;
                if (memcmp(qstr->name, str, len))
                        goto next;

                if (d_unhashed(expiring)) {
                        dget_dlock(expiring);
                        spin_unlock(&expiring->d_lock);
                        spin_unlock(&sbi->lookup_lock);
                        return expiring;
                }
next:
                spin_unlock(&expiring->d_lock);
        }
        spin_unlock(&sbi->lookup_lock);

        return NULL;
}

static int autofs4_mount_wait(struct dentry *dentry)
{
        struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
        struct autofs_info *ino = autofs4_dentry_ino(dentry);
        int status = 0;

        if (ino->flags & AUTOFS_INF_PENDING) {
                DPRINTK("waiting for mount name=%.*s",
                        dentry->d_name.len, dentry->d_name.name);
                status = autofs4_wait(sbi, dentry, NFY_MOUNT);
                DPRINTK("mount wait done status=%d", status);
        }
        ino->last_used = jiffies;
        return status;
}

static int do_expire_wait(struct dentry *dentry)
{
        struct dentry *expiring;

        expiring = autofs4_lookup_expiring(dentry);
        if (!expiring)
                return autofs4_expire_wait(dentry);
        else {
                /*
                 * If we are racing with expire the request might not
                 * be quite complete, but the directory has been removed
                 * so it must have been successful, just wait for it.
                 */
                autofs4_expire_wait(expiring);
                autofs4_del_expiring(expiring);
                dput(expiring);
        }
        return 0;
}

static struct dentry *autofs4_mountpoint_changed(struct path *path)
{
        struct dentry *dentry = path->dentry;
        struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);

        /*
         * If this is an indirect mount the dentry could have gone away
         * as a result of an expire and a new one created.
         */
        if (autofs_type_indirect(sbi->type) && d_unhashed(dentry)) {
                struct dentry *parent = dentry->d_parent;
                struct autofs_info *ino;
                struct dentry *new = d_lookup(parent, &dentry->d_name);
                if (!new)
                        return NULL;
                ino = autofs4_dentry_ino(new);
                ino->last_used = jiffies;
                dput(path->dentry);
                path->dentry = new;
        }
        return path->dentry;
}

static struct vfsmount *autofs4_d_automount(struct path *path)
{
        struct dentry *dentry = path->dentry;
        struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
        struct autofs_info *ino = autofs4_dentry_ino(dentry);
        int status;

        DPRINTK("dentry=%p %.*s",
                dentry, dentry->d_name.len, dentry->d_name.name);

        /* The daemon never triggers a mount. */
        if (autofs4_oz_mode(sbi))
                return NULL;

        /*
         * If an expire request is pending everyone must wait.
         * If the expire fails we're still mounted so continue
         * the follow and return. A return of -EAGAIN (which only
         * happens with indirect mounts) means the expire completed
         * and the directory was removed, so just go ahead and try
         * the mount.
         */
        status = do_expire_wait(dentry);
        if (status && status != -EAGAIN)
                return NULL;

        /* Callback to the daemon to perform the mount or wait */
        spin_lock(&sbi->fs_lock);
        if (ino->flags & AUTOFS_INF_PENDING) {
                spin_unlock(&sbi->fs_lock);
                status = autofs4_mount_wait(dentry);
                if (status)
                        return ERR_PTR(status);
                goto done;
        }

        /*
         * If the dentry is a symlink it's equivalent to a directory
         * having d_mountpoint() true, so there's no need to call back
         * to the daemon.
         */
        if (dentry->d_inode && S_ISLNK(dentry->d_inode->i_mode)) {
                spin_unlock(&sbi->fs_lock);
                goto done;
        }

        if (!d_mountpoint(dentry)) {
                /*
                 * It's possible that user space hasn't removed directories
                 * after umounting a rootless multi-mount, although it
                 * should. For v5 have_submounts() is sufficient to handle
                 * this because the leaves of the directory tree under the
                 * mount never trigger mounts themselves (they have an autofs
                 * trigger mount mounted on them). But v4 pseudo direct mounts
                 * do need the leaves to to trigger mounts. In this case we
                 * have no choice but to use the list_empty() check and
                 * require user space behave.
                 */
                if (sbi->version > 4) {
                        if (have_submounts(dentry)) {
                                spin_unlock(&sbi->fs_lock);
                                goto done;
                        }
                } else {
                        if (!simple_empty(dentry))
                                goto done;
                }
                ino->flags |= AUTOFS_INF_PENDING;
                spin_unlock(&sbi->fs_lock);
                status = autofs4_mount_wait(dentry);
                spin_lock(&sbi->fs_lock);
                ino->flags &= ~AUTOFS_INF_PENDING;
                if (status) {
                        spin_unlock(&sbi->fs_lock);
                        return ERR_PTR(status);
                }
        }
        spin_unlock(&sbi->fs_lock);
done:
        /* Mount succeeded, check if we ended up with a new dentry */
        dentry = autofs4_mountpoint_changed(path);
        if (!dentry)
                return ERR_PTR(-ENOENT);

        return NULL;
}

int autofs4_d_manage(struct dentry *dentry, bool rcu_walk)
{
        struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
        struct autofs_info *ino = autofs4_dentry_ino(dentry);
        int status;

        DPRINTK("dentry=%p %.*s",
                dentry, dentry->d_name.len, dentry->d_name.name);

        /* The daemon never waits. */
        if (autofs4_oz_mode(sbi)) {
                if (rcu_walk)
                        return 0;
                if (!d_mountpoint(dentry))
                        return -EISDIR;
                return 0;
        }

        /* We need to sleep, so we need pathwalk to be in ref-mode */
        if (rcu_walk)
                return -ECHILD;

        /* Wait for pending expires */
        do_expire_wait(dentry);

        /*
         * This dentry may be under construction so wait on mount
         * completion.
         */
        status = autofs4_mount_wait(dentry);
        if (status)
                return status;

        spin_lock(&sbi->fs_lock);
        /*
         * If the dentry has been selected for expire while we slept
         * on the lock then it might go away. We'll deal with that in
         * ->d_automount() and wait on a new mount if the expire
         * succeeds or return here if it doesn't (since there's no
         * mount to follow with a rootless multi-mount).
         */
        if (!(ino->flags & AUTOFS_INF_EXPIRING)) {
                /*
                 * Any needed mounting has been completed and the path
                 * updated so check if this is a rootless multi-mount so
                 * we can avoid needless calls ->d_automount() and avoid
                 * an incorrect ELOOP error return.
                 */
                if ((!d_mountpoint(dentry) && !simple_empty(dentry)) ||
                    (dentry->d_inode && S_ISLNK(dentry->d_inode->i_mode)))
                        status = -EISDIR;
        }
        spin_unlock(&sbi->fs_lock);

        return status;
}

/* Lookups in the root directory */
static struct dentry *autofs4_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
{
        struct autofs_sb_info *sbi;
        struct autofs_info *ino;
        struct dentry *active;

        DPRINTK("name = %.*s", dentry->d_name.len, dentry->d_name.name);

        /* File name too long to exist */
        if (dentry->d_name.len > NAME_MAX)
                return ERR_PTR(-ENAMETOOLONG);

        sbi = autofs4_sbi(dir->i_sb);

        DPRINTK("pid = %u, pgrp = %u, catatonic = %d, oz_mode = %d",
                current->pid, task_pgrp_nr(current), sbi->catatonic,
                autofs4_oz_mode(sbi));

        active = autofs4_lookup_active(dentry);
        if (active) {
                return active;
        } else {
                /*
                 * A dentry that is not within the root can never trigger a
                 * mount operation, unless the directory already exists, so we
                 * can return fail immediately. The daemon however does need
                 * to create directories within the file system.
                 */
                if (!autofs4_oz_mode(sbi) && !IS_ROOT(dentry->d_parent))
                        return ERR_PTR(-ENOENT);

                /* Mark entries in the root as mount triggers */
                if (autofs_type_indirect(sbi->type) && IS_ROOT(dentry->d_parent))
                        __managed_dentry_set_managed(dentry);

                ino = autofs4_new_ino(sbi);
                if (!ino)
                        return ERR_PTR(-ENOMEM);

                dentry->d_fsdata = ino;
                ino->dentry = dentry;

                autofs4_add_active(dentry);

                d_instantiate(dentry, NULL);
        }
        return NULL;
}

static int autofs4_dir_symlink(struct inode *dir,
                               struct dentry *dentry,
                               const char *symname)
{
        struct autofs_sb_info *sbi = autofs4_sbi(dir->i_sb);
        struct autofs_info *ino = autofs4_dentry_ino(dentry);
        struct autofs_info *p_ino;
        struct inode *inode;
        size_t size = strlen(symname);
        char *cp;

        DPRINTK("%s <- %.*s", symname,
                dentry->d_name.len, dentry->d_name.name);

        if (!autofs4_oz_mode(sbi))
                return -EACCES;

        BUG_ON(!ino);

        autofs4_clean_ino(ino);

        autofs4_del_active(dentry);

        cp = kmalloc(size + 1, GFP_KERNEL);
        if (!cp)
                return -ENOMEM;

        strcpy(cp, symname);

        inode = autofs4_get_inode(dir->i_sb, S_IFLNK | 0555);
        if (!inode) {
                kfree(cp);
                if (!dentry->d_fsdata)
                        kfree(ino);
                return -ENOMEM;
        }
        inode->i_private = cp;
        inode->i_size = size;
        d_add(dentry, inode);

        dget(dentry);
        atomic_inc(&ino->count);
        p_ino = autofs4_dentry_ino(dentry->d_parent);
        if (p_ino && dentry->d_parent != dentry)
                atomic_inc(&p_ino->count);

        dir->i_mtime = CURRENT_TIME;

        return 0;
}

/*
 * NOTE!
 *
 * Normal filesystems would do a "d_delete()" to tell the VFS dcache
 * that the file no longer exists. However, doing that means that the
 * VFS layer can turn the dentry into a negative dentry. We don't want
 * this, because the unlink is probably the result of an expire.
 * We simply d_drop it and add it to a expiring list in the super block,
 * which allows the dentry lookup to check for an incomplete expire.
 *
 * If a process is blocked on the dentry waiting for the expire to finish,
 * it will invalidate the dentry and try to mount with a new one.
 *
 * Also see autofs4_dir_rmdir()..
 */
static int autofs4_dir_unlink(struct inode *dir, struct dentry *dentry)
{
        struct autofs_sb_info *sbi = autofs4_sbi(dir->i_sb);
        struct autofs_info *ino = autofs4_dentry_ino(dentry);
        struct autofs_info *p_ino;

        /* This allows root to remove symlinks */
        if (!autofs4_oz_mode(sbi) && !capable(CAP_SYS_ADMIN))
-                return -EACCES;
+                return -EPERM;

        if (atomic_dec_and_test(&ino->count)) {
                p_ino = autofs4_dentry_ino(dentry->d_parent);
                if (p_ino && dentry->d_parent != dentry)
                        atomic_dec(&p_ino->count);
        }
        dput(ino->dentry);

        dentry->d_inode->i_size = 0;
        clear_nlink(dentry->d_inode);

        dir->i_mtime = CURRENT_TIME;

        spin_lock(&sbi->lookup_lock);
        __autofs4_add_expiring(dentry);
        d_drop(dentry);
        spin_unlock(&sbi->lookup_lock);

        return 0;
}

/*
 * Version 4 of autofs provides a pseudo direct mount implementation
 * that relies on directories at the leaves of a directory tree under
 * an indirect mount to trigger mounts. To allow for this we need to
 * set the DMANAGED_AUTOMOUNT and DMANAGED_TRANSIT flags on the leaves
 * of the directory tree. There is no need to clear the automount flag
 * following a mount or restore it after an expire because these mounts
 * are always covered. However, it is necessary to ensure that these
 * flags are clear on non-empty directories to avoid unnecessary calls
 * during path walks.
 */
static void autofs_set_leaf_automount_flags(struct dentry *dentry)
{
        struct dentry *parent;

        /* root and dentrys in the root are already handled */
        if (IS_ROOT(dentry->d_parent))
                return;

        managed_dentry_set_managed(dentry);

        parent = dentry->d_parent;
        /* only consider parents below dentrys in the root */
        if (IS_ROOT(parent->d_parent))
                return;
        managed_dentry_clear_managed(parent);
        return;
}

static void autofs_clear_leaf_automount_flags(struct dentry *dentry)
{
        struct list_head *d_child;
        struct dentry *parent;

        /* flags for dentrys in the root are handled elsewhere */
        if (IS_ROOT(dentry->d_parent))
                return;

        managed_dentry_clear_managed(dentry);

        parent = dentry->d_parent;
        /* only consider parents below dentrys in the root */
        if (IS_ROOT(parent->d_parent))
                return;
        d_child = &dentry->d_u.d_child;
        /* Set parent managed if it's becoming empty */
        if (d_child->next == &parent->d_subdirs &&
            d_child->prev == &parent->d_subdirs)
                managed_dentry_set_managed(parent);
        return;
}

static int autofs4_dir_rmdir(struct inode *dir, struct dentry *dentry)
{
        struct autofs_sb_info *sbi = autofs4_sbi(dir->i_sb);
        struct autofs_info *ino = autofs4_dentry_ino(dentry);
        struct autofs_info *p_ino;

        DPRINTK("dentry %p, removing %.*s",
                dentry, dentry->d_name.len, dentry->d_name.name);

        if (!autofs4_oz_mode(sbi))
                return -EACCES;

        spin_lock(&sbi->lookup_lock);
        if (!simple_empty(dentry)) {
                spin_unlock(&sbi->lookup_lock);
                return -ENOTEMPTY;
        }
        __autofs4_add_expiring(dentry);
        d_drop(dentry);
        spin_unlock(&sbi->lookup_lock);

        if (sbi->version < 5)
                autofs_clear_leaf_automount_flags(dentry);

        if (atomic_dec_and_test(&ino->count)) {
                p_ino = autofs4_dentry_ino(dentry->d_parent);
                if (p_ino && dentry->d_parent != dentry)
                        atomic_dec(&p_ino->count);
        }
        dput(ino->dentry);
        dentry->d_inode->i_size = 0;
        clear_nlink(dentry->d_inode);

        if (dir->i_nlink)
                drop_nlink(dir);

        return 0;
}

static int autofs4_dir_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
        struct autofs_sb_info *sbi = autofs4_sbi(dir->i_sb);
        struct autofs_info *ino = autofs4_dentry_ino(dentry);
        struct autofs_info *p_ino;
        struct inode *inode;

        if (!autofs4_oz_mode(sbi))
                return -EACCES;

        DPRINTK("dentry %p, creating %.*s",
                dentry, dentry->d_name.len, dentry->d_name.name);

        BUG_ON(!ino);

        autofs4_clean_ino(ino);

        autofs4_del_active(dentry);

        inode = autofs4_get_inode(dir->i_sb, S_IFDIR | 0555);
        if (!inode)
                return -ENOMEM;
        d_add(dentry, inode);

        if (sbi->version < 5)
                autofs_set_leaf_automount_flags(dentry);

        dget(dentry);
        atomic_inc(&ino->count);
        p_ino = autofs4_dentry_ino(dentry->d_parent);
        if (p_ino && dentry->d_parent != dentry)
                atomic_inc(&p_ino->count);
        inc_nlink(dir);
        dir->i_mtime = CURRENT_TIME;

        return 0;
}

/* Get/set timeout ioctl() operation */
#ifdef CONFIG_COMPAT
static inline int autofs4_compat_get_set_timeout(struct autofs_sb_info *sbi,
                                                 compat_ulong_t __user *p)
{
        int rv;
        unsigned long ntimeout;

        if ((rv = get_user(ntimeout, p)) ||
            (rv = put_user(sbi->exp_timeout/HZ, p)))
                return rv;

        if (ntimeout > UINT_MAX/HZ)
                sbi->exp_timeout = 0;
        else
                sbi->exp_timeout = ntimeout * HZ;

        return 0;
}
#endif

static inline int autofs4_get_set_timeout(struct autofs_sb_info *sbi,
                                          unsigned long __user *p)
{
        int rv;
        unsigned long ntimeout;

        if ((rv = get_user(ntimeout, p)) ||
            (rv = put_user(sbi->exp_timeout/HZ, p)))
                return rv;

        if (ntimeout > ULONG_MAX/HZ)
                sbi->exp_timeout = 0;
        else
                sbi->exp_timeout = ntimeout * HZ;

        return 0;
}

/* Return protocol version */
static inline int autofs4_get_protover(struct autofs_sb_info *sbi, int __user *p)
{
        return put_user(sbi->version, p);
}

/* Return protocol sub version */
static inline int autofs4_get_protosubver(struct autofs_sb_info *sbi, int __user *p)
{
        return put_user(sbi->sub_version, p);
}

/*
 * Tells the daemon whether it can umount the autofs mount.
 */
static inline int autofs4_ask_umount(struct vfsmount *mnt, int __user *p)
{
        int status = 0;

        if (may_umount(mnt))
                status = 1;

        DPRINTK("returning %d", status);

        status = put_user(status, p);

        return status;
}

/* Identify autofs4_dentries - this is so we can tell if there's
   an extra dentry refcount or not. We only hold a refcount on the
   dentry if its non-negative (ie, d_inode != NULL)
*/
int is_autofs4_dentry(struct dentry *dentry)
{
        return dentry && dentry->d_inode &&
                dentry->d_op == &autofs4_dentry_operations &&
                dentry->d_fsdata != NULL;
}

/*
 * ioctl()'s on the root directory is the chief method for the daemon to
 * generate kernel reactions
 */
static int autofs4_root_ioctl_unlocked(struct inode *inode, struct file *filp,
                                       unsigned int cmd, unsigned long arg)
{
        struct autofs_sb_info *sbi = autofs4_sbi(inode->i_sb);
        void __user *p = (void __user *)arg;

        DPRINTK("cmd = 0x%08x, arg = 0x%08lx, sbi = %p, pgrp = %u",
                cmd,arg,sbi,task_pgrp_nr(current));

        if (_IOC_TYPE(cmd) != _IOC_TYPE(AUTOFS_IOC_FIRST) ||
            _IOC_NR(cmd) - _IOC_NR(AUTOFS_IOC_FIRST) >= AUTOFS_IOC_COUNT)
                return -ENOTTY;

        if (!autofs4_oz_mode(sbi) && !capable(CAP_SYS_ADMIN))
                return -EPERM;

        switch(cmd) {
        case AUTOFS_IOC_READY: /* Wait queue: go ahead and retry */
                return autofs4_wait_release(sbi,(autofs_wqt_t)arg,0);
        case AUTOFS_IOC_FAIL: /* Wait queue: fail with ENOENT */
                return autofs4_wait_release(sbi,(autofs_wqt_t)arg,-ENOENT);
        case AUTOFS_IOC_CATATONIC: /* Enter catatonic mode (daemon shutdown) */
                autofs4_catatonic_mode(sbi);
                return 0;
        case AUTOFS_IOC_PROTOVER: /* Get protocol version */
                return autofs4_get_protover(sbi, p);
        case AUTOFS_IOC_PROTOSUBVER: /* Get protocol sub version */
                return autofs4_get_protosubver(sbi, p);
        case AUTOFS_IOC_SETTIMEOUT:
                return autofs4_get_set_timeout(sbi, p);
#ifdef CONFIG_COMPAT
        case AUTOFS_IOC_SETTIMEOUT32:
                return autofs4_compat_get_set_timeout(sbi, p);
#endif

        case AUTOFS_IOC_ASKUMOUNT:
                return autofs4_ask_umount(filp->f_path.mnt, p);

        /* return a single thing to expire */
        case AUTOFS_IOC_EXPIRE:
                return autofs4_expire_run(inode->i_sb,filp->f_path.mnt,sbi, p);
        /* same as above, but can send multiple expires through pipe */
        case AUTOFS_IOC_EXPIRE_MULTI:
                return autofs4_expire_multi(inode->i_sb,filp->f_path.mnt,sbi, p);

        default:
                return -ENOSYS;
        }
}

static long autofs4_root_ioctl(struct file *filp,
                               unsigned int cmd, unsigned long arg)
{
        struct inode *inode = file_inode(filp);
        return autofs4_root_ioctl_unlocked(inode, filp, cmd, arg);
}

#ifdef CONFIG_COMPAT
static long autofs4_root_compat_ioctl(struct file *filp,
                                      unsigned int cmd, unsigned long arg)
{
        struct inode *inode = file_inode(filp);
        int ret;

        if (cmd == AUTOFS_IOC_READY || cmd == AUTOFS_IOC_FAIL)
                ret = autofs4_root_ioctl_unlocked(inode, filp, cmd, arg);
        else
                ret = autofs4_root_ioctl_unlocked(inode, filp, cmd,
                        (unsigned long)compat_ptr(arg));

        return ret;
}
#endif

/******************************************************************************
*******************************************************************************
**
**  Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
**  Copyright (C) 2004-2011 Red Hat, Inc. All rights reserved.
**
**  This copyrighted material is made available to anyone wishing to use,
**  modify, copy, or redistribute it subject to the terms and conditions
**  of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/configfs.h>
#include <linux/slab.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/dlmconstants.h>
#include <net/ipv6.h>
#include <net/sock.h>

#include "config.h"
#include "lowcomms.h"

/*
 * /config/dlm/<cluster>/spaces/<space>/nodes/<node>/nodeid
 * /config/dlm/<cluster>/spaces/<space>/nodes/<node>/weight
 * /config/dlm/<cluster>/comms/<comm>/nodeid
 * /config/dlm/<cluster>/comms/<comm>/local
 * /config/dlm/<cluster>/comms/<comm>/addr      (write only)
 * /config/dlm/<cluster>/comms/<comm>/addr_list (read only)
 * The <cluster> level is useless, but I haven't figured out how to avoid it.
 */

static struct config_group *space_list;
static struct config_group *comm_list;
static struct dlm_comm *local_comm;
static uint32_t dlm_comm_count;

struct dlm_clusters;
struct dlm_cluster;
struct dlm_spaces;
struct dlm_space;
struct dlm_comms;
struct dlm_comm;
struct dlm_nodes;
struct dlm_node;

static struct config_group *make_cluster(struct config_group *, const char *);
static void drop_cluster(struct config_group *, struct config_item *);
static void release_cluster(struct config_item *);
static struct config_group *make_space(struct config_group *, const char *);
static void drop_space(struct config_group *, struct config_item *);
static void release_space(struct config_item *);
static struct config_item *make_comm(struct config_group *, const char *);
static void drop_comm(struct config_group *, struct config_item *);
static void release_comm(struct config_item *);
static struct config_item *make_node(struct config_group *, const char *);
static void drop_node(struct config_group *, struct config_item *);
static void release_node(struct config_item *);

static ssize_t show_cluster(struct config_item *i, struct configfs_attribute *a,
                            char *buf);
static ssize_t store_cluster(struct config_item *i,
                             struct configfs_attribute *a,
                             const char *buf, size_t len);
static ssize_t show_comm(struct config_item *i, struct configfs_attribute *a,
                         char *buf);
static ssize_t store_comm(struct config_item *i, struct configfs_attribute *a,
                          const char *buf, size_t len);
static ssize_t show_node(struct config_item *i, struct configfs_attribute *a,
                         char *buf);
static ssize_t store_node(struct config_item *i, struct configfs_attribute *a,
                          const char *buf, size_t len);

static ssize_t comm_nodeid_read(struct dlm_comm *cm, char *buf);
static ssize_t comm_nodeid_write(struct dlm_comm *cm, const char *buf,
                                 size_t len);
static ssize_t comm_local_read(struct dlm_comm *cm, char *buf);
static ssize_t comm_local_write(struct dlm_comm *cm, const char *buf,
                                size_t len);
static ssize_t comm_addr_write(struct dlm_comm *cm, const char *buf,
                               size_t len);
static ssize_t comm_addr_list_read(struct dlm_comm *cm, char *buf);
static ssize_t node_nodeid_read(struct dlm_node *nd, char *buf);
88 static ssize_t node_nodeid_write(struct dlm_node *nd, const char *buf, 88 static ssize_t node_nodeid_write(struct dlm_node *nd, const char *buf,
89 size_t len); 89 size_t len);
90 static ssize_t node_weight_read(struct dlm_node *nd, char *buf); 90 static ssize_t node_weight_read(struct dlm_node *nd, char *buf);
91 static ssize_t node_weight_write(struct dlm_node *nd, const char *buf, 91 static ssize_t node_weight_write(struct dlm_node *nd, const char *buf,
92 size_t len); 92 size_t len);
93 93
94 struct dlm_cluster { 94 struct dlm_cluster {
95 struct config_group group; 95 struct config_group group;
96 unsigned int cl_tcp_port; 96 unsigned int cl_tcp_port;
97 unsigned int cl_buffer_size; 97 unsigned int cl_buffer_size;
98 unsigned int cl_rsbtbl_size; 98 unsigned int cl_rsbtbl_size;
99 unsigned int cl_recover_timer; 99 unsigned int cl_recover_timer;
100 unsigned int cl_toss_secs; 100 unsigned int cl_toss_secs;
101 unsigned int cl_scan_secs; 101 unsigned int cl_scan_secs;
102 unsigned int cl_log_debug; 102 unsigned int cl_log_debug;
103 unsigned int cl_protocol; 103 unsigned int cl_protocol;
104 unsigned int cl_timewarn_cs; 104 unsigned int cl_timewarn_cs;
105 unsigned int cl_waitwarn_us; 105 unsigned int cl_waitwarn_us;
106 unsigned int cl_new_rsb_count; 106 unsigned int cl_new_rsb_count;
107 unsigned int cl_recover_callbacks; 107 unsigned int cl_recover_callbacks;
108 char cl_cluster_name[DLM_LOCKSPACE_LEN]; 108 char cl_cluster_name[DLM_LOCKSPACE_LEN];
109 }; 109 };
110 110
111 enum { 111 enum {
112 CLUSTER_ATTR_TCP_PORT = 0, 112 CLUSTER_ATTR_TCP_PORT = 0,
113 CLUSTER_ATTR_BUFFER_SIZE, 113 CLUSTER_ATTR_BUFFER_SIZE,
114 CLUSTER_ATTR_RSBTBL_SIZE, 114 CLUSTER_ATTR_RSBTBL_SIZE,
115 CLUSTER_ATTR_RECOVER_TIMER, 115 CLUSTER_ATTR_RECOVER_TIMER,
116 CLUSTER_ATTR_TOSS_SECS, 116 CLUSTER_ATTR_TOSS_SECS,
117 CLUSTER_ATTR_SCAN_SECS, 117 CLUSTER_ATTR_SCAN_SECS,
118 CLUSTER_ATTR_LOG_DEBUG, 118 CLUSTER_ATTR_LOG_DEBUG,
119 CLUSTER_ATTR_PROTOCOL, 119 CLUSTER_ATTR_PROTOCOL,
120 CLUSTER_ATTR_TIMEWARN_CS, 120 CLUSTER_ATTR_TIMEWARN_CS,
121 CLUSTER_ATTR_WAITWARN_US, 121 CLUSTER_ATTR_WAITWARN_US,
122 CLUSTER_ATTR_NEW_RSB_COUNT, 122 CLUSTER_ATTR_NEW_RSB_COUNT,
123 CLUSTER_ATTR_RECOVER_CALLBACKS, 123 CLUSTER_ATTR_RECOVER_CALLBACKS,
124 CLUSTER_ATTR_CLUSTER_NAME, 124 CLUSTER_ATTR_CLUSTER_NAME,
125 }; 125 };
126 126
127 struct cluster_attribute { 127 struct cluster_attribute {
128 struct configfs_attribute attr; 128 struct configfs_attribute attr;
129 ssize_t (*show)(struct dlm_cluster *, char *); 129 ssize_t (*show)(struct dlm_cluster *, char *);
130 ssize_t (*store)(struct dlm_cluster *, const char *, size_t); 130 ssize_t (*store)(struct dlm_cluster *, const char *, size_t);
131 }; 131 };
132 132
133 static ssize_t cluster_cluster_name_read(struct dlm_cluster *cl, char *buf) 133 static ssize_t cluster_cluster_name_read(struct dlm_cluster *cl, char *buf)
134 { 134 {
135 return sprintf(buf, "%s\n", cl->cl_cluster_name); 135 return sprintf(buf, "%s\n", cl->cl_cluster_name);
136 } 136 }
137 137
138 static ssize_t cluster_cluster_name_write(struct dlm_cluster *cl, 138 static ssize_t cluster_cluster_name_write(struct dlm_cluster *cl,
139 const char *buf, size_t len) 139 const char *buf, size_t len)
140 { 140 {
141 strncpy(dlm_config.ci_cluster_name, buf, DLM_LOCKSPACE_LEN); 141 strncpy(dlm_config.ci_cluster_name, buf, DLM_LOCKSPACE_LEN);
142 strncpy(cl->cl_cluster_name, buf, DLM_LOCKSPACE_LEN); 142 strncpy(cl->cl_cluster_name, buf, DLM_LOCKSPACE_LEN);
143 return len; 143 return len;
144 } 144 }
145 145
146 static struct cluster_attribute cluster_attr_cluster_name = { 146 static struct cluster_attribute cluster_attr_cluster_name = {
147 .attr = { .ca_owner = THIS_MODULE, 147 .attr = { .ca_owner = THIS_MODULE,
148 .ca_name = "cluster_name", 148 .ca_name = "cluster_name",
149 .ca_mode = S_IRUGO | S_IWUSR }, 149 .ca_mode = S_IRUGO | S_IWUSR },
150 .show = cluster_cluster_name_read, 150 .show = cluster_cluster_name_read,
151 .store = cluster_cluster_name_write, 151 .store = cluster_cluster_name_write,
152 }; 152 };
153 153
154 static ssize_t cluster_set(struct dlm_cluster *cl, unsigned int *cl_field, 154 static ssize_t cluster_set(struct dlm_cluster *cl, unsigned int *cl_field,
155 int *info_field, int check_zero, 155 int *info_field, int check_zero,
156 const char *buf, size_t len) 156 const char *buf, size_t len)
157 { 157 {
158 unsigned int x; 158 unsigned int x;
159 159
160 if (!capable(CAP_SYS_ADMIN)) 160 if (!capable(CAP_SYS_ADMIN))
161 return -EACCES; 161 return -EPERM;
162 162
163 x = simple_strtoul(buf, NULL, 0); 163 x = simple_strtoul(buf, NULL, 0);
164 164
165 if (check_zero && !x) 165 if (check_zero && !x)
166 return -EINVAL; 166 return -EINVAL;
167 167
168 *cl_field = x; 168 *cl_field = x;
169 *info_field = x; 169 *info_field = x;
170 170
171 return len; 171 return len;
172 } 172 }
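/*
 * Editor's note (illustrative): cluster_set() above is the dlm hunk of this
 * commit -- a failed capability check is now reported as EPERM ("Operation
 * not permitted") instead of EACCES ("Permission denied").  A hedged
 * user-space sketch of the visible difference, for a writer that can open
 * the attribute but lacks CAP_SYS_ADMIN (the cluster name is an assumption):
 *
 *   int fd = open("/config/dlm/mycluster/buffer_size", O_WRONLY);
 *   if (fd >= 0 && write(fd, "8192", 4) < 0)
 *           perror("buffer_size");   now prints "Operation not permitted"
 *                                    (previously "Permission denied")
 */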
173 173
174 #define CLUSTER_ATTR(name, check_zero) \ 174 #define CLUSTER_ATTR(name, check_zero) \
175 static ssize_t name##_write(struct dlm_cluster *cl, const char *buf, size_t len) \ 175 static ssize_t name##_write(struct dlm_cluster *cl, const char *buf, size_t len) \
176 { \ 176 { \
177 return cluster_set(cl, &cl->cl_##name, &dlm_config.ci_##name, \ 177 return cluster_set(cl, &cl->cl_##name, &dlm_config.ci_##name, \
178 check_zero, buf, len); \ 178 check_zero, buf, len); \
179 } \ 179 } \
180 static ssize_t name##_read(struct dlm_cluster *cl, char *buf) \ 180 static ssize_t name##_read(struct dlm_cluster *cl, char *buf) \
181 { \ 181 { \
182 return snprintf(buf, PAGE_SIZE, "%u\n", cl->cl_##name); \ 182 return snprintf(buf, PAGE_SIZE, "%u\n", cl->cl_##name); \
183 } \ 183 } \
184 static struct cluster_attribute cluster_attr_##name = \ 184 static struct cluster_attribute cluster_attr_##name = \
185 __CONFIGFS_ATTR(name, 0644, name##_read, name##_write) 185 __CONFIGFS_ATTR(name, 0644, name##_read, name##_write)
186 186
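/*
 * Editor's note (sketch, whitespace adjusted): each CLUSTER_ATTR(name,
 * check_zero) invocation below expands to a read/write pair plus an
 * attribute definition.  For CLUSTER_ATTR(tcp_port, 1) that is roughly:
 *
 *   static ssize_t tcp_port_write(struct dlm_cluster *cl, const char *buf,
 *                                 size_t len)
 *   {
 *           return cluster_set(cl, &cl->cl_tcp_port, &dlm_config.ci_tcp_port,
 *                              1, buf, len);
 *   }
 *   static ssize_t tcp_port_read(struct dlm_cluster *cl, char *buf)
 *   {
 *           return snprintf(buf, PAGE_SIZE, "%u\n", cl->cl_tcp_port);
 *   }
 *   static struct cluster_attribute cluster_attr_tcp_port =
 *           __CONFIGFS_ATTR(tcp_port, 0644, tcp_port_read, tcp_port_write);
 */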
187 CLUSTER_ATTR(tcp_port, 1); 187 CLUSTER_ATTR(tcp_port, 1);
188 CLUSTER_ATTR(buffer_size, 1); 188 CLUSTER_ATTR(buffer_size, 1);
189 CLUSTER_ATTR(rsbtbl_size, 1); 189 CLUSTER_ATTR(rsbtbl_size, 1);
190 CLUSTER_ATTR(recover_timer, 1); 190 CLUSTER_ATTR(recover_timer, 1);
191 CLUSTER_ATTR(toss_secs, 1); 191 CLUSTER_ATTR(toss_secs, 1);
192 CLUSTER_ATTR(scan_secs, 1); 192 CLUSTER_ATTR(scan_secs, 1);
193 CLUSTER_ATTR(log_debug, 0); 193 CLUSTER_ATTR(log_debug, 0);
194 CLUSTER_ATTR(protocol, 0); 194 CLUSTER_ATTR(protocol, 0);
195 CLUSTER_ATTR(timewarn_cs, 1); 195 CLUSTER_ATTR(timewarn_cs, 1);
196 CLUSTER_ATTR(waitwarn_us, 0); 196 CLUSTER_ATTR(waitwarn_us, 0);
197 CLUSTER_ATTR(new_rsb_count, 0); 197 CLUSTER_ATTR(new_rsb_count, 0);
198 CLUSTER_ATTR(recover_callbacks, 0); 198 CLUSTER_ATTR(recover_callbacks, 0);
199 199
200 static struct configfs_attribute *cluster_attrs[] = { 200 static struct configfs_attribute *cluster_attrs[] = {
201 [CLUSTER_ATTR_TCP_PORT] = &cluster_attr_tcp_port.attr, 201 [CLUSTER_ATTR_TCP_PORT] = &cluster_attr_tcp_port.attr,
202 [CLUSTER_ATTR_BUFFER_SIZE] = &cluster_attr_buffer_size.attr, 202 [CLUSTER_ATTR_BUFFER_SIZE] = &cluster_attr_buffer_size.attr,
203 [CLUSTER_ATTR_RSBTBL_SIZE] = &cluster_attr_rsbtbl_size.attr, 203 [CLUSTER_ATTR_RSBTBL_SIZE] = &cluster_attr_rsbtbl_size.attr,
204 [CLUSTER_ATTR_RECOVER_TIMER] = &cluster_attr_recover_timer.attr, 204 [CLUSTER_ATTR_RECOVER_TIMER] = &cluster_attr_recover_timer.attr,
205 [CLUSTER_ATTR_TOSS_SECS] = &cluster_attr_toss_secs.attr, 205 [CLUSTER_ATTR_TOSS_SECS] = &cluster_attr_toss_secs.attr,
206 [CLUSTER_ATTR_SCAN_SECS] = &cluster_attr_scan_secs.attr, 206 [CLUSTER_ATTR_SCAN_SECS] = &cluster_attr_scan_secs.attr,
207 [CLUSTER_ATTR_LOG_DEBUG] = &cluster_attr_log_debug.attr, 207 [CLUSTER_ATTR_LOG_DEBUG] = &cluster_attr_log_debug.attr,
208 [CLUSTER_ATTR_PROTOCOL] = &cluster_attr_protocol.attr, 208 [CLUSTER_ATTR_PROTOCOL] = &cluster_attr_protocol.attr,
209 [CLUSTER_ATTR_TIMEWARN_CS] = &cluster_attr_timewarn_cs.attr, 209 [CLUSTER_ATTR_TIMEWARN_CS] = &cluster_attr_timewarn_cs.attr,
210 [CLUSTER_ATTR_WAITWARN_US] = &cluster_attr_waitwarn_us.attr, 210 [CLUSTER_ATTR_WAITWARN_US] = &cluster_attr_waitwarn_us.attr,
211 [CLUSTER_ATTR_NEW_RSB_COUNT] = &cluster_attr_new_rsb_count.attr, 211 [CLUSTER_ATTR_NEW_RSB_COUNT] = &cluster_attr_new_rsb_count.attr,
212 [CLUSTER_ATTR_RECOVER_CALLBACKS] = &cluster_attr_recover_callbacks.attr, 212 [CLUSTER_ATTR_RECOVER_CALLBACKS] = &cluster_attr_recover_callbacks.attr,
213 [CLUSTER_ATTR_CLUSTER_NAME] = &cluster_attr_cluster_name.attr, 213 [CLUSTER_ATTR_CLUSTER_NAME] = &cluster_attr_cluster_name.attr,
214 NULL, 214 NULL,
215 }; 215 };
216 216
217 enum { 217 enum {
218 COMM_ATTR_NODEID = 0, 218 COMM_ATTR_NODEID = 0,
219 COMM_ATTR_LOCAL, 219 COMM_ATTR_LOCAL,
220 COMM_ATTR_ADDR, 220 COMM_ATTR_ADDR,
221 COMM_ATTR_ADDR_LIST, 221 COMM_ATTR_ADDR_LIST,
222 }; 222 };
223 223
224 struct comm_attribute { 224 struct comm_attribute {
225 struct configfs_attribute attr; 225 struct configfs_attribute attr;
226 ssize_t (*show)(struct dlm_comm *, char *); 226 ssize_t (*show)(struct dlm_comm *, char *);
227 ssize_t (*store)(struct dlm_comm *, const char *, size_t); 227 ssize_t (*store)(struct dlm_comm *, const char *, size_t);
228 }; 228 };
229 229
230 static struct comm_attribute comm_attr_nodeid = { 230 static struct comm_attribute comm_attr_nodeid = {
231 .attr = { .ca_owner = THIS_MODULE, 231 .attr = { .ca_owner = THIS_MODULE,
232 .ca_name = "nodeid", 232 .ca_name = "nodeid",
233 .ca_mode = S_IRUGO | S_IWUSR }, 233 .ca_mode = S_IRUGO | S_IWUSR },
234 .show = comm_nodeid_read, 234 .show = comm_nodeid_read,
235 .store = comm_nodeid_write, 235 .store = comm_nodeid_write,
236 }; 236 };
237 237
238 static struct comm_attribute comm_attr_local = { 238 static struct comm_attribute comm_attr_local = {
239 .attr = { .ca_owner = THIS_MODULE, 239 .attr = { .ca_owner = THIS_MODULE,
240 .ca_name = "local", 240 .ca_name = "local",
241 .ca_mode = S_IRUGO | S_IWUSR }, 241 .ca_mode = S_IRUGO | S_IWUSR },
242 .show = comm_local_read, 242 .show = comm_local_read,
243 .store = comm_local_write, 243 .store = comm_local_write,
244 }; 244 };
245 245
246 static struct comm_attribute comm_attr_addr = { 246 static struct comm_attribute comm_attr_addr = {
247 .attr = { .ca_owner = THIS_MODULE, 247 .attr = { .ca_owner = THIS_MODULE,
248 .ca_name = "addr", 248 .ca_name = "addr",
249 .ca_mode = S_IWUSR }, 249 .ca_mode = S_IWUSR },
250 .store = comm_addr_write, 250 .store = comm_addr_write,
251 }; 251 };
252 252
253 static struct comm_attribute comm_attr_addr_list = { 253 static struct comm_attribute comm_attr_addr_list = {
254 .attr = { .ca_owner = THIS_MODULE, 254 .attr = { .ca_owner = THIS_MODULE,
255 .ca_name = "addr_list", 255 .ca_name = "addr_list",
256 .ca_mode = S_IRUGO }, 256 .ca_mode = S_IRUGO },
257 .show = comm_addr_list_read, 257 .show = comm_addr_list_read,
258 }; 258 };
259 259
260 static struct configfs_attribute *comm_attrs[] = { 260 static struct configfs_attribute *comm_attrs[] = {
261 [COMM_ATTR_NODEID] = &comm_attr_nodeid.attr, 261 [COMM_ATTR_NODEID] = &comm_attr_nodeid.attr,
262 [COMM_ATTR_LOCAL] = &comm_attr_local.attr, 262 [COMM_ATTR_LOCAL] = &comm_attr_local.attr,
263 [COMM_ATTR_ADDR] = &comm_attr_addr.attr, 263 [COMM_ATTR_ADDR] = &comm_attr_addr.attr,
264 [COMM_ATTR_ADDR_LIST] = &comm_attr_addr_list.attr, 264 [COMM_ATTR_ADDR_LIST] = &comm_attr_addr_list.attr,
265 NULL, 265 NULL,
266 }; 266 };
267 267
268 enum { 268 enum {
269 NODE_ATTR_NODEID = 0, 269 NODE_ATTR_NODEID = 0,
270 NODE_ATTR_WEIGHT, 270 NODE_ATTR_WEIGHT,
271 }; 271 };
272 272
273 struct node_attribute { 273 struct node_attribute {
274 struct configfs_attribute attr; 274 struct configfs_attribute attr;
275 ssize_t (*show)(struct dlm_node *, char *); 275 ssize_t (*show)(struct dlm_node *, char *);
276 ssize_t (*store)(struct dlm_node *, const char *, size_t); 276 ssize_t (*store)(struct dlm_node *, const char *, size_t);
277 }; 277 };
278 278
279 static struct node_attribute node_attr_nodeid = { 279 static struct node_attribute node_attr_nodeid = {
280 .attr = { .ca_owner = THIS_MODULE, 280 .attr = { .ca_owner = THIS_MODULE,
281 .ca_name = "nodeid", 281 .ca_name = "nodeid",
282 .ca_mode = S_IRUGO | S_IWUSR }, 282 .ca_mode = S_IRUGO | S_IWUSR },
283 .show = node_nodeid_read, 283 .show = node_nodeid_read,
284 .store = node_nodeid_write, 284 .store = node_nodeid_write,
285 }; 285 };
286 286
287 static struct node_attribute node_attr_weight = { 287 static struct node_attribute node_attr_weight = {
288 .attr = { .ca_owner = THIS_MODULE, 288 .attr = { .ca_owner = THIS_MODULE,
289 .ca_name = "weight", 289 .ca_name = "weight",
290 .ca_mode = S_IRUGO | S_IWUSR }, 290 .ca_mode = S_IRUGO | S_IWUSR },
291 .show = node_weight_read, 291 .show = node_weight_read,
292 .store = node_weight_write, 292 .store = node_weight_write,
293 }; 293 };
294 294
295 static struct configfs_attribute *node_attrs[] = { 295 static struct configfs_attribute *node_attrs[] = {
296 [NODE_ATTR_NODEID] = &node_attr_nodeid.attr, 296 [NODE_ATTR_NODEID] = &node_attr_nodeid.attr,
297 [NODE_ATTR_WEIGHT] = &node_attr_weight.attr, 297 [NODE_ATTR_WEIGHT] = &node_attr_weight.attr,
298 NULL, 298 NULL,
299 }; 299 };
300 300
301 struct dlm_clusters { 301 struct dlm_clusters {
302 struct configfs_subsystem subsys; 302 struct configfs_subsystem subsys;
303 }; 303 };
304 304
305 struct dlm_spaces { 305 struct dlm_spaces {
306 struct config_group ss_group; 306 struct config_group ss_group;
307 }; 307 };
308 308
309 struct dlm_space { 309 struct dlm_space {
310 struct config_group group; 310 struct config_group group;
311 struct list_head members; 311 struct list_head members;
312 struct mutex members_lock; 312 struct mutex members_lock;
313 int members_count; 313 int members_count;
314 }; 314 };
315 315
316 struct dlm_comms { 316 struct dlm_comms {
317 struct config_group cs_group; 317 struct config_group cs_group;
318 }; 318 };
319 319
320 struct dlm_comm { 320 struct dlm_comm {
321 struct config_item item; 321 struct config_item item;
322 int seq; 322 int seq;
323 int nodeid; 323 int nodeid;
324 int local; 324 int local;
325 int addr_count; 325 int addr_count;
326 struct sockaddr_storage *addr[DLM_MAX_ADDR_COUNT]; 326 struct sockaddr_storage *addr[DLM_MAX_ADDR_COUNT];
327 }; 327 };
328 328
329 struct dlm_nodes { 329 struct dlm_nodes {
330 struct config_group ns_group; 330 struct config_group ns_group;
331 }; 331 };
332 332
333 struct dlm_node { 333 struct dlm_node {
334 struct config_item item; 334 struct config_item item;
335 struct list_head list; /* space->members */ 335 struct list_head list; /* space->members */
336 int nodeid; 336 int nodeid;
337 int weight; 337 int weight;
338 int new; 338 int new;
339 int comm_seq; /* copy of cm->seq when nd->nodeid is set */ 339 int comm_seq; /* copy of cm->seq when nd->nodeid is set */
340 }; 340 };
341 341
342 static struct configfs_group_operations clusters_ops = { 342 static struct configfs_group_operations clusters_ops = {
343 .make_group = make_cluster, 343 .make_group = make_cluster,
344 .drop_item = drop_cluster, 344 .drop_item = drop_cluster,
345 }; 345 };
346 346
347 static struct configfs_item_operations cluster_ops = { 347 static struct configfs_item_operations cluster_ops = {
348 .release = release_cluster, 348 .release = release_cluster,
349 .show_attribute = show_cluster, 349 .show_attribute = show_cluster,
350 .store_attribute = store_cluster, 350 .store_attribute = store_cluster,
351 }; 351 };
352 352
353 static struct configfs_group_operations spaces_ops = { 353 static struct configfs_group_operations spaces_ops = {
354 .make_group = make_space, 354 .make_group = make_space,
355 .drop_item = drop_space, 355 .drop_item = drop_space,
356 }; 356 };
357 357
358 static struct configfs_item_operations space_ops = { 358 static struct configfs_item_operations space_ops = {
359 .release = release_space, 359 .release = release_space,
360 }; 360 };
361 361
362 static struct configfs_group_operations comms_ops = { 362 static struct configfs_group_operations comms_ops = {
363 .make_item = make_comm, 363 .make_item = make_comm,
364 .drop_item = drop_comm, 364 .drop_item = drop_comm,
365 }; 365 };
366 366
367 static struct configfs_item_operations comm_ops = { 367 static struct configfs_item_operations comm_ops = {
368 .release = release_comm, 368 .release = release_comm,
369 .show_attribute = show_comm, 369 .show_attribute = show_comm,
370 .store_attribute = store_comm, 370 .store_attribute = store_comm,
371 }; 371 };
372 372
373 static struct configfs_group_operations nodes_ops = { 373 static struct configfs_group_operations nodes_ops = {
374 .make_item = make_node, 374 .make_item = make_node,
375 .drop_item = drop_node, 375 .drop_item = drop_node,
376 }; 376 };
377 377
378 static struct configfs_item_operations node_ops = { 378 static struct configfs_item_operations node_ops = {
379 .release = release_node, 379 .release = release_node,
380 .show_attribute = show_node, 380 .show_attribute = show_node,
381 .store_attribute = store_node, 381 .store_attribute = store_node,
382 }; 382 };
383 383
384 static struct config_item_type clusters_type = { 384 static struct config_item_type clusters_type = {
385 .ct_group_ops = &clusters_ops, 385 .ct_group_ops = &clusters_ops,
386 .ct_owner = THIS_MODULE, 386 .ct_owner = THIS_MODULE,
387 }; 387 };
388 388
389 static struct config_item_type cluster_type = { 389 static struct config_item_type cluster_type = {
390 .ct_item_ops = &cluster_ops, 390 .ct_item_ops = &cluster_ops,
391 .ct_attrs = cluster_attrs, 391 .ct_attrs = cluster_attrs,
392 .ct_owner = THIS_MODULE, 392 .ct_owner = THIS_MODULE,
393 }; 393 };
394 394
395 static struct config_item_type spaces_type = { 395 static struct config_item_type spaces_type = {
396 .ct_group_ops = &spaces_ops, 396 .ct_group_ops = &spaces_ops,
397 .ct_owner = THIS_MODULE, 397 .ct_owner = THIS_MODULE,
398 }; 398 };
399 399
400 static struct config_item_type space_type = { 400 static struct config_item_type space_type = {
401 .ct_item_ops = &space_ops, 401 .ct_item_ops = &space_ops,
402 .ct_owner = THIS_MODULE, 402 .ct_owner = THIS_MODULE,
403 }; 403 };
404 404
405 static struct config_item_type comms_type = { 405 static struct config_item_type comms_type = {
406 .ct_group_ops = &comms_ops, 406 .ct_group_ops = &comms_ops,
407 .ct_owner = THIS_MODULE, 407 .ct_owner = THIS_MODULE,
408 }; 408 };
409 409
410 static struct config_item_type comm_type = { 410 static struct config_item_type comm_type = {
411 .ct_item_ops = &comm_ops, 411 .ct_item_ops = &comm_ops,
412 .ct_attrs = comm_attrs, 412 .ct_attrs = comm_attrs,
413 .ct_owner = THIS_MODULE, 413 .ct_owner = THIS_MODULE,
414 }; 414 };
415 415
416 static struct config_item_type nodes_type = { 416 static struct config_item_type nodes_type = {
417 .ct_group_ops = &nodes_ops, 417 .ct_group_ops = &nodes_ops,
418 .ct_owner = THIS_MODULE, 418 .ct_owner = THIS_MODULE,
419 }; 419 };
420 420
421 static struct config_item_type node_type = { 421 static struct config_item_type node_type = {
422 .ct_item_ops = &node_ops, 422 .ct_item_ops = &node_ops,
423 .ct_attrs = node_attrs, 423 .ct_attrs = node_attrs,
424 .ct_owner = THIS_MODULE, 424 .ct_owner = THIS_MODULE,
425 }; 425 };
426 426
427 static struct dlm_cluster *config_item_to_cluster(struct config_item *i) 427 static struct dlm_cluster *config_item_to_cluster(struct config_item *i)
428 { 428 {
429 return i ? container_of(to_config_group(i), struct dlm_cluster, group) : 429 return i ? container_of(to_config_group(i), struct dlm_cluster, group) :
430 NULL; 430 NULL;
431 } 431 }
432 432
433 static struct dlm_space *config_item_to_space(struct config_item *i) 433 static struct dlm_space *config_item_to_space(struct config_item *i)
434 { 434 {
435 return i ? container_of(to_config_group(i), struct dlm_space, group) : 435 return i ? container_of(to_config_group(i), struct dlm_space, group) :
436 NULL; 436 NULL;
437 } 437 }
438 438
439 static struct dlm_comm *config_item_to_comm(struct config_item *i) 439 static struct dlm_comm *config_item_to_comm(struct config_item *i)
440 { 440 {
441 return i ? container_of(i, struct dlm_comm, item) : NULL; 441 return i ? container_of(i, struct dlm_comm, item) : NULL;
442 } 442 }
443 443
444 static struct dlm_node *config_item_to_node(struct config_item *i) 444 static struct dlm_node *config_item_to_node(struct config_item *i)
445 { 445 {
446 return i ? container_of(i, struct dlm_node, item) : NULL; 446 return i ? container_of(i, struct dlm_node, item) : NULL;
447 } 447 }
448 448
449 static struct config_group *make_cluster(struct config_group *g, 449 static struct config_group *make_cluster(struct config_group *g,
450 const char *name) 450 const char *name)
451 { 451 {
452 struct dlm_cluster *cl = NULL; 452 struct dlm_cluster *cl = NULL;
453 struct dlm_spaces *sps = NULL; 453 struct dlm_spaces *sps = NULL;
454 struct dlm_comms *cms = NULL; 454 struct dlm_comms *cms = NULL;
455 void *gps = NULL; 455 void *gps = NULL;
456 456
457 cl = kzalloc(sizeof(struct dlm_cluster), GFP_NOFS); 457 cl = kzalloc(sizeof(struct dlm_cluster), GFP_NOFS);
458 gps = kcalloc(3, sizeof(struct config_group *), GFP_NOFS); 458 gps = kcalloc(3, sizeof(struct config_group *), GFP_NOFS);
459 sps = kzalloc(sizeof(struct dlm_spaces), GFP_NOFS); 459 sps = kzalloc(sizeof(struct dlm_spaces), GFP_NOFS);
460 cms = kzalloc(sizeof(struct dlm_comms), GFP_NOFS); 460 cms = kzalloc(sizeof(struct dlm_comms), GFP_NOFS);
461 461
462 if (!cl || !gps || !sps || !cms) 462 if (!cl || !gps || !sps || !cms)
463 goto fail; 463 goto fail;
464 464
465 config_group_init_type_name(&cl->group, name, &cluster_type); 465 config_group_init_type_name(&cl->group, name, &cluster_type);
466 config_group_init_type_name(&sps->ss_group, "spaces", &spaces_type); 466 config_group_init_type_name(&sps->ss_group, "spaces", &spaces_type);
467 config_group_init_type_name(&cms->cs_group, "comms", &comms_type); 467 config_group_init_type_name(&cms->cs_group, "comms", &comms_type);
468 468
469 cl->group.default_groups = gps; 469 cl->group.default_groups = gps;
470 cl->group.default_groups[0] = &sps->ss_group; 470 cl->group.default_groups[0] = &sps->ss_group;
471 cl->group.default_groups[1] = &cms->cs_group; 471 cl->group.default_groups[1] = &cms->cs_group;
472 cl->group.default_groups[2] = NULL; 472 cl->group.default_groups[2] = NULL;
473 473
474 cl->cl_tcp_port = dlm_config.ci_tcp_port; 474 cl->cl_tcp_port = dlm_config.ci_tcp_port;
475 cl->cl_buffer_size = dlm_config.ci_buffer_size; 475 cl->cl_buffer_size = dlm_config.ci_buffer_size;
476 cl->cl_rsbtbl_size = dlm_config.ci_rsbtbl_size; 476 cl->cl_rsbtbl_size = dlm_config.ci_rsbtbl_size;
477 cl->cl_recover_timer = dlm_config.ci_recover_timer; 477 cl->cl_recover_timer = dlm_config.ci_recover_timer;
478 cl->cl_toss_secs = dlm_config.ci_toss_secs; 478 cl->cl_toss_secs = dlm_config.ci_toss_secs;
479 cl->cl_scan_secs = dlm_config.ci_scan_secs; 479 cl->cl_scan_secs = dlm_config.ci_scan_secs;
480 cl->cl_log_debug = dlm_config.ci_log_debug; 480 cl->cl_log_debug = dlm_config.ci_log_debug;
481 cl->cl_protocol = dlm_config.ci_protocol; 481 cl->cl_protocol = dlm_config.ci_protocol;
482 cl->cl_timewarn_cs = dlm_config.ci_timewarn_cs; 482 cl->cl_timewarn_cs = dlm_config.ci_timewarn_cs;
483 cl->cl_waitwarn_us = dlm_config.ci_waitwarn_us; 483 cl->cl_waitwarn_us = dlm_config.ci_waitwarn_us;
484 cl->cl_new_rsb_count = dlm_config.ci_new_rsb_count; 484 cl->cl_new_rsb_count = dlm_config.ci_new_rsb_count;
485 cl->cl_recover_callbacks = dlm_config.ci_recover_callbacks; 485 cl->cl_recover_callbacks = dlm_config.ci_recover_callbacks;
486 memcpy(cl->cl_cluster_name, dlm_config.ci_cluster_name, 486 memcpy(cl->cl_cluster_name, dlm_config.ci_cluster_name,
487 DLM_LOCKSPACE_LEN); 487 DLM_LOCKSPACE_LEN);
488 488
489 space_list = &sps->ss_group; 489 space_list = &sps->ss_group;
490 comm_list = &cms->cs_group; 490 comm_list = &cms->cs_group;
491 return &cl->group; 491 return &cl->group;
492 492
493 fail: 493 fail:
494 kfree(cl); 494 kfree(cl);
495 kfree(gps); 495 kfree(gps);
496 kfree(sps); 496 kfree(sps);
497 kfree(cms); 497 kfree(cms);
498 return ERR_PTR(-ENOMEM); 498 return ERR_PTR(-ENOMEM);
499 } 499 }
500 500
501 static void drop_cluster(struct config_group *g, struct config_item *i) 501 static void drop_cluster(struct config_group *g, struct config_item *i)
502 { 502 {
503 struct dlm_cluster *cl = config_item_to_cluster(i); 503 struct dlm_cluster *cl = config_item_to_cluster(i);
504 struct config_item *tmp; 504 struct config_item *tmp;
505 int j; 505 int j;
506 506
507 for (j = 0; cl->group.default_groups[j]; j++) { 507 for (j = 0; cl->group.default_groups[j]; j++) {
508 tmp = &cl->group.default_groups[j]->cg_item; 508 tmp = &cl->group.default_groups[j]->cg_item;
509 cl->group.default_groups[j] = NULL; 509 cl->group.default_groups[j] = NULL;
510 config_item_put(tmp); 510 config_item_put(tmp);
511 } 511 }
512 512
513 space_list = NULL; 513 space_list = NULL;
514 comm_list = NULL; 514 comm_list = NULL;
515 515
516 config_item_put(i); 516 config_item_put(i);
517 } 517 }
518 518
519 static void release_cluster(struct config_item *i) 519 static void release_cluster(struct config_item *i)
520 { 520 {
521 struct dlm_cluster *cl = config_item_to_cluster(i); 521 struct dlm_cluster *cl = config_item_to_cluster(i);
522 kfree(cl->group.default_groups); 522 kfree(cl->group.default_groups);
523 kfree(cl); 523 kfree(cl);
524 } 524 }
525 525
526 static struct config_group *make_space(struct config_group *g, const char *name) 526 static struct config_group *make_space(struct config_group *g, const char *name)
527 { 527 {
528 struct dlm_space *sp = NULL; 528 struct dlm_space *sp = NULL;
529 struct dlm_nodes *nds = NULL; 529 struct dlm_nodes *nds = NULL;
530 void *gps = NULL; 530 void *gps = NULL;
531 531
532 sp = kzalloc(sizeof(struct dlm_space), GFP_NOFS); 532 sp = kzalloc(sizeof(struct dlm_space), GFP_NOFS);
533 gps = kcalloc(2, sizeof(struct config_group *), GFP_NOFS); 533 gps = kcalloc(2, sizeof(struct config_group *), GFP_NOFS);
534 nds = kzalloc(sizeof(struct dlm_nodes), GFP_NOFS); 534 nds = kzalloc(sizeof(struct dlm_nodes), GFP_NOFS);
535 535
536 if (!sp || !gps || !nds) 536 if (!sp || !gps || !nds)
537 goto fail; 537 goto fail;
538 538
539 config_group_init_type_name(&sp->group, name, &space_type); 539 config_group_init_type_name(&sp->group, name, &space_type);
540 config_group_init_type_name(&nds->ns_group, "nodes", &nodes_type); 540 config_group_init_type_name(&nds->ns_group, "nodes", &nodes_type);
541 541
542 sp->group.default_groups = gps; 542 sp->group.default_groups = gps;
543 sp->group.default_groups[0] = &nds->ns_group; 543 sp->group.default_groups[0] = &nds->ns_group;
544 sp->group.default_groups[1] = NULL; 544 sp->group.default_groups[1] = NULL;
545 545
546 INIT_LIST_HEAD(&sp->members); 546 INIT_LIST_HEAD(&sp->members);
547 mutex_init(&sp->members_lock); 547 mutex_init(&sp->members_lock);
548 sp->members_count = 0; 548 sp->members_count = 0;
549 return &sp->group; 549 return &sp->group;
550 550
551 fail: 551 fail:
552 kfree(sp); 552 kfree(sp);
553 kfree(gps); 553 kfree(gps);
554 kfree(nds); 554 kfree(nds);
555 return ERR_PTR(-ENOMEM); 555 return ERR_PTR(-ENOMEM);
556 } 556 }
557 557
558 static void drop_space(struct config_group *g, struct config_item *i) 558 static void drop_space(struct config_group *g, struct config_item *i)
559 { 559 {
560 struct dlm_space *sp = config_item_to_space(i); 560 struct dlm_space *sp = config_item_to_space(i);
561 struct config_item *tmp; 561 struct config_item *tmp;
562 int j; 562 int j;
563 563
564 /* assert list_empty(&sp->members) */ 564 /* assert list_empty(&sp->members) */
565 565
566 for (j = 0; sp->group.default_groups[j]; j++) { 566 for (j = 0; sp->group.default_groups[j]; j++) {
567 tmp = &sp->group.default_groups[j]->cg_item; 567 tmp = &sp->group.default_groups[j]->cg_item;
568 sp->group.default_groups[j] = NULL; 568 sp->group.default_groups[j] = NULL;
569 config_item_put(tmp); 569 config_item_put(tmp);
570 } 570 }
571 571
572 config_item_put(i); 572 config_item_put(i);
573 } 573 }
574 574
575 static void release_space(struct config_item *i) 575 static void release_space(struct config_item *i)
576 { 576 {
577 struct dlm_space *sp = config_item_to_space(i); 577 struct dlm_space *sp = config_item_to_space(i);
578 kfree(sp->group.default_groups); 578 kfree(sp->group.default_groups);
579 kfree(sp); 579 kfree(sp);
580 } 580 }
581 581
582 static struct config_item *make_comm(struct config_group *g, const char *name) 582 static struct config_item *make_comm(struct config_group *g, const char *name)
583 { 583 {
584 struct dlm_comm *cm; 584 struct dlm_comm *cm;
585 585
586 cm = kzalloc(sizeof(struct dlm_comm), GFP_NOFS); 586 cm = kzalloc(sizeof(struct dlm_comm), GFP_NOFS);
587 if (!cm) 587 if (!cm)
588 return ERR_PTR(-ENOMEM); 588 return ERR_PTR(-ENOMEM);
589 589
590 config_item_init_type_name(&cm->item, name, &comm_type); 590 config_item_init_type_name(&cm->item, name, &comm_type);
591 591
592 cm->seq = dlm_comm_count++; 592 cm->seq = dlm_comm_count++;
593 if (!cm->seq) 593 if (!cm->seq)
594 cm->seq = dlm_comm_count++; 594 cm->seq = dlm_comm_count++;
595 595
596 cm->nodeid = -1; 596 cm->nodeid = -1;
597 cm->local = 0; 597 cm->local = 0;
598 cm->addr_count = 0; 598 cm->addr_count = 0;
599 return &cm->item; 599 return &cm->item;
600 } 600 }
601 601
602 static void drop_comm(struct config_group *g, struct config_item *i) 602 static void drop_comm(struct config_group *g, struct config_item *i)
603 { 603 {
604 struct dlm_comm *cm = config_item_to_comm(i); 604 struct dlm_comm *cm = config_item_to_comm(i);
605 if (local_comm == cm) 605 if (local_comm == cm)
606 local_comm = NULL; 606 local_comm = NULL;
607 dlm_lowcomms_close(cm->nodeid); 607 dlm_lowcomms_close(cm->nodeid);
608 while (cm->addr_count--) 608 while (cm->addr_count--)
609 kfree(cm->addr[cm->addr_count]); 609 kfree(cm->addr[cm->addr_count]);
610 config_item_put(i); 610 config_item_put(i);
611 } 611 }
612 612
613 static void release_comm(struct config_item *i) 613 static void release_comm(struct config_item *i)
614 { 614 {
615 struct dlm_comm *cm = config_item_to_comm(i); 615 struct dlm_comm *cm = config_item_to_comm(i);
616 kfree(cm); 616 kfree(cm);
617 } 617 }
618 618
619 static struct config_item *make_node(struct config_group *g, const char *name) 619 static struct config_item *make_node(struct config_group *g, const char *name)
620 { 620 {
621 struct dlm_space *sp = config_item_to_space(g->cg_item.ci_parent); 621 struct dlm_space *sp = config_item_to_space(g->cg_item.ci_parent);
622 struct dlm_node *nd; 622 struct dlm_node *nd;
623 623
624 nd = kzalloc(sizeof(struct dlm_node), GFP_NOFS); 624 nd = kzalloc(sizeof(struct dlm_node), GFP_NOFS);
625 if (!nd) 625 if (!nd)
626 return ERR_PTR(-ENOMEM); 626 return ERR_PTR(-ENOMEM);
627 627
628 config_item_init_type_name(&nd->item, name, &node_type); 628 config_item_init_type_name(&nd->item, name, &node_type);
629 nd->nodeid = -1; 629 nd->nodeid = -1;
630 nd->weight = 1; /* default weight of 1 if none is set */ 630 nd->weight = 1; /* default weight of 1 if none is set */
631 nd->new = 1; /* set to 0 once it's been read by dlm_nodeid_list() */ 631 nd->new = 1; /* set to 0 once it's been read by dlm_nodeid_list() */
632 632
633 mutex_lock(&sp->members_lock); 633 mutex_lock(&sp->members_lock);
634 list_add(&nd->list, &sp->members); 634 list_add(&nd->list, &sp->members);
635 sp->members_count++; 635 sp->members_count++;
636 mutex_unlock(&sp->members_lock); 636 mutex_unlock(&sp->members_lock);
637 637
638 return &nd->item; 638 return &nd->item;
639 } 639 }
640 640
641 static void drop_node(struct config_group *g, struct config_item *i) 641 static void drop_node(struct config_group *g, struct config_item *i)
642 { 642 {
643 struct dlm_space *sp = config_item_to_space(g->cg_item.ci_parent); 643 struct dlm_space *sp = config_item_to_space(g->cg_item.ci_parent);
644 struct dlm_node *nd = config_item_to_node(i); 644 struct dlm_node *nd = config_item_to_node(i);
645 645
646 mutex_lock(&sp->members_lock); 646 mutex_lock(&sp->members_lock);
647 list_del(&nd->list); 647 list_del(&nd->list);
648 sp->members_count--; 648 sp->members_count--;
649 mutex_unlock(&sp->members_lock); 649 mutex_unlock(&sp->members_lock);
650 650
651 config_item_put(i); 651 config_item_put(i);
652 } 652 }
653 653
654 static void release_node(struct config_item *i) 654 static void release_node(struct config_item *i)
655 { 655 {
656 struct dlm_node *nd = config_item_to_node(i); 656 struct dlm_node *nd = config_item_to_node(i);
657 kfree(nd); 657 kfree(nd);
658 } 658 }
659 659
660 static struct dlm_clusters clusters_root = { 660 static struct dlm_clusters clusters_root = {
661 .subsys = { 661 .subsys = {
662 .su_group = { 662 .su_group = {
663 .cg_item = { 663 .cg_item = {
664 .ci_namebuf = "dlm", 664 .ci_namebuf = "dlm",
665 .ci_type = &clusters_type, 665 .ci_type = &clusters_type,
666 }, 666 },
667 }, 667 },
668 }, 668 },
669 }; 669 };
670 670
671 int __init dlm_config_init(void) 671 int __init dlm_config_init(void)
672 { 672 {
673 config_group_init(&clusters_root.subsys.su_group); 673 config_group_init(&clusters_root.subsys.su_group);
674 mutex_init(&clusters_root.subsys.su_mutex); 674 mutex_init(&clusters_root.subsys.su_mutex);
675 return configfs_register_subsystem(&clusters_root.subsys); 675 return configfs_register_subsystem(&clusters_root.subsys);
676 } 676 }
677 677
678 void dlm_config_exit(void) 678 void dlm_config_exit(void)
679 { 679 {
680 configfs_unregister_subsystem(&clusters_root.subsys); 680 configfs_unregister_subsystem(&clusters_root.subsys);
681 } 681 }
682 682
683 /* 683 /*
684 * Functions for user space to read/write attributes 684 * Functions for user space to read/write attributes
685 */ 685 */
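/*
 * Editor's note (illustrative): configfs dispatches reads and writes on the
 * files above through each item type's ct_item_ops.  For example, a write
 * to /config/dlm/<cluster>/tcp_port reaches store_cluster() below, which
 * finds the matching cluster_attribute and calls its ->store() -- here the
 * tcp_port_write() helper generated by CLUSTER_ATTR() earlier in this file.
 */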
686 686
687 static ssize_t show_cluster(struct config_item *i, struct configfs_attribute *a, 687 static ssize_t show_cluster(struct config_item *i, struct configfs_attribute *a,
688 char *buf) 688 char *buf)
689 { 689 {
690 struct dlm_cluster *cl = config_item_to_cluster(i); 690 struct dlm_cluster *cl = config_item_to_cluster(i);
691 struct cluster_attribute *cla = 691 struct cluster_attribute *cla =
692 container_of(a, struct cluster_attribute, attr); 692 container_of(a, struct cluster_attribute, attr);
693 return cla->show ? cla->show(cl, buf) : 0; 693 return cla->show ? cla->show(cl, buf) : 0;
694 } 694 }
695 695
696 static ssize_t store_cluster(struct config_item *i, 696 static ssize_t store_cluster(struct config_item *i,
697 struct configfs_attribute *a, 697 struct configfs_attribute *a,
698 const char *buf, size_t len) 698 const char *buf, size_t len)
699 { 699 {
700 struct dlm_cluster *cl = config_item_to_cluster(i); 700 struct dlm_cluster *cl = config_item_to_cluster(i);
701 struct cluster_attribute *cla = 701 struct cluster_attribute *cla =
702 container_of(a, struct cluster_attribute, attr); 702 container_of(a, struct cluster_attribute, attr);
703 return cla->store ? cla->store(cl, buf, len) : -EINVAL; 703 return cla->store ? cla->store(cl, buf, len) : -EINVAL;
704 } 704 }
705 705
706 static ssize_t show_comm(struct config_item *i, struct configfs_attribute *a, 706 static ssize_t show_comm(struct config_item *i, struct configfs_attribute *a,
707 char *buf) 707 char *buf)
708 { 708 {
709 struct dlm_comm *cm = config_item_to_comm(i); 709 struct dlm_comm *cm = config_item_to_comm(i);
710 struct comm_attribute *cma = 710 struct comm_attribute *cma =
711 container_of(a, struct comm_attribute, attr); 711 container_of(a, struct comm_attribute, attr);
712 return cma->show ? cma->show(cm, buf) : 0; 712 return cma->show ? cma->show(cm, buf) : 0;
713 } 713 }
714 714
715 static ssize_t store_comm(struct config_item *i, struct configfs_attribute *a, 715 static ssize_t store_comm(struct config_item *i, struct configfs_attribute *a,
716 const char *buf, size_t len) 716 const char *buf, size_t len)
717 { 717 {
718 struct dlm_comm *cm = config_item_to_comm(i); 718 struct dlm_comm *cm = config_item_to_comm(i);
719 struct comm_attribute *cma = 719 struct comm_attribute *cma =
720 container_of(a, struct comm_attribute, attr); 720 container_of(a, struct comm_attribute, attr);
721 return cma->store ? cma->store(cm, buf, len) : -EINVAL; 721 return cma->store ? cma->store(cm, buf, len) : -EINVAL;
722 } 722 }
723 723
724 static ssize_t comm_nodeid_read(struct dlm_comm *cm, char *buf) 724 static ssize_t comm_nodeid_read(struct dlm_comm *cm, char *buf)
725 { 725 {
726 return sprintf(buf, "%d\n", cm->nodeid); 726 return sprintf(buf, "%d\n", cm->nodeid);
727 } 727 }
728 728
729 static ssize_t comm_nodeid_write(struct dlm_comm *cm, const char *buf, 729 static ssize_t comm_nodeid_write(struct dlm_comm *cm, const char *buf,
730 size_t len) 730 size_t len)
731 { 731 {
732 cm->nodeid = simple_strtol(buf, NULL, 0); 732 cm->nodeid = simple_strtol(buf, NULL, 0);
733 return len; 733 return len;
734 } 734 }
735 735
736 static ssize_t comm_local_read(struct dlm_comm *cm, char *buf) 736 static ssize_t comm_local_read(struct dlm_comm *cm, char *buf)
737 { 737 {
738 return sprintf(buf, "%d\n", cm->local); 738 return sprintf(buf, "%d\n", cm->local);
739 } 739 }
740 740
741 static ssize_t comm_local_write(struct dlm_comm *cm, const char *buf, 741 static ssize_t comm_local_write(struct dlm_comm *cm, const char *buf,
742 size_t len) 742 size_t len)
743 { 743 {
744 cm->local = simple_strtol(buf, NULL, 0); 744 cm->local = simple_strtol(buf, NULL, 0);
745 if (cm->local && !local_comm) 745 if (cm->local && !local_comm)
746 local_comm = cm; 746 local_comm = cm;
747 return len; 747 return len;
748 } 748 }
749 749
750 static ssize_t comm_addr_write(struct dlm_comm *cm, const char *buf, size_t len) 750 static ssize_t comm_addr_write(struct dlm_comm *cm, const char *buf, size_t len)
751 { 751 {
752 struct sockaddr_storage *addr; 752 struct sockaddr_storage *addr;
753 int rv; 753 int rv;
754 754
755 if (len != sizeof(struct sockaddr_storage)) 755 if (len != sizeof(struct sockaddr_storage))
756 return -EINVAL; 756 return -EINVAL;
757 757
758 if (cm->addr_count >= DLM_MAX_ADDR_COUNT) 758 if (cm->addr_count >= DLM_MAX_ADDR_COUNT)
759 return -ENOSPC; 759 return -ENOSPC;
760 760
761 addr = kzalloc(sizeof(*addr), GFP_NOFS); 761 addr = kzalloc(sizeof(*addr), GFP_NOFS);
762 if (!addr) 762 if (!addr)
763 return -ENOMEM; 763 return -ENOMEM;
764 764
765 memcpy(addr, buf, len); 765 memcpy(addr, buf, len);
766 766
767 rv = dlm_lowcomms_addr(cm->nodeid, addr, len); 767 rv = dlm_lowcomms_addr(cm->nodeid, addr, len);
768 if (rv) { 768 if (rv) {
769 kfree(addr); 769 kfree(addr);
770 return rv; 770 return rv;
771 } 771 }
772 772
773 cm->addr[cm->addr_count++] = addr; 773 cm->addr[cm->addr_count++] = addr;
774 return len; 774 return len;
775 } 775 }
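/*
 * Editor's note (illustrative): unlike the text attributes, "addr" must be
 * written as a raw binary struct sockaddr_storage -- the length check above
 * rejects anything else.  A hedged user-space sketch (the address and file
 * descriptor setup are assumptions; dlm_controld does the real work):
 *
 *   struct sockaddr_storage ss = { 0 };
 *   struct sockaddr_in *sin = (struct sockaddr_in *)&ss;
 *
 *   sin->sin_family = AF_INET;
 *   inet_pton(AF_INET, "10.0.0.1", &sin->sin_addr);
 *   write(fd, &ss, sizeof(ss));   fd is open on .../comms/<comm>/addr
 */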
776 776
777 static ssize_t comm_addr_list_read(struct dlm_comm *cm, char *buf) 777 static ssize_t comm_addr_list_read(struct dlm_comm *cm, char *buf)
778 { 778 {
779 ssize_t s; 779 ssize_t s;
780 ssize_t allowance; 780 ssize_t allowance;
781 int i; 781 int i;
782 struct sockaddr_storage *addr; 782 struct sockaddr_storage *addr;
783 struct sockaddr_in *addr_in; 783 struct sockaddr_in *addr_in;
784 struct sockaddr_in6 *addr_in6; 784 struct sockaddr_in6 *addr_in6;
785 785
786 /* Taken from ip6_addr_string() defined in lib/vsprintf.c */ 786 /* Taken from ip6_addr_string() defined in lib/vsprintf.c */
787 char buf0[sizeof("AF_INET6 xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:255.255.255.255\n")]; 787 char buf0[sizeof("AF_INET6 xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:255.255.255.255\n")];
788 788
789 789
790 /* Derived from SIMPLE_ATTR_SIZE of fs/configfs/file.c */ 790 /* Derived from SIMPLE_ATTR_SIZE of fs/configfs/file.c */
791 allowance = 4096; 791 allowance = 4096;
792 buf[0] = '\0'; 792 buf[0] = '\0';
793 793
794 for (i = 0; i < cm->addr_count; i++) { 794 for (i = 0; i < cm->addr_count; i++) {
795 addr = cm->addr[i]; 795 addr = cm->addr[i];
796 796
797 switch (addr->ss_family) { 797 switch (addr->ss_family) {
798 case AF_INET: 798 case AF_INET:
799 addr_in = (struct sockaddr_in *)addr; 799 addr_in = (struct sockaddr_in *)addr;
800 s = sprintf(buf0, "AF_INET %pI4\n", &addr_in->sin_addr.s_addr); 800 s = sprintf(buf0, "AF_INET %pI4\n", &addr_in->sin_addr.s_addr);
801 break; 801 break;
802 case AF_INET6: 802 case AF_INET6:
803 addr_in6 = (struct sockaddr_in6 *)addr; 803 addr_in6 = (struct sockaddr_in6 *)addr;
804 s = sprintf(buf0, "AF_INET6 %pI6\n", &addr_in6->sin6_addr); 804 s = sprintf(buf0, "AF_INET6 %pI6\n", &addr_in6->sin6_addr);
805 break; 805 break;
806 default: 806 default:
807 s = sprintf(buf0, "%s\n", "<UNKNOWN>"); 807 s = sprintf(buf0, "%s\n", "<UNKNOWN>");
808 break; 808 break;
809 } 809 }
810 allowance -= s; 810 allowance -= s;
811 if (allowance >= 0) 811 if (allowance >= 0)
812 strcat(buf, buf0); 812 strcat(buf, buf0);
813 else { 813 else {
814 allowance += s; 814 allowance += s;
815 break; 815 break;
816 } 816 }
817 } 817 }
818 return 4096 - allowance; 818 return 4096 - allowance;
819 } 819 }
820 820
821 static ssize_t show_node(struct config_item *i, struct configfs_attribute *a, 821 static ssize_t show_node(struct config_item *i, struct configfs_attribute *a,
822 char *buf) 822 char *buf)
823 { 823 {
824 struct dlm_node *nd = config_item_to_node(i); 824 struct dlm_node *nd = config_item_to_node(i);
825 struct node_attribute *nda = 825 struct node_attribute *nda =
826 container_of(a, struct node_attribute, attr); 826 container_of(a, struct node_attribute, attr);
827 return nda->show ? nda->show(nd, buf) : 0; 827 return nda->show ? nda->show(nd, buf) : 0;
828 } 828 }
829 829
830 static ssize_t store_node(struct config_item *i, struct configfs_attribute *a, 830 static ssize_t store_node(struct config_item *i, struct configfs_attribute *a,
831 const char *buf, size_t len) 831 const char *buf, size_t len)
832 { 832 {
833 struct dlm_node *nd = config_item_to_node(i); 833 struct dlm_node *nd = config_item_to_node(i);
834 struct node_attribute *nda = 834 struct node_attribute *nda =
835 container_of(a, struct node_attribute, attr); 835 container_of(a, struct node_attribute, attr);
836 return nda->store ? nda->store(nd, buf, len) : -EINVAL; 836 return nda->store ? nda->store(nd, buf, len) : -EINVAL;
837 } 837 }
838 838
839 static ssize_t node_nodeid_read(struct dlm_node *nd, char *buf) 839 static ssize_t node_nodeid_read(struct dlm_node *nd, char *buf)
840 { 840 {
841 return sprintf(buf, "%d\n", nd->nodeid); 841 return sprintf(buf, "%d\n", nd->nodeid);
842 } 842 }
843 843
844 static ssize_t node_nodeid_write(struct dlm_node *nd, const char *buf, 844 static ssize_t node_nodeid_write(struct dlm_node *nd, const char *buf,
845 size_t len) 845 size_t len)
846 { 846 {
847 uint32_t seq = 0; 847 uint32_t seq = 0;
848 nd->nodeid = simple_strtol(buf, NULL, 0); 848 nd->nodeid = simple_strtol(buf, NULL, 0);
849 dlm_comm_seq(nd->nodeid, &seq); 849 dlm_comm_seq(nd->nodeid, &seq);
850 nd->comm_seq = seq; 850 nd->comm_seq = seq;
851 return len; 851 return len;
852 } 852 }
853 853
854 static ssize_t node_weight_read(struct dlm_node *nd, char *buf) 854 static ssize_t node_weight_read(struct dlm_node *nd, char *buf)
855 { 855 {
856 return sprintf(buf, "%d\n", nd->weight); 856 return sprintf(buf, "%d\n", nd->weight);
857 } 857 }
858 858
859 static ssize_t node_weight_write(struct dlm_node *nd, const char *buf, 859 static ssize_t node_weight_write(struct dlm_node *nd, const char *buf,
860 size_t len) 860 size_t len)
861 { 861 {
862 nd->weight = simple_strtol(buf, NULL, 0); 862 nd->weight = simple_strtol(buf, NULL, 0);
863 return len; 863 return len;
864 } 864 }
865 865
866 /* 866 /*
867 * Functions for the dlm to get the info that's been configured 867 * Functions for the dlm to get the info that's been configured
868 */ 868 */
869 869
870 static struct dlm_space *get_space(char *name) 870 static struct dlm_space *get_space(char *name)
871 { 871 {
872 struct config_item *i; 872 struct config_item *i;
873 873
874 if (!space_list) 874 if (!space_list)
875 return NULL; 875 return NULL;
876 876
877 mutex_lock(&space_list->cg_subsys->su_mutex); 877 mutex_lock(&space_list->cg_subsys->su_mutex);
878 i = config_group_find_item(space_list, name); 878 i = config_group_find_item(space_list, name);
879 mutex_unlock(&space_list->cg_subsys->su_mutex); 879 mutex_unlock(&space_list->cg_subsys->su_mutex);
880 880
881 return config_item_to_space(i); 881 return config_item_to_space(i);
882 } 882 }
883 883
884 static void put_space(struct dlm_space *sp) 884 static void put_space(struct dlm_space *sp)
885 { 885 {
886 config_item_put(&sp->group.cg_item); 886 config_item_put(&sp->group.cg_item);
887 } 887 }
888 888
889 static struct dlm_comm *get_comm(int nodeid) 889 static struct dlm_comm *get_comm(int nodeid)
890 { 890 {
891 struct config_item *i; 891 struct config_item *i;
892 struct dlm_comm *cm = NULL; 892 struct dlm_comm *cm = NULL;
893 int found = 0; 893 int found = 0;
894 894
895 if (!comm_list) 895 if (!comm_list)
896 return NULL; 896 return NULL;
897 897
898 mutex_lock(&clusters_root.subsys.su_mutex); 898 mutex_lock(&clusters_root.subsys.su_mutex);
899 899
900 list_for_each_entry(i, &comm_list->cg_children, ci_entry) { 900 list_for_each_entry(i, &comm_list->cg_children, ci_entry) {
901 cm = config_item_to_comm(i); 901 cm = config_item_to_comm(i);
902 902
903 if (cm->nodeid != nodeid) 903 if (cm->nodeid != nodeid)
904 continue; 904 continue;
905 found = 1; 905 found = 1;
906 config_item_get(i); 906 config_item_get(i);
907 break; 907 break;
908 } 908 }
909 mutex_unlock(&clusters_root.subsys.su_mutex); 909 mutex_unlock(&clusters_root.subsys.su_mutex);
910 910
911 if (!found) 911 if (!found)
912 cm = NULL; 912 cm = NULL;
913 return cm; 913 return cm;
914 } 914 }
915 915
916 static void put_comm(struct dlm_comm *cm) 916 static void put_comm(struct dlm_comm *cm)
917 { 917 {
918 config_item_put(&cm->item); 918 config_item_put(&cm->item);
919 } 919 }
920 920
921 /* caller must free mem */ 921 /* caller must free mem */
922 int dlm_config_nodes(char *lsname, struct dlm_config_node **nodes_out, 922 int dlm_config_nodes(char *lsname, struct dlm_config_node **nodes_out,
923 int *count_out) 923 int *count_out)
924 { 924 {
925 struct dlm_space *sp; 925 struct dlm_space *sp;
926 struct dlm_node *nd; 926 struct dlm_node *nd;
927 struct dlm_config_node *nodes, *node; 927 struct dlm_config_node *nodes, *node;
928 int rv, count; 928 int rv, count;
929 929
930 sp = get_space(lsname); 930 sp = get_space(lsname);
931 if (!sp) 931 if (!sp)
932 return -EEXIST; 932 return -EEXIST;
933 933
934 mutex_lock(&sp->members_lock); 934 mutex_lock(&sp->members_lock);
935 if (!sp->members_count) { 935 if (!sp->members_count) {
936 rv = -EINVAL; 936 rv = -EINVAL;
937 printk(KERN_ERR "dlm: zero members_count\n"); 937 printk(KERN_ERR "dlm: zero members_count\n");
938 goto out; 938 goto out;
939 } 939 }
940 940
941 count = sp->members_count; 941 count = sp->members_count;
942 942
943 nodes = kcalloc(count, sizeof(struct dlm_config_node), GFP_NOFS); 943 nodes = kcalloc(count, sizeof(struct dlm_config_node), GFP_NOFS);
944 if (!nodes) { 944 if (!nodes) {
945 rv = -ENOMEM; 945 rv = -ENOMEM;
946 goto out; 946 goto out;
947 } 947 }
948 948
949 node = nodes; 949 node = nodes;
950 list_for_each_entry(nd, &sp->members, list) { 950 list_for_each_entry(nd, &sp->members, list) {
951 node->nodeid = nd->nodeid; 951 node->nodeid = nd->nodeid;
952 node->weight = nd->weight; 952 node->weight = nd->weight;
953 node->new = nd->new; 953 node->new = nd->new;
954 node->comm_seq = nd->comm_seq; 954 node->comm_seq = nd->comm_seq;
955 node++; 955 node++;
956 956
957 nd->new = 0; 957 nd->new = 0;
958 } 958 }
959 959
960 *count_out = count; 960 *count_out = count;
961 *nodes_out = nodes; 961 *nodes_out = nodes;
962 rv = 0; 962 rv = 0;
963 out: 963 out:
964 mutex_unlock(&sp->members_lock); 964 mutex_unlock(&sp->members_lock);
965 put_space(sp); 965 put_space(sp);
966 return rv; 966 return rv;
967 } 967 }
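/*
 * Editor's note (illustrative): per the "caller must free mem" comment, the
 * nodes array handed back by dlm_config_nodes() belongs to the caller.  A
 * hedged sketch of the expected pattern (the lockspace name is an assumption):
 *
 *   struct dlm_config_node *nodes;
 *   int count, error;
 *
 *   error = dlm_config_nodes("mylockspace", &nodes, &count);
 *   if (!error) {
 *           use nodes[0] .. nodes[count - 1], then
 *           kfree(nodes);
 *   }
 */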
968 968
969 int dlm_comm_seq(int nodeid, uint32_t *seq) 969 int dlm_comm_seq(int nodeid, uint32_t *seq)
970 { 970 {
971 struct dlm_comm *cm = get_comm(nodeid); 971 struct dlm_comm *cm = get_comm(nodeid);
972 if (!cm) 972 if (!cm)
973 return -EEXIST; 973 return -EEXIST;
974 *seq = cm->seq; 974 *seq = cm->seq;
975 put_comm(cm); 975 put_comm(cm);
976 return 0; 976 return 0;
977 } 977 }
978 978
979 int dlm_our_nodeid(void) 979 int dlm_our_nodeid(void)
980 { 980 {
981 return local_comm ? local_comm->nodeid : 0; 981 return local_comm ? local_comm->nodeid : 0;
982 } 982 }
983 983
984 /* num 0 is first addr, num 1 is second addr */ 984 /* num 0 is first addr, num 1 is second addr */
985 int dlm_our_addr(struct sockaddr_storage *addr, int num) 985 int dlm_our_addr(struct sockaddr_storage *addr, int num)
986 { 986 {
987 if (!local_comm) 987 if (!local_comm)
988 return -1; 988 return -1;
989 if (num + 1 > local_comm->addr_count) 989 if (num + 1 > local_comm->addr_count)
990 return -1; 990 return -1;
991 memcpy(addr, local_comm->addr[num], sizeof(*addr)); 991 memcpy(addr, local_comm->addr[num], sizeof(*addr));
992 return 0; 992 return 0;
993 } 993 }
994 994
995 /* Config file defaults */ 995 /* Config file defaults */
996 #define DEFAULT_TCP_PORT 21064 996 #define DEFAULT_TCP_PORT 21064
997 #define DEFAULT_BUFFER_SIZE 4096 997 #define DEFAULT_BUFFER_SIZE 4096
998 #define DEFAULT_RSBTBL_SIZE 1024 998 #define DEFAULT_RSBTBL_SIZE 1024
999 #define DEFAULT_RECOVER_TIMER 5 999 #define DEFAULT_RECOVER_TIMER 5
1000 #define DEFAULT_TOSS_SECS 10 1000 #define DEFAULT_TOSS_SECS 10
1001 #define DEFAULT_SCAN_SECS 5 1001 #define DEFAULT_SCAN_SECS 5
1002 #define DEFAULT_LOG_DEBUG 0 1002 #define DEFAULT_LOG_DEBUG 0
1003 #define DEFAULT_PROTOCOL 0 1003 #define DEFAULT_PROTOCOL 0
1004 #define DEFAULT_TIMEWARN_CS 500 /* 5 sec = 500 centiseconds */ 1004 #define DEFAULT_TIMEWARN_CS 500 /* 5 sec = 500 centiseconds */
1005 #define DEFAULT_WAITWARN_US 0 1005 #define DEFAULT_WAITWARN_US 0
1006 #define DEFAULT_NEW_RSB_COUNT 128 1006 #define DEFAULT_NEW_RSB_COUNT 128
1007 #define DEFAULT_RECOVER_CALLBACKS 0 1007 #define DEFAULT_RECOVER_CALLBACKS 0
1008 #define DEFAULT_CLUSTER_NAME "" 1008 #define DEFAULT_CLUSTER_NAME ""
1009 1009
1010 struct dlm_config_info dlm_config = { 1010 struct dlm_config_info dlm_config = {
1011 .ci_tcp_port = DEFAULT_TCP_PORT, 1011 .ci_tcp_port = DEFAULT_TCP_PORT,
1012 .ci_buffer_size = DEFAULT_BUFFER_SIZE, 1012 .ci_buffer_size = DEFAULT_BUFFER_SIZE,
1013 .ci_rsbtbl_size = DEFAULT_RSBTBL_SIZE, 1013 .ci_rsbtbl_size = DEFAULT_RSBTBL_SIZE,
1014 .ci_recover_timer = DEFAULT_RECOVER_TIMER, 1014 .ci_recover_timer = DEFAULT_RECOVER_TIMER,
1015 .ci_toss_secs = DEFAULT_TOSS_SECS, 1015 .ci_toss_secs = DEFAULT_TOSS_SECS,
1016 .ci_scan_secs = DEFAULT_SCAN_SECS, 1016 .ci_scan_secs = DEFAULT_SCAN_SECS,
1017 .ci_log_debug = DEFAULT_LOG_DEBUG, 1017 .ci_log_debug = DEFAULT_LOG_DEBUG,
1018 .ci_protocol = DEFAULT_PROTOCOL, 1018 .ci_protocol = DEFAULT_PROTOCOL,
1019 .ci_timewarn_cs = DEFAULT_TIMEWARN_CS, 1019 .ci_timewarn_cs = DEFAULT_TIMEWARN_CS,
1020 .ci_waitwarn_us = DEFAULT_WAITWARN_US, 1020 .ci_waitwarn_us = DEFAULT_WAITWARN_US,
1021 .ci_new_rsb_count = DEFAULT_NEW_RSB_COUNT, 1021 .ci_new_rsb_count = DEFAULT_NEW_RSB_COUNT,
1022 .ci_recover_callbacks = DEFAULT_RECOVER_CALLBACKS, 1022 .ci_recover_callbacks = DEFAULT_RECOVER_CALLBACKS,
1023 .ci_cluster_name = DEFAULT_CLUSTER_NAME 1023 .ci_cluster_name = DEFAULT_CLUSTER_NAME
1024 }; 1024 };
1025 1025
1026 1026
1 /* 1 /*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. 2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved. 3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
4 * 4 *
5 * This copyrighted material is made available to anyone wishing to use, 5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions 6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2. 7 * of the GNU General Public License version 2.
8 */ 8 */
9 9
10 #include <linux/sched.h> 10 #include <linux/sched.h>
11 #include <linux/spinlock.h> 11 #include <linux/spinlock.h>
12 #include <linux/completion.h> 12 #include <linux/completion.h>
13 #include <linux/buffer_head.h> 13 #include <linux/buffer_head.h>
14 #include <linux/module.h> 14 #include <linux/module.h>
15 #include <linux/kobject.h> 15 #include <linux/kobject.h>
16 #include <asm/uaccess.h> 16 #include <asm/uaccess.h>
17 #include <linux/gfs2_ondisk.h> 17 #include <linux/gfs2_ondisk.h>
18 #include <linux/genhd.h> 18 #include <linux/genhd.h>
19 19
20 #include "gfs2.h" 20 #include "gfs2.h"
21 #include "incore.h" 21 #include "incore.h"
22 #include "sys.h" 22 #include "sys.h"
23 #include "super.h" 23 #include "super.h"
24 #include "glock.h" 24 #include "glock.h"
25 #include "quota.h" 25 #include "quota.h"
26 #include "util.h" 26 #include "util.h"
27 #include "glops.h" 27 #include "glops.h"
28 #include "recovery.h" 28 #include "recovery.h"
29 29
30 struct gfs2_attr { 30 struct gfs2_attr {
31 struct attribute attr; 31 struct attribute attr;
32 ssize_t (*show)(struct gfs2_sbd *, char *); 32 ssize_t (*show)(struct gfs2_sbd *, char *);
33 ssize_t (*store)(struct gfs2_sbd *, const char *, size_t); 33 ssize_t (*store)(struct gfs2_sbd *, const char *, size_t);
34 }; 34 };
35 35
36 static ssize_t gfs2_attr_show(struct kobject *kobj, struct attribute *attr, 36 static ssize_t gfs2_attr_show(struct kobject *kobj, struct attribute *attr,
37 char *buf) 37 char *buf)
38 { 38 {
39 struct gfs2_sbd *sdp = container_of(kobj, struct gfs2_sbd, sd_kobj); 39 struct gfs2_sbd *sdp = container_of(kobj, struct gfs2_sbd, sd_kobj);
40 struct gfs2_attr *a = container_of(attr, struct gfs2_attr, attr); 40 struct gfs2_attr *a = container_of(attr, struct gfs2_attr, attr);
41 return a->show ? a->show(sdp, buf) : 0; 41 return a->show ? a->show(sdp, buf) : 0;
42 } 42 }
43 43
44 static ssize_t gfs2_attr_store(struct kobject *kobj, struct attribute *attr, 44 static ssize_t gfs2_attr_store(struct kobject *kobj, struct attribute *attr,
45 const char *buf, size_t len) 45 const char *buf, size_t len)
46 { 46 {
47 struct gfs2_sbd *sdp = container_of(kobj, struct gfs2_sbd, sd_kobj); 47 struct gfs2_sbd *sdp = container_of(kobj, struct gfs2_sbd, sd_kobj);
48 struct gfs2_attr *a = container_of(attr, struct gfs2_attr, attr); 48 struct gfs2_attr *a = container_of(attr, struct gfs2_attr, attr);
49 return a->store ? a->store(sdp, buf, len) : len; 49 return a->store ? a->store(sdp, buf, len) : len;
50 } 50 }
51 51
52 static const struct sysfs_ops gfs2_attr_ops = { 52 static const struct sysfs_ops gfs2_attr_ops = {
53 .show = gfs2_attr_show, 53 .show = gfs2_attr_show,
54 .store = gfs2_attr_store, 54 .store = gfs2_attr_store,
55 }; 55 };
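
The show/store wrappers above recover the enclosing objects from the embedded struct kobject and struct attribute with container_of() before dispatching to the per-attribute callback. For readers unfamiliar with that idiom, here is a minimal, compilable userspace sketch of the same pointer arithmetic; the names (my_container, my_container_of) are made up for illustration and only offsetof() from <stddef.h> is assumed:

    #include <stddef.h>
    #include <stdio.h>

    struct my_container {
            int id;
            int member;                     /* stands in for the embedded struct attribute */
    };

    /* same arithmetic as the kernel's container_of(): subtract the member's offset */
    #define my_container_of(ptr, type, field) \
            ((type *)((char *)(ptr) - offsetof(type, field)))

    int main(void)
    {
            struct my_container c = { .id = 42 };
            int *m = &c.member;             /* a callback only receives the member pointer */
            struct my_container *back = my_container_of(m, struct my_container, member);

            printf("recovered id = %d\n", back->id);        /* prints 42 */
            return 0;
    }
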
56 56
57 57
58 static struct kset *gfs2_kset; 58 static struct kset *gfs2_kset;
59 59
60 static ssize_t id_show(struct gfs2_sbd *sdp, char *buf) 60 static ssize_t id_show(struct gfs2_sbd *sdp, char *buf)
61 { 61 {
62 return snprintf(buf, PAGE_SIZE, "%u:%u\n", 62 return snprintf(buf, PAGE_SIZE, "%u:%u\n",
63 MAJOR(sdp->sd_vfs->s_dev), MINOR(sdp->sd_vfs->s_dev)); 63 MAJOR(sdp->sd_vfs->s_dev), MINOR(sdp->sd_vfs->s_dev));
64 } 64 }
65 65
66 static ssize_t fsname_show(struct gfs2_sbd *sdp, char *buf) 66 static ssize_t fsname_show(struct gfs2_sbd *sdp, char *buf)
67 { 67 {
68 return snprintf(buf, PAGE_SIZE, "%s\n", sdp->sd_fsname); 68 return snprintf(buf, PAGE_SIZE, "%s\n", sdp->sd_fsname);
69 } 69 }
70 70
71 static int gfs2_uuid_valid(const u8 *uuid) 71 static int gfs2_uuid_valid(const u8 *uuid)
72 { 72 {
73 int i; 73 int i;
74 74
75 for (i = 0; i < 16; i++) { 75 for (i = 0; i < 16; i++) {
76 if (uuid[i]) 76 if (uuid[i])
77 return 1; 77 return 1;
78 } 78 }
79 return 0; 79 return 0;
80 } 80 }
81 81
82 static ssize_t uuid_show(struct gfs2_sbd *sdp, char *buf) 82 static ssize_t uuid_show(struct gfs2_sbd *sdp, char *buf)
83 { 83 {
84 struct super_block *s = sdp->sd_vfs; 84 struct super_block *s = sdp->sd_vfs;
85 const u8 *uuid = s->s_uuid; 85 const u8 *uuid = s->s_uuid;
86 buf[0] = '\0'; 86 buf[0] = '\0';
87 if (!gfs2_uuid_valid(uuid)) 87 if (!gfs2_uuid_valid(uuid))
88 return 0; 88 return 0;
89 return snprintf(buf, PAGE_SIZE, "%pUB\n", uuid); 89 return snprintf(buf, PAGE_SIZE, "%pUB\n", uuid);
90 } 90 }
91 91
92 static ssize_t freeze_show(struct gfs2_sbd *sdp, char *buf) 92 static ssize_t freeze_show(struct gfs2_sbd *sdp, char *buf)
93 { 93 {
94 unsigned int count; 94 unsigned int count;
95 95
96 mutex_lock(&sdp->sd_freeze_lock); 96 mutex_lock(&sdp->sd_freeze_lock);
97 count = sdp->sd_freeze_count; 97 count = sdp->sd_freeze_count;
98 mutex_unlock(&sdp->sd_freeze_lock); 98 mutex_unlock(&sdp->sd_freeze_lock);
99 99
100 return snprintf(buf, PAGE_SIZE, "%u\n", count); 100 return snprintf(buf, PAGE_SIZE, "%u\n", count);
101 } 101 }
102 102
103 static ssize_t freeze_store(struct gfs2_sbd *sdp, const char *buf, size_t len) 103 static ssize_t freeze_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
104 { 104 {
105 ssize_t ret = len; 105 ssize_t ret = len;
106 int error = 0; 106 int error = 0;
107 int n = simple_strtol(buf, NULL, 0); 107 int n = simple_strtol(buf, NULL, 0);
108 108
109 if (!capable(CAP_SYS_ADMIN)) 109 if (!capable(CAP_SYS_ADMIN))
110 return -EACCES; 110 return -EPERM;
111 111
112 switch (n) { 112 switch (n) {
113 case 0: 113 case 0:
114 gfs2_unfreeze_fs(sdp); 114 gfs2_unfreeze_fs(sdp);
115 break; 115 break;
116 case 1: 116 case 1:
117 error = gfs2_freeze_fs(sdp); 117 error = gfs2_freeze_fs(sdp);
118 break; 118 break;
119 default: 119 default:
120 ret = -EINVAL; 120 ret = -EINVAL;
121 } 121 }
122 122
123 if (error) 123 if (error)
124 fs_warn(sdp, "freeze %d error %d", n, error); 124 fs_warn(sdp, "freeze %d error %d", n, error);
125 125
126 return ret; 126 return ret;
127 } 127 }
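
freeze_store() is one of the sites this commit touches: a caller that reaches the capability check without CAP_SYS_ADMIN now gets EPERM instead of EACCES. Note that an ordinary user is usually stopped earlier, at open(), by the 0644 file mode; the changed error code is what, say, a root-owned but capability-dropped process observes. A hedged userspace sketch follows (the /sys/fs/gfs2/<locktable>/freeze path is inferred from gfs2_sys_init() and gfs2_sys_fs_add() further down; "mycluster:myfs" is a placeholder locktable name):

    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
            /* placeholder locktable name; substitute the one the fs was mounted with */
            int fd = open("/sys/fs/gfs2/mycluster:myfs/freeze", O_WRONLY);

            if (fd < 0) {
                    perror("open");         /* plain users fail here with EACCES (file mode) */
                    return 1;
            }
            if (write(fd, "1", 1) < 0)      /* without CAP_SYS_ADMIN: EPERM after this patch */
                    printf("write failed: %s\n", strerror(errno));
            close(fd);
            return 0;
    }
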
128 128
129 static ssize_t withdraw_show(struct gfs2_sbd *sdp, char *buf) 129 static ssize_t withdraw_show(struct gfs2_sbd *sdp, char *buf)
130 { 130 {
131 unsigned int b = test_bit(SDF_SHUTDOWN, &sdp->sd_flags); 131 unsigned int b = test_bit(SDF_SHUTDOWN, &sdp->sd_flags);
132 return snprintf(buf, PAGE_SIZE, "%u\n", b); 132 return snprintf(buf, PAGE_SIZE, "%u\n", b);
133 } 133 }
134 134
135 static ssize_t withdraw_store(struct gfs2_sbd *sdp, const char *buf, size_t len) 135 static ssize_t withdraw_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
136 { 136 {
137 if (!capable(CAP_SYS_ADMIN)) 137 if (!capable(CAP_SYS_ADMIN))
138 return -EACCES; 138 return -EPERM;
139 139
140 if (simple_strtol(buf, NULL, 0) != 1) 140 if (simple_strtol(buf, NULL, 0) != 1)
141 return -EINVAL; 141 return -EINVAL;
142 142
143 gfs2_lm_withdraw(sdp, 143 gfs2_lm_withdraw(sdp,
144 "GFS2: fsid=%s: withdrawing from cluster at user's request\n", 144 "GFS2: fsid=%s: withdrawing from cluster at user's request\n",
145 sdp->sd_fsname); 145 sdp->sd_fsname);
146 return len; 146 return len;
147 } 147 }
148 148
149 static ssize_t statfs_sync_store(struct gfs2_sbd *sdp, const char *buf, 149 static ssize_t statfs_sync_store(struct gfs2_sbd *sdp, const char *buf,
150 size_t len) 150 size_t len)
151 { 151 {
152 if (!capable(CAP_SYS_ADMIN)) 152 if (!capable(CAP_SYS_ADMIN))
153 return -EACCES; 153 return -EPERM;
154 154
155 if (simple_strtol(buf, NULL, 0) != 1) 155 if (simple_strtol(buf, NULL, 0) != 1)
156 return -EINVAL; 156 return -EINVAL;
157 157
158 gfs2_statfs_sync(sdp->sd_vfs, 0); 158 gfs2_statfs_sync(sdp->sd_vfs, 0);
159 return len; 159 return len;
160 } 160 }
161 161
162 static ssize_t quota_sync_store(struct gfs2_sbd *sdp, const char *buf, 162 static ssize_t quota_sync_store(struct gfs2_sbd *sdp, const char *buf,
163 size_t len) 163 size_t len)
164 { 164 {
165 if (!capable(CAP_SYS_ADMIN)) 165 if (!capable(CAP_SYS_ADMIN))
166 return -EACCES; 166 return -EPERM;
167 167
168 if (simple_strtol(buf, NULL, 0) != 1) 168 if (simple_strtol(buf, NULL, 0) != 1)
169 return -EINVAL; 169 return -EINVAL;
170 170
171 gfs2_quota_sync(sdp->sd_vfs, 0); 171 gfs2_quota_sync(sdp->sd_vfs, 0);
172 return len; 172 return len;
173 } 173 }
174 174
175 static ssize_t quota_refresh_user_store(struct gfs2_sbd *sdp, const char *buf, 175 static ssize_t quota_refresh_user_store(struct gfs2_sbd *sdp, const char *buf,
176 size_t len) 176 size_t len)
177 { 177 {
178 int error; 178 int error;
179 u32 id; 179 u32 id;
180 180
181 if (!capable(CAP_SYS_ADMIN)) 181 if (!capable(CAP_SYS_ADMIN))
182 return -EACCES; 182 return -EPERM;
183 183
184 id = simple_strtoul(buf, NULL, 0); 184 id = simple_strtoul(buf, NULL, 0);
185 185
186 error = gfs2_quota_refresh(sdp, 1, id); 186 error = gfs2_quota_refresh(sdp, 1, id);
187 return error ? error : len; 187 return error ? error : len;
188 } 188 }
189 189
190 static ssize_t quota_refresh_group_store(struct gfs2_sbd *sdp, const char *buf, 190 static ssize_t quota_refresh_group_store(struct gfs2_sbd *sdp, const char *buf,
191 size_t len) 191 size_t len)
192 { 192 {
193 int error; 193 int error;
194 u32 id; 194 u32 id;
195 195
196 if (!capable(CAP_SYS_ADMIN)) 196 if (!capable(CAP_SYS_ADMIN))
197 return -EACCES; 197 return -EPERM;
198 198
199 id = simple_strtoul(buf, NULL, 0); 199 id = simple_strtoul(buf, NULL, 0);
200 200
201 error = gfs2_quota_refresh(sdp, 0, id); 201 error = gfs2_quota_refresh(sdp, 0, id);
202 return error ? error : len; 202 return error ? error : len;
203 } 203 }
204 204
205 static ssize_t demote_rq_store(struct gfs2_sbd *sdp, const char *buf, size_t len) 205 static ssize_t demote_rq_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
206 { 206 {
207 struct gfs2_glock *gl; 207 struct gfs2_glock *gl;
208 const struct gfs2_glock_operations *glops; 208 const struct gfs2_glock_operations *glops;
209 unsigned int glmode; 209 unsigned int glmode;
210 unsigned int gltype; 210 unsigned int gltype;
211 unsigned long long glnum; 211 unsigned long long glnum;
212 char mode[16]; 212 char mode[16];
213 int rv; 213 int rv;
214 214
215 if (!capable(CAP_SYS_ADMIN)) 215 if (!capable(CAP_SYS_ADMIN))
216 return -EACCES; 216 return -EPERM;
217 217
218 rv = sscanf(buf, "%u:%llu %15s", &gltype, &glnum, 218 rv = sscanf(buf, "%u:%llu %15s", &gltype, &glnum,
219 mode); 219 mode);
220 if (rv != 3) 220 if (rv != 3)
221 return -EINVAL; 221 return -EINVAL;
222 222
223 if (strcmp(mode, "EX") == 0) 223 if (strcmp(mode, "EX") == 0)
224 glmode = LM_ST_UNLOCKED; 224 glmode = LM_ST_UNLOCKED;
225 else if ((strcmp(mode, "CW") == 0) || (strcmp(mode, "DF") == 0)) 225 else if ((strcmp(mode, "CW") == 0) || (strcmp(mode, "DF") == 0))
226 glmode = LM_ST_DEFERRED; 226 glmode = LM_ST_DEFERRED;
227 else if ((strcmp(mode, "PR") == 0) || (strcmp(mode, "SH") == 0)) 227 else if ((strcmp(mode, "PR") == 0) || (strcmp(mode, "SH") == 0))
228 glmode = LM_ST_SHARED; 228 glmode = LM_ST_SHARED;
229 else 229 else
230 return -EINVAL; 230 return -EINVAL;
231 231
232 if (gltype > LM_TYPE_JOURNAL) 232 if (gltype > LM_TYPE_JOURNAL)
233 return -EINVAL; 233 return -EINVAL;
234 if (gltype == LM_TYPE_NONDISK && glnum == GFS2_TRANS_LOCK) 234 if (gltype == LM_TYPE_NONDISK && glnum == GFS2_TRANS_LOCK)
235 glops = &gfs2_trans_glops; 235 glops = &gfs2_trans_glops;
236 else 236 else
237 glops = gfs2_glops_list[gltype]; 237 glops = gfs2_glops_list[gltype];
238 if (glops == NULL) 238 if (glops == NULL)
239 return -EINVAL; 239 return -EINVAL;
240 if (!test_and_set_bit(SDF_DEMOTE, &sdp->sd_flags)) 240 if (!test_and_set_bit(SDF_DEMOTE, &sdp->sd_flags))
241 fs_info(sdp, "demote interface used\n"); 241 fs_info(sdp, "demote interface used\n");
242 rv = gfs2_glock_get(sdp, glnum, glops, 0, &gl); 242 rv = gfs2_glock_get(sdp, glnum, glops, 0, &gl);
243 if (rv) 243 if (rv)
244 return rv; 244 return rv;
245 gfs2_glock_cb(gl, glmode); 245 gfs2_glock_cb(gl, glmode);
246 gfs2_glock_put(gl); 246 gfs2_glock_put(gl);
247 return len; 247 return len;
248 } 248 }
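
The sscanf() above defines the input format for the demote_rq file: "<gltype>:<glnum> <mode>", where mode is one of EX, CW/DF or PR/SH. A tiny self-contained illustration of the same parse (plain C, nothing GFS2-specific assumed):

    #include <stdio.h>

    int main(void)
    {
            unsigned int gltype;
            unsigned long long glnum;
            char mode[16];

            /* e.g. glock type 2, glock number 25, demote to shared */
            if (sscanf("2:25 PR", "%u:%llu %15s", &gltype, &glnum, mode) == 3)
                    printf("type=%u num=%llu mode=%s\n", gltype, glnum, mode);
            return 0;
    }
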
249 249
250 250
251 #define GFS2_ATTR(name, mode, show, store) \ 251 #define GFS2_ATTR(name, mode, show, store) \
252 static struct gfs2_attr gfs2_attr_##name = __ATTR(name, mode, show, store) 252 static struct gfs2_attr gfs2_attr_##name = __ATTR(name, mode, show, store)
253 253
254 GFS2_ATTR(id, 0444, id_show, NULL); 254 GFS2_ATTR(id, 0444, id_show, NULL);
255 GFS2_ATTR(fsname, 0444, fsname_show, NULL); 255 GFS2_ATTR(fsname, 0444, fsname_show, NULL);
256 GFS2_ATTR(uuid, 0444, uuid_show, NULL); 256 GFS2_ATTR(uuid, 0444, uuid_show, NULL);
257 GFS2_ATTR(freeze, 0644, freeze_show, freeze_store); 257 GFS2_ATTR(freeze, 0644, freeze_show, freeze_store);
258 GFS2_ATTR(withdraw, 0644, withdraw_show, withdraw_store); 258 GFS2_ATTR(withdraw, 0644, withdraw_show, withdraw_store);
259 GFS2_ATTR(statfs_sync, 0200, NULL, statfs_sync_store); 259 GFS2_ATTR(statfs_sync, 0200, NULL, statfs_sync_store);
260 GFS2_ATTR(quota_sync, 0200, NULL, quota_sync_store); 260 GFS2_ATTR(quota_sync, 0200, NULL, quota_sync_store);
261 GFS2_ATTR(quota_refresh_user, 0200, NULL, quota_refresh_user_store); 261 GFS2_ATTR(quota_refresh_user, 0200, NULL, quota_refresh_user_store);
262 GFS2_ATTR(quota_refresh_group, 0200, NULL, quota_refresh_group_store); 262 GFS2_ATTR(quota_refresh_group, 0200, NULL, quota_refresh_group_store);
263 GFS2_ATTR(demote_rq, 0200, NULL, demote_rq_store); 263 GFS2_ATTR(demote_rq, 0200, NULL, demote_rq_store);
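
For reference, GFS2_ATTR(freeze, 0644, freeze_show, freeze_store) expands, via the kernel's __ATTR() initializer of this era, to roughly the following (the sysfs file name comes from stringifying the first macro argument):

    static struct gfs2_attr gfs2_attr_freeze = {
            .attr  = { .name = "freeze", .mode = 0644 },
            .show  = freeze_show,
            .store = freeze_store,
    };
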
264 264
265 static struct attribute *gfs2_attrs[] = { 265 static struct attribute *gfs2_attrs[] = {
266 &gfs2_attr_id.attr, 266 &gfs2_attr_id.attr,
267 &gfs2_attr_fsname.attr, 267 &gfs2_attr_fsname.attr,
268 &gfs2_attr_uuid.attr, 268 &gfs2_attr_uuid.attr,
269 &gfs2_attr_freeze.attr, 269 &gfs2_attr_freeze.attr,
270 &gfs2_attr_withdraw.attr, 270 &gfs2_attr_withdraw.attr,
271 &gfs2_attr_statfs_sync.attr, 271 &gfs2_attr_statfs_sync.attr,
272 &gfs2_attr_quota_sync.attr, 272 &gfs2_attr_quota_sync.attr,
273 &gfs2_attr_quota_refresh_user.attr, 273 &gfs2_attr_quota_refresh_user.attr,
274 &gfs2_attr_quota_refresh_group.attr, 274 &gfs2_attr_quota_refresh_group.attr,
275 &gfs2_attr_demote_rq.attr, 275 &gfs2_attr_demote_rq.attr,
276 NULL, 276 NULL,
277 }; 277 };
278 278
279 static void gfs2_sbd_release(struct kobject *kobj) 279 static void gfs2_sbd_release(struct kobject *kobj)
280 { 280 {
281 struct gfs2_sbd *sdp = container_of(kobj, struct gfs2_sbd, sd_kobj); 281 struct gfs2_sbd *sdp = container_of(kobj, struct gfs2_sbd, sd_kobj);
282 282
283 kfree(sdp); 283 kfree(sdp);
284 } 284 }
285 285
286 static struct kobj_type gfs2_ktype = { 286 static struct kobj_type gfs2_ktype = {
287 .release = gfs2_sbd_release, 287 .release = gfs2_sbd_release,
288 .default_attrs = gfs2_attrs, 288 .default_attrs = gfs2_attrs,
289 .sysfs_ops = &gfs2_attr_ops, 289 .sysfs_ops = &gfs2_attr_ops,
290 }; 290 };
291 291
292 292
293 /* 293 /*
294 * lock_module. Originally from lock_dlm 294 * lock_module. Originally from lock_dlm
295 */ 295 */
296 296
297 static ssize_t proto_name_show(struct gfs2_sbd *sdp, char *buf) 297 static ssize_t proto_name_show(struct gfs2_sbd *sdp, char *buf)
298 { 298 {
299 const struct lm_lockops *ops = sdp->sd_lockstruct.ls_ops; 299 const struct lm_lockops *ops = sdp->sd_lockstruct.ls_ops;
300 return sprintf(buf, "%s\n", ops->lm_proto_name); 300 return sprintf(buf, "%s\n", ops->lm_proto_name);
301 } 301 }
302 302
303 static ssize_t block_show(struct gfs2_sbd *sdp, char *buf) 303 static ssize_t block_show(struct gfs2_sbd *sdp, char *buf)
304 { 304 {
305 struct lm_lockstruct *ls = &sdp->sd_lockstruct; 305 struct lm_lockstruct *ls = &sdp->sd_lockstruct;
306 ssize_t ret; 306 ssize_t ret;
307 int val = 0; 307 int val = 0;
308 308
309 if (test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags)) 309 if (test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags))
310 val = 1; 310 val = 1;
311 ret = sprintf(buf, "%d\n", val); 311 ret = sprintf(buf, "%d\n", val);
312 return ret; 312 return ret;
313 } 313 }
314 314
315 static ssize_t block_store(struct gfs2_sbd *sdp, const char *buf, size_t len) 315 static ssize_t block_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
316 { 316 {
317 struct lm_lockstruct *ls = &sdp->sd_lockstruct; 317 struct lm_lockstruct *ls = &sdp->sd_lockstruct;
318 ssize_t ret = len; 318 ssize_t ret = len;
319 int val; 319 int val;
320 320
321 val = simple_strtol(buf, NULL, 0); 321 val = simple_strtol(buf, NULL, 0);
322 322
323 if (val == 1) 323 if (val == 1)
324 set_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags); 324 set_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);
325 else if (val == 0) { 325 else if (val == 0) {
326 clear_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags); 326 clear_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);
327 smp_mb__after_clear_bit(); 327 smp_mb__after_clear_bit();
328 gfs2_glock_thaw(sdp); 328 gfs2_glock_thaw(sdp);
329 } else { 329 } else {
330 ret = -EINVAL; 330 ret = -EINVAL;
331 } 331 }
332 return ret; 332 return ret;
333 } 333 }
334 334
335 static ssize_t lkfirst_show(struct gfs2_sbd *sdp, char *buf) 335 static ssize_t lkfirst_show(struct gfs2_sbd *sdp, char *buf)
336 { 336 {
337 struct lm_lockstruct *ls = &sdp->sd_lockstruct; 337 struct lm_lockstruct *ls = &sdp->sd_lockstruct;
338 return sprintf(buf, "%d\n", ls->ls_first); 338 return sprintf(buf, "%d\n", ls->ls_first);
339 } 339 }
340 340
341 static ssize_t lkfirst_store(struct gfs2_sbd *sdp, const char *buf, size_t len) 341 static ssize_t lkfirst_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
342 { 342 {
343 unsigned first; 343 unsigned first;
344 int rv; 344 int rv;
345 345
346 rv = sscanf(buf, "%u", &first); 346 rv = sscanf(buf, "%u", &first);
347 if (rv != 1 || first > 1) 347 if (rv != 1 || first > 1)
348 return -EINVAL; 348 return -EINVAL;
349 rv = wait_for_completion_killable(&sdp->sd_locking_init); 349 rv = wait_for_completion_killable(&sdp->sd_locking_init);
350 if (rv) 350 if (rv)
351 return rv; 351 return rv;
352 spin_lock(&sdp->sd_jindex_spin); 352 spin_lock(&sdp->sd_jindex_spin);
353 rv = -EBUSY; 353 rv = -EBUSY;
354 if (test_bit(SDF_NOJOURNALID, &sdp->sd_flags) == 0) 354 if (test_bit(SDF_NOJOURNALID, &sdp->sd_flags) == 0)
355 goto out; 355 goto out;
356 rv = -EINVAL; 356 rv = -EINVAL;
357 if (sdp->sd_args.ar_spectator) 357 if (sdp->sd_args.ar_spectator)
358 goto out; 358 goto out;
359 if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL) 359 if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL)
360 goto out; 360 goto out;
361 sdp->sd_lockstruct.ls_first = first; 361 sdp->sd_lockstruct.ls_first = first;
362 rv = 0; 362 rv = 0;
363 out: 363 out:
364 spin_unlock(&sdp->sd_jindex_spin); 364 spin_unlock(&sdp->sd_jindex_spin);
365 return rv ? rv : len; 365 return rv ? rv : len;
366 } 366 }
367 367
368 static ssize_t first_done_show(struct gfs2_sbd *sdp, char *buf) 368 static ssize_t first_done_show(struct gfs2_sbd *sdp, char *buf)
369 { 369 {
370 struct lm_lockstruct *ls = &sdp->sd_lockstruct; 370 struct lm_lockstruct *ls = &sdp->sd_lockstruct;
371 return sprintf(buf, "%d\n", !!test_bit(DFL_FIRST_MOUNT_DONE, &ls->ls_recover_flags)); 371 return sprintf(buf, "%d\n", !!test_bit(DFL_FIRST_MOUNT_DONE, &ls->ls_recover_flags));
372 } 372 }
373 373
374 int gfs2_recover_set(struct gfs2_sbd *sdp, unsigned jid) 374 int gfs2_recover_set(struct gfs2_sbd *sdp, unsigned jid)
375 { 375 {
376 struct gfs2_jdesc *jd; 376 struct gfs2_jdesc *jd;
377 int rv; 377 int rv;
378 378
379 spin_lock(&sdp->sd_jindex_spin); 379 spin_lock(&sdp->sd_jindex_spin);
380 rv = -EBUSY; 380 rv = -EBUSY;
381 if (sdp->sd_jdesc->jd_jid == jid) 381 if (sdp->sd_jdesc->jd_jid == jid)
382 goto out; 382 goto out;
383 rv = -ENOENT; 383 rv = -ENOENT;
384 list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) { 384 list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
385 if (jd->jd_jid != jid) 385 if (jd->jd_jid != jid)
386 continue; 386 continue;
387 rv = gfs2_recover_journal(jd, false); 387 rv = gfs2_recover_journal(jd, false);
388 break; 388 break;
389 } 389 }
390 out: 390 out:
391 spin_unlock(&sdp->sd_jindex_spin); 391 spin_unlock(&sdp->sd_jindex_spin);
392 return rv; 392 return rv;
393 } 393 }
394 394
395 static ssize_t recover_store(struct gfs2_sbd *sdp, const char *buf, size_t len) 395 static ssize_t recover_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
396 { 396 {
397 unsigned jid; 397 unsigned jid;
398 int rv; 398 int rv;
399 399
400 rv = sscanf(buf, "%u", &jid); 400 rv = sscanf(buf, "%u", &jid);
401 if (rv != 1) 401 if (rv != 1)
402 return -EINVAL; 402 return -EINVAL;
403 403
404 if (test_bit(SDF_NORECOVERY, &sdp->sd_flags)) { 404 if (test_bit(SDF_NORECOVERY, &sdp->sd_flags)) {
405 rv = -ESHUTDOWN; 405 rv = -ESHUTDOWN;
406 goto out; 406 goto out;
407 } 407 }
408 408
409 rv = gfs2_recover_set(sdp, jid); 409 rv = gfs2_recover_set(sdp, jid);
410 out: 410 out:
411 return rv ? rv : len; 411 return rv ? rv : len;
412 } 412 }
413 413
414 static ssize_t recover_done_show(struct gfs2_sbd *sdp, char *buf) 414 static ssize_t recover_done_show(struct gfs2_sbd *sdp, char *buf)
415 { 415 {
416 struct lm_lockstruct *ls = &sdp->sd_lockstruct; 416 struct lm_lockstruct *ls = &sdp->sd_lockstruct;
417 return sprintf(buf, "%d\n", ls->ls_recover_jid_done); 417 return sprintf(buf, "%d\n", ls->ls_recover_jid_done);
418 } 418 }
419 419
420 static ssize_t recover_status_show(struct gfs2_sbd *sdp, char *buf) 420 static ssize_t recover_status_show(struct gfs2_sbd *sdp, char *buf)
421 { 421 {
422 struct lm_lockstruct *ls = &sdp->sd_lockstruct; 422 struct lm_lockstruct *ls = &sdp->sd_lockstruct;
423 return sprintf(buf, "%d\n", ls->ls_recover_jid_status); 423 return sprintf(buf, "%d\n", ls->ls_recover_jid_status);
424 } 424 }
425 425
426 static ssize_t jid_show(struct gfs2_sbd *sdp, char *buf) 426 static ssize_t jid_show(struct gfs2_sbd *sdp, char *buf)
427 { 427 {
428 return sprintf(buf, "%d\n", sdp->sd_lockstruct.ls_jid); 428 return sprintf(buf, "%d\n", sdp->sd_lockstruct.ls_jid);
429 } 429 }
430 430
431 static ssize_t jid_store(struct gfs2_sbd *sdp, const char *buf, size_t len) 431 static ssize_t jid_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
432 { 432 {
433 int jid; 433 int jid;
434 int rv; 434 int rv;
435 435
436 rv = sscanf(buf, "%d", &jid); 436 rv = sscanf(buf, "%d", &jid);
437 if (rv != 1) 437 if (rv != 1)
438 return -EINVAL; 438 return -EINVAL;
439 rv = wait_for_completion_killable(&sdp->sd_locking_init); 439 rv = wait_for_completion_killable(&sdp->sd_locking_init);
440 if (rv) 440 if (rv)
441 return rv; 441 return rv;
442 spin_lock(&sdp->sd_jindex_spin); 442 spin_lock(&sdp->sd_jindex_spin);
443 rv = -EINVAL; 443 rv = -EINVAL;
444 if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL) 444 if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL)
445 goto out; 445 goto out;
446 rv = -EBUSY; 446 rv = -EBUSY;
447 if (test_bit(SDF_NOJOURNALID, &sdp->sd_flags) == 0) 447 if (test_bit(SDF_NOJOURNALID, &sdp->sd_flags) == 0)
448 goto out; 448 goto out;
449 rv = 0; 449 rv = 0;
450 if (sdp->sd_args.ar_spectator && jid > 0) 450 if (sdp->sd_args.ar_spectator && jid > 0)
451 rv = jid = -EINVAL; 451 rv = jid = -EINVAL;
452 sdp->sd_lockstruct.ls_jid = jid; 452 sdp->sd_lockstruct.ls_jid = jid;
453 clear_bit(SDF_NOJOURNALID, &sdp->sd_flags); 453 clear_bit(SDF_NOJOURNALID, &sdp->sd_flags);
454 smp_mb__after_clear_bit(); 454 smp_mb__after_clear_bit();
455 wake_up_bit(&sdp->sd_flags, SDF_NOJOURNALID); 455 wake_up_bit(&sdp->sd_flags, SDF_NOJOURNALID);
456 out: 456 out:
457 spin_unlock(&sdp->sd_jindex_spin); 457 spin_unlock(&sdp->sd_jindex_spin);
458 return rv ? rv : len; 458 return rv ? rv : len;
459 } 459 }
460 460
461 #define GDLM_ATTR(_name,_mode,_show,_store) \ 461 #define GDLM_ATTR(_name,_mode,_show,_store) \
462 static struct gfs2_attr gdlm_attr_##_name = __ATTR(_name,_mode,_show,_store) 462 static struct gfs2_attr gdlm_attr_##_name = __ATTR(_name,_mode,_show,_store)
463 463
464 GDLM_ATTR(proto_name, 0444, proto_name_show, NULL); 464 GDLM_ATTR(proto_name, 0444, proto_name_show, NULL);
465 GDLM_ATTR(block, 0644, block_show, block_store); 465 GDLM_ATTR(block, 0644, block_show, block_store);
466 GDLM_ATTR(withdraw, 0644, withdraw_show, withdraw_store); 466 GDLM_ATTR(withdraw, 0644, withdraw_show, withdraw_store);
467 GDLM_ATTR(jid, 0644, jid_show, jid_store); 467 GDLM_ATTR(jid, 0644, jid_show, jid_store);
468 GDLM_ATTR(first, 0644, lkfirst_show, lkfirst_store); 468 GDLM_ATTR(first, 0644, lkfirst_show, lkfirst_store);
469 GDLM_ATTR(first_done, 0444, first_done_show, NULL); 469 GDLM_ATTR(first_done, 0444, first_done_show, NULL);
470 GDLM_ATTR(recover, 0600, NULL, recover_store); 470 GDLM_ATTR(recover, 0600, NULL, recover_store);
471 GDLM_ATTR(recover_done, 0444, recover_done_show, NULL); 471 GDLM_ATTR(recover_done, 0444, recover_done_show, NULL);
472 GDLM_ATTR(recover_status, 0444, recover_status_show, NULL); 472 GDLM_ATTR(recover_status, 0444, recover_status_show, NULL);
473 473
474 static struct attribute *lock_module_attrs[] = { 474 static struct attribute *lock_module_attrs[] = {
475 &gdlm_attr_proto_name.attr, 475 &gdlm_attr_proto_name.attr,
476 &gdlm_attr_block.attr, 476 &gdlm_attr_block.attr,
477 &gdlm_attr_withdraw.attr, 477 &gdlm_attr_withdraw.attr,
478 &gdlm_attr_jid.attr, 478 &gdlm_attr_jid.attr,
479 &gdlm_attr_first.attr, 479 &gdlm_attr_first.attr,
480 &gdlm_attr_first_done.attr, 480 &gdlm_attr_first_done.attr,
481 &gdlm_attr_recover.attr, 481 &gdlm_attr_recover.attr,
482 &gdlm_attr_recover_done.attr, 482 &gdlm_attr_recover_done.attr,
483 &gdlm_attr_recover_status.attr, 483 &gdlm_attr_recover_status.attr,
484 NULL, 484 NULL,
485 }; 485 };
486 486
487 /* 487 /*
488 * get and set struct gfs2_tune fields 488 * get and set struct gfs2_tune fields
489 */ 489 */
490 490
491 static ssize_t quota_scale_show(struct gfs2_sbd *sdp, char *buf) 491 static ssize_t quota_scale_show(struct gfs2_sbd *sdp, char *buf)
492 { 492 {
493 return snprintf(buf, PAGE_SIZE, "%u %u\n", 493 return snprintf(buf, PAGE_SIZE, "%u %u\n",
494 sdp->sd_tune.gt_quota_scale_num, 494 sdp->sd_tune.gt_quota_scale_num,
495 sdp->sd_tune.gt_quota_scale_den); 495 sdp->sd_tune.gt_quota_scale_den);
496 } 496 }
497 497
498 static ssize_t quota_scale_store(struct gfs2_sbd *sdp, const char *buf, 498 static ssize_t quota_scale_store(struct gfs2_sbd *sdp, const char *buf,
499 size_t len) 499 size_t len)
500 { 500 {
501 struct gfs2_tune *gt = &sdp->sd_tune; 501 struct gfs2_tune *gt = &sdp->sd_tune;
502 unsigned int x, y; 502 unsigned int x, y;
503 503
504 if (!capable(CAP_SYS_ADMIN)) 504 if (!capable(CAP_SYS_ADMIN))
505 return -EACCES; 505 return -EPERM;
506 506
507 if (sscanf(buf, "%u %u", &x, &y) != 2 || !y) 507 if (sscanf(buf, "%u %u", &x, &y) != 2 || !y)
508 return -EINVAL; 508 return -EINVAL;
509 509
510 spin_lock(&gt->gt_spin); 510 spin_lock(&gt->gt_spin);
511 gt->gt_quota_scale_num = x; 511 gt->gt_quota_scale_num = x;
512 gt->gt_quota_scale_den = y; 512 gt->gt_quota_scale_den = y;
513 spin_unlock(&gt->gt_spin); 513 spin_unlock(&gt->gt_spin);
514 return len; 514 return len;
515 } 515 }
516 516
517 static ssize_t tune_set(struct gfs2_sbd *sdp, unsigned int *field, 517 static ssize_t tune_set(struct gfs2_sbd *sdp, unsigned int *field,
518 int check_zero, const char *buf, size_t len) 518 int check_zero, const char *buf, size_t len)
519 { 519 {
520 struct gfs2_tune *gt = &sdp->sd_tune; 520 struct gfs2_tune *gt = &sdp->sd_tune;
521 unsigned int x; 521 unsigned int x;
522 522
523 if (!capable(CAP_SYS_ADMIN)) 523 if (!capable(CAP_SYS_ADMIN))
524 return -EACCES; 524 return -EPERM;
525 525
526 x = simple_strtoul(buf, NULL, 0); 526 x = simple_strtoul(buf, NULL, 0);
527 527
528 if (check_zero && !x) 528 if (check_zero && !x)
529 return -EINVAL; 529 return -EINVAL;
530 530
531 spin_lock(&gt->gt_spin); 531 spin_lock(&gt->gt_spin);
532 *field = x; 532 *field = x;
533 spin_unlock(&gt->gt_spin); 533 spin_unlock(&gt->gt_spin);
534 return len; 534 return len;
535 } 535 }
536 536
537 #define TUNE_ATTR_3(name, show, store) \ 537 #define TUNE_ATTR_3(name, show, store) \
538 static struct gfs2_attr tune_attr_##name = __ATTR(name, 0644, show, store) 538 static struct gfs2_attr tune_attr_##name = __ATTR(name, 0644, show, store)
539 539
540 #define TUNE_ATTR_2(name, store) \ 540 #define TUNE_ATTR_2(name, store) \
541 static ssize_t name##_show(struct gfs2_sbd *sdp, char *buf) \ 541 static ssize_t name##_show(struct gfs2_sbd *sdp, char *buf) \
542 { \ 542 { \
543 return snprintf(buf, PAGE_SIZE, "%u\n", sdp->sd_tune.gt_##name); \ 543 return snprintf(buf, PAGE_SIZE, "%u\n", sdp->sd_tune.gt_##name); \
544 } \ 544 } \
545 TUNE_ATTR_3(name, name##_show, store) 545 TUNE_ATTR_3(name, name##_show, store)
546 546
547 #define TUNE_ATTR(name, check_zero) \ 547 #define TUNE_ATTR(name, check_zero) \
548 static ssize_t name##_store(struct gfs2_sbd *sdp, const char *buf, size_t len)\ 548 static ssize_t name##_store(struct gfs2_sbd *sdp, const char *buf, size_t len)\
549 { \ 549 { \
550 return tune_set(sdp, &sdp->sd_tune.gt_##name, check_zero, buf, len); \ 550 return tune_set(sdp, &sdp->sd_tune.gt_##name, check_zero, buf, len); \
551 } \ 551 } \
552 TUNE_ATTR_2(name, name##_store) 552 TUNE_ATTR_2(name, name##_store)
553 553
554 TUNE_ATTR(quota_warn_period, 0); 554 TUNE_ATTR(quota_warn_period, 0);
555 TUNE_ATTR(quota_quantum, 0); 555 TUNE_ATTR(quota_quantum, 0);
556 TUNE_ATTR(max_readahead, 0); 556 TUNE_ATTR(max_readahead, 0);
557 TUNE_ATTR(complain_secs, 0); 557 TUNE_ATTR(complain_secs, 0);
558 TUNE_ATTR(statfs_slow, 0); 558 TUNE_ATTR(statfs_slow, 0);
559 TUNE_ATTR(new_files_jdata, 0); 559 TUNE_ATTR(new_files_jdata, 0);
560 TUNE_ATTR(quota_simul_sync, 1); 560 TUNE_ATTR(quota_simul_sync, 1);
561 TUNE_ATTR(statfs_quantum, 1); 561 TUNE_ATTR(statfs_quantum, 1);
562 TUNE_ATTR_3(quota_scale, quota_scale_show, quota_scale_store); 562 TUNE_ATTR_3(quota_scale, quota_scale_show, quota_scale_store);
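
Stacked together, TUNE_ATTR(quota_quantum, 0) above generates a store helper that funnels into tune_set(), a matching show helper, and the attribute object itself; written out it is roughly:

    static ssize_t quota_quantum_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
    {
            return tune_set(sdp, &sdp->sd_tune.gt_quota_quantum, 0, buf, len);
    }

    static ssize_t quota_quantum_show(struct gfs2_sbd *sdp, char *buf)
    {
            return snprintf(buf, PAGE_SIZE, "%u\n", sdp->sd_tune.gt_quota_quantum);
    }

    static struct gfs2_attr tune_attr_quota_quantum =
            __ATTR(quota_quantum, 0644, quota_quantum_show, quota_quantum_store);
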
563 563
564 static struct attribute *tune_attrs[] = { 564 static struct attribute *tune_attrs[] = {
565 &tune_attr_quota_warn_period.attr, 565 &tune_attr_quota_warn_period.attr,
566 &tune_attr_quota_quantum.attr, 566 &tune_attr_quota_quantum.attr,
567 &tune_attr_max_readahead.attr, 567 &tune_attr_max_readahead.attr,
568 &tune_attr_complain_secs.attr, 568 &tune_attr_complain_secs.attr,
569 &tune_attr_statfs_slow.attr, 569 &tune_attr_statfs_slow.attr,
570 &tune_attr_quota_simul_sync.attr, 570 &tune_attr_quota_simul_sync.attr,
571 &tune_attr_statfs_quantum.attr, 571 &tune_attr_statfs_quantum.attr,
572 &tune_attr_quota_scale.attr, 572 &tune_attr_quota_scale.attr,
573 &tune_attr_new_files_jdata.attr, 573 &tune_attr_new_files_jdata.attr,
574 NULL, 574 NULL,
575 }; 575 };
576 576
577 static struct attribute_group tune_group = { 577 static struct attribute_group tune_group = {
578 .name = "tune", 578 .name = "tune",
579 .attrs = tune_attrs, 579 .attrs = tune_attrs,
580 }; 580 };
581 581
582 static struct attribute_group lock_module_group = { 582 static struct attribute_group lock_module_group = {
583 .name = "lock_module", 583 .name = "lock_module",
584 .attrs = lock_module_attrs, 584 .attrs = lock_module_attrs,
585 }; 585 };
586 586
587 int gfs2_sys_fs_add(struct gfs2_sbd *sdp) 587 int gfs2_sys_fs_add(struct gfs2_sbd *sdp)
588 { 588 {
589 struct super_block *sb = sdp->sd_vfs; 589 struct super_block *sb = sdp->sd_vfs;
590 int error; 590 int error;
591 char ro[20]; 591 char ro[20];
592 char spectator[20]; 592 char spectator[20];
593 char *envp[] = { ro, spectator, NULL }; 593 char *envp[] = { ro, spectator, NULL };
594 int sysfs_frees_sdp = 0; 594 int sysfs_frees_sdp = 0;
595 595
596 sprintf(ro, "RDONLY=%d", (sb->s_flags & MS_RDONLY) ? 1 : 0); 596 sprintf(ro, "RDONLY=%d", (sb->s_flags & MS_RDONLY) ? 1 : 0);
597 sprintf(spectator, "SPECTATOR=%d", sdp->sd_args.ar_spectator ? 1 : 0); 597 sprintf(spectator, "SPECTATOR=%d", sdp->sd_args.ar_spectator ? 1 : 0);
598 598
599 sdp->sd_kobj.kset = gfs2_kset; 599 sdp->sd_kobj.kset = gfs2_kset;
600 error = kobject_init_and_add(&sdp->sd_kobj, &gfs2_ktype, NULL, 600 error = kobject_init_and_add(&sdp->sd_kobj, &gfs2_ktype, NULL,
601 "%s", sdp->sd_table_name); 601 "%s", sdp->sd_table_name);
602 if (error) 602 if (error)
603 goto fail_reg; 603 goto fail_reg;
604 604
605 	sysfs_frees_sdp = 1; /* Freeing sdp is now done by sysfs calling 605 	sysfs_frees_sdp = 1; /* Freeing sdp is now done by sysfs calling
606 	 the function gfs2_sbd_release. */ 606 	 the function gfs2_sbd_release. */

607 error = sysfs_create_group(&sdp->sd_kobj, &tune_group); 607 error = sysfs_create_group(&sdp->sd_kobj, &tune_group);
608 if (error) 608 if (error)
609 goto fail_reg; 609 goto fail_reg;
610 610
611 error = sysfs_create_group(&sdp->sd_kobj, &lock_module_group); 611 error = sysfs_create_group(&sdp->sd_kobj, &lock_module_group);
612 if (error) 612 if (error)
613 goto fail_tune; 613 goto fail_tune;
614 614
615 error = sysfs_create_link(&sdp->sd_kobj, 615 error = sysfs_create_link(&sdp->sd_kobj,
616 &disk_to_dev(sb->s_bdev->bd_disk)->kobj, 616 &disk_to_dev(sb->s_bdev->bd_disk)->kobj,
617 "device"); 617 "device");
618 if (error) 618 if (error)
619 goto fail_lock_module; 619 goto fail_lock_module;
620 620
621 kobject_uevent_env(&sdp->sd_kobj, KOBJ_ADD, envp); 621 kobject_uevent_env(&sdp->sd_kobj, KOBJ_ADD, envp);
622 return 0; 622 return 0;
623 623
624 fail_lock_module: 624 fail_lock_module:
625 sysfs_remove_group(&sdp->sd_kobj, &lock_module_group); 625 sysfs_remove_group(&sdp->sd_kobj, &lock_module_group);
626 fail_tune: 626 fail_tune:
627 sysfs_remove_group(&sdp->sd_kobj, &tune_group); 627 sysfs_remove_group(&sdp->sd_kobj, &tune_group);
628 fail_reg: 628 fail_reg:
629 free_percpu(sdp->sd_lkstats); 629 free_percpu(sdp->sd_lkstats);
630 fs_err(sdp, "error %d adding sysfs files", error); 630 fs_err(sdp, "error %d adding sysfs files", error);
631 if (sysfs_frees_sdp) 631 if (sysfs_frees_sdp)
632 kobject_put(&sdp->sd_kobj); 632 kobject_put(&sdp->sd_kobj);
633 else 633 else
634 kfree(sdp); 634 kfree(sdp);
635 sb->s_fs_info = NULL; 635 sb->s_fs_info = NULL;
636 return error; 636 return error;
637 } 637 }
638 638
639 void gfs2_sys_fs_del(struct gfs2_sbd *sdp) 639 void gfs2_sys_fs_del(struct gfs2_sbd *sdp)
640 { 640 {
641 sysfs_remove_link(&sdp->sd_kobj, "device"); 641 sysfs_remove_link(&sdp->sd_kobj, "device");
642 sysfs_remove_group(&sdp->sd_kobj, &tune_group); 642 sysfs_remove_group(&sdp->sd_kobj, &tune_group);
643 sysfs_remove_group(&sdp->sd_kobj, &lock_module_group); 643 sysfs_remove_group(&sdp->sd_kobj, &lock_module_group);
644 kobject_put(&sdp->sd_kobj); 644 kobject_put(&sdp->sd_kobj);
645 } 645 }
646 646
647 static int gfs2_uevent(struct kset *kset, struct kobject *kobj, 647 static int gfs2_uevent(struct kset *kset, struct kobject *kobj,
648 struct kobj_uevent_env *env) 648 struct kobj_uevent_env *env)
649 { 649 {
650 struct gfs2_sbd *sdp = container_of(kobj, struct gfs2_sbd, sd_kobj); 650 struct gfs2_sbd *sdp = container_of(kobj, struct gfs2_sbd, sd_kobj);
651 struct super_block *s = sdp->sd_vfs; 651 struct super_block *s = sdp->sd_vfs;
652 const u8 *uuid = s->s_uuid; 652 const u8 *uuid = s->s_uuid;
653 653
654 add_uevent_var(env, "LOCKTABLE=%s", sdp->sd_table_name); 654 add_uevent_var(env, "LOCKTABLE=%s", sdp->sd_table_name);
655 add_uevent_var(env, "LOCKPROTO=%s", sdp->sd_proto_name); 655 add_uevent_var(env, "LOCKPROTO=%s", sdp->sd_proto_name);
656 if (!test_bit(SDF_NOJOURNALID, &sdp->sd_flags)) 656 if (!test_bit(SDF_NOJOURNALID, &sdp->sd_flags))
657 add_uevent_var(env, "JOURNALID=%d", sdp->sd_lockstruct.ls_jid); 657 add_uevent_var(env, "JOURNALID=%d", sdp->sd_lockstruct.ls_jid);
658 if (gfs2_uuid_valid(uuid)) 658 if (gfs2_uuid_valid(uuid))
659 add_uevent_var(env, "UUID=%pUB", uuid); 659 add_uevent_var(env, "UUID=%pUB", uuid);
660 return 0; 660 return 0;
661 } 661 }
662 662
663 static const struct kset_uevent_ops gfs2_uevent_ops = { 663 static const struct kset_uevent_ops gfs2_uevent_ops = {
664 .uevent = gfs2_uevent, 664 .uevent = gfs2_uevent,
665 }; 665 };
666 666
667 int gfs2_sys_init(void) 667 int gfs2_sys_init(void)
668 { 668 {
669 gfs2_kset = kset_create_and_add("gfs2", &gfs2_uevent_ops, fs_kobj); 669 gfs2_kset = kset_create_and_add("gfs2", &gfs2_uevent_ops, fs_kobj);
670 if (!gfs2_kset) 670 if (!gfs2_kset)
671 return -ENOMEM; 671 return -ENOMEM;
672 return 0; 672 return 0;
673 } 673 }
674 674
675 void gfs2_sys_uninit(void) 675 void gfs2_sys_uninit(void)
676 { 676 {
677 kset_unregister(gfs2_kset); 677 kset_unregister(gfs2_kset);
678 } 678 }
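
gfs2_sys_init() and gfs2_sys_uninit() create and tear down the "gfs2" kset under /sys/fs; they are called once from the module's init/exit path, which lives in another file and is not part of this diff. A hedged sketch of that pairing (the ordering and the other registrations are simplified and assumed):

    static int __init init_gfs2_fs(void)
    {
            int error = gfs2_sys_init();    /* creates /sys/fs/gfs2 */

            if (error)
                    return error;
            /* ... slab caches, workqueues and filesystem registration go here ... */
            return 0;
    }

    static void __exit exit_gfs2_fs(void)
    {
            /* ... unregister filesystems, destroy caches ... */
            gfs2_sys_uninit();              /* drops the kset created at init */
    }
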
679 679
680 680
1 /* 1 /*
2 * ioctl.c 2 * ioctl.c
3 * 3 *
4 * Copyright (C) 1995, 1996 by Volker Lendecke 4 * Copyright (C) 1995, 1996 by Volker Lendecke
5 * Modified 1997 Peter Waltenberg, Bill Hawes, David Woodhouse for 2.1 dcache 5 * Modified 1997 Peter Waltenberg, Bill Hawes, David Woodhouse for 2.1 dcache
6 * Modified 1998, 1999 Wolfram Pienkoss for NLS 6 * Modified 1998, 1999 Wolfram Pienkoss for NLS
7 * 7 *
8 */ 8 */
9 9
10 #include <linux/capability.h> 10 #include <linux/capability.h>
11 #include <linux/compat.h> 11 #include <linux/compat.h>
12 #include <linux/errno.h> 12 #include <linux/errno.h>
13 #include <linux/fs.h> 13 #include <linux/fs.h>
14 #include <linux/ioctl.h> 14 #include <linux/ioctl.h>
15 #include <linux/time.h> 15 #include <linux/time.h>
16 #include <linux/mm.h> 16 #include <linux/mm.h>
17 #include <linux/mount.h> 17 #include <linux/mount.h>
18 #include <linux/slab.h> 18 #include <linux/slab.h>
19 #include <linux/highuid.h> 19 #include <linux/highuid.h>
20 #include <linux/vmalloc.h> 20 #include <linux/vmalloc.h>
21 #include <linux/sched.h> 21 #include <linux/sched.h>
22 22
23 #include <asm/uaccess.h> 23 #include <asm/uaccess.h>
24 24
25 #include "ncp_fs.h" 25 #include "ncp_fs.h"
26 26
27 /* maximum limit for ncp_objectname_ioctl */ 27 /* maximum limit for ncp_objectname_ioctl */
28 #define NCP_OBJECT_NAME_MAX_LEN 4096 28 #define NCP_OBJECT_NAME_MAX_LEN 4096
29 /* maximum limit for ncp_privatedata_ioctl */ 29 /* maximum limit for ncp_privatedata_ioctl */
30 #define NCP_PRIVATE_DATA_MAX_LEN 8192 30 #define NCP_PRIVATE_DATA_MAX_LEN 8192
31 /* maximum negotiable packet size */ 31 /* maximum negotiable packet size */
32 #define NCP_PACKET_SIZE_INTERNAL 65536 32 #define NCP_PACKET_SIZE_INTERNAL 65536
33 33
34 static int 34 static int
35 ncp_get_fs_info(struct ncp_server * server, struct inode *inode, 35 ncp_get_fs_info(struct ncp_server * server, struct inode *inode,
36 struct ncp_fs_info __user *arg) 36 struct ncp_fs_info __user *arg)
37 { 37 {
38 struct ncp_fs_info info; 38 struct ncp_fs_info info;
39 39
40 if (copy_from_user(&info, arg, sizeof(info))) 40 if (copy_from_user(&info, arg, sizeof(info)))
41 return -EFAULT; 41 return -EFAULT;
42 42
43 if (info.version != NCP_GET_FS_INFO_VERSION) { 43 if (info.version != NCP_GET_FS_INFO_VERSION) {
44 DPRINTK("info.version invalid: %d\n", info.version); 44 DPRINTK("info.version invalid: %d\n", info.version);
45 return -EINVAL; 45 return -EINVAL;
46 } 46 }
47 /* TODO: info.addr = server->m.serv_addr; */ 47 /* TODO: info.addr = server->m.serv_addr; */
48 SET_UID(info.mounted_uid, server->m.mounted_uid); 48 SET_UID(info.mounted_uid, server->m.mounted_uid);
49 info.connection = server->connection; 49 info.connection = server->connection;
50 info.buffer_size = server->buffer_size; 50 info.buffer_size = server->buffer_size;
51 info.volume_number = NCP_FINFO(inode)->volNumber; 51 info.volume_number = NCP_FINFO(inode)->volNumber;
52 info.directory_id = NCP_FINFO(inode)->DosDirNum; 52 info.directory_id = NCP_FINFO(inode)->DosDirNum;
53 53
54 if (copy_to_user(arg, &info, sizeof(info))) 54 if (copy_to_user(arg, &info, sizeof(info)))
55 return -EFAULT; 55 return -EFAULT;
56 return 0; 56 return 0;
57 } 57 }
58 58
59 static int 59 static int
60 ncp_get_fs_info_v2(struct ncp_server * server, struct inode *inode, 60 ncp_get_fs_info_v2(struct ncp_server * server, struct inode *inode,
61 struct ncp_fs_info_v2 __user * arg) 61 struct ncp_fs_info_v2 __user * arg)
62 { 62 {
63 struct ncp_fs_info_v2 info2; 63 struct ncp_fs_info_v2 info2;
64 64
65 if (copy_from_user(&info2, arg, sizeof(info2))) 65 if (copy_from_user(&info2, arg, sizeof(info2)))
66 return -EFAULT; 66 return -EFAULT;
67 67
68 if (info2.version != NCP_GET_FS_INFO_VERSION_V2) { 68 if (info2.version != NCP_GET_FS_INFO_VERSION_V2) {
69 DPRINTK("info.version invalid: %d\n", info2.version); 69 DPRINTK("info.version invalid: %d\n", info2.version);
70 return -EINVAL; 70 return -EINVAL;
71 } 71 }
72 info2.mounted_uid = server->m.mounted_uid; 72 info2.mounted_uid = server->m.mounted_uid;
73 info2.connection = server->connection; 73 info2.connection = server->connection;
74 info2.buffer_size = server->buffer_size; 74 info2.buffer_size = server->buffer_size;
75 info2.volume_number = NCP_FINFO(inode)->volNumber; 75 info2.volume_number = NCP_FINFO(inode)->volNumber;
76 info2.directory_id = NCP_FINFO(inode)->DosDirNum; 76 info2.directory_id = NCP_FINFO(inode)->DosDirNum;
77 info2.dummy1 = info2.dummy2 = info2.dummy3 = 0; 77 info2.dummy1 = info2.dummy2 = info2.dummy3 = 0;
78 78
79 if (copy_to_user(arg, &info2, sizeof(info2))) 79 if (copy_to_user(arg, &info2, sizeof(info2)))
80 return -EFAULT; 80 return -EFAULT;
81 return 0; 81 return 0;
82 } 82 }
83 83
84 #ifdef CONFIG_COMPAT 84 #ifdef CONFIG_COMPAT
85 struct compat_ncp_objectname_ioctl 85 struct compat_ncp_objectname_ioctl
86 { 86 {
87 s32 auth_type; 87 s32 auth_type;
88 u32 object_name_len; 88 u32 object_name_len;
89 	compat_caddr_t	object_name;	/* userspace data, in most cases the user name */ 89 	compat_caddr_t	object_name;	/* userspace data, in most cases the user name */
90 }; 90 };
91 91
92 struct compat_ncp_fs_info_v2 { 92 struct compat_ncp_fs_info_v2 {
93 s32 version; 93 s32 version;
94 u32 mounted_uid; 94 u32 mounted_uid;
95 u32 connection; 95 u32 connection;
96 u32 buffer_size; 96 u32 buffer_size;
97 97
98 u32 volume_number; 98 u32 volume_number;
99 u32 directory_id; 99 u32 directory_id;
100 100
101 u32 dummy1; 101 u32 dummy1;
102 u32 dummy2; 102 u32 dummy2;
103 u32 dummy3; 103 u32 dummy3;
104 }; 104 };
105 105
106 struct compat_ncp_ioctl_request { 106 struct compat_ncp_ioctl_request {
107 u32 function; 107 u32 function;
108 u32 size; 108 u32 size;
109 compat_caddr_t data; 109 compat_caddr_t data;
110 }; 110 };
111 111
112 struct compat_ncp_privatedata_ioctl 112 struct compat_ncp_privatedata_ioctl
113 { 113 {
114 u32 len; 114 u32 len;
115 compat_caddr_t data; /* ~1000 for NDS */ 115 compat_caddr_t data; /* ~1000 for NDS */
116 }; 116 };
117 117
118 #define NCP_IOC_GET_FS_INFO_V2_32 _IOWR('n', 4, struct compat_ncp_fs_info_v2) 118 #define NCP_IOC_GET_FS_INFO_V2_32 _IOWR('n', 4, struct compat_ncp_fs_info_v2)
119 #define NCP_IOC_NCPREQUEST_32 _IOR('n', 1, struct compat_ncp_ioctl_request) 119 #define NCP_IOC_NCPREQUEST_32 _IOR('n', 1, struct compat_ncp_ioctl_request)
120 #define NCP_IOC_GETOBJECTNAME_32 _IOWR('n', 9, struct compat_ncp_objectname_ioctl) 120 #define NCP_IOC_GETOBJECTNAME_32 _IOWR('n', 9, struct compat_ncp_objectname_ioctl)
121 #define NCP_IOC_SETOBJECTNAME_32 _IOR('n', 9, struct compat_ncp_objectname_ioctl) 121 #define NCP_IOC_SETOBJECTNAME_32 _IOR('n', 9, struct compat_ncp_objectname_ioctl)
122 #define NCP_IOC_GETPRIVATEDATA_32 _IOWR('n', 10, struct compat_ncp_privatedata_ioctl) 122 #define NCP_IOC_GETPRIVATEDATA_32 _IOWR('n', 10, struct compat_ncp_privatedata_ioctl)
123 #define NCP_IOC_SETPRIVATEDATA_32 _IOR('n', 10, struct compat_ncp_privatedata_ioctl) 123 #define NCP_IOC_SETPRIVATEDATA_32 _IOR('n', 10, struct compat_ncp_privatedata_ioctl)
124 124
125 static int 125 static int
126 ncp_get_compat_fs_info_v2(struct ncp_server * server, struct inode *inode, 126 ncp_get_compat_fs_info_v2(struct ncp_server * server, struct inode *inode,
127 struct compat_ncp_fs_info_v2 __user * arg) 127 struct compat_ncp_fs_info_v2 __user * arg)
128 { 128 {
129 struct compat_ncp_fs_info_v2 info2; 129 struct compat_ncp_fs_info_v2 info2;
130 130
131 if (copy_from_user(&info2, arg, sizeof(info2))) 131 if (copy_from_user(&info2, arg, sizeof(info2)))
132 return -EFAULT; 132 return -EFAULT;
133 133
134 if (info2.version != NCP_GET_FS_INFO_VERSION_V2) { 134 if (info2.version != NCP_GET_FS_INFO_VERSION_V2) {
135 DPRINTK("info.version invalid: %d\n", info2.version); 135 DPRINTK("info.version invalid: %d\n", info2.version);
136 return -EINVAL; 136 return -EINVAL;
137 } 137 }
138 info2.mounted_uid = server->m.mounted_uid; 138 info2.mounted_uid = server->m.mounted_uid;
139 info2.connection = server->connection; 139 info2.connection = server->connection;
140 info2.buffer_size = server->buffer_size; 140 info2.buffer_size = server->buffer_size;
141 info2.volume_number = NCP_FINFO(inode)->volNumber; 141 info2.volume_number = NCP_FINFO(inode)->volNumber;
142 info2.directory_id = NCP_FINFO(inode)->DosDirNum; 142 info2.directory_id = NCP_FINFO(inode)->DosDirNum;
143 info2.dummy1 = info2.dummy2 = info2.dummy3 = 0; 143 info2.dummy1 = info2.dummy2 = info2.dummy3 = 0;
144 144
145 if (copy_to_user(arg, &info2, sizeof(info2))) 145 if (copy_to_user(arg, &info2, sizeof(info2)))
146 return -EFAULT; 146 return -EFAULT;
147 return 0; 147 return 0;
148 } 148 }
149 #endif 149 #endif
150 150
151 #define NCP_IOC_GETMOUNTUID16 _IOW('n', 2, u16) 151 #define NCP_IOC_GETMOUNTUID16 _IOW('n', 2, u16)
152 #define NCP_IOC_GETMOUNTUID32 _IOW('n', 2, u32) 152 #define NCP_IOC_GETMOUNTUID32 _IOW('n', 2, u32)
153 #define NCP_IOC_GETMOUNTUID64 _IOW('n', 2, u64) 153 #define NCP_IOC_GETMOUNTUID64 _IOW('n', 2, u64)
154 154
155 #ifdef CONFIG_NCPFS_NLS 155 #ifdef CONFIG_NCPFS_NLS
156 /* Here we select the iocharset and the codepage for NLS. 156 /* Here we select the iocharset and the codepage for NLS.
157 * Thanks to Petr Vandrovec for the idea and many hints. 157 * Thanks to Petr Vandrovec for the idea and many hints.
158 */ 158 */
159 static int 159 static int
160 ncp_set_charsets(struct ncp_server* server, struct ncp_nls_ioctl __user *arg) 160 ncp_set_charsets(struct ncp_server* server, struct ncp_nls_ioctl __user *arg)
161 { 161 {
162 struct ncp_nls_ioctl user; 162 struct ncp_nls_ioctl user;
163 struct nls_table *codepage; 163 struct nls_table *codepage;
164 struct nls_table *iocharset; 164 struct nls_table *iocharset;
165 struct nls_table *oldset_io; 165 struct nls_table *oldset_io;
166 struct nls_table *oldset_cp; 166 struct nls_table *oldset_cp;
167 int utf8; 167 int utf8;
168 int err; 168 int err;
169 169
170 if (copy_from_user(&user, arg, sizeof(user))) 170 if (copy_from_user(&user, arg, sizeof(user)))
171 return -EFAULT; 171 return -EFAULT;
172 172
173 codepage = NULL; 173 codepage = NULL;
174 user.codepage[NCP_IOCSNAME_LEN] = 0; 174 user.codepage[NCP_IOCSNAME_LEN] = 0;
175 if (!user.codepage[0] || !strcmp(user.codepage, "default")) 175 if (!user.codepage[0] || !strcmp(user.codepage, "default"))
176 codepage = load_nls_default(); 176 codepage = load_nls_default();
177 else { 177 else {
178 codepage = load_nls(user.codepage); 178 codepage = load_nls(user.codepage);
179 if (!codepage) { 179 if (!codepage) {
180 return -EBADRQC; 180 return -EBADRQC;
181 } 181 }
182 } 182 }
183 183
184 iocharset = NULL; 184 iocharset = NULL;
185 user.iocharset[NCP_IOCSNAME_LEN] = 0; 185 user.iocharset[NCP_IOCSNAME_LEN] = 0;
186 if (!user.iocharset[0] || !strcmp(user.iocharset, "default")) { 186 if (!user.iocharset[0] || !strcmp(user.iocharset, "default")) {
187 iocharset = load_nls_default(); 187 iocharset = load_nls_default();
188 utf8 = 0; 188 utf8 = 0;
189 } else if (!strcmp(user.iocharset, "utf8")) { 189 } else if (!strcmp(user.iocharset, "utf8")) {
190 iocharset = load_nls_default(); 190 iocharset = load_nls_default();
191 utf8 = 1; 191 utf8 = 1;
192 } else { 192 } else {
193 iocharset = load_nls(user.iocharset); 193 iocharset = load_nls(user.iocharset);
194 if (!iocharset) { 194 if (!iocharset) {
195 unload_nls(codepage); 195 unload_nls(codepage);
196 return -EBADRQC; 196 return -EBADRQC;
197 } 197 }
198 utf8 = 0; 198 utf8 = 0;
199 } 199 }
200 200
201 mutex_lock(&server->root_setup_lock); 201 mutex_lock(&server->root_setup_lock);
202 if (server->root_setuped) { 202 if (server->root_setuped) {
203 oldset_cp = codepage; 203 oldset_cp = codepage;
204 oldset_io = iocharset; 204 oldset_io = iocharset;
205 err = -EBUSY; 205 err = -EBUSY;
206 } else { 206 } else {
207 if (utf8) 207 if (utf8)
208 NCP_SET_FLAG(server, NCP_FLAG_UTF8); 208 NCP_SET_FLAG(server, NCP_FLAG_UTF8);
209 else 209 else
210 NCP_CLR_FLAG(server, NCP_FLAG_UTF8); 210 NCP_CLR_FLAG(server, NCP_FLAG_UTF8);
211 oldset_cp = server->nls_vol; 211 oldset_cp = server->nls_vol;
212 server->nls_vol = codepage; 212 server->nls_vol = codepage;
213 oldset_io = server->nls_io; 213 oldset_io = server->nls_io;
214 server->nls_io = iocharset; 214 server->nls_io = iocharset;
215 err = 0; 215 err = 0;
216 } 216 }
217 mutex_unlock(&server->root_setup_lock); 217 mutex_unlock(&server->root_setup_lock);
218 unload_nls(oldset_cp); 218 unload_nls(oldset_cp);
219 unload_nls(oldset_io); 219 unload_nls(oldset_io);
220 220
221 return err; 221 return err;
222 } 222 }
223 223
224 static int 224 static int
225 ncp_get_charsets(struct ncp_server* server, struct ncp_nls_ioctl __user *arg) 225 ncp_get_charsets(struct ncp_server* server, struct ncp_nls_ioctl __user *arg)
226 { 226 {
227 struct ncp_nls_ioctl user; 227 struct ncp_nls_ioctl user;
228 int len; 228 int len;
229 229
230 memset(&user, 0, sizeof(user)); 230 memset(&user, 0, sizeof(user));
231 mutex_lock(&server->root_setup_lock); 231 mutex_lock(&server->root_setup_lock);
232 if (server->nls_vol && server->nls_vol->charset) { 232 if (server->nls_vol && server->nls_vol->charset) {
233 len = strlen(server->nls_vol->charset); 233 len = strlen(server->nls_vol->charset);
234 if (len > NCP_IOCSNAME_LEN) 234 if (len > NCP_IOCSNAME_LEN)
235 len = NCP_IOCSNAME_LEN; 235 len = NCP_IOCSNAME_LEN;
236 strncpy(user.codepage, server->nls_vol->charset, len); 236 strncpy(user.codepage, server->nls_vol->charset, len);
237 user.codepage[len] = 0; 237 user.codepage[len] = 0;
238 } 238 }
239 239
240 if (NCP_IS_FLAG(server, NCP_FLAG_UTF8)) 240 if (NCP_IS_FLAG(server, NCP_FLAG_UTF8))
241 strcpy(user.iocharset, "utf8"); 241 strcpy(user.iocharset, "utf8");
242 else if (server->nls_io && server->nls_io->charset) { 242 else if (server->nls_io && server->nls_io->charset) {
243 len = strlen(server->nls_io->charset); 243 len = strlen(server->nls_io->charset);
244 if (len > NCP_IOCSNAME_LEN) 244 if (len > NCP_IOCSNAME_LEN)
245 len = NCP_IOCSNAME_LEN; 245 len = NCP_IOCSNAME_LEN;
246 strncpy(user.iocharset, server->nls_io->charset, len); 246 strncpy(user.iocharset, server->nls_io->charset, len);
247 user.iocharset[len] = 0; 247 user.iocharset[len] = 0;
248 } 248 }
249 mutex_unlock(&server->root_setup_lock); 249 mutex_unlock(&server->root_setup_lock);
250 250
251 if (copy_to_user(arg, &user, sizeof(user))) 251 if (copy_to_user(arg, &user, sizeof(user)))
252 return -EFAULT; 252 return -EFAULT;
253 return 0; 253 return 0;
254 } 254 }
255 #endif /* CONFIG_NCPFS_NLS */ 255 #endif /* CONFIG_NCPFS_NLS */
256 256
257 static long __ncp_ioctl(struct inode *inode, unsigned int cmd, unsigned long arg) 257 static long __ncp_ioctl(struct inode *inode, unsigned int cmd, unsigned long arg)
258 { 258 {
259 struct ncp_server *server = NCP_SERVER(inode); 259 struct ncp_server *server = NCP_SERVER(inode);
260 int result; 260 int result;
261 struct ncp_ioctl_request request; 261 struct ncp_ioctl_request request;
262 char* bouncebuffer; 262 char* bouncebuffer;
263 void __user *argp = (void __user *)arg; 263 void __user *argp = (void __user *)arg;
264 264
265 switch (cmd) { 265 switch (cmd) {
266 #ifdef CONFIG_COMPAT 266 #ifdef CONFIG_COMPAT
267 case NCP_IOC_NCPREQUEST_32: 267 case NCP_IOC_NCPREQUEST_32:
268 #endif 268 #endif
269 case NCP_IOC_NCPREQUEST: 269 case NCP_IOC_NCPREQUEST:
270 #ifdef CONFIG_COMPAT 270 #ifdef CONFIG_COMPAT
271 if (cmd == NCP_IOC_NCPREQUEST_32) { 271 if (cmd == NCP_IOC_NCPREQUEST_32) {
272 struct compat_ncp_ioctl_request request32; 272 struct compat_ncp_ioctl_request request32;
273 if (copy_from_user(&request32, argp, sizeof(request32))) 273 if (copy_from_user(&request32, argp, sizeof(request32)))
274 return -EFAULT; 274 return -EFAULT;
275 request.function = request32.function; 275 request.function = request32.function;
276 request.size = request32.size; 276 request.size = request32.size;
277 request.data = compat_ptr(request32.data); 277 request.data = compat_ptr(request32.data);
278 } else 278 } else
279 #endif 279 #endif
280 if (copy_from_user(&request, argp, sizeof(request))) 280 if (copy_from_user(&request, argp, sizeof(request)))
281 return -EFAULT; 281 return -EFAULT;
282 282
283 if ((request.function > 255) 283 if ((request.function > 255)
284 || (request.size > 284 || (request.size >
285 NCP_PACKET_SIZE - sizeof(struct ncp_request_header))) { 285 NCP_PACKET_SIZE - sizeof(struct ncp_request_header))) {
286 return -EINVAL; 286 return -EINVAL;
287 } 287 }
288 bouncebuffer = vmalloc(NCP_PACKET_SIZE_INTERNAL); 288 bouncebuffer = vmalloc(NCP_PACKET_SIZE_INTERNAL);
289 if (!bouncebuffer) 289 if (!bouncebuffer)
290 return -ENOMEM; 290 return -ENOMEM;
291 if (copy_from_user(bouncebuffer, request.data, request.size)) { 291 if (copy_from_user(bouncebuffer, request.data, request.size)) {
292 vfree(bouncebuffer); 292 vfree(bouncebuffer);
293 return -EFAULT; 293 return -EFAULT;
294 } 294 }
295 ncp_lock_server(server); 295 ncp_lock_server(server);
296 296
297 /* FIXME: We hack around in the server's structures 297 /* FIXME: We hack around in the server's structures
298 here to be able to use ncp_request */ 298 here to be able to use ncp_request */
299 299
300 server->has_subfunction = 0; 300 server->has_subfunction = 0;
301 server->current_size = request.size; 301 server->current_size = request.size;
302 memcpy(server->packet, bouncebuffer, request.size); 302 memcpy(server->packet, bouncebuffer, request.size);
303 303
304 result = ncp_request2(server, request.function, 304 result = ncp_request2(server, request.function,
305 bouncebuffer, NCP_PACKET_SIZE_INTERNAL); 305 bouncebuffer, NCP_PACKET_SIZE_INTERNAL);
306 if (result < 0) 306 if (result < 0)
307 result = -EIO; 307 result = -EIO;
308 else 308 else
309 result = server->reply_size; 309 result = server->reply_size;
310 ncp_unlock_server(server); 310 ncp_unlock_server(server);
311 DPRINTK("ncp_ioctl: copy %d bytes\n", 311 DPRINTK("ncp_ioctl: copy %d bytes\n",
312 result); 312 result);
313 if (result >= 0) 313 if (result >= 0)
314 if (copy_to_user(request.data, bouncebuffer, result)) 314 if (copy_to_user(request.data, bouncebuffer, result))
315 result = -EFAULT; 315 result = -EFAULT;
316 vfree(bouncebuffer); 316 vfree(bouncebuffer);
317 return result; 317 return result;
318 318
319 case NCP_IOC_CONN_LOGGED_IN: 319 case NCP_IOC_CONN_LOGGED_IN:
320 320
321 if (!(server->m.int_flags & NCP_IMOUNT_LOGGEDIN_POSSIBLE)) 321 if (!(server->m.int_flags & NCP_IMOUNT_LOGGEDIN_POSSIBLE))
322 return -EINVAL; 322 return -EINVAL;
323 mutex_lock(&server->root_setup_lock); 323 mutex_lock(&server->root_setup_lock);
324 if (server->root_setuped) 324 if (server->root_setuped)
325 result = -EBUSY; 325 result = -EBUSY;
326 else { 326 else {
327 result = ncp_conn_logged_in(inode->i_sb); 327 result = ncp_conn_logged_in(inode->i_sb);
328 if (result == 0) 328 if (result == 0)
329 server->root_setuped = 1; 329 server->root_setuped = 1;
330 } 330 }
331 mutex_unlock(&server->root_setup_lock); 331 mutex_unlock(&server->root_setup_lock);
332 return result; 332 return result;
333 333
334 case NCP_IOC_GET_FS_INFO: 334 case NCP_IOC_GET_FS_INFO:
335 return ncp_get_fs_info(server, inode, argp); 335 return ncp_get_fs_info(server, inode, argp);
336 336
337 case NCP_IOC_GET_FS_INFO_V2: 337 case NCP_IOC_GET_FS_INFO_V2:
338 return ncp_get_fs_info_v2(server, inode, argp); 338 return ncp_get_fs_info_v2(server, inode, argp);
339 339
340 #ifdef CONFIG_COMPAT 340 #ifdef CONFIG_COMPAT
341 case NCP_IOC_GET_FS_INFO_V2_32: 341 case NCP_IOC_GET_FS_INFO_V2_32:
342 return ncp_get_compat_fs_info_v2(server, inode, argp); 342 return ncp_get_compat_fs_info_v2(server, inode, argp);
343 #endif 343 #endif
344 /* we have too many combinations of CONFIG_COMPAT, 344 /* we have too many combinations of CONFIG_COMPAT,
345 * CONFIG_64BIT and CONFIG_UID16, so just handle 345 * CONFIG_64BIT and CONFIG_UID16, so just handle
346 * any of the possible ioctls */ 346 * any of the possible ioctls */
347 case NCP_IOC_GETMOUNTUID16: 347 case NCP_IOC_GETMOUNTUID16:
348 { 348 {
349 u16 uid; 349 u16 uid;
350 350
351 SET_UID(uid, server->m.mounted_uid); 351 SET_UID(uid, server->m.mounted_uid);
352 if (put_user(uid, (u16 __user *)argp)) 352 if (put_user(uid, (u16 __user *)argp))
353 return -EFAULT; 353 return -EFAULT;
354 return 0; 354 return 0;
355 } 355 }
356 case NCP_IOC_GETMOUNTUID32: 356 case NCP_IOC_GETMOUNTUID32:
357 if (put_user(server->m.mounted_uid, 357 if (put_user(server->m.mounted_uid,
358 (u32 __user *)argp)) 358 (u32 __user *)argp))
359 return -EFAULT; 359 return -EFAULT;
360 return 0; 360 return 0;
361 case NCP_IOC_GETMOUNTUID64: 361 case NCP_IOC_GETMOUNTUID64:
362 if (put_user(server->m.mounted_uid, 362 if (put_user(server->m.mounted_uid,
363 (u64 __user *)argp)) 363 (u64 __user *)argp))
364 return -EFAULT; 364 return -EFAULT;
365 return 0; 365 return 0;
366 366
367 case NCP_IOC_GETROOT: 367 case NCP_IOC_GETROOT:
368 { 368 {
369 struct ncp_setroot_ioctl sr; 369 struct ncp_setroot_ioctl sr;
370 370
371 result = -EACCES; 371 result = -EACCES;
372 mutex_lock(&server->root_setup_lock); 372 mutex_lock(&server->root_setup_lock);
373 if (server->m.mounted_vol[0]) { 373 if (server->m.mounted_vol[0]) {
374 struct dentry* dentry = inode->i_sb->s_root; 374 struct dentry* dentry = inode->i_sb->s_root;
375 375
376 if (dentry) { 376 if (dentry) {
377 struct inode* s_inode = dentry->d_inode; 377 struct inode* s_inode = dentry->d_inode;
378 378
379 if (s_inode) { 379 if (s_inode) {
380 sr.volNumber = NCP_FINFO(s_inode)->volNumber; 380 sr.volNumber = NCP_FINFO(s_inode)->volNumber;
381 sr.dirEntNum = NCP_FINFO(s_inode)->dirEntNum; 381 sr.dirEntNum = NCP_FINFO(s_inode)->dirEntNum;
382 sr.namespace = server->name_space[sr.volNumber]; 382 sr.namespace = server->name_space[sr.volNumber];
383 result = 0; 383 result = 0;
384 } else 384 } else
385 DPRINTK("ncpfs: s_root->d_inode==NULL\n"); 385 DPRINTK("ncpfs: s_root->d_inode==NULL\n");
386 } else 386 } else
387 DPRINTK("ncpfs: s_root==NULL\n"); 387 DPRINTK("ncpfs: s_root==NULL\n");
388 } else { 388 } else {
389 sr.volNumber = -1; 389 sr.volNumber = -1;
390 sr.namespace = 0; 390 sr.namespace = 0;
391 sr.dirEntNum = 0; 391 sr.dirEntNum = 0;
392 result = 0; 392 result = 0;
393 } 393 }
394 mutex_unlock(&server->root_setup_lock); 394 mutex_unlock(&server->root_setup_lock);
395 if (!result && copy_to_user(argp, &sr, sizeof(sr))) 395 if (!result && copy_to_user(argp, &sr, sizeof(sr)))
396 result = -EFAULT; 396 result = -EFAULT;
397 return result; 397 return result;
398 } 398 }
399 399
400 case NCP_IOC_SETROOT: 400 case NCP_IOC_SETROOT:
401 { 401 {
402 struct ncp_setroot_ioctl sr; 402 struct ncp_setroot_ioctl sr;
403 __u32 vnum; 403 __u32 vnum;
404 __le32 de; 404 __le32 de;
405 __le32 dosde; 405 __le32 dosde;
406 struct dentry* dentry; 406 struct dentry* dentry;
407 407
408 if (copy_from_user(&sr, argp, sizeof(sr))) 408 if (copy_from_user(&sr, argp, sizeof(sr)))
409 return -EFAULT; 409 return -EFAULT;
410 mutex_lock(&server->root_setup_lock); 410 mutex_lock(&server->root_setup_lock);
411 if (server->root_setuped) 411 if (server->root_setuped)
412 result = -EBUSY; 412 result = -EBUSY;
413 else { 413 else {
414 if (sr.volNumber < 0) { 414 if (sr.volNumber < 0) {
415 server->m.mounted_vol[0] = 0; 415 server->m.mounted_vol[0] = 0;
416 vnum = NCP_NUMBER_OF_VOLUMES; 416 vnum = NCP_NUMBER_OF_VOLUMES;
417 de = 0; 417 de = 0;
418 dosde = 0; 418 dosde = 0;
419 result = 0; 419 result = 0;
420 } else if (sr.volNumber >= NCP_NUMBER_OF_VOLUMES) { 420 } else if (sr.volNumber >= NCP_NUMBER_OF_VOLUMES) {
421 result = -EINVAL; 421 result = -EINVAL;
422 } else if (ncp_mount_subdir(server, sr.volNumber, 422 } else if (ncp_mount_subdir(server, sr.volNumber,
423 sr.namespace, sr.dirEntNum, 423 sr.namespace, sr.dirEntNum,
424 &vnum, &de, &dosde)) { 424 &vnum, &de, &dosde)) {
425 result = -ENOENT; 425 result = -ENOENT;
426 } else 426 } else
427 result = 0; 427 result = 0;
428 428
429 if (result == 0) { 429 if (result == 0) {
430 dentry = inode->i_sb->s_root; 430 dentry = inode->i_sb->s_root;
431 if (dentry) { 431 if (dentry) {
432 struct inode* s_inode = dentry->d_inode; 432 struct inode* s_inode = dentry->d_inode;
433 433
434 if (s_inode) { 434 if (s_inode) {
435 NCP_FINFO(s_inode)->volNumber = vnum; 435 NCP_FINFO(s_inode)->volNumber = vnum;
436 NCP_FINFO(s_inode)->dirEntNum = de; 436 NCP_FINFO(s_inode)->dirEntNum = de;
437 NCP_FINFO(s_inode)->DosDirNum = dosde; 437 NCP_FINFO(s_inode)->DosDirNum = dosde;
438 server->root_setuped = 1; 438 server->root_setuped = 1;
439 } else { 439 } else {
440 DPRINTK("ncpfs: s_root->d_inode==NULL\n"); 440 DPRINTK("ncpfs: s_root->d_inode==NULL\n");
441 result = -EIO; 441 result = -EIO;
442 } 442 }
443 } else { 443 } else {
444 DPRINTK("ncpfs: s_root==NULL\n"); 444 DPRINTK("ncpfs: s_root==NULL\n");
445 result = -EIO; 445 result = -EIO;
446 } 446 }
447 } 447 }
448 result = 0; 448 result = 0;
449 } 449 }
450 mutex_unlock(&server->root_setup_lock); 450 mutex_unlock(&server->root_setup_lock);
451 451
452 return result; 452 return result;
453 } 453 }
454 454
455 #ifdef CONFIG_NCPFS_PACKET_SIGNING 455 #ifdef CONFIG_NCPFS_PACKET_SIGNING
456 case NCP_IOC_SIGN_INIT: 456 case NCP_IOC_SIGN_INIT:
457 { 457 {
458 struct ncp_sign_init sign; 458 struct ncp_sign_init sign;
459 459
460 if (argp) 460 if (argp)
461 if (copy_from_user(&sign, argp, sizeof(sign))) 461 if (copy_from_user(&sign, argp, sizeof(sign)))
462 return -EFAULT; 462 return -EFAULT;
463 ncp_lock_server(server); 463 ncp_lock_server(server);
464 mutex_lock(&server->rcv.creq_mutex); 464 mutex_lock(&server->rcv.creq_mutex);
465 if (argp) { 465 if (argp) {
466 if (server->sign_wanted) { 466 if (server->sign_wanted) {
467 memcpy(server->sign_root,sign.sign_root,8); 467 memcpy(server->sign_root,sign.sign_root,8);
468 memcpy(server->sign_last,sign.sign_last,16); 468 memcpy(server->sign_last,sign.sign_last,16);
469 server->sign_active = 1; 469 server->sign_active = 1;
470 } 470 }
471 /* ignore when signatures not wanted */ 471 /* ignore when signatures not wanted */
472 } else { 472 } else {
473 server->sign_active = 0; 473 server->sign_active = 0;
474 } 474 }
475 mutex_unlock(&server->rcv.creq_mutex); 475 mutex_unlock(&server->rcv.creq_mutex);
476 ncp_unlock_server(server); 476 ncp_unlock_server(server);
477 return 0; 477 return 0;
478 } 478 }
479 479
480 case NCP_IOC_SIGN_WANTED: 480 case NCP_IOC_SIGN_WANTED:
481 { 481 {
482 int state; 482 int state;
483 483
484 ncp_lock_server(server); 484 ncp_lock_server(server);
485 state = server->sign_wanted; 485 state = server->sign_wanted;
486 ncp_unlock_server(server); 486 ncp_unlock_server(server);
487 if (put_user(state, (int __user *)argp)) 487 if (put_user(state, (int __user *)argp))
488 return -EFAULT; 488 return -EFAULT;
489 return 0; 489 return 0;
490 } 490 }
491 491
492 case NCP_IOC_SET_SIGN_WANTED: 492 case NCP_IOC_SET_SIGN_WANTED:
493 { 493 {
494 int newstate; 494 int newstate;
495 495
496 /* get only low 8 bits... */ 496 /* get only low 8 bits... */
497 if (get_user(newstate, (unsigned char __user *)argp)) 497 if (get_user(newstate, (unsigned char __user *)argp))
498 return -EFAULT; 498 return -EFAULT;
499 result = 0; 499 result = 0;
500 ncp_lock_server(server); 500 ncp_lock_server(server);
501 if (server->sign_active) { 501 if (server->sign_active) {
502 /* cannot turn signatures OFF when active */ 502 /* cannot turn signatures OFF when active */
503 if (!newstate) 503 if (!newstate)
504 result = -EINVAL; 504 result = -EINVAL;
505 } else { 505 } else {
506 server->sign_wanted = newstate != 0; 506 server->sign_wanted = newstate != 0;
507 } 507 }
508 ncp_unlock_server(server); 508 ncp_unlock_server(server);
509 return result; 509 return result;
510 } 510 }
511 511
512 #endif /* CONFIG_NCPFS_PACKET_SIGNING */ 512 #endif /* CONFIG_NCPFS_PACKET_SIGNING */
513 513
514 #ifdef CONFIG_NCPFS_IOCTL_LOCKING 514 #ifdef CONFIG_NCPFS_IOCTL_LOCKING
515 case NCP_IOC_LOCKUNLOCK: 515 case NCP_IOC_LOCKUNLOCK:
516 { 516 {
517 struct ncp_lock_ioctl rqdata; 517 struct ncp_lock_ioctl rqdata;
518 518
519 if (copy_from_user(&rqdata, argp, sizeof(rqdata))) 519 if (copy_from_user(&rqdata, argp, sizeof(rqdata)))
520 return -EFAULT; 520 return -EFAULT;
521 if (rqdata.origin != 0) 521 if (rqdata.origin != 0)
522 return -EINVAL; 522 return -EINVAL;
523 /* check for cmd */ 523 /* check for cmd */
524 switch (rqdata.cmd) { 524 switch (rqdata.cmd) {
525 case NCP_LOCK_EX: 525 case NCP_LOCK_EX:
526 case NCP_LOCK_SH: 526 case NCP_LOCK_SH:
527 if (rqdata.timeout == 0) 527 if (rqdata.timeout == 0)
528 rqdata.timeout = NCP_LOCK_DEFAULT_TIMEOUT; 528 rqdata.timeout = NCP_LOCK_DEFAULT_TIMEOUT;
529 else if (rqdata.timeout > NCP_LOCK_MAX_TIMEOUT) 529 else if (rqdata.timeout > NCP_LOCK_MAX_TIMEOUT)
530 rqdata.timeout = NCP_LOCK_MAX_TIMEOUT; 530 rqdata.timeout = NCP_LOCK_MAX_TIMEOUT;
531 break; 531 break;
532 case NCP_LOCK_LOG: 532 case NCP_LOCK_LOG:
533 rqdata.timeout = NCP_LOCK_DEFAULT_TIMEOUT; /* has no effect */ 533 rqdata.timeout = NCP_LOCK_DEFAULT_TIMEOUT; /* has no effect */
534 case NCP_LOCK_CLEAR: 534 case NCP_LOCK_CLEAR:
535 break; 535 break;
536 default: 536 default:
537 return -EINVAL; 537 return -EINVAL;
538 } 538 }
539 /* locking needs both read and write access */ 539 /* locking needs both read and write access */
540 if ((result = ncp_make_open(inode, O_RDWR)) != 0) 540 if ((result = ncp_make_open(inode, O_RDWR)) != 0)
541 { 541 {
542 return result; 542 return result;
543 } 543 }
544 result = -EISDIR; 544 result = -EISDIR;
545 if (!S_ISREG(inode->i_mode)) 545 if (!S_ISREG(inode->i_mode))
546 goto outrel; 546 goto outrel;
547 if (rqdata.cmd == NCP_LOCK_CLEAR) 547 if (rqdata.cmd == NCP_LOCK_CLEAR)
548 { 548 {
549 result = ncp_ClearPhysicalRecord(NCP_SERVER(inode), 549 result = ncp_ClearPhysicalRecord(NCP_SERVER(inode),
550 NCP_FINFO(inode)->file_handle, 550 NCP_FINFO(inode)->file_handle,
551 rqdata.offset, 551 rqdata.offset,
552 rqdata.length); 552 rqdata.length);
553 if (result > 0) result = 0; /* no such lock */ 553 if (result > 0) result = 0; /* no such lock */
554 } 554 }
555 else 555 else
556 { 556 {
557 int lockcmd; 557 int lockcmd;
558 558
559 switch (rqdata.cmd) 559 switch (rqdata.cmd)
560 { 560 {
561 case NCP_LOCK_EX: lockcmd=1; break; 561 case NCP_LOCK_EX: lockcmd=1; break;
562 case NCP_LOCK_SH: lockcmd=3; break; 562 case NCP_LOCK_SH: lockcmd=3; break;
563 default: lockcmd=0; break; 563 default: lockcmd=0; break;
564 } 564 }
565 result = ncp_LogPhysicalRecord(NCP_SERVER(inode), 565 result = ncp_LogPhysicalRecord(NCP_SERVER(inode),
566 NCP_FINFO(inode)->file_handle, 566 NCP_FINFO(inode)->file_handle,
567 lockcmd, 567 lockcmd,
568 rqdata.offset, 568 rqdata.offset,
569 rqdata.length, 569 rqdata.length,
570 rqdata.timeout); 570 rqdata.timeout);
571 if (result > 0) result = -EAGAIN; 571 if (result > 0) result = -EAGAIN;
572 } 572 }
573 outrel: 573 outrel:
574 ncp_inode_close(inode); 574 ncp_inode_close(inode);
575 return result; 575 return result;
576 } 576 }
577 #endif /* CONFIG_NCPFS_IOCTL_LOCKING */ 577 #endif /* CONFIG_NCPFS_IOCTL_LOCKING */
578 578
579 #ifdef CONFIG_COMPAT 579 #ifdef CONFIG_COMPAT
580 case NCP_IOC_GETOBJECTNAME_32: 580 case NCP_IOC_GETOBJECTNAME_32:
581 { 581 {
582 struct compat_ncp_objectname_ioctl user; 582 struct compat_ncp_objectname_ioctl user;
583 size_t outl; 583 size_t outl;
584 584
585 if (copy_from_user(&user, argp, sizeof(user))) 585 if (copy_from_user(&user, argp, sizeof(user)))
586 return -EFAULT; 586 return -EFAULT;
587 down_read(&server->auth_rwsem); 587 down_read(&server->auth_rwsem);
588 user.auth_type = server->auth.auth_type; 588 user.auth_type = server->auth.auth_type;
589 outl = user.object_name_len; 589 outl = user.object_name_len;
590 user.object_name_len = server->auth.object_name_len; 590 user.object_name_len = server->auth.object_name_len;
591 if (outl > user.object_name_len) 591 if (outl > user.object_name_len)
592 outl = user.object_name_len; 592 outl = user.object_name_len;
593 result = 0; 593 result = 0;
594 if (outl) { 594 if (outl) {
595 if (copy_to_user(compat_ptr(user.object_name), 595 if (copy_to_user(compat_ptr(user.object_name),
596 server->auth.object_name, 596 server->auth.object_name,
597 outl)) 597 outl))
598 result = -EFAULT; 598 result = -EFAULT;
599 } 599 }
600 up_read(&server->auth_rwsem); 600 up_read(&server->auth_rwsem);
601 if (!result && copy_to_user(argp, &user, sizeof(user))) 601 if (!result && copy_to_user(argp, &user, sizeof(user)))
602 result = -EFAULT; 602 result = -EFAULT;
603 return result; 603 return result;
604 } 604 }
605 #endif 605 #endif
606 606
607 case NCP_IOC_GETOBJECTNAME: 607 case NCP_IOC_GETOBJECTNAME:
608 { 608 {
609 struct ncp_objectname_ioctl user; 609 struct ncp_objectname_ioctl user;
610 size_t outl; 610 size_t outl;
611 611
612 if (copy_from_user(&user, argp, sizeof(user))) 612 if (copy_from_user(&user, argp, sizeof(user)))
613 return -EFAULT; 613 return -EFAULT;
614 down_read(&server->auth_rwsem); 614 down_read(&server->auth_rwsem);
615 user.auth_type = server->auth.auth_type; 615 user.auth_type = server->auth.auth_type;
616 outl = user.object_name_len; 616 outl = user.object_name_len;
617 user.object_name_len = server->auth.object_name_len; 617 user.object_name_len = server->auth.object_name_len;
618 if (outl > user.object_name_len) 618 if (outl > user.object_name_len)
619 outl = user.object_name_len; 619 outl = user.object_name_len;
620 result = 0; 620 result = 0;
621 if (outl) { 621 if (outl) {
622 if (copy_to_user(user.object_name, 622 if (copy_to_user(user.object_name,
623 server->auth.object_name, 623 server->auth.object_name,
624 outl)) 624 outl))
625 result = -EFAULT; 625 result = -EFAULT;
626 } 626 }
627 up_read(&server->auth_rwsem); 627 up_read(&server->auth_rwsem);
628 if (!result && copy_to_user(argp, &user, sizeof(user))) 628 if (!result && copy_to_user(argp, &user, sizeof(user)))
629 result = -EFAULT; 629 result = -EFAULT;
630 return result; 630 return result;
631 } 631 }
632 632
633 #ifdef CONFIG_COMPAT 633 #ifdef CONFIG_COMPAT
634 case NCP_IOC_SETOBJECTNAME_32: 634 case NCP_IOC_SETOBJECTNAME_32:
635 #endif 635 #endif
636 case NCP_IOC_SETOBJECTNAME: 636 case NCP_IOC_SETOBJECTNAME:
637 { 637 {
638 struct ncp_objectname_ioctl user; 638 struct ncp_objectname_ioctl user;
639 void* newname; 639 void* newname;
640 void* oldname; 640 void* oldname;
641 size_t oldnamelen; 641 size_t oldnamelen;
642 void* oldprivate; 642 void* oldprivate;
643 size_t oldprivatelen; 643 size_t oldprivatelen;
644 644
645 #ifdef CONFIG_COMPAT 645 #ifdef CONFIG_COMPAT
646 if (cmd == NCP_IOC_SETOBJECTNAME_32) { 646 if (cmd == NCP_IOC_SETOBJECTNAME_32) {
647 struct compat_ncp_objectname_ioctl user32; 647 struct compat_ncp_objectname_ioctl user32;
648 if (copy_from_user(&user32, argp, sizeof(user32))) 648 if (copy_from_user(&user32, argp, sizeof(user32)))
649 return -EFAULT; 649 return -EFAULT;
650 user.auth_type = user32.auth_type; 650 user.auth_type = user32.auth_type;
651 user.object_name_len = user32.object_name_len; 651 user.object_name_len = user32.object_name_len;
652 user.object_name = compat_ptr(user32.object_name); 652 user.object_name = compat_ptr(user32.object_name);
653 } else 653 } else
654 #endif 654 #endif
655 if (copy_from_user(&user, argp, sizeof(user))) 655 if (copy_from_user(&user, argp, sizeof(user)))
656 return -EFAULT; 656 return -EFAULT;
657 657
658 if (user.object_name_len > NCP_OBJECT_NAME_MAX_LEN) 658 if (user.object_name_len > NCP_OBJECT_NAME_MAX_LEN)
659 return -ENOMEM; 659 return -ENOMEM;
660 if (user.object_name_len) { 660 if (user.object_name_len) {
661 newname = memdup_user(user.object_name, 661 newname = memdup_user(user.object_name,
662 user.object_name_len); 662 user.object_name_len);
663 if (IS_ERR(newname)) 663 if (IS_ERR(newname))
664 return PTR_ERR(newname); 664 return PTR_ERR(newname);
665 } else { 665 } else {
666 newname = NULL; 666 newname = NULL;
667 } 667 }
668 down_write(&server->auth_rwsem); 668 down_write(&server->auth_rwsem);
669 oldname = server->auth.object_name; 669 oldname = server->auth.object_name;
670 oldnamelen = server->auth.object_name_len; 670 oldnamelen = server->auth.object_name_len;
671 oldprivate = server->priv.data; 671 oldprivate = server->priv.data;
672 oldprivatelen = server->priv.len; 672 oldprivatelen = server->priv.len;
673 server->auth.auth_type = user.auth_type; 673 server->auth.auth_type = user.auth_type;
674 server->auth.object_name_len = user.object_name_len; 674 server->auth.object_name_len = user.object_name_len;
675 server->auth.object_name = newname; 675 server->auth.object_name = newname;
676 server->priv.len = 0; 676 server->priv.len = 0;
677 server->priv.data = NULL; 677 server->priv.data = NULL;
678 up_write(&server->auth_rwsem); 678 up_write(&server->auth_rwsem);
679 kfree(oldprivate); 679 kfree(oldprivate);
680 kfree(oldname); 680 kfree(oldname);
681 return 0; 681 return 0;
682 } 682 }
683 683
684 #ifdef CONFIG_COMPAT 684 #ifdef CONFIG_COMPAT
685 case NCP_IOC_GETPRIVATEDATA_32: 685 case NCP_IOC_GETPRIVATEDATA_32:
686 #endif 686 #endif
687 case NCP_IOC_GETPRIVATEDATA: 687 case NCP_IOC_GETPRIVATEDATA:
688 { 688 {
689 struct ncp_privatedata_ioctl user; 689 struct ncp_privatedata_ioctl user;
690 size_t outl; 690 size_t outl;
691 691
692 #ifdef CONFIG_COMPAT 692 #ifdef CONFIG_COMPAT
693 if (cmd == NCP_IOC_GETPRIVATEDATA_32) { 693 if (cmd == NCP_IOC_GETPRIVATEDATA_32) {
694 struct compat_ncp_privatedata_ioctl user32; 694 struct compat_ncp_privatedata_ioctl user32;
695 if (copy_from_user(&user32, argp, sizeof(user32))) 695 if (copy_from_user(&user32, argp, sizeof(user32)))
696 return -EFAULT; 696 return -EFAULT;
697 user.len = user32.len; 697 user.len = user32.len;
698 user.data = compat_ptr(user32.data); 698 user.data = compat_ptr(user32.data);
699 } else 699 } else
700 #endif 700 #endif
701 if (copy_from_user(&user, argp, sizeof(user))) 701 if (copy_from_user(&user, argp, sizeof(user)))
702 return -EFAULT; 702 return -EFAULT;
703 703
704 down_read(&server->auth_rwsem); 704 down_read(&server->auth_rwsem);
705 outl = user.len; 705 outl = user.len;
706 user.len = server->priv.len; 706 user.len = server->priv.len;
707 if (outl > user.len) outl = user.len; 707 if (outl > user.len) outl = user.len;
708 result = 0; 708 result = 0;
709 if (outl) { 709 if (outl) {
710 if (copy_to_user(user.data, 710 if (copy_to_user(user.data,
711 server->priv.data, 711 server->priv.data,
712 outl)) 712 outl))
713 result = -EFAULT; 713 result = -EFAULT;
714 } 714 }
715 up_read(&server->auth_rwsem); 715 up_read(&server->auth_rwsem);
716 if (result) 716 if (result)
717 return result; 717 return result;
718 #ifdef CONFIG_COMPAT 718 #ifdef CONFIG_COMPAT
719 if (cmd == NCP_IOC_GETPRIVATEDATA_32) { 719 if (cmd == NCP_IOC_GETPRIVATEDATA_32) {
720 struct compat_ncp_privatedata_ioctl user32; 720 struct compat_ncp_privatedata_ioctl user32;
721 user32.len = user.len; 721 user32.len = user.len;
722 user32.data = (unsigned long) user.data; 722 user32.data = (unsigned long) user.data;
723 if (copy_to_user(argp, &user32, sizeof(user32))) 723 if (copy_to_user(argp, &user32, sizeof(user32)))
724 return -EFAULT; 724 return -EFAULT;
725 } else 725 } else
726 #endif 726 #endif
727 if (copy_to_user(argp, &user, sizeof(user))) 727 if (copy_to_user(argp, &user, sizeof(user)))
728 return -EFAULT; 728 return -EFAULT;
729 729
730 return 0; 730 return 0;
731 } 731 }
732 732
733 #ifdef CONFIG_COMPAT 733 #ifdef CONFIG_COMPAT
734 case NCP_IOC_SETPRIVATEDATA_32: 734 case NCP_IOC_SETPRIVATEDATA_32:
735 #endif 735 #endif
736 case NCP_IOC_SETPRIVATEDATA: 736 case NCP_IOC_SETPRIVATEDATA:
737 { 737 {
738 struct ncp_privatedata_ioctl user; 738 struct ncp_privatedata_ioctl user;
739 void* new; 739 void* new;
740 void* old; 740 void* old;
741 size_t oldlen; 741 size_t oldlen;
742 742
743 #ifdef CONFIG_COMPAT 743 #ifdef CONFIG_COMPAT
744 if (cmd == NCP_IOC_SETPRIVATEDATA_32) { 744 if (cmd == NCP_IOC_SETPRIVATEDATA_32) {
745 struct compat_ncp_privatedata_ioctl user32; 745 struct compat_ncp_privatedata_ioctl user32;
746 if (copy_from_user(&user32, argp, sizeof(user32))) 746 if (copy_from_user(&user32, argp, sizeof(user32)))
747 return -EFAULT; 747 return -EFAULT;
748 user.len = user32.len; 748 user.len = user32.len;
749 user.data = compat_ptr(user32.data); 749 user.data = compat_ptr(user32.data);
750 } else 750 } else
751 #endif 751 #endif
752 if (copy_from_user(&user, argp, sizeof(user))) 752 if (copy_from_user(&user, argp, sizeof(user)))
753 return -EFAULT; 753 return -EFAULT;
754 754
755 if (user.len > NCP_PRIVATE_DATA_MAX_LEN) 755 if (user.len > NCP_PRIVATE_DATA_MAX_LEN)
756 return -ENOMEM; 756 return -ENOMEM;
757 if (user.len) { 757 if (user.len) {
758 new = memdup_user(user.data, user.len); 758 new = memdup_user(user.data, user.len);
759 if (IS_ERR(new)) 759 if (IS_ERR(new))
760 return PTR_ERR(new); 760 return PTR_ERR(new);
761 } else { 761 } else {
762 new = NULL; 762 new = NULL;
763 } 763 }
764 down_write(&server->auth_rwsem); 764 down_write(&server->auth_rwsem);
765 old = server->priv.data; 765 old = server->priv.data;
766 oldlen = server->priv.len; 766 oldlen = server->priv.len;
767 server->priv.len = user.len; 767 server->priv.len = user.len;
768 server->priv.data = new; 768 server->priv.data = new;
769 up_write(&server->auth_rwsem); 769 up_write(&server->auth_rwsem);
770 kfree(old); 770 kfree(old);
771 return 0; 771 return 0;
772 } 772 }
773 773
774 #ifdef CONFIG_NCPFS_NLS 774 #ifdef CONFIG_NCPFS_NLS
775 case NCP_IOC_SETCHARSETS: 775 case NCP_IOC_SETCHARSETS:
776 return ncp_set_charsets(server, argp); 776 return ncp_set_charsets(server, argp);
777 777
778 case NCP_IOC_GETCHARSETS: 778 case NCP_IOC_GETCHARSETS:
779 return ncp_get_charsets(server, argp); 779 return ncp_get_charsets(server, argp);
780 780
781 #endif /* CONFIG_NCPFS_NLS */ 781 #endif /* CONFIG_NCPFS_NLS */
782 782
783 case NCP_IOC_SETDENTRYTTL: 783 case NCP_IOC_SETDENTRYTTL:
784 { 784 {
785 u_int32_t user; 785 u_int32_t user;
786 786
787 if (copy_from_user(&user, argp, sizeof(user))) 787 if (copy_from_user(&user, argp, sizeof(user)))
788 return -EFAULT; 788 return -EFAULT;
789 /* 20 secs at most... */ 789 /* 20 secs at most... */
790 if (user > 20000) 790 if (user > 20000)
791 return -EINVAL; 791 return -EINVAL;
792 user = (user * HZ) / 1000; 792 user = (user * HZ) / 1000;
793 atomic_set(&server->dentry_ttl, user); 793 atomic_set(&server->dentry_ttl, user);
794 return 0; 794 return 0;
795 } 795 }
796 796
797 case NCP_IOC_GETDENTRYTTL: 797 case NCP_IOC_GETDENTRYTTL:
798 { 798 {
799 u_int32_t user = (atomic_read(&server->dentry_ttl) * 1000) / HZ; 799 u_int32_t user = (atomic_read(&server->dentry_ttl) * 1000) / HZ;
800 if (copy_to_user(argp, &user, sizeof(user))) 800 if (copy_to_user(argp, &user, sizeof(user)))
801 return -EFAULT; 801 return -EFAULT;
802 return 0; 802 return 0;
803 } 803 }
804 804
805 } 805 }
806 return -EINVAL; 806 return -EINVAL;
807 } 807 }
808 808
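The NCP_IOC_SETDENTRYTTL and NCP_IOC_GETDENTRYTTL cases at the end of __ncp_ioctl() above exchange the dentry timeout with user space in milliseconds (anything above 20000 ms is rejected with -EINVAL) and convert to and from jiffies internally. As a rough illustration of how a user-space tool might drive them — a hedged sketch only, which assumes the ncpfs user header defining the NCP_IOC_* request codes is installed as <linux/ncp_fs.h> and that /mnt/ncp is an ncpfs mount point, neither of which appears in this diff:

/* ttl.c -- hypothetical user-space driver for the dentry-TTL ioctls above. */
#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ncp_fs.h>       /* assumed location of the NCP_IOC_* macros */

int main(void)
{
        uint32_t ttl_ms = 5000;                 /* > 20000 would get -EINVAL */
        int fd = open("/mnt/ncp", O_RDONLY);    /* hypothetical ncpfs mount */

        if (fd < 0) {
                perror("open");
                return 1;
        }
        if (ioctl(fd, NCP_IOC_SETDENTRYTTL, &ttl_ms) < 0)
                perror("NCP_IOC_SETDENTRYTTL"); /* e.g. EACCES for a non-owner without write access */
        if (ioctl(fd, NCP_IOC_GETDENTRYTTL, &ttl_ms) == 0)
                printf("dentry ttl: %u ms\n", ttl_ms);
        close(fd);
        return 0;
}

Whether the set call is allowed for a caller other than the mounting uid is decided by the access gating in ncp_ioctl() below, not by the handler above.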
809 long ncp_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) 809 long ncp_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
810 { 810 {
811 struct inode *inode = file_inode(filp); 811 struct inode *inode = file_inode(filp);
812 struct ncp_server *server = NCP_SERVER(inode); 812 struct ncp_server *server = NCP_SERVER(inode);
813 uid_t uid = current_uid(); 813 uid_t uid = current_uid();
814 int need_drop_write = 0; 814 int need_drop_write = 0;
815 long ret; 815 long ret;
816 816
817 switch (cmd) { 817 switch (cmd) {
818 case NCP_IOC_SETCHARSETS: 818 case NCP_IOC_SETCHARSETS:
819 case NCP_IOC_CONN_LOGGED_IN: 819 case NCP_IOC_CONN_LOGGED_IN:
820 case NCP_IOC_SETROOT: 820 case NCP_IOC_SETROOT:
821 if (!capable(CAP_SYS_ADMIN)) { 821 if (!capable(CAP_SYS_ADMIN)) {
822 ret = -EACCES; 822 ret = -EPERM;
823 goto out; 823 goto out;
824 } 824 }
825 break; 825 break;
826 } 826 }
827 if (server->m.mounted_uid != uid) { 827 if (server->m.mounted_uid != uid) {
828 switch (cmd) { 828 switch (cmd) {
829 /* 829 /*
830 * Only the mount owner can issue these ioctls. Information 830 * Only the mount owner can issue these ioctls. Information
831 * necessary to authenticate to other NDS servers is 831 * necessary to authenticate to other NDS servers is
832 * stored here. 832 * stored here.
833 */ 833 */
834 case NCP_IOC_GETOBJECTNAME: 834 case NCP_IOC_GETOBJECTNAME:
835 case NCP_IOC_SETOBJECTNAME: 835 case NCP_IOC_SETOBJECTNAME:
836 case NCP_IOC_GETPRIVATEDATA: 836 case NCP_IOC_GETPRIVATEDATA:
837 case NCP_IOC_SETPRIVATEDATA: 837 case NCP_IOC_SETPRIVATEDATA:
838 #ifdef CONFIG_COMPAT 838 #ifdef CONFIG_COMPAT
839 case NCP_IOC_GETOBJECTNAME_32: 839 case NCP_IOC_GETOBJECTNAME_32:
840 case NCP_IOC_SETOBJECTNAME_32: 840 case NCP_IOC_SETOBJECTNAME_32:
841 case NCP_IOC_GETPRIVATEDATA_32: 841 case NCP_IOC_GETPRIVATEDATA_32:
842 case NCP_IOC_SETPRIVATEDATA_32: 842 case NCP_IOC_SETPRIVATEDATA_32:
843 #endif 843 #endif
844 ret = -EACCES; 844 ret = -EACCES;
845 goto out; 845 goto out;
846 /* 846 /*
847 * These require write access on the inode if user id 847 * These require write access on the inode if user id
848 * does not match. Note that they do not write to the 848 * does not match. Note that they do not write to the
849 * file... But old code did mnt_want_write, so I keep 849 * file... But old code did mnt_want_write, so I keep
850 * it as is. Of course not for mountpoint owner, as 850 * it as is. Of course not for mountpoint owner, as
851 * that breaks read-only mounts altogether as ncpmount 851 * that breaks read-only mounts altogether as ncpmount
852 * needs working NCP_IOC_NCPREQUEST and 852 * needs working NCP_IOC_NCPREQUEST and
853 * NCP_IOC_GET_FS_INFO. Some of these codes (setdentryttl, 853 * NCP_IOC_GET_FS_INFO. Some of these codes (setdentryttl,
854 * signinit, setsignwanted) should probably be restricted 854 * signinit, setsignwanted) should probably be restricted
855 * to owner only, or even more strictly to CAP_SYS_ADMIN. 855 * to owner only, or even more strictly to CAP_SYS_ADMIN.
856 */ 856 */
857 case NCP_IOC_GET_FS_INFO: 857 case NCP_IOC_GET_FS_INFO:
858 case NCP_IOC_GET_FS_INFO_V2: 858 case NCP_IOC_GET_FS_INFO_V2:
859 case NCP_IOC_NCPREQUEST: 859 case NCP_IOC_NCPREQUEST:
860 case NCP_IOC_SETDENTRYTTL: 860 case NCP_IOC_SETDENTRYTTL:
861 case NCP_IOC_SIGN_INIT: 861 case NCP_IOC_SIGN_INIT:
862 case NCP_IOC_LOCKUNLOCK: 862 case NCP_IOC_LOCKUNLOCK:
863 case NCP_IOC_SET_SIGN_WANTED: 863 case NCP_IOC_SET_SIGN_WANTED:
864 #ifdef CONFIG_COMPAT 864 #ifdef CONFIG_COMPAT
865 case NCP_IOC_GET_FS_INFO_V2_32: 865 case NCP_IOC_GET_FS_INFO_V2_32:
866 case NCP_IOC_NCPREQUEST_32: 866 case NCP_IOC_NCPREQUEST_32:
867 #endif 867 #endif
868 ret = mnt_want_write_file(filp); 868 ret = mnt_want_write_file(filp);
869 if (ret) 869 if (ret)
870 goto out; 870 goto out;
871 need_drop_write = 1; 871 need_drop_write = 1;
872 ret = inode_permission(inode, MAY_WRITE); 872 ret = inode_permission(inode, MAY_WRITE);
873 if (ret) 873 if (ret)
874 goto outDropWrite; 874 goto outDropWrite;
875 break; 875 break;
876 /* 876 /*
877 * Read access required. 877 * Read access required.
878 */ 878 */
879 case NCP_IOC_GETMOUNTUID16: 879 case NCP_IOC_GETMOUNTUID16:
880 case NCP_IOC_GETMOUNTUID32: 880 case NCP_IOC_GETMOUNTUID32:
881 case NCP_IOC_GETMOUNTUID64: 881 case NCP_IOC_GETMOUNTUID64:
882 case NCP_IOC_GETROOT: 882 case NCP_IOC_GETROOT:
883 case NCP_IOC_SIGN_WANTED: 883 case NCP_IOC_SIGN_WANTED:
884 ret = inode_permission(inode, MAY_READ); 884 ret = inode_permission(inode, MAY_READ);
885 if (ret) 885 if (ret)
886 goto out; 886 goto out;
887 break; 887 break;
888 /* 888 /*
889 * Anybody can read these. 889 * Anybody can read these.
890 */ 890 */
891 case NCP_IOC_GETCHARSETS: 891 case NCP_IOC_GETCHARSETS:
892 case NCP_IOC_GETDENTRYTTL: 892 case NCP_IOC_GETDENTRYTTL:
893 default: 893 default:
894 /* Three codes below are protected by CAP_SYS_ADMIN above. */ 894 /* Three codes below are protected by CAP_SYS_ADMIN above. */
895 case NCP_IOC_SETCHARSETS: 895 case NCP_IOC_SETCHARSETS:
896 case NCP_IOC_CONN_LOGGED_IN: 896 case NCP_IOC_CONN_LOGGED_IN:
897 case NCP_IOC_SETROOT: 897 case NCP_IOC_SETROOT:
898 break; 898 break;
899 } 899 }
900 } 900 }
901 ret = __ncp_ioctl(inode, cmd, arg); 901 ret = __ncp_ioctl(inode, cmd, arg);
902 outDropWrite: 902 outDropWrite:
903 if (need_drop_write) 903 if (need_drop_write)
904 mnt_drop_write_file(filp); 904 mnt_drop_write_file(filp);
905 out: 905 out:
906 return ret; 906 return ret;
907 } 907 }
908 908
909 #ifdef CONFIG_COMPAT 909 #ifdef CONFIG_COMPAT
910 long ncp_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 910 long ncp_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
911 { 911 {
912 long ret; 912 long ret;
913 913
914 arg = (unsigned long) compat_ptr(arg); 914 arg = (unsigned long) compat_ptr(arg);
915 ret = ncp_ioctl(file, cmd, arg); 915 ret = ncp_ioctl(file, cmd, arg);
916 return ret; 916 return ret;
917 } 917 }
918 #endif 918 #endif
919 919
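Taken together, ncp_ioctl() above layers three kinds of checks in front of __ncp_ioctl(): a CAP_SYS_ADMIN gate for the administrative commands (whose failure now yields -EPERM), a mount-owner gate for the credential-carrying commands (-EACCES for anyone but the mounting uid), and plain MAY_READ/MAY_WRITE inode permission for the rest. The fragment below is only a stand-alone sketch of that ordering with invented names (gate() and its parameters are not kernel symbols); it is a plain user-space program, not ncpfs code.

/* gate.c -- minimal sketch of the check ordering used by ncp_ioctl() above. */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Returns 0 when the caller may reach the real handler, else a negative errno. */
static int gate(bool admin_cmd, bool owner_only_cmd,
                bool has_cap_sys_admin, bool is_mount_owner)
{
        if (admin_cmd && !has_cap_sys_admin)
                return -EPERM;          /* privilege (capability) check failed */
        if (owner_only_cmd && !is_mount_owner)
                return -EACCES;         /* owner-only credential data */
        return 0;                       /* fall through to DAC permission / handler */
}

int main(void)
{
        printf("admin cmd, no CAP_SYS_ADMIN: %d\n", gate(true, false, false, true));
        printf("owner-only cmd, not owner:   %d\n", gate(false, true, true, false));
        printf("ordinary cmd:                %d\n", gate(false, false, false, false));
        return 0;
}

On Linux this prints -1 (EPERM), -13 (EACCES) and 0, in the same order the checks are applied above.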
1 /* 1 /*
2 * linux/fs/proc/base.c 2 * linux/fs/proc/base.c
3 * 3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds 4 * Copyright (C) 1991, 1992 Linus Torvalds
5 * 5 *
6 * proc base directory handling functions 6 * proc base directory handling functions
7 * 7 *
8 * 1999, Al Viro. Rewritten. Now it covers the whole per-process part. 8 * 1999, Al Viro. Rewritten. Now it covers the whole per-process part.
9 * Instead of using magical inumbers to determine the kind of object 9 * Instead of using magical inumbers to determine the kind of object
10 * we allocate and fill in-core inodes upon lookup. They don't even 10 * we allocate and fill in-core inodes upon lookup. They don't even
11 * go into icache. We cache the reference to task_struct upon lookup too. 11 * go into icache. We cache the reference to task_struct upon lookup too.
12 * Eventually it should become a filesystem in its own right. We don't use the 12 * Eventually it should become a filesystem in its own right. We don't use the
13 * rest of procfs anymore. 13 * rest of procfs anymore.
14 * 14 *
15 * 15 *
16 * Changelog: 16 * Changelog:
17 * 17-Jan-2005 17 * 17-Jan-2005
18 * Allan Bezerra 18 * Allan Bezerra
19 * Bruna Moreira <bruna.moreira@indt.org.br> 19 * Bruna Moreira <bruna.moreira@indt.org.br>
20 * Edjard Mota <edjard.mota@indt.org.br> 20 * Edjard Mota <edjard.mota@indt.org.br>
21 * Ilias Biris <ilias.biris@indt.org.br> 21 * Ilias Biris <ilias.biris@indt.org.br>
22 * Mauricio Lin <mauricio.lin@indt.org.br> 22 * Mauricio Lin <mauricio.lin@indt.org.br>
23 * 23 *
24 * Embedded Linux Lab - 10LE Instituto Nokia de Tecnologia - INdT 24 * Embedded Linux Lab - 10LE Instituto Nokia de Tecnologia - INdT
25 * 25 *
26 * A new process specific entry (smaps) included in /proc. It shows the 26 * A new process specific entry (smaps) included in /proc. It shows the
27 * size of rss for each memory area. The maps entry lacks information 27 * size of rss for each memory area. The maps entry lacks information
28 * about physical memory size (rss) for each mapped file, i.e., 28 * about physical memory size (rss) for each mapped file, i.e.,
29 * rss information for executables and library files. 29 * rss information for executables and library files.
30 * This additional information is useful for any tools that need to know 30 * This additional information is useful for any tools that need to know
31 * about physical memory consumption for a process specific library. 31 * about physical memory consumption for a process specific library.
32 * 32 *
33 * Changelog: 33 * Changelog:
34 * 21-Feb-2005 34 * 21-Feb-2005
35 * Embedded Linux Lab - 10LE Instituto Nokia de Tecnologia - INdT 35 * Embedded Linux Lab - 10LE Instituto Nokia de Tecnologia - INdT
36 * Pud inclusion in the page table walking. 36 * Pud inclusion in the page table walking.
37 * 37 *
38 * ChangeLog: 38 * ChangeLog:
39 * 10-Mar-2005 39 * 10-Mar-2005
40 * 10LE Instituto Nokia de Tecnologia - INdT: 40 * 10LE Instituto Nokia de Tecnologia - INdT:
41 * A better way to walk through the page table, as suggested by Hugh Dickins. 41 * A better way to walk through the page table, as suggested by Hugh Dickins.
42 * 42 *
43 * Simo Piiroinen <simo.piiroinen@nokia.com>: 43 * Simo Piiroinen <simo.piiroinen@nokia.com>:
44 * Smaps information related to shared, private, clean and dirty pages. 44 * Smaps information related to shared, private, clean and dirty pages.
45 * 45 *
46 * Paul Mundt <paul.mundt@nokia.com>: 46 * Paul Mundt <paul.mundt@nokia.com>:
47 * Overall revision about smaps. 47 * Overall revision about smaps.
48 */ 48 */
49 49
50 #include <asm/uaccess.h> 50 #include <asm/uaccess.h>
51 51
52 #include <linux/errno.h> 52 #include <linux/errno.h>
53 #include <linux/time.h> 53 #include <linux/time.h>
54 #include <linux/proc_fs.h> 54 #include <linux/proc_fs.h>
55 #include <linux/stat.h> 55 #include <linux/stat.h>
56 #include <linux/task_io_accounting_ops.h> 56 #include <linux/task_io_accounting_ops.h>
57 #include <linux/init.h> 57 #include <linux/init.h>
58 #include <linux/capability.h> 58 #include <linux/capability.h>
59 #include <linux/file.h> 59 #include <linux/file.h>
60 #include <linux/fdtable.h> 60 #include <linux/fdtable.h>
61 #include <linux/string.h> 61 #include <linux/string.h>
62 #include <linux/seq_file.h> 62 #include <linux/seq_file.h>
63 #include <linux/namei.h> 63 #include <linux/namei.h>
64 #include <linux/mnt_namespace.h> 64 #include <linux/mnt_namespace.h>
65 #include <linux/mm.h> 65 #include <linux/mm.h>
66 #include <linux/swap.h> 66 #include <linux/swap.h>
67 #include <linux/rcupdate.h> 67 #include <linux/rcupdate.h>
68 #include <linux/kallsyms.h> 68 #include <linux/kallsyms.h>
69 #include <linux/stacktrace.h> 69 #include <linux/stacktrace.h>
70 #include <linux/resource.h> 70 #include <linux/resource.h>
71 #include <linux/module.h> 71 #include <linux/module.h>
72 #include <linux/mount.h> 72 #include <linux/mount.h>
73 #include <linux/security.h> 73 #include <linux/security.h>
74 #include <linux/ptrace.h> 74 #include <linux/ptrace.h>
75 #include <linux/tracehook.h> 75 #include <linux/tracehook.h>
76 #include <linux/cgroup.h> 76 #include <linux/cgroup.h>
77 #include <linux/cpuset.h> 77 #include <linux/cpuset.h>
78 #include <linux/audit.h> 78 #include <linux/audit.h>
79 #include <linux/poll.h> 79 #include <linux/poll.h>
80 #include <linux/nsproxy.h> 80 #include <linux/nsproxy.h>
81 #include <linux/oom.h> 81 #include <linux/oom.h>
82 #include <linux/elf.h> 82 #include <linux/elf.h>
83 #include <linux/pid_namespace.h> 83 #include <linux/pid_namespace.h>
84 #include <linux/user_namespace.h> 84 #include <linux/user_namespace.h>
85 #include <linux/fs_struct.h> 85 #include <linux/fs_struct.h>
86 #include <linux/slab.h> 86 #include <linux/slab.h>
87 #include <linux/flex_array.h> 87 #include <linux/flex_array.h>
88 #ifdef CONFIG_HARDWALL 88 #ifdef CONFIG_HARDWALL
89 #include <asm/hardwall.h> 89 #include <asm/hardwall.h>
90 #endif 90 #endif
91 #include <trace/events/oom.h> 91 #include <trace/events/oom.h>
92 #include "internal.h" 92 #include "internal.h"
93 #include "fd.h" 93 #include "fd.h"
94 94
95 /* NOTE: 95 /* NOTE:
96 * Implementing inode permission operations in /proc is almost 96 * Implementing inode permission operations in /proc is almost
97 * certainly an error. Permission checks need to happen during 97 * certainly an error. Permission checks need to happen during
98 * each system call not at open time. The reason is that most of 98 * each system call not at open time. The reason is that most of
99 * what we wish to check for permissions in /proc varies at runtime. 99 * what we wish to check for permissions in /proc varies at runtime.
100 * 100 *
101 * The classic example of a problem is opening file descriptors 101 * The classic example of a problem is opening file descriptors
102 * in /proc for a task before it execs a suid executable. 102 * in /proc for a task before it execs a suid executable.
103 */ 103 */
104 104
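The NOTE above is the rule the rest of this file follows: a /proc file handle only remembers which task it refers to, and the access decision is repeated inside each read/ioctl path (via get_proc_task(), ptrace_may_access(), lock_trace() and friends further down) rather than being made once at open() time. The fragment below is a schematic of that shape in plain user-space C with invented names (struct handle, handle_read(), target_is_accessible); it is not the procfs implementation.

/* percall.c -- schematic of "re-check access on every call, not at open". */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct handle { int target_pid; };              /* all an open handle remembers */

/* Hypothetical stand-in for ptrace_may_access(); toggled by the test below. */
static bool target_is_accessible;

static int handle_read(const struct handle *h, char *buf, size_t len)
{
        if (!target_is_accessible)              /* re-checked on every call */
                return -EACCES;                 /* kernel paths return -EACCES/-EPERM here */
        return snprintf(buf, len, "data for pid %d\n", h->target_pid);
}

int main(void)
{
        struct handle h = { .target_pid = 42 };
        char buf[64];

        target_is_accessible = true;            /* before the target gains privilege */
        if (handle_read(&h, buf, sizeof(buf)) > 0)
                fputs(buf, stdout);

        target_is_accessible = false;           /* e.g. the target exec'd a suid binary */
        printf("second read returns %d\n", handle_read(&h, buf, sizeof(buf)));
        return 0;
}

The second read fails even though the handle was obtained while access was still allowed, which is exactly the property an open-time-only check would lose.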
105 struct pid_entry { 105 struct pid_entry {
106 char *name; 106 char *name;
107 int len; 107 int len;
108 umode_t mode; 108 umode_t mode;
109 const struct inode_operations *iop; 109 const struct inode_operations *iop;
110 const struct file_operations *fop; 110 const struct file_operations *fop;
111 union proc_op op; 111 union proc_op op;
112 }; 112 };
113 113
114 #define NOD(NAME, MODE, IOP, FOP, OP) { \ 114 #define NOD(NAME, MODE, IOP, FOP, OP) { \
115 .name = (NAME), \ 115 .name = (NAME), \
116 .len = sizeof(NAME) - 1, \ 116 .len = sizeof(NAME) - 1, \
117 .mode = MODE, \ 117 .mode = MODE, \
118 .iop = IOP, \ 118 .iop = IOP, \
119 .fop = FOP, \ 119 .fop = FOP, \
120 .op = OP, \ 120 .op = OP, \
121 } 121 }
122 122
123 #define DIR(NAME, MODE, iops, fops) \ 123 #define DIR(NAME, MODE, iops, fops) \
124 NOD(NAME, (S_IFDIR|(MODE)), &iops, &fops, {} ) 124 NOD(NAME, (S_IFDIR|(MODE)), &iops, &fops, {} )
125 #define LNK(NAME, get_link) \ 125 #define LNK(NAME, get_link) \
126 NOD(NAME, (S_IFLNK|S_IRWXUGO), \ 126 NOD(NAME, (S_IFLNK|S_IRWXUGO), \
127 &proc_pid_link_inode_operations, NULL, \ 127 &proc_pid_link_inode_operations, NULL, \
128 { .proc_get_link = get_link } ) 128 { .proc_get_link = get_link } )
129 #define REG(NAME, MODE, fops) \ 129 #define REG(NAME, MODE, fops) \
130 NOD(NAME, (S_IFREG|(MODE)), NULL, &fops, {}) 130 NOD(NAME, (S_IFREG|(MODE)), NULL, &fops, {})
131 #define INF(NAME, MODE, read) \ 131 #define INF(NAME, MODE, read) \
132 NOD(NAME, (S_IFREG|(MODE)), \ 132 NOD(NAME, (S_IFREG|(MODE)), \
133 NULL, &proc_info_file_operations, \ 133 NULL, &proc_info_file_operations, \
134 { .proc_read = read } ) 134 { .proc_read = read } )
135 #define ONE(NAME, MODE, show) \ 135 #define ONE(NAME, MODE, show) \
136 NOD(NAME, (S_IFREG|(MODE)), \ 136 NOD(NAME, (S_IFREG|(MODE)), \
137 NULL, &proc_single_file_operations, \ 137 NULL, &proc_single_file_operations, \
138 { .proc_show = show } ) 138 { .proc_show = show } )
139 139
140 /* 140 /*
141 * Count the number of hardlinks for the pid_entry table, excluding the . 141 * Count the number of hardlinks for the pid_entry table, excluding the .
142 * and .. links. 142 * and .. links.
143 */ 143 */
144 static unsigned int pid_entry_count_dirs(const struct pid_entry *entries, 144 static unsigned int pid_entry_count_dirs(const struct pid_entry *entries,
145 unsigned int n) 145 unsigned int n)
146 { 146 {
147 unsigned int i; 147 unsigned int i;
148 unsigned int count; 148 unsigned int count;
149 149
150 count = 0; 150 count = 0;
151 for (i = 0; i < n; ++i) { 151 for (i = 0; i < n; ++i) {
152 if (S_ISDIR(entries[i].mode)) 152 if (S_ISDIR(entries[i].mode))
153 ++count; 153 ++count;
154 } 154 }
155 155
156 return count; 156 return count;
157 } 157 }
158 158
159 static int get_task_root(struct task_struct *task, struct path *root) 159 static int get_task_root(struct task_struct *task, struct path *root)
160 { 160 {
161 int result = -ENOENT; 161 int result = -ENOENT;
162 162
163 task_lock(task); 163 task_lock(task);
164 if (task->fs) { 164 if (task->fs) {
165 get_fs_root(task->fs, root); 165 get_fs_root(task->fs, root);
166 result = 0; 166 result = 0;
167 } 167 }
168 task_unlock(task); 168 task_unlock(task);
169 return result; 169 return result;
170 } 170 }
171 171
172 static int proc_cwd_link(struct dentry *dentry, struct path *path) 172 static int proc_cwd_link(struct dentry *dentry, struct path *path)
173 { 173 {
174 struct task_struct *task = get_proc_task(dentry->d_inode); 174 struct task_struct *task = get_proc_task(dentry->d_inode);
175 int result = -ENOENT; 175 int result = -ENOENT;
176 176
177 if (task) { 177 if (task) {
178 task_lock(task); 178 task_lock(task);
179 if (task->fs) { 179 if (task->fs) {
180 get_fs_pwd(task->fs, path); 180 get_fs_pwd(task->fs, path);
181 result = 0; 181 result = 0;
182 } 182 }
183 task_unlock(task); 183 task_unlock(task);
184 put_task_struct(task); 184 put_task_struct(task);
185 } 185 }
186 return result; 186 return result;
187 } 187 }
188 188
189 static int proc_root_link(struct dentry *dentry, struct path *path) 189 static int proc_root_link(struct dentry *dentry, struct path *path)
190 { 190 {
191 struct task_struct *task = get_proc_task(dentry->d_inode); 191 struct task_struct *task = get_proc_task(dentry->d_inode);
192 int result = -ENOENT; 192 int result = -ENOENT;
193 193
194 if (task) { 194 if (task) {
195 result = get_task_root(task, path); 195 result = get_task_root(task, path);
196 put_task_struct(task); 196 put_task_struct(task);
197 } 197 }
198 return result; 198 return result;
199 } 199 }
200 200
201 static int proc_pid_cmdline(struct task_struct *task, char * buffer) 201 static int proc_pid_cmdline(struct task_struct *task, char * buffer)
202 { 202 {
203 int res = 0; 203 int res = 0;
204 unsigned int len; 204 unsigned int len;
205 struct mm_struct *mm = get_task_mm(task); 205 struct mm_struct *mm = get_task_mm(task);
206 if (!mm) 206 if (!mm)
207 goto out; 207 goto out;
208 if (!mm->arg_end) 208 if (!mm->arg_end)
209 goto out_mm; /* Shh! No looking before we're done */ 209 goto out_mm; /* Shh! No looking before we're done */
210 210
211 len = mm->arg_end - mm->arg_start; 211 len = mm->arg_end - mm->arg_start;
212 212
213 if (len > PAGE_SIZE) 213 if (len > PAGE_SIZE)
214 len = PAGE_SIZE; 214 len = PAGE_SIZE;
215 215
216 res = access_process_vm(task, mm->arg_start, buffer, len, 0); 216 res = access_process_vm(task, mm->arg_start, buffer, len, 0);
217 217
218 // If the nul at the end of args has been overwritten, then 218 // If the nul at the end of args has been overwritten, then
219 // assume application is using setproctitle(3). 219 // assume application is using setproctitle(3).
220 if (res > 0 && buffer[res-1] != '\0' && len < PAGE_SIZE) { 220 if (res > 0 && buffer[res-1] != '\0' && len < PAGE_SIZE) {
221 len = strnlen(buffer, res); 221 len = strnlen(buffer, res);
222 if (len < res) { 222 if (len < res) {
223 res = len; 223 res = len;
224 } else { 224 } else {
225 len = mm->env_end - mm->env_start; 225 len = mm->env_end - mm->env_start;
226 if (len > PAGE_SIZE - res) 226 if (len > PAGE_SIZE - res)
227 len = PAGE_SIZE - res; 227 len = PAGE_SIZE - res;
228 res += access_process_vm(task, mm->env_start, buffer+res, len, 0); 228 res += access_process_vm(task, mm->env_start, buffer+res, len, 0);
229 res = strnlen(buffer, res); 229 res = strnlen(buffer, res);
230 } 230 }
231 } 231 }
232 out_mm: 232 out_mm:
233 mmput(mm); 233 mmput(mm);
234 out: 234 out:
235 return res; 235 return res;
236 } 236 }
237 237
238 static int proc_pid_auxv(struct task_struct *task, char *buffer) 238 static int proc_pid_auxv(struct task_struct *task, char *buffer)
239 { 239 {
240 struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ); 240 struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ);
241 int res = PTR_ERR(mm); 241 int res = PTR_ERR(mm);
242 if (mm && !IS_ERR(mm)) { 242 if (mm && !IS_ERR(mm)) {
243 unsigned int nwords = 0; 243 unsigned int nwords = 0;
244 do { 244 do {
245 nwords += 2; 245 nwords += 2;
246 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */ 246 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
247 res = nwords * sizeof(mm->saved_auxv[0]); 247 res = nwords * sizeof(mm->saved_auxv[0]);
248 if (res > PAGE_SIZE) 248 if (res > PAGE_SIZE)
249 res = PAGE_SIZE; 249 res = PAGE_SIZE;
250 memcpy(buffer, mm->saved_auxv, res); 250 memcpy(buffer, mm->saved_auxv, res);
251 mmput(mm); 251 mmput(mm);
252 } 252 }
253 return res; 253 return res;
254 } 254 }
255 255
256 256
257 #ifdef CONFIG_KALLSYMS 257 #ifdef CONFIG_KALLSYMS
258 /* 258 /*
259 * Provides a wchan file via kallsyms in a proper one-value-per-file format. 259 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
260 * Returns the resolved symbol. If that fails, simply return the address. 260 * Returns the resolved symbol. If that fails, simply return the address.
261 */ 261 */
262 static int proc_pid_wchan(struct task_struct *task, char *buffer) 262 static int proc_pid_wchan(struct task_struct *task, char *buffer)
263 { 263 {
264 unsigned long wchan; 264 unsigned long wchan;
265 char symname[KSYM_NAME_LEN]; 265 char symname[KSYM_NAME_LEN];
266 266
267 wchan = get_wchan(task); 267 wchan = get_wchan(task);
268 268
269 if (lookup_symbol_name(wchan, symname) < 0) 269 if (lookup_symbol_name(wchan, symname) < 0)
270 if (!ptrace_may_access(task, PTRACE_MODE_READ)) 270 if (!ptrace_may_access(task, PTRACE_MODE_READ))
271 return 0; 271 return 0;
272 else 272 else
273 return sprintf(buffer, "%lu", wchan); 273 return sprintf(buffer, "%lu", wchan);
274 else 274 else
275 return sprintf(buffer, "%s", symname); 275 return sprintf(buffer, "%s", symname);
276 } 276 }
277 #endif /* CONFIG_KALLSYMS */ 277 #endif /* CONFIG_KALLSYMS */
278 278
279 static int lock_trace(struct task_struct *task) 279 static int lock_trace(struct task_struct *task)
280 { 280 {
281 int err = mutex_lock_killable(&task->signal->cred_guard_mutex); 281 int err = mutex_lock_killable(&task->signal->cred_guard_mutex);
282 if (err) 282 if (err)
283 return err; 283 return err;
284 if (!ptrace_may_access(task, PTRACE_MODE_ATTACH)) { 284 if (!ptrace_may_access(task, PTRACE_MODE_ATTACH)) {
285 mutex_unlock(&task->signal->cred_guard_mutex); 285 mutex_unlock(&task->signal->cred_guard_mutex);
286 return -EPERM; 286 return -EPERM;
287 } 287 }
288 return 0; 288 return 0;
289 } 289 }
290 290
291 static void unlock_trace(struct task_struct *task) 291 static void unlock_trace(struct task_struct *task)
292 { 292 {
293 mutex_unlock(&task->signal->cred_guard_mutex); 293 mutex_unlock(&task->signal->cred_guard_mutex);
294 } 294 }
295 295
296 #ifdef CONFIG_STACKTRACE 296 #ifdef CONFIG_STACKTRACE
297 297
298 #define MAX_STACK_TRACE_DEPTH 64 298 #define MAX_STACK_TRACE_DEPTH 64
299 299
300 static int proc_pid_stack(struct seq_file *m, struct pid_namespace *ns, 300 static int proc_pid_stack(struct seq_file *m, struct pid_namespace *ns,
301 struct pid *pid, struct task_struct *task) 301 struct pid *pid, struct task_struct *task)
302 { 302 {
303 struct stack_trace trace; 303 struct stack_trace trace;
304 unsigned long *entries; 304 unsigned long *entries;
305 int err; 305 int err;
306 int i; 306 int i;
307 307
308 entries = kmalloc(MAX_STACK_TRACE_DEPTH * sizeof(*entries), GFP_KERNEL); 308 entries = kmalloc(MAX_STACK_TRACE_DEPTH * sizeof(*entries), GFP_KERNEL);
309 if (!entries) 309 if (!entries)
310 return -ENOMEM; 310 return -ENOMEM;
311 311
312 trace.nr_entries = 0; 312 trace.nr_entries = 0;
313 trace.max_entries = MAX_STACK_TRACE_DEPTH; 313 trace.max_entries = MAX_STACK_TRACE_DEPTH;
314 trace.entries = entries; 314 trace.entries = entries;
315 trace.skip = 0; 315 trace.skip = 0;
316 316
317 err = lock_trace(task); 317 err = lock_trace(task);
318 if (!err) { 318 if (!err) {
319 save_stack_trace_tsk(task, &trace); 319 save_stack_trace_tsk(task, &trace);
320 320
321 for (i = 0; i < trace.nr_entries; i++) { 321 for (i = 0; i < trace.nr_entries; i++) {
322 seq_printf(m, "[<%pK>] %pS\n", 322 seq_printf(m, "[<%pK>] %pS\n",
323 (void *)entries[i], (void *)entries[i]); 323 (void *)entries[i], (void *)entries[i]);
324 } 324 }
325 unlock_trace(task); 325 unlock_trace(task);
326 } 326 }
327 kfree(entries); 327 kfree(entries);
328 328
329 return err; 329 return err;
330 } 330 }
331 #endif 331 #endif
332 332
333 #ifdef CONFIG_SCHEDSTATS 333 #ifdef CONFIG_SCHEDSTATS
334 /* 334 /*
335 * Provides /proc/PID/schedstat 335 * Provides /proc/PID/schedstat
336 */ 336 */
337 static int proc_pid_schedstat(struct task_struct *task, char *buffer) 337 static int proc_pid_schedstat(struct task_struct *task, char *buffer)
338 { 338 {
339 return sprintf(buffer, "%llu %llu %lu\n", 339 return sprintf(buffer, "%llu %llu %lu\n",
340 (unsigned long long)task->se.sum_exec_runtime, 340 (unsigned long long)task->se.sum_exec_runtime,
341 (unsigned long long)task->sched_info.run_delay, 341 (unsigned long long)task->sched_info.run_delay,
342 task->sched_info.pcount); 342 task->sched_info.pcount);
343 } 343 }
344 #endif 344 #endif
345 345
346 #ifdef CONFIG_LATENCYTOP 346 #ifdef CONFIG_LATENCYTOP
347 static int lstats_show_proc(struct seq_file *m, void *v) 347 static int lstats_show_proc(struct seq_file *m, void *v)
348 { 348 {
349 int i; 349 int i;
350 struct inode *inode = m->private; 350 struct inode *inode = m->private;
351 struct task_struct *task = get_proc_task(inode); 351 struct task_struct *task = get_proc_task(inode);
352 352
353 if (!task) 353 if (!task)
354 return -ESRCH; 354 return -ESRCH;
355 seq_puts(m, "Latency Top version : v0.1\n"); 355 seq_puts(m, "Latency Top version : v0.1\n");
356 for (i = 0; i < 32; i++) { 356 for (i = 0; i < 32; i++) {
357 struct latency_record *lr = &task->latency_record[i]; 357 struct latency_record *lr = &task->latency_record[i];
358 if (lr->backtrace[0]) { 358 if (lr->backtrace[0]) {
359 int q; 359 int q;
360 seq_printf(m, "%i %li %li", 360 seq_printf(m, "%i %li %li",
361 lr->count, lr->time, lr->max); 361 lr->count, lr->time, lr->max);
362 for (q = 0; q < LT_BACKTRACEDEPTH; q++) { 362 for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
363 unsigned long bt = lr->backtrace[q]; 363 unsigned long bt = lr->backtrace[q];
364 if (!bt) 364 if (!bt)
365 break; 365 break;
366 if (bt == ULONG_MAX) 366 if (bt == ULONG_MAX)
367 break; 367 break;
368 seq_printf(m, " %ps", (void *)bt); 368 seq_printf(m, " %ps", (void *)bt);
369 } 369 }
370 seq_putc(m, '\n'); 370 seq_putc(m, '\n');
371 } 371 }
372 372
373 } 373 }
374 put_task_struct(task); 374 put_task_struct(task);
375 return 0; 375 return 0;
376 } 376 }
377 377
378 static int lstats_open(struct inode *inode, struct file *file) 378 static int lstats_open(struct inode *inode, struct file *file)
379 { 379 {
380 return single_open(file, lstats_show_proc, inode); 380 return single_open(file, lstats_show_proc, inode);
381 } 381 }
382 382
383 static ssize_t lstats_write(struct file *file, const char __user *buf, 383 static ssize_t lstats_write(struct file *file, const char __user *buf,
384 size_t count, loff_t *offs) 384 size_t count, loff_t *offs)
385 { 385 {
386 struct task_struct *task = get_proc_task(file_inode(file)); 386 struct task_struct *task = get_proc_task(file_inode(file));
387 387
388 if (!task) 388 if (!task)
389 return -ESRCH; 389 return -ESRCH;
390 clear_all_latency_tracing(task); 390 clear_all_latency_tracing(task);
391 put_task_struct(task); 391 put_task_struct(task);
392 392
393 return count; 393 return count;
394 } 394 }
395 395
396 static const struct file_operations proc_lstats_operations = { 396 static const struct file_operations proc_lstats_operations = {
397 .open = lstats_open, 397 .open = lstats_open,
398 .read = seq_read, 398 .read = seq_read,
399 .write = lstats_write, 399 .write = lstats_write,
400 .llseek = seq_lseek, 400 .llseek = seq_lseek,
401 .release = single_release, 401 .release = single_release,
402 }; 402 };
403 403
404 #endif 404 #endif
405 405
406 static int proc_oom_score(struct task_struct *task, char *buffer) 406 static int proc_oom_score(struct task_struct *task, char *buffer)
407 { 407 {
408 unsigned long totalpages = totalram_pages + total_swap_pages; 408 unsigned long totalpages = totalram_pages + total_swap_pages;
409 unsigned long points = 0; 409 unsigned long points = 0;
410 410
411 read_lock(&tasklist_lock); 411 read_lock(&tasklist_lock);
412 if (pid_alive(task)) 412 if (pid_alive(task))
413 points = oom_badness(task, NULL, NULL, totalpages) * 413 points = oom_badness(task, NULL, NULL, totalpages) *
414 1000 / totalpages; 414 1000 / totalpages;
415 read_unlock(&tasklist_lock); 415 read_unlock(&tasklist_lock);
416 return sprintf(buffer, "%lu\n", points); 416 return sprintf(buffer, "%lu\n", points);
417 } 417 }
418 418
419 struct limit_names { 419 struct limit_names {
420 char *name; 420 char *name;
421 char *unit; 421 char *unit;
422 }; 422 };
423 423
424 static const struct limit_names lnames[RLIM_NLIMITS] = { 424 static const struct limit_names lnames[RLIM_NLIMITS] = {
425 [RLIMIT_CPU] = {"Max cpu time", "seconds"}, 425 [RLIMIT_CPU] = {"Max cpu time", "seconds"},
426 [RLIMIT_FSIZE] = {"Max file size", "bytes"}, 426 [RLIMIT_FSIZE] = {"Max file size", "bytes"},
427 [RLIMIT_DATA] = {"Max data size", "bytes"}, 427 [RLIMIT_DATA] = {"Max data size", "bytes"},
428 [RLIMIT_STACK] = {"Max stack size", "bytes"}, 428 [RLIMIT_STACK] = {"Max stack size", "bytes"},
429 [RLIMIT_CORE] = {"Max core file size", "bytes"}, 429 [RLIMIT_CORE] = {"Max core file size", "bytes"},
430 [RLIMIT_RSS] = {"Max resident set", "bytes"}, 430 [RLIMIT_RSS] = {"Max resident set", "bytes"},
431 [RLIMIT_NPROC] = {"Max processes", "processes"}, 431 [RLIMIT_NPROC] = {"Max processes", "processes"},
432 [RLIMIT_NOFILE] = {"Max open files", "files"}, 432 [RLIMIT_NOFILE] = {"Max open files", "files"},
433 [RLIMIT_MEMLOCK] = {"Max locked memory", "bytes"}, 433 [RLIMIT_MEMLOCK] = {"Max locked memory", "bytes"},
434 [RLIMIT_AS] = {"Max address space", "bytes"}, 434 [RLIMIT_AS] = {"Max address space", "bytes"},
435 [RLIMIT_LOCKS] = {"Max file locks", "locks"}, 435 [RLIMIT_LOCKS] = {"Max file locks", "locks"},
436 [RLIMIT_SIGPENDING] = {"Max pending signals", "signals"}, 436 [RLIMIT_SIGPENDING] = {"Max pending signals", "signals"},
437 [RLIMIT_MSGQUEUE] = {"Max msgqueue size", "bytes"}, 437 [RLIMIT_MSGQUEUE] = {"Max msgqueue size", "bytes"},
438 [RLIMIT_NICE] = {"Max nice priority", NULL}, 438 [RLIMIT_NICE] = {"Max nice priority", NULL},
439 [RLIMIT_RTPRIO] = {"Max realtime priority", NULL}, 439 [RLIMIT_RTPRIO] = {"Max realtime priority", NULL},
440 [RLIMIT_RTTIME] = {"Max realtime timeout", "us"}, 440 [RLIMIT_RTTIME] = {"Max realtime timeout", "us"},
441 }; 441 };
442 442
443 /* Display limits for a process */ 443 /* Display limits for a process */
444 static int proc_pid_limits(struct task_struct *task, char *buffer) 444 static int proc_pid_limits(struct task_struct *task, char *buffer)
445 { 445 {
446 unsigned int i; 446 unsigned int i;
447 int count = 0; 447 int count = 0;
448 unsigned long flags; 448 unsigned long flags;
449 char *bufptr = buffer; 449 char *bufptr = buffer;
450 450
451 struct rlimit rlim[RLIM_NLIMITS]; 451 struct rlimit rlim[RLIM_NLIMITS];
452 452
453 if (!lock_task_sighand(task, &flags)) 453 if (!lock_task_sighand(task, &flags))
454 return 0; 454 return 0;
455 memcpy(rlim, task->signal->rlim, sizeof(struct rlimit) * RLIM_NLIMITS); 455 memcpy(rlim, task->signal->rlim, sizeof(struct rlimit) * RLIM_NLIMITS);
456 unlock_task_sighand(task, &flags); 456 unlock_task_sighand(task, &flags);
457 457
458 /* 458 /*
459 * print the file header 459 * print the file header
460 */ 460 */
461 count += sprintf(&bufptr[count], "%-25s %-20s %-20s %-10s\n", 461 count += sprintf(&bufptr[count], "%-25s %-20s %-20s %-10s\n",
462 "Limit", "Soft Limit", "Hard Limit", "Units"); 462 "Limit", "Soft Limit", "Hard Limit", "Units");
463 463
464 for (i = 0; i < RLIM_NLIMITS; i++) { 464 for (i = 0; i < RLIM_NLIMITS; i++) {
465 if (rlim[i].rlim_cur == RLIM_INFINITY) 465 if (rlim[i].rlim_cur == RLIM_INFINITY)
466 count += sprintf(&bufptr[count], "%-25s %-20s ", 466 count += sprintf(&bufptr[count], "%-25s %-20s ",
467 lnames[i].name, "unlimited"); 467 lnames[i].name, "unlimited");
468 else 468 else
469 count += sprintf(&bufptr[count], "%-25s %-20lu ", 469 count += sprintf(&bufptr[count], "%-25s %-20lu ",
470 lnames[i].name, rlim[i].rlim_cur); 470 lnames[i].name, rlim[i].rlim_cur);
471 471
472 if (rlim[i].rlim_max == RLIM_INFINITY) 472 if (rlim[i].rlim_max == RLIM_INFINITY)
473 count += sprintf(&bufptr[count], "%-20s ", "unlimited"); 473 count += sprintf(&bufptr[count], "%-20s ", "unlimited");
474 else 474 else
475 count += sprintf(&bufptr[count], "%-20lu ", 475 count += sprintf(&bufptr[count], "%-20lu ",
476 rlim[i].rlim_max); 476 rlim[i].rlim_max);
477 477
478 if (lnames[i].unit) 478 if (lnames[i].unit)
479 count += sprintf(&bufptr[count], "%-10s\n", 479 count += sprintf(&bufptr[count], "%-10s\n",
480 lnames[i].unit); 480 lnames[i].unit);
481 else 481 else
482 count += sprintf(&bufptr[count], "\n"); 482 count += sprintf(&bufptr[count], "\n");
483 } 483 }
484 484
485 return count; 485 return count;
486 } 486 }
487 487
488 #ifdef CONFIG_HAVE_ARCH_TRACEHOOK 488 #ifdef CONFIG_HAVE_ARCH_TRACEHOOK
489 static int proc_pid_syscall(struct task_struct *task, char *buffer) 489 static int proc_pid_syscall(struct task_struct *task, char *buffer)
490 { 490 {
491 long nr; 491 long nr;
492 unsigned long args[6], sp, pc; 492 unsigned long args[6], sp, pc;
493 int res = lock_trace(task); 493 int res = lock_trace(task);
494 if (res) 494 if (res)
495 return res; 495 return res;
496 496
497 if (task_current_syscall(task, &nr, args, 6, &sp, &pc)) 497 if (task_current_syscall(task, &nr, args, 6, &sp, &pc))
498 res = sprintf(buffer, "running\n"); 498 res = sprintf(buffer, "running\n");
499 else if (nr < 0) 499 else if (nr < 0)
500 res = sprintf(buffer, "%ld 0x%lx 0x%lx\n", nr, sp, pc); 500 res = sprintf(buffer, "%ld 0x%lx 0x%lx\n", nr, sp, pc);
501 else 501 else
502 res = sprintf(buffer, 502 res = sprintf(buffer,
503 "%ld 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx\n", 503 "%ld 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx\n",
504 nr, 504 nr,
505 args[0], args[1], args[2], args[3], args[4], args[5], 505 args[0], args[1], args[2], args[3], args[4], args[5],
506 sp, pc); 506 sp, pc);
507 unlock_trace(task); 507 unlock_trace(task);
508 return res; 508 return res;
509 } 509 }
510 #endif /* CONFIG_HAVE_ARCH_TRACEHOOK */ 510 #endif /* CONFIG_HAVE_ARCH_TRACEHOOK */
511 511
512 /************************************************************************/ 512 /************************************************************************/
513 /* Here the fs part begins */ 513 /* Here the fs part begins */
514 /************************************************************************/ 514 /************************************************************************/
515 515
516 /* permission checks */ 516 /* permission checks */
517 static int proc_fd_access_allowed(struct inode *inode) 517 static int proc_fd_access_allowed(struct inode *inode)
518 { 518 {
519 struct task_struct *task; 519 struct task_struct *task;
520 int allowed = 0; 520 int allowed = 0;
521 /* Allow access to a task's file descriptors if it is us or we 521 /* Allow access to a task's file descriptors if it is us or we
522 * may use ptrace attach to the process and find out that 522 * may use ptrace attach to the process and find out that
523 * information. 523 * information.
524 */ 524 */
525 task = get_proc_task(inode); 525 task = get_proc_task(inode);
526 if (task) { 526 if (task) {
527 allowed = ptrace_may_access(task, PTRACE_MODE_READ); 527 allowed = ptrace_may_access(task, PTRACE_MODE_READ);
528 put_task_struct(task); 528 put_task_struct(task);
529 } 529 }
530 return allowed; 530 return allowed;
531 } 531 }
532 532
533 int proc_setattr(struct dentry *dentry, struct iattr *attr) 533 int proc_setattr(struct dentry *dentry, struct iattr *attr)
534 { 534 {
535 int error; 535 int error;
536 struct inode *inode = dentry->d_inode; 536 struct inode *inode = dentry->d_inode;
537 537
538 if (attr->ia_valid & ATTR_MODE) 538 if (attr->ia_valid & ATTR_MODE)
539 return -EPERM; 539 return -EPERM;
540 540
541 error = inode_change_ok(inode, attr); 541 error = inode_change_ok(inode, attr);
542 if (error) 542 if (error)
543 return error; 543 return error;
544 544
545 setattr_copy(inode, attr); 545 setattr_copy(inode, attr);
546 mark_inode_dirty(inode); 546 mark_inode_dirty(inode);
547 return 0; 547 return 0;
548 } 548 }
549 549
550 /* 550 /*
551 * May current process learn task's sched/cmdline info (for hide_pid_min=1) 551 * May current process learn task's sched/cmdline info (for hide_pid_min=1)
552 * or euid/egid (for hide_pid_min=2)? 552 * or euid/egid (for hide_pid_min=2)?
553 */ 553 */
554 static bool has_pid_permissions(struct pid_namespace *pid, 554 static bool has_pid_permissions(struct pid_namespace *pid,
555 struct task_struct *task, 555 struct task_struct *task,
556 int hide_pid_min) 556 int hide_pid_min)
557 { 557 {
558 if (pid->hide_pid < hide_pid_min) 558 if (pid->hide_pid < hide_pid_min)
559 return true; 559 return true;
560 if (in_group_p(pid->pid_gid)) 560 if (in_group_p(pid->pid_gid))
561 return true; 561 return true;
562 return ptrace_may_access(task, PTRACE_MODE_READ); 562 return ptrace_may_access(task, PTRACE_MODE_READ);
563 } 563 }
564 564
565 565
566 static int proc_pid_permission(struct inode *inode, int mask) 566 static int proc_pid_permission(struct inode *inode, int mask)
567 { 567 {
568 struct pid_namespace *pid = inode->i_sb->s_fs_info; 568 struct pid_namespace *pid = inode->i_sb->s_fs_info;
569 struct task_struct *task; 569 struct task_struct *task;
570 bool has_perms; 570 bool has_perms;
571 571
572 task = get_proc_task(inode); 572 task = get_proc_task(inode);
573 if (!task) 573 if (!task)
574 return -ESRCH; 574 return -ESRCH;
575 has_perms = has_pid_permissions(pid, task, 1); 575 has_perms = has_pid_permissions(pid, task, 1);
576 put_task_struct(task); 576 put_task_struct(task);
577 577
578 if (!has_perms) { 578 if (!has_perms) {
579 if (pid->hide_pid == 2) { 579 if (pid->hide_pid == 2) {
580 /* 580 /*
581 * Let's make getdents(), stat(), and open() 581 * Let's make getdents(), stat(), and open()
582 * consistent with each other. If a process 582 * consistent with each other. If a process
583 * may not stat() a file, it shouldn't be seen 583 * may not stat() a file, it shouldn't be seen
584 * in procfs at all. 584 * in procfs at all.
585 */ 585 */
586 return -ENOENT; 586 return -ENOENT;
587 } 587 }
588 588
589 return -EPERM; 589 return -EPERM;
590 } 590 }
591 return generic_permission(inode, mask); 591 return generic_permission(inode, mask);
592 } 592 }
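
proc_pid_permission() and has_pid_permissions() above back the procfs "hidepid" mount option: level 1 hides details such as sched/cmdline from other users, while level 2 additionally makes foreign /proc/<pid> directories disappear with -ENOENT. A minimal userspace sketch (not part of this commit) of enabling that, assuming the caller has CAP_SYS_ADMIN and wants gid 1000 to stay exempt:

#include <stdio.h>
#include <sys/mount.h>

/* Remount /proc with the documented "hidepid" and "gid" options so that
 * hide_pid = 2 and pid_gid = 1000, the fields consulted by
 * has_pid_permissions() above.  Requires CAP_SYS_ADMIN. */
int main(void)
{
        if (mount("proc", "/proc", "proc", MS_REMOUNT, "hidepid=2,gid=1000") < 0) {
                perror("mount");
                return 1;
        }
        return 0;
}
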
593 593
594 594
595 595
596 static const struct inode_operations proc_def_inode_operations = { 596 static const struct inode_operations proc_def_inode_operations = {
597 .setattr = proc_setattr, 597 .setattr = proc_setattr,
598 }; 598 };
599 599
600 #define PROC_BLOCK_SIZE (3*1024) /* 4K page size but our output routines use some slack for overruns */ 600 #define PROC_BLOCK_SIZE (3*1024) /* 4K page size but our output routines use some slack for overruns */
601 601
602 static ssize_t proc_info_read(struct file * file, char __user * buf, 602 static ssize_t proc_info_read(struct file * file, char __user * buf,
603 size_t count, loff_t *ppos) 603 size_t count, loff_t *ppos)
604 { 604 {
605 struct inode * inode = file_inode(file); 605 struct inode * inode = file_inode(file);
606 unsigned long page; 606 unsigned long page;
607 ssize_t length; 607 ssize_t length;
608 struct task_struct *task = get_proc_task(inode); 608 struct task_struct *task = get_proc_task(inode);
609 609
610 length = -ESRCH; 610 length = -ESRCH;
611 if (!task) 611 if (!task)
612 goto out_no_task; 612 goto out_no_task;
613 613
614 if (count > PROC_BLOCK_SIZE) 614 if (count > PROC_BLOCK_SIZE)
615 count = PROC_BLOCK_SIZE; 615 count = PROC_BLOCK_SIZE;
616 616
617 length = -ENOMEM; 617 length = -ENOMEM;
618 if (!(page = __get_free_page(GFP_TEMPORARY))) 618 if (!(page = __get_free_page(GFP_TEMPORARY)))
619 goto out; 619 goto out;
620 620
621 length = PROC_I(inode)->op.proc_read(task, (char*)page); 621 length = PROC_I(inode)->op.proc_read(task, (char*)page);
622 622
623 if (length >= 0) 623 if (length >= 0)
624 length = simple_read_from_buffer(buf, count, ppos, (char *)page, length); 624 length = simple_read_from_buffer(buf, count, ppos, (char *)page, length);
625 free_page(page); 625 free_page(page);
626 out: 626 out:
627 put_task_struct(task); 627 put_task_struct(task);
628 out_no_task: 628 out_no_task:
629 return length; 629 return length;
630 } 630 }
631 631
632 static const struct file_operations proc_info_file_operations = { 632 static const struct file_operations proc_info_file_operations = {
633 .read = proc_info_read, 633 .read = proc_info_read,
634 .llseek = generic_file_llseek, 634 .llseek = generic_file_llseek,
635 }; 635 };
636 636
637 static int proc_single_show(struct seq_file *m, void *v) 637 static int proc_single_show(struct seq_file *m, void *v)
638 { 638 {
639 struct inode *inode = m->private; 639 struct inode *inode = m->private;
640 struct pid_namespace *ns; 640 struct pid_namespace *ns;
641 struct pid *pid; 641 struct pid *pid;
642 struct task_struct *task; 642 struct task_struct *task;
643 int ret; 643 int ret;
644 644
645 ns = inode->i_sb->s_fs_info; 645 ns = inode->i_sb->s_fs_info;
646 pid = proc_pid(inode); 646 pid = proc_pid(inode);
647 task = get_pid_task(pid, PIDTYPE_PID); 647 task = get_pid_task(pid, PIDTYPE_PID);
648 if (!task) 648 if (!task)
649 return -ESRCH; 649 return -ESRCH;
650 650
651 ret = PROC_I(inode)->op.proc_show(m, ns, pid, task); 651 ret = PROC_I(inode)->op.proc_show(m, ns, pid, task);
652 652
653 put_task_struct(task); 653 put_task_struct(task);
654 return ret; 654 return ret;
655 } 655 }
656 656
657 static int proc_single_open(struct inode *inode, struct file *filp) 657 static int proc_single_open(struct inode *inode, struct file *filp)
658 { 658 {
659 return single_open(filp, proc_single_show, inode); 659 return single_open(filp, proc_single_show, inode);
660 } 660 }
661 661
662 static const struct file_operations proc_single_file_operations = { 662 static const struct file_operations proc_single_file_operations = {
663 .open = proc_single_open, 663 .open = proc_single_open,
664 .read = seq_read, 664 .read = seq_read,
665 .llseek = seq_lseek, 665 .llseek = seq_lseek,
666 .release = single_release, 666 .release = single_release,
667 }; 667 };
668 668
669 static int __mem_open(struct inode *inode, struct file *file, unsigned int mode) 669 static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
670 { 670 {
671 struct task_struct *task = get_proc_task(file_inode(file)); 671 struct task_struct *task = get_proc_task(file_inode(file));
672 struct mm_struct *mm; 672 struct mm_struct *mm;
673 673
674 if (!task) 674 if (!task)
675 return -ESRCH; 675 return -ESRCH;
676 676
677 mm = mm_access(task, mode); 677 mm = mm_access(task, mode);
678 put_task_struct(task); 678 put_task_struct(task);
679 679
680 if (IS_ERR(mm)) 680 if (IS_ERR(mm))
681 return PTR_ERR(mm); 681 return PTR_ERR(mm);
682 682
683 if (mm) { 683 if (mm) {
684 /* ensure this mm_struct can't be freed */ 684 /* ensure this mm_struct can't be freed */
685 atomic_inc(&mm->mm_count); 685 atomic_inc(&mm->mm_count);
686 /* but do not pin its memory */ 686 /* but do not pin its memory */
687 mmput(mm); 687 mmput(mm);
688 } 688 }
689 689
690 file->private_data = mm; 690 file->private_data = mm;
691 691
692 return 0; 692 return 0;
693 } 693 }
694 694
695 static int mem_open(struct inode *inode, struct file *file) 695 static int mem_open(struct inode *inode, struct file *file)
696 { 696 {
697 int ret = __mem_open(inode, file, PTRACE_MODE_ATTACH); 697 int ret = __mem_open(inode, file, PTRACE_MODE_ATTACH);
698 698
699 /* OK to pass negative loff_t, we can catch out-of-range */ 699 /* OK to pass negative loff_t, we can catch out-of-range */
700 file->f_mode |= FMODE_UNSIGNED_OFFSET; 700 file->f_mode |= FMODE_UNSIGNED_OFFSET;
701 701
702 return ret; 702 return ret;
703 } 703 }
704 704
705 static ssize_t mem_rw(struct file *file, char __user *buf, 705 static ssize_t mem_rw(struct file *file, char __user *buf,
706 size_t count, loff_t *ppos, int write) 706 size_t count, loff_t *ppos, int write)
707 { 707 {
708 struct mm_struct *mm = file->private_data; 708 struct mm_struct *mm = file->private_data;
709 unsigned long addr = *ppos; 709 unsigned long addr = *ppos;
710 ssize_t copied; 710 ssize_t copied;
711 char *page; 711 char *page;
712 712
713 if (!mm) 713 if (!mm)
714 return 0; 714 return 0;
715 715
716 page = (char *)__get_free_page(GFP_TEMPORARY); 716 page = (char *)__get_free_page(GFP_TEMPORARY);
717 if (!page) 717 if (!page)
718 return -ENOMEM; 718 return -ENOMEM;
719 719
720 copied = 0; 720 copied = 0;
721 if (!atomic_inc_not_zero(&mm->mm_users)) 721 if (!atomic_inc_not_zero(&mm->mm_users))
722 goto free; 722 goto free;
723 723
724 while (count > 0) { 724 while (count > 0) {
725 int this_len = min_t(int, count, PAGE_SIZE); 725 int this_len = min_t(int, count, PAGE_SIZE);
726 726
727 if (write && copy_from_user(page, buf, this_len)) { 727 if (write && copy_from_user(page, buf, this_len)) {
728 copied = -EFAULT; 728 copied = -EFAULT;
729 break; 729 break;
730 } 730 }
731 731
732 this_len = access_remote_vm(mm, addr, page, this_len, write); 732 this_len = access_remote_vm(mm, addr, page, this_len, write);
733 if (!this_len) { 733 if (!this_len) {
734 if (!copied) 734 if (!copied)
735 copied = -EIO; 735 copied = -EIO;
736 break; 736 break;
737 } 737 }
738 738
739 if (!write && copy_to_user(buf, page, this_len)) { 739 if (!write && copy_to_user(buf, page, this_len)) {
740 copied = -EFAULT; 740 copied = -EFAULT;
741 break; 741 break;
742 } 742 }
743 743
744 buf += this_len; 744 buf += this_len;
745 addr += this_len; 745 addr += this_len;
746 copied += this_len; 746 copied += this_len;
747 count -= this_len; 747 count -= this_len;
748 } 748 }
749 *ppos = addr; 749 *ppos = addr;
750 750
751 mmput(mm); 751 mmput(mm);
752 free: 752 free:
753 free_page((unsigned long) page); 753 free_page((unsigned long) page);
754 return copied; 754 return copied;
755 } 755 }
756 756
757 static ssize_t mem_read(struct file *file, char __user *buf, 757 static ssize_t mem_read(struct file *file, char __user *buf,
758 size_t count, loff_t *ppos) 758 size_t count, loff_t *ppos)
759 { 759 {
760 return mem_rw(file, buf, count, ppos, 0); 760 return mem_rw(file, buf, count, ppos, 0);
761 } 761 }
762 762
763 static ssize_t mem_write(struct file *file, const char __user *buf, 763 static ssize_t mem_write(struct file *file, const char __user *buf,
764 size_t count, loff_t *ppos) 764 size_t count, loff_t *ppos)
765 { 765 {
766 return mem_rw(file, (char __user*)buf, count, ppos, 1); 766 return mem_rw(file, (char __user*)buf, count, ppos, 1);
767 } 767 }
768 768
769 loff_t mem_lseek(struct file *file, loff_t offset, int orig) 769 loff_t mem_lseek(struct file *file, loff_t offset, int orig)
770 { 770 {
771 switch (orig) { 771 switch (orig) {
772 case 0: 772 case 0:
773 file->f_pos = offset; 773 file->f_pos = offset;
774 break; 774 break;
775 case 1: 775 case 1:
776 file->f_pos += offset; 776 file->f_pos += offset;
777 break; 777 break;
778 default: 778 default:
779 return -EINVAL; 779 return -EINVAL;
780 } 780 }
781 force_successful_syscall_return(); 781 force_successful_syscall_return();
782 return file->f_pos; 782 return file->f_pos;
783 } 783 }
784 784
785 static int mem_release(struct inode *inode, struct file *file) 785 static int mem_release(struct inode *inode, struct file *file)
786 { 786 {
787 struct mm_struct *mm = file->private_data; 787 struct mm_struct *mm = file->private_data;
788 if (mm) 788 if (mm)
789 mmdrop(mm); 789 mmdrop(mm);
790 return 0; 790 return 0;
791 } 791 }
792 792
793 static const struct file_operations proc_mem_operations = { 793 static const struct file_operations proc_mem_operations = {
794 .llseek = mem_lseek, 794 .llseek = mem_lseek,
795 .read = mem_read, 795 .read = mem_read,
796 .write = mem_write, 796 .write = mem_write,
797 .open = mem_open, 797 .open = mem_open,
798 .release = mem_release, 798 .release = mem_release,
799 }; 799 };
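
mem_open() gates /proc/<pid>/mem behind PTRACE_MODE_ATTACH, and FMODE_UNSIGNED_OFFSET lets mem_lseek() cover the whole user address range. A hedged userspace sketch (not part of this commit) that reads one word back out of the caller's own address space through this file:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

/* Read sizeof(long) bytes at address 'addr' in process 'pid' through
 * /proc/<pid>/mem.  pread() seeks and reads in one step; the
 * unsigned-offset handling in mem_lseek()/mem_rw() does the rest. */
static int read_word(pid_t pid, uint64_t addr, long *out)
{
        char path[64];
        int fd;
        ssize_t n;

        snprintf(path, sizeof(path), "/proc/%d/mem", (int)pid);
        fd = open(path, O_RDONLY);
        if (fd < 0)
                return -1;
        n = pread(fd, out, sizeof(*out), (off_t)addr);
        close(fd);
        return n == (ssize_t)sizeof(*out) ? 0 : -1;
}

int main(void)
{
        long secret = 0x5eed, copy = 0;

        /* Reading our own mm always passes the mm_access() check. */
        if (read_word(getpid(), (uint64_t)(uintptr_t)&secret, &copy))
                return 1;
        printf("0x%lx\n", copy);        /* prints 0x5eed */
        return 0;
}
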
800 800
801 static int environ_open(struct inode *inode, struct file *file) 801 static int environ_open(struct inode *inode, struct file *file)
802 { 802 {
803 return __mem_open(inode, file, PTRACE_MODE_READ); 803 return __mem_open(inode, file, PTRACE_MODE_READ);
804 } 804 }
805 805
806 static ssize_t environ_read(struct file *file, char __user *buf, 806 static ssize_t environ_read(struct file *file, char __user *buf,
807 size_t count, loff_t *ppos) 807 size_t count, loff_t *ppos)
808 { 808 {
809 char *page; 809 char *page;
810 unsigned long src = *ppos; 810 unsigned long src = *ppos;
811 int ret = 0; 811 int ret = 0;
812 struct mm_struct *mm = file->private_data; 812 struct mm_struct *mm = file->private_data;
813 813
814 if (!mm) 814 if (!mm)
815 return 0; 815 return 0;
816 816
817 page = (char *)__get_free_page(GFP_TEMPORARY); 817 page = (char *)__get_free_page(GFP_TEMPORARY);
818 if (!page) 818 if (!page)
819 return -ENOMEM; 819 return -ENOMEM;
820 820
821 ret = 0; 821 ret = 0;
822 if (!atomic_inc_not_zero(&mm->mm_users)) 822 if (!atomic_inc_not_zero(&mm->mm_users))
823 goto free; 823 goto free;
824 while (count > 0) { 824 while (count > 0) {
825 size_t this_len, max_len; 825 size_t this_len, max_len;
826 int retval; 826 int retval;
827 827
828 if (src >= (mm->env_end - mm->env_start)) 828 if (src >= (mm->env_end - mm->env_start))
829 break; 829 break;
830 830
831 this_len = mm->env_end - (mm->env_start + src); 831 this_len = mm->env_end - (mm->env_start + src);
832 832
833 max_len = min_t(size_t, PAGE_SIZE, count); 833 max_len = min_t(size_t, PAGE_SIZE, count);
834 this_len = min(max_len, this_len); 834 this_len = min(max_len, this_len);
835 835
836 retval = access_remote_vm(mm, (mm->env_start + src), 836 retval = access_remote_vm(mm, (mm->env_start + src),
837 page, this_len, 0); 837 page, this_len, 0);
838 838
839 if (retval <= 0) { 839 if (retval <= 0) {
840 ret = retval; 840 ret = retval;
841 break; 841 break;
842 } 842 }
843 843
844 if (copy_to_user(buf, page, retval)) { 844 if (copy_to_user(buf, page, retval)) {
845 ret = -EFAULT; 845 ret = -EFAULT;
846 break; 846 break;
847 } 847 }
848 848
849 ret += retval; 849 ret += retval;
850 src += retval; 850 src += retval;
851 buf += retval; 851 buf += retval;
852 count -= retval; 852 count -= retval;
853 } 853 }
854 *ppos = src; 854 *ppos = src;
855 mmput(mm); 855 mmput(mm);
856 856
857 free: 857 free:
858 free_page((unsigned long) page); 858 free_page((unsigned long) page);
859 return ret; 859 return ret;
860 } 860 }
861 861
862 static const struct file_operations proc_environ_operations = { 862 static const struct file_operations proc_environ_operations = {
863 .open = environ_open, 863 .open = environ_open,
864 .read = environ_read, 864 .read = environ_read,
865 .llseek = generic_file_llseek, 865 .llseek = generic_file_llseek,
866 .release = mem_release, 866 .release = mem_release,
867 }; 867 };
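
environ_read() copies the raw env_start..env_end range, so /proc/<pid>/environ is a stream of NUL-terminated KEY=value strings with no trailing newline. A minimal reader sketch (not part of this commit; access to another task requires PTRACE_MODE_READ, as enforced by environ_open()):

#include <stdio.h>

/* Print each NUL-separated entry of /proc/<pid>/environ on its own line. */
int main(int argc, char **argv)
{
        char path[64], buf[4096];
        size_t n;
        FILE *f;

        snprintf(path, sizeof(path), "/proc/%s/environ",
                 argc > 1 ? argv[1] : "self");
        f = fopen(path, "r");
        if (!f) {
                perror(path);
                return 1;
        }
        while ((n = fread(buf, 1, sizeof(buf), f)) > 0) {
                for (size_t i = 0; i < n; i++)
                        putchar(buf[i] ? buf[i] : '\n');
        }
        fclose(f);
        return 0;
}
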
868 868
869 static ssize_t oom_adj_read(struct file *file, char __user *buf, size_t count, 869 static ssize_t oom_adj_read(struct file *file, char __user *buf, size_t count,
870 loff_t *ppos) 870 loff_t *ppos)
871 { 871 {
872 struct task_struct *task = get_proc_task(file_inode(file)); 872 struct task_struct *task = get_proc_task(file_inode(file));
873 char buffer[PROC_NUMBUF]; 873 char buffer[PROC_NUMBUF];
874 int oom_adj = OOM_ADJUST_MIN; 874 int oom_adj = OOM_ADJUST_MIN;
875 size_t len; 875 size_t len;
876 unsigned long flags; 876 unsigned long flags;
877 877
878 if (!task) 878 if (!task)
879 return -ESRCH; 879 return -ESRCH;
880 if (lock_task_sighand(task, &flags)) { 880 if (lock_task_sighand(task, &flags)) {
881 if (task->signal->oom_score_adj == OOM_SCORE_ADJ_MAX) 881 if (task->signal->oom_score_adj == OOM_SCORE_ADJ_MAX)
882 oom_adj = OOM_ADJUST_MAX; 882 oom_adj = OOM_ADJUST_MAX;
883 else 883 else
884 oom_adj = (task->signal->oom_score_adj * -OOM_DISABLE) / 884 oom_adj = (task->signal->oom_score_adj * -OOM_DISABLE) /
885 OOM_SCORE_ADJ_MAX; 885 OOM_SCORE_ADJ_MAX;
886 unlock_task_sighand(task, &flags); 886 unlock_task_sighand(task, &flags);
887 } 887 }
888 put_task_struct(task); 888 put_task_struct(task);
889 len = snprintf(buffer, sizeof(buffer), "%d\n", oom_adj); 889 len = snprintf(buffer, sizeof(buffer), "%d\n", oom_adj);
890 return simple_read_from_buffer(buf, count, ppos, buffer, len); 890 return simple_read_from_buffer(buf, count, ppos, buffer, len);
891 } 891 }
892 892
893 static ssize_t oom_adj_write(struct file *file, const char __user *buf, 893 static ssize_t oom_adj_write(struct file *file, const char __user *buf,
894 size_t count, loff_t *ppos) 894 size_t count, loff_t *ppos)
895 { 895 {
896 struct task_struct *task; 896 struct task_struct *task;
897 char buffer[PROC_NUMBUF]; 897 char buffer[PROC_NUMBUF];
898 int oom_adj; 898 int oom_adj;
899 unsigned long flags; 899 unsigned long flags;
900 int err; 900 int err;
901 901
902 memset(buffer, 0, sizeof(buffer)); 902 memset(buffer, 0, sizeof(buffer));
903 if (count > sizeof(buffer) - 1) 903 if (count > sizeof(buffer) - 1)
904 count = sizeof(buffer) - 1; 904 count = sizeof(buffer) - 1;
905 if (copy_from_user(buffer, buf, count)) { 905 if (copy_from_user(buffer, buf, count)) {
906 err = -EFAULT; 906 err = -EFAULT;
907 goto out; 907 goto out;
908 } 908 }
909 909
910 err = kstrtoint(strstrip(buffer), 0, &oom_adj); 910 err = kstrtoint(strstrip(buffer), 0, &oom_adj);
911 if (err) 911 if (err)
912 goto out; 912 goto out;
913 if ((oom_adj < OOM_ADJUST_MIN || oom_adj > OOM_ADJUST_MAX) && 913 if ((oom_adj < OOM_ADJUST_MIN || oom_adj > OOM_ADJUST_MAX) &&
914 oom_adj != OOM_DISABLE) { 914 oom_adj != OOM_DISABLE) {
915 err = -EINVAL; 915 err = -EINVAL;
916 goto out; 916 goto out;
917 } 917 }
918 918
919 task = get_proc_task(file_inode(file)); 919 task = get_proc_task(file_inode(file));
920 if (!task) { 920 if (!task) {
921 err = -ESRCH; 921 err = -ESRCH;
922 goto out; 922 goto out;
923 } 923 }
924 924
925 task_lock(task); 925 task_lock(task);
926 if (!task->mm) { 926 if (!task->mm) {
927 err = -EINVAL; 927 err = -EINVAL;
928 goto err_task_lock; 928 goto err_task_lock;
929 } 929 }
930 930
931 if (!lock_task_sighand(task, &flags)) { 931 if (!lock_task_sighand(task, &flags)) {
932 err = -ESRCH; 932 err = -ESRCH;
933 goto err_task_lock; 933 goto err_task_lock;
934 } 934 }
935 935
936 /* 936 /*
937 * Scale /proc/pid/oom_score_adj appropriately ensuring that a maximum 937 * Scale /proc/pid/oom_score_adj appropriately ensuring that a maximum
938 * value is always attainable. 938 * value is always attainable.
939 */ 939 */
940 if (oom_adj == OOM_ADJUST_MAX) 940 if (oom_adj == OOM_ADJUST_MAX)
941 oom_adj = OOM_SCORE_ADJ_MAX; 941 oom_adj = OOM_SCORE_ADJ_MAX;
942 else 942 else
943 oom_adj = (oom_adj * OOM_SCORE_ADJ_MAX) / -OOM_DISABLE; 943 oom_adj = (oom_adj * OOM_SCORE_ADJ_MAX) / -OOM_DISABLE;
944 944
945 if (oom_adj < task->signal->oom_score_adj && 945 if (oom_adj < task->signal->oom_score_adj &&
946 !capable(CAP_SYS_RESOURCE)) { 946 !capable(CAP_SYS_RESOURCE)) {
947 err = -EACCES; 947 err = -EACCES;
948 goto err_sighand; 948 goto err_sighand;
949 } 949 }
950 950
951 /* 951 /*
952 * /proc/pid/oom_adj is provided for legacy purposes, ask users to use 952 * /proc/pid/oom_adj is provided for legacy purposes, ask users to use
953 * /proc/pid/oom_score_adj instead. 953 * /proc/pid/oom_score_adj instead.
954 */ 954 */
955 printk_once(KERN_WARNING "%s (%d): /proc/%d/oom_adj is deprecated, please use /proc/%d/oom_score_adj instead.\n", 955 printk_once(KERN_WARNING "%s (%d): /proc/%d/oom_adj is deprecated, please use /proc/%d/oom_score_adj instead.\n",
956 current->comm, task_pid_nr(current), task_pid_nr(task), 956 current->comm, task_pid_nr(current), task_pid_nr(task),
957 task_pid_nr(task)); 957 task_pid_nr(task));
958 958
959 task->signal->oom_score_adj = oom_adj; 959 task->signal->oom_score_adj = oom_adj;
960 trace_oom_score_adj_update(task); 960 trace_oom_score_adj_update(task);
961 err_sighand: 961 err_sighand:
962 unlock_task_sighand(task, &flags); 962 unlock_task_sighand(task, &flags);
963 err_task_lock: 963 err_task_lock:
964 task_unlock(task); 964 task_unlock(task);
965 put_task_struct(task); 965 put_task_struct(task);
966 out: 966 out:
967 return err < 0 ? err : count; 967 return err < 0 ? err : count;
968 } 968 }
969 969
970 static const struct file_operations proc_oom_adj_operations = { 970 static const struct file_operations proc_oom_adj_operations = {
971 .read = oom_adj_read, 971 .read = oom_adj_read,
972 .write = oom_adj_write, 972 .write = oom_adj_write,
973 .llseek = generic_file_llseek, 973 .llseek = generic_file_llseek,
974 }; 974 };
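
oom_adj_write() stores the legacy value only as oom_score_adj, scaling [-16, 15] plus OOM_DISABLE (-17) onto [-1000, 1000] via oom_adj * OOM_SCORE_ADJ_MAX / -OOM_DISABLE, and oom_adj_read() applies the inverse. A standalone sketch of that arithmetic (constants mirrored from the kernel's oom.h; not part of this commit), which also shows that the round trip is lossy:

#include <stdio.h>

#define OOM_DISABLE        (-17)
#define OOM_ADJUST_MAX       15
#define OOM_SCORE_ADJ_MAX  1000

/* Forward mapping used by oom_adj_write(). */
static int adj_to_score_adj(int oom_adj)
{
        if (oom_adj == OOM_ADJUST_MAX)
                return OOM_SCORE_ADJ_MAX;
        return oom_adj * OOM_SCORE_ADJ_MAX / -OOM_DISABLE;
}

/* Inverse mapping used by oom_adj_read(). */
static int score_adj_to_adj(int oom_score_adj)
{
        if (oom_score_adj == OOM_SCORE_ADJ_MAX)
                return OOM_ADJUST_MAX;
        return oom_score_adj * -OOM_DISABLE / OOM_SCORE_ADJ_MAX;
}

int main(void)
{
        /* 15 -> 1000, -17 -> -1000, 8 -> 470 (integer division) */
        printf("%d %d %d\n", adj_to_score_adj(15),
               adj_to_score_adj(OOM_DISABLE), adj_to_score_adj(8));
        printf("%d\n", score_adj_to_adj(470)); /* prints 7, not 8: lossy */
        return 0;
}
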
975 975
976 static ssize_t oom_score_adj_read(struct file *file, char __user *buf, 976 static ssize_t oom_score_adj_read(struct file *file, char __user *buf,
977 size_t count, loff_t *ppos) 977 size_t count, loff_t *ppos)
978 { 978 {
979 struct task_struct *task = get_proc_task(file_inode(file)); 979 struct task_struct *task = get_proc_task(file_inode(file));
980 char buffer[PROC_NUMBUF]; 980 char buffer[PROC_NUMBUF];
981 short oom_score_adj = OOM_SCORE_ADJ_MIN; 981 short oom_score_adj = OOM_SCORE_ADJ_MIN;
982 unsigned long flags; 982 unsigned long flags;
983 size_t len; 983 size_t len;
984 984
985 if (!task) 985 if (!task)
986 return -ESRCH; 986 return -ESRCH;
987 if (lock_task_sighand(task, &flags)) { 987 if (lock_task_sighand(task, &flags)) {
988 oom_score_adj = task->signal->oom_score_adj; 988 oom_score_adj = task->signal->oom_score_adj;
989 unlock_task_sighand(task, &flags); 989 unlock_task_sighand(task, &flags);
990 } 990 }
991 put_task_struct(task); 991 put_task_struct(task);
992 len = snprintf(buffer, sizeof(buffer), "%hd\n", oom_score_adj); 992 len = snprintf(buffer, sizeof(buffer), "%hd\n", oom_score_adj);
993 return simple_read_from_buffer(buf, count, ppos, buffer, len); 993 return simple_read_from_buffer(buf, count, ppos, buffer, len);
994 } 994 }
995 995
996 static ssize_t oom_score_adj_write(struct file *file, const char __user *buf, 996 static ssize_t oom_score_adj_write(struct file *file, const char __user *buf,
997 size_t count, loff_t *ppos) 997 size_t count, loff_t *ppos)
998 { 998 {
999 struct task_struct *task; 999 struct task_struct *task;
1000 char buffer[PROC_NUMBUF]; 1000 char buffer[PROC_NUMBUF];
1001 unsigned long flags; 1001 unsigned long flags;
1002 int oom_score_adj; 1002 int oom_score_adj;
1003 int err; 1003 int err;
1004 1004
1005 memset(buffer, 0, sizeof(buffer)); 1005 memset(buffer, 0, sizeof(buffer));
1006 if (count > sizeof(buffer) - 1) 1006 if (count > sizeof(buffer) - 1)
1007 count = sizeof(buffer) - 1; 1007 count = sizeof(buffer) - 1;
1008 if (copy_from_user(buffer, buf, count)) { 1008 if (copy_from_user(buffer, buf, count)) {
1009 err = -EFAULT; 1009 err = -EFAULT;
1010 goto out; 1010 goto out;
1011 } 1011 }
1012 1012
1013 err = kstrtoint(strstrip(buffer), 0, &oom_score_adj); 1013 err = kstrtoint(strstrip(buffer), 0, &oom_score_adj);
1014 if (err) 1014 if (err)
1015 goto out; 1015 goto out;
1016 if (oom_score_adj < OOM_SCORE_ADJ_MIN || 1016 if (oom_score_adj < OOM_SCORE_ADJ_MIN ||
1017 oom_score_adj > OOM_SCORE_ADJ_MAX) { 1017 oom_score_adj > OOM_SCORE_ADJ_MAX) {
1018 err = -EINVAL; 1018 err = -EINVAL;
1019 goto out; 1019 goto out;
1020 } 1020 }
1021 1021
1022 task = get_proc_task(file_inode(file)); 1022 task = get_proc_task(file_inode(file));
1023 if (!task) { 1023 if (!task) {
1024 err = -ESRCH; 1024 err = -ESRCH;
1025 goto out; 1025 goto out;
1026 } 1026 }
1027 1027
1028 task_lock(task); 1028 task_lock(task);
1029 if (!task->mm) { 1029 if (!task->mm) {
1030 err = -EINVAL; 1030 err = -EINVAL;
1031 goto err_task_lock; 1031 goto err_task_lock;
1032 } 1032 }
1033 1033
1034 if (!lock_task_sighand(task, &flags)) { 1034 if (!lock_task_sighand(task, &flags)) {
1035 err = -ESRCH; 1035 err = -ESRCH;
1036 goto err_task_lock; 1036 goto err_task_lock;
1037 } 1037 }
1038 1038
1039 if ((short)oom_score_adj < task->signal->oom_score_adj_min && 1039 if ((short)oom_score_adj < task->signal->oom_score_adj_min &&
1040 !capable(CAP_SYS_RESOURCE)) { 1040 !capable(CAP_SYS_RESOURCE)) {
1041 err = -EACCES; 1041 err = -EACCES;
1042 goto err_sighand; 1042 goto err_sighand;
1043 } 1043 }
1044 1044
1045 task->signal->oom_score_adj = (short)oom_score_adj; 1045 task->signal->oom_score_adj = (short)oom_score_adj;
1046 if (has_capability_noaudit(current, CAP_SYS_RESOURCE)) 1046 if (has_capability_noaudit(current, CAP_SYS_RESOURCE))
1047 task->signal->oom_score_adj_min = (short)oom_score_adj; 1047 task->signal->oom_score_adj_min = (short)oom_score_adj;
1048 trace_oom_score_adj_update(task); 1048 trace_oom_score_adj_update(task);
1049 1049
1050 err_sighand: 1050 err_sighand:
1051 unlock_task_sighand(task, &flags); 1051 unlock_task_sighand(task, &flags);
1052 err_task_lock: 1052 err_task_lock:
1053 task_unlock(task); 1053 task_unlock(task);
1054 put_task_struct(task); 1054 put_task_struct(task);
1055 out: 1055 out:
1056 return err < 0 ? err : count; 1056 return err < 0 ? err : count;
1057 } 1057 }
1058 1058
1059 static const struct file_operations proc_oom_score_adj_operations = { 1059 static const struct file_operations proc_oom_score_adj_operations = {
1060 .read = oom_score_adj_read, 1060 .read = oom_score_adj_read,
1061 .write = oom_score_adj_write, 1061 .write = oom_score_adj_write,
1062 .llseek = default_llseek, 1062 .llseek = default_llseek,
1063 }; 1063 };
1064 1064
1065 #ifdef CONFIG_AUDITSYSCALL 1065 #ifdef CONFIG_AUDITSYSCALL
1066 #define TMPBUFLEN 21 1066 #define TMPBUFLEN 21
1067 static ssize_t proc_loginuid_read(struct file * file, char __user * buf, 1067 static ssize_t proc_loginuid_read(struct file * file, char __user * buf,
1068 size_t count, loff_t *ppos) 1068 size_t count, loff_t *ppos)
1069 { 1069 {
1070 struct inode * inode = file_inode(file); 1070 struct inode * inode = file_inode(file);
1071 struct task_struct *task = get_proc_task(inode); 1071 struct task_struct *task = get_proc_task(inode);
1072 ssize_t length; 1072 ssize_t length;
1073 char tmpbuf[TMPBUFLEN]; 1073 char tmpbuf[TMPBUFLEN];
1074 1074
1075 if (!task) 1075 if (!task)
1076 return -ESRCH; 1076 return -ESRCH;
1077 length = scnprintf(tmpbuf, TMPBUFLEN, "%u", 1077 length = scnprintf(tmpbuf, TMPBUFLEN, "%u",
1078 from_kuid(file->f_cred->user_ns, 1078 from_kuid(file->f_cred->user_ns,
1079 audit_get_loginuid(task))); 1079 audit_get_loginuid(task)));
1080 put_task_struct(task); 1080 put_task_struct(task);
1081 return simple_read_from_buffer(buf, count, ppos, tmpbuf, length); 1081 return simple_read_from_buffer(buf, count, ppos, tmpbuf, length);
1082 } 1082 }
1083 1083
1084 static ssize_t proc_loginuid_write(struct file * file, const char __user * buf, 1084 static ssize_t proc_loginuid_write(struct file * file, const char __user * buf,
1085 size_t count, loff_t *ppos) 1085 size_t count, loff_t *ppos)
1086 { 1086 {
1087 struct inode * inode = file_inode(file); 1087 struct inode * inode = file_inode(file);
1088 char *page, *tmp; 1088 char *page, *tmp;
1089 ssize_t length; 1089 ssize_t length;
1090 uid_t loginuid; 1090 uid_t loginuid;
1091 kuid_t kloginuid; 1091 kuid_t kloginuid;
1092 1092
1093 rcu_read_lock(); 1093 rcu_read_lock();
1094 if (current != pid_task(proc_pid(inode), PIDTYPE_PID)) { 1094 if (current != pid_task(proc_pid(inode), PIDTYPE_PID)) {
1095 rcu_read_unlock(); 1095 rcu_read_unlock();
1096 return -EPERM; 1096 return -EPERM;
1097 } 1097 }
1098 rcu_read_unlock(); 1098 rcu_read_unlock();
1099 1099
1100 if (count >= PAGE_SIZE) 1100 if (count >= PAGE_SIZE)
1101 count = PAGE_SIZE - 1; 1101 count = PAGE_SIZE - 1;
1102 1102
1103 if (*ppos != 0) { 1103 if (*ppos != 0) {
1104 /* No partial writes. */ 1104 /* No partial writes. */
1105 return -EINVAL; 1105 return -EINVAL;
1106 } 1106 }
1107 page = (char*)__get_free_page(GFP_TEMPORARY); 1107 page = (char*)__get_free_page(GFP_TEMPORARY);
1108 if (!page) 1108 if (!page)
1109 return -ENOMEM; 1109 return -ENOMEM;
1110 length = -EFAULT; 1110 length = -EFAULT;
1111 if (copy_from_user(page, buf, count)) 1111 if (copy_from_user(page, buf, count))
1112 goto out_free_page; 1112 goto out_free_page;
1113 1113
1114 page[count] = '\0'; 1114 page[count] = '\0';
1115 loginuid = simple_strtoul(page, &tmp, 10); 1115 loginuid = simple_strtoul(page, &tmp, 10);
1116 if (tmp == page) { 1116 if (tmp == page) {
1117 length = -EINVAL; 1117 length = -EINVAL;
1118 goto out_free_page; 1118 goto out_free_page;
1119 1119
1120 } 1120 }
1121 kloginuid = make_kuid(file->f_cred->user_ns, loginuid); 1121 kloginuid = make_kuid(file->f_cred->user_ns, loginuid);
1122 if (!uid_valid(kloginuid)) { 1122 if (!uid_valid(kloginuid)) {
1123 length = -EINVAL; 1123 length = -EINVAL;
1124 goto out_free_page; 1124 goto out_free_page;
1125 } 1125 }
1126 1126
1127 length = audit_set_loginuid(kloginuid); 1127 length = audit_set_loginuid(kloginuid);
1128 if (likely(length == 0)) 1128 if (likely(length == 0))
1129 length = count; 1129 length = count;
1130 1130
1131 out_free_page: 1131 out_free_page:
1132 free_page((unsigned long) page); 1132 free_page((unsigned long) page);
1133 return length; 1133 return length;
1134 } 1134 }
1135 1135
1136 static const struct file_operations proc_loginuid_operations = { 1136 static const struct file_operations proc_loginuid_operations = {
1137 .read = proc_loginuid_read, 1137 .read = proc_loginuid_read,
1138 .write = proc_loginuid_write, 1138 .write = proc_loginuid_write,
1139 .llseek = generic_file_llseek, 1139 .llseek = generic_file_llseek,
1140 }; 1140 };
1141 1141
1142 static ssize_t proc_sessionid_read(struct file * file, char __user * buf, 1142 static ssize_t proc_sessionid_read(struct file * file, char __user * buf,
1143 size_t count, loff_t *ppos) 1143 size_t count, loff_t *ppos)
1144 { 1144 {
1145 struct inode * inode = file_inode(file); 1145 struct inode * inode = file_inode(file);
1146 struct task_struct *task = get_proc_task(inode); 1146 struct task_struct *task = get_proc_task(inode);
1147 ssize_t length; 1147 ssize_t length;
1148 char tmpbuf[TMPBUFLEN]; 1148 char tmpbuf[TMPBUFLEN];
1149 1149
1150 if (!task) 1150 if (!task)
1151 return -ESRCH; 1151 return -ESRCH;
1152 length = scnprintf(tmpbuf, TMPBUFLEN, "%u", 1152 length = scnprintf(tmpbuf, TMPBUFLEN, "%u",
1153 audit_get_sessionid(task)); 1153 audit_get_sessionid(task));
1154 put_task_struct(task); 1154 put_task_struct(task);
1155 return simple_read_from_buffer(buf, count, ppos, tmpbuf, length); 1155 return simple_read_from_buffer(buf, count, ppos, tmpbuf, length);
1156 } 1156 }
1157 1157
1158 static const struct file_operations proc_sessionid_operations = { 1158 static const struct file_operations proc_sessionid_operations = {
1159 .read = proc_sessionid_read, 1159 .read = proc_sessionid_read,
1160 .llseek = generic_file_llseek, 1160 .llseek = generic_file_llseek,
1161 }; 1161 };
1162 #endif 1162 #endif
1163 1163
1164 #ifdef CONFIG_FAULT_INJECTION 1164 #ifdef CONFIG_FAULT_INJECTION
1165 static ssize_t proc_fault_inject_read(struct file * file, char __user * buf, 1165 static ssize_t proc_fault_inject_read(struct file * file, char __user * buf,
1166 size_t count, loff_t *ppos) 1166 size_t count, loff_t *ppos)
1167 { 1167 {
1168 struct task_struct *task = get_proc_task(file_inode(file)); 1168 struct task_struct *task = get_proc_task(file_inode(file));
1169 char buffer[PROC_NUMBUF]; 1169 char buffer[PROC_NUMBUF];
1170 size_t len; 1170 size_t len;
1171 int make_it_fail; 1171 int make_it_fail;
1172 1172
1173 if (!task) 1173 if (!task)
1174 return -ESRCH; 1174 return -ESRCH;
1175 make_it_fail = task->make_it_fail; 1175 make_it_fail = task->make_it_fail;
1176 put_task_struct(task); 1176 put_task_struct(task);
1177 1177
1178 len = snprintf(buffer, sizeof(buffer), "%i\n", make_it_fail); 1178 len = snprintf(buffer, sizeof(buffer), "%i\n", make_it_fail);
1179 1179
1180 return simple_read_from_buffer(buf, count, ppos, buffer, len); 1180 return simple_read_from_buffer(buf, count, ppos, buffer, len);
1181 } 1181 }
1182 1182
1183 static ssize_t proc_fault_inject_write(struct file * file, 1183 static ssize_t proc_fault_inject_write(struct file * file,
1184 const char __user * buf, size_t count, loff_t *ppos) 1184 const char __user * buf, size_t count, loff_t *ppos)
1185 { 1185 {
1186 struct task_struct *task; 1186 struct task_struct *task;
1187 char buffer[PROC_NUMBUF], *end; 1187 char buffer[PROC_NUMBUF], *end;
1188 int make_it_fail; 1188 int make_it_fail;
1189 1189
1190 if (!capable(CAP_SYS_RESOURCE)) 1190 if (!capable(CAP_SYS_RESOURCE))
1191 return -EPERM; 1191 return -EPERM;
1192 memset(buffer, 0, sizeof(buffer)); 1192 memset(buffer, 0, sizeof(buffer));
1193 if (count > sizeof(buffer) - 1) 1193 if (count > sizeof(buffer) - 1)
1194 count = sizeof(buffer) - 1; 1194 count = sizeof(buffer) - 1;
1195 if (copy_from_user(buffer, buf, count)) 1195 if (copy_from_user(buffer, buf, count))
1196 return -EFAULT; 1196 return -EFAULT;
1197 make_it_fail = simple_strtol(strstrip(buffer), &end, 0); 1197 make_it_fail = simple_strtol(strstrip(buffer), &end, 0);
1198 if (*end) 1198 if (*end)
1199 return -EINVAL; 1199 return -EINVAL;
1200 task = get_proc_task(file_inode(file)); 1200 task = get_proc_task(file_inode(file));
1201 if (!task) 1201 if (!task)
1202 return -ESRCH; 1202 return -ESRCH;
1203 task->make_it_fail = make_it_fail; 1203 task->make_it_fail = make_it_fail;
1204 put_task_struct(task); 1204 put_task_struct(task);
1205 1205
1206 return count; 1206 return count;
1207 } 1207 }
1208 1208
1209 static const struct file_operations proc_fault_inject_operations = { 1209 static const struct file_operations proc_fault_inject_operations = {
1210 .read = proc_fault_inject_read, 1210 .read = proc_fault_inject_read,
1211 .write = proc_fault_inject_write, 1211 .write = proc_fault_inject_write,
1212 .llseek = generic_file_llseek, 1212 .llseek = generic_file_llseek,
1213 }; 1213 };
1214 #endif 1214 #endif
1215 1215
1216 1216
1217 #ifdef CONFIG_SCHED_DEBUG 1217 #ifdef CONFIG_SCHED_DEBUG
1218 /* 1218 /*
1219 * Print out various scheduling related per-task fields: 1219 * Print out various scheduling related per-task fields:
1220 */ 1220 */
1221 static int sched_show(struct seq_file *m, void *v) 1221 static int sched_show(struct seq_file *m, void *v)
1222 { 1222 {
1223 struct inode *inode = m->private; 1223 struct inode *inode = m->private;
1224 struct task_struct *p; 1224 struct task_struct *p;
1225 1225
1226 p = get_proc_task(inode); 1226 p = get_proc_task(inode);
1227 if (!p) 1227 if (!p)
1228 return -ESRCH; 1228 return -ESRCH;
1229 proc_sched_show_task(p, m); 1229 proc_sched_show_task(p, m);
1230 1230
1231 put_task_struct(p); 1231 put_task_struct(p);
1232 1232
1233 return 0; 1233 return 0;
1234 } 1234 }
1235 1235
1236 static ssize_t 1236 static ssize_t
1237 sched_write(struct file *file, const char __user *buf, 1237 sched_write(struct file *file, const char __user *buf,
1238 size_t count, loff_t *offset) 1238 size_t count, loff_t *offset)
1239 { 1239 {
1240 struct inode *inode = file_inode(file); 1240 struct inode *inode = file_inode(file);
1241 struct task_struct *p; 1241 struct task_struct *p;
1242 1242
1243 p = get_proc_task(inode); 1243 p = get_proc_task(inode);
1244 if (!p) 1244 if (!p)
1245 return -ESRCH; 1245 return -ESRCH;
1246 proc_sched_set_task(p); 1246 proc_sched_set_task(p);
1247 1247
1248 put_task_struct(p); 1248 put_task_struct(p);
1249 1249
1250 return count; 1250 return count;
1251 } 1251 }
1252 1252
1253 static int sched_open(struct inode *inode, struct file *filp) 1253 static int sched_open(struct inode *inode, struct file *filp)
1254 { 1254 {
1255 return single_open(filp, sched_show, inode); 1255 return single_open(filp, sched_show, inode);
1256 } 1256 }
1257 1257
1258 static const struct file_operations proc_pid_sched_operations = { 1258 static const struct file_operations proc_pid_sched_operations = {
1259 .open = sched_open, 1259 .open = sched_open,
1260 .read = seq_read, 1260 .read = seq_read,
1261 .write = sched_write, 1261 .write = sched_write,
1262 .llseek = seq_lseek, 1262 .llseek = seq_lseek,
1263 .release = single_release, 1263 .release = single_release,
1264 }; 1264 };
1265 1265
1266 #endif 1266 #endif
1267 1267
1268 #ifdef CONFIG_SCHED_AUTOGROUP 1268 #ifdef CONFIG_SCHED_AUTOGROUP
1269 /* 1269 /*
1270 * Print out autogroup related information: 1270 * Print out autogroup related information:
1271 */ 1271 */
1272 static int sched_autogroup_show(struct seq_file *m, void *v) 1272 static int sched_autogroup_show(struct seq_file *m, void *v)
1273 { 1273 {
1274 struct inode *inode = m->private; 1274 struct inode *inode = m->private;
1275 struct task_struct *p; 1275 struct task_struct *p;
1276 1276
1277 p = get_proc_task(inode); 1277 p = get_proc_task(inode);
1278 if (!p) 1278 if (!p)
1279 return -ESRCH; 1279 return -ESRCH;
1280 proc_sched_autogroup_show_task(p, m); 1280 proc_sched_autogroup_show_task(p, m);
1281 1281
1282 put_task_struct(p); 1282 put_task_struct(p);
1283 1283
1284 return 0; 1284 return 0;
1285 } 1285 }
1286 1286
1287 static ssize_t 1287 static ssize_t
1288 sched_autogroup_write(struct file *file, const char __user *buf, 1288 sched_autogroup_write(struct file *file, const char __user *buf,
1289 size_t count, loff_t *offset) 1289 size_t count, loff_t *offset)
1290 { 1290 {
1291 struct inode *inode = file_inode(file); 1291 struct inode *inode = file_inode(file);
1292 struct task_struct *p; 1292 struct task_struct *p;
1293 char buffer[PROC_NUMBUF]; 1293 char buffer[PROC_NUMBUF];
1294 int nice; 1294 int nice;
1295 int err; 1295 int err;
1296 1296
1297 memset(buffer, 0, sizeof(buffer)); 1297 memset(buffer, 0, sizeof(buffer));
1298 if (count > sizeof(buffer) - 1) 1298 if (count > sizeof(buffer) - 1)
1299 count = sizeof(buffer) - 1; 1299 count = sizeof(buffer) - 1;
1300 if (copy_from_user(buffer, buf, count)) 1300 if (copy_from_user(buffer, buf, count))
1301 return -EFAULT; 1301 return -EFAULT;
1302 1302
1303 err = kstrtoint(strstrip(buffer), 0, &nice); 1303 err = kstrtoint(strstrip(buffer), 0, &nice);
1304 if (err < 0) 1304 if (err < 0)
1305 return err; 1305 return err;
1306 1306
1307 p = get_proc_task(inode); 1307 p = get_proc_task(inode);
1308 if (!p) 1308 if (!p)
1309 return -ESRCH; 1309 return -ESRCH;
1310 1310
1311 err = proc_sched_autogroup_set_nice(p, nice); 1311 err = proc_sched_autogroup_set_nice(p, nice);
1312 if (err) 1312 if (err)
1313 count = err; 1313 count = err;
1314 1314
1315 put_task_struct(p); 1315 put_task_struct(p);
1316 1316
1317 return count; 1317 return count;
1318 } 1318 }
1319 1319
1320 static int sched_autogroup_open(struct inode *inode, struct file *filp) 1320 static int sched_autogroup_open(struct inode *inode, struct file *filp)
1321 { 1321 {
1322 int ret; 1322 int ret;
1323 1323
1324 ret = single_open(filp, sched_autogroup_show, NULL); 1324 ret = single_open(filp, sched_autogroup_show, NULL);
1325 if (!ret) { 1325 if (!ret) {
1326 struct seq_file *m = filp->private_data; 1326 struct seq_file *m = filp->private_data;
1327 1327
1328 m->private = inode; 1328 m->private = inode;
1329 } 1329 }
1330 return ret; 1330 return ret;
1331 } 1331 }
1332 1332
1333 static const struct file_operations proc_pid_sched_autogroup_operations = { 1333 static const struct file_operations proc_pid_sched_autogroup_operations = {
1334 .open = sched_autogroup_open, 1334 .open = sched_autogroup_open,
1335 .read = seq_read, 1335 .read = seq_read,
1336 .write = sched_autogroup_write, 1336 .write = sched_autogroup_write,
1337 .llseek = seq_lseek, 1337 .llseek = seq_lseek,
1338 .release = single_release, 1338 .release = single_release,
1339 }; 1339 };
1340 1340
1341 #endif /* CONFIG_SCHED_AUTOGROUP */ 1341 #endif /* CONFIG_SCHED_AUTOGROUP */
1342 1342
1343 static ssize_t comm_write(struct file *file, const char __user *buf, 1343 static ssize_t comm_write(struct file *file, const char __user *buf,
1344 size_t count, loff_t *offset) 1344 size_t count, loff_t *offset)
1345 { 1345 {
1346 struct inode *inode = file_inode(file); 1346 struct inode *inode = file_inode(file);
1347 struct task_struct *p; 1347 struct task_struct *p;
1348 char buffer[TASK_COMM_LEN]; 1348 char buffer[TASK_COMM_LEN];
1349 1349
1350 memset(buffer, 0, sizeof(buffer)); 1350 memset(buffer, 0, sizeof(buffer));
1351 if (count > sizeof(buffer) - 1) 1351 if (count > sizeof(buffer) - 1)
1352 count = sizeof(buffer) - 1; 1352 count = sizeof(buffer) - 1;
1353 if (copy_from_user(buffer, buf, count)) 1353 if (copy_from_user(buffer, buf, count))
1354 return -EFAULT; 1354 return -EFAULT;
1355 1355
1356 p = get_proc_task(inode); 1356 p = get_proc_task(inode);
1357 if (!p) 1357 if (!p)
1358 return -ESRCH; 1358 return -ESRCH;
1359 1359
1360 if (same_thread_group(current, p)) 1360 if (same_thread_group(current, p))
1361 set_task_comm(p, buffer); 1361 set_task_comm(p, buffer);
1362 else 1362 else
1363 count = -EINVAL; 1363 count = -EINVAL;
1364 1364
1365 put_task_struct(p); 1365 put_task_struct(p);
1366 1366
1367 return count; 1367 return count;
1368 } 1368 }
1369 1369
1370 static int comm_show(struct seq_file *m, void *v) 1370 static int comm_show(struct seq_file *m, void *v)
1371 { 1371 {
1372 struct inode *inode = m->private; 1372 struct inode *inode = m->private;
1373 struct task_struct *p; 1373 struct task_struct *p;
1374 1374
1375 p = get_proc_task(inode); 1375 p = get_proc_task(inode);
1376 if (!p) 1376 if (!p)
1377 return -ESRCH; 1377 return -ESRCH;
1378 1378
1379 task_lock(p); 1379 task_lock(p);
1380 seq_printf(m, "%s\n", p->comm); 1380 seq_printf(m, "%s\n", p->comm);
1381 task_unlock(p); 1381 task_unlock(p);
1382 1382
1383 put_task_struct(p); 1383 put_task_struct(p);
1384 1384
1385 return 0; 1385 return 0;
1386 } 1386 }
1387 1387
1388 static int comm_open(struct inode *inode, struct file *filp) 1388 static int comm_open(struct inode *inode, struct file *filp)
1389 { 1389 {
1390 return single_open(filp, comm_show, inode); 1390 return single_open(filp, comm_show, inode);
1391 } 1391 }
1392 1392
1393 static const struct file_operations proc_pid_set_comm_operations = { 1393 static const struct file_operations proc_pid_set_comm_operations = {
1394 .open = comm_open, 1394 .open = comm_open,
1395 .read = seq_read, 1395 .read = seq_read,
1396 .write = comm_write, 1396 .write = comm_write,
1397 .llseek = seq_lseek, 1397 .llseek = seq_lseek,
1398 .release = single_release, 1398 .release = single_release,
1399 }; 1399 };
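
comm_write() only lets a task rename members of its own thread group and truncates the name to TASK_COMM_LEN - 1 bytes. A tiny sketch of renaming the calling process through this file (not part of this commit; prctl(PR_SET_NAME) is the more usual route):

#include <stdio.h>

/* Write a new name into /proc/self/comm; the kernel truncates it to
 * TASK_COMM_LEN - 1 characters, exactly as comm_write() above does. */
int main(void)
{
        FILE *f = fopen("/proc/self/comm", "w");

        if (!f) {
                perror("/proc/self/comm");
                return 1;
        }
        fputs("my-worker", f);
        fclose(f);
        return 0;
}
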
1400 1400
1401 static int proc_exe_link(struct dentry *dentry, struct path *exe_path) 1401 static int proc_exe_link(struct dentry *dentry, struct path *exe_path)
1402 { 1402 {
1403 struct task_struct *task; 1403 struct task_struct *task;
1404 struct mm_struct *mm; 1404 struct mm_struct *mm;
1405 struct file *exe_file; 1405 struct file *exe_file;
1406 1406
1407 task = get_proc_task(dentry->d_inode); 1407 task = get_proc_task(dentry->d_inode);
1408 if (!task) 1408 if (!task)
1409 return -ENOENT; 1409 return -ENOENT;
1410 mm = get_task_mm(task); 1410 mm = get_task_mm(task);
1411 put_task_struct(task); 1411 put_task_struct(task);
1412 if (!mm) 1412 if (!mm)
1413 return -ENOENT; 1413 return -ENOENT;
1414 exe_file = get_mm_exe_file(mm); 1414 exe_file = get_mm_exe_file(mm);
1415 mmput(mm); 1415 mmput(mm);
1416 if (exe_file) { 1416 if (exe_file) {
1417 *exe_path = exe_file->f_path; 1417 *exe_path = exe_file->f_path;
1418 path_get(&exe_file->f_path); 1418 path_get(&exe_file->f_path);
1419 fput(exe_file); 1419 fput(exe_file);
1420 return 0; 1420 return 0;
1421 } else 1421 } else
1422 return -ENOENT; 1422 return -ENOENT;
1423 } 1423 }
1424 1424
1425 static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd) 1425 static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
1426 { 1426 {
1427 struct inode *inode = dentry->d_inode; 1427 struct inode *inode = dentry->d_inode;
1428 struct path path; 1428 struct path path;
1429 int error = -EACCES; 1429 int error = -EACCES;
1430 1430
1431 /* Are we allowed to snoop on the tasks file descriptors? */ 1431 /* Are we allowed to snoop on the tasks file descriptors? */
1432 if (!proc_fd_access_allowed(inode)) 1432 if (!proc_fd_access_allowed(inode))
1433 goto out; 1433 goto out;
1434 1434
1435 error = PROC_I(inode)->op.proc_get_link(dentry, &path); 1435 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
1436 if (error) 1436 if (error)
1437 goto out; 1437 goto out;
1438 1438
1439 nd_jump_link(nd, &path); 1439 nd_jump_link(nd, &path);
1440 return NULL; 1440 return NULL;
1441 out: 1441 out:
1442 return ERR_PTR(error); 1442 return ERR_PTR(error);
1443 } 1443 }
1444 1444
1445 static int do_proc_readlink(struct path *path, char __user *buffer, int buflen) 1445 static int do_proc_readlink(struct path *path, char __user *buffer, int buflen)
1446 { 1446 {
1447 char *tmp = (char*)__get_free_page(GFP_TEMPORARY); 1447 char *tmp = (char*)__get_free_page(GFP_TEMPORARY);
1448 char *pathname; 1448 char *pathname;
1449 int len; 1449 int len;
1450 1450
1451 if (!tmp) 1451 if (!tmp)
1452 return -ENOMEM; 1452 return -ENOMEM;
1453 1453
1454 pathname = d_path(path, tmp, PAGE_SIZE); 1454 pathname = d_path(path, tmp, PAGE_SIZE);
1455 len = PTR_ERR(pathname); 1455 len = PTR_ERR(pathname);
1456 if (IS_ERR(pathname)) 1456 if (IS_ERR(pathname))
1457 goto out; 1457 goto out;
1458 len = tmp + PAGE_SIZE - 1 - pathname; 1458 len = tmp + PAGE_SIZE - 1 - pathname;
1459 1459
1460 if (len > buflen) 1460 if (len > buflen)
1461 len = buflen; 1461 len = buflen;
1462 if (copy_to_user(buffer, pathname, len)) 1462 if (copy_to_user(buffer, pathname, len))
1463 len = -EFAULT; 1463 len = -EFAULT;
1464 out: 1464 out:
1465 free_page((unsigned long)tmp); 1465 free_page((unsigned long)tmp);
1466 return len; 1466 return len;
1467 } 1467 }
1468 1468
1469 static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int buflen) 1469 static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int buflen)
1470 { 1470 {
1471 int error = -EACCES; 1471 int error = -EACCES;
1472 struct inode *inode = dentry->d_inode; 1472 struct inode *inode = dentry->d_inode;
1473 struct path path; 1473 struct path path;
1474 1474
1475 /* Are we allowed to snoop on the tasks file descriptors? */ 1475 /* Are we allowed to snoop on the tasks file descriptors? */
1476 if (!proc_fd_access_allowed(inode)) 1476 if (!proc_fd_access_allowed(inode))
1477 goto out; 1477 goto out;
1478 1478
1479 error = PROC_I(inode)->op.proc_get_link(dentry, &path); 1479 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
1480 if (error) 1480 if (error)
1481 goto out; 1481 goto out;
1482 1482
1483 error = do_proc_readlink(&path, buffer, buflen); 1483 error = do_proc_readlink(&path, buffer, buflen);
1484 path_put(&path); 1484 path_put(&path);
1485 out: 1485 out:
1486 return error; 1486 return error;
1487 } 1487 }
1488 1488
1489 const struct inode_operations proc_pid_link_inode_operations = { 1489 const struct inode_operations proc_pid_link_inode_operations = {
1490 .readlink = proc_pid_readlink, 1490 .readlink = proc_pid_readlink,
1491 .follow_link = proc_pid_follow_link, 1491 .follow_link = proc_pid_follow_link,
1492 .setattr = proc_setattr, 1492 .setattr = proc_setattr,
1493 }; 1493 };
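
proc_pid_readlink() resolves links such as /proc/<pid>/exe through proc_get_link() and, as with readlink(2) generally, copies the d_path() result back without a trailing NUL. A short sketch resolving the caller's own executable (not part of this commit):

#include <stdio.h>
#include <unistd.h>

int main(void)
{
        char buf[4096];
        /* readlink() does not NUL-terminate, mirroring do_proc_readlink(). */
        ssize_t len = readlink("/proc/self/exe", buf, sizeof(buf) - 1);

        if (len < 0) {
                perror("readlink");
                return 1;
        }
        buf[len] = '\0';
        printf("%s\n", buf);
        return 0;
}
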
1494 1494
1495 1495
1496 /* building an inode */ 1496 /* building an inode */
1497 1497
1498 struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *task) 1498 struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *task)
1499 { 1499 {
1500 struct inode * inode; 1500 struct inode * inode;
1501 struct proc_inode *ei; 1501 struct proc_inode *ei;
1502 const struct cred *cred; 1502 const struct cred *cred;
1503 1503
1504 /* We need a new inode */ 1504 /* We need a new inode */
1505 1505
1506 inode = new_inode(sb); 1506 inode = new_inode(sb);
1507 if (!inode) 1507 if (!inode)
1508 goto out; 1508 goto out;
1509 1509
1510 /* Common stuff */ 1510 /* Common stuff */
1511 ei = PROC_I(inode); 1511 ei = PROC_I(inode);
1512 inode->i_ino = get_next_ino(); 1512 inode->i_ino = get_next_ino();
1513 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; 1513 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
1514 inode->i_op = &proc_def_inode_operations; 1514 inode->i_op = &proc_def_inode_operations;
1515 1515
1516 /* 1516 /*
1517 * grab the reference to task. 1517 * grab the reference to task.
1518 */ 1518 */
1519 ei->pid = get_task_pid(task, PIDTYPE_PID); 1519 ei->pid = get_task_pid(task, PIDTYPE_PID);
1520 if (!ei->pid) 1520 if (!ei->pid)
1521 goto out_unlock; 1521 goto out_unlock;
1522 1522
1523 if (task_dumpable(task)) { 1523 if (task_dumpable(task)) {
1524 rcu_read_lock(); 1524 rcu_read_lock();
1525 cred = __task_cred(task); 1525 cred = __task_cred(task);
1526 inode->i_uid = cred->euid; 1526 inode->i_uid = cred->euid;
1527 inode->i_gid = cred->egid; 1527 inode->i_gid = cred->egid;
1528 rcu_read_unlock(); 1528 rcu_read_unlock();
1529 } 1529 }
1530 security_task_to_inode(task, inode); 1530 security_task_to_inode(task, inode);
1531 1531
1532 out: 1532 out:
1533 return inode; 1533 return inode;
1534 1534
1535 out_unlock: 1535 out_unlock:
1536 iput(inode); 1536 iput(inode);
1537 return NULL; 1537 return NULL;
1538 } 1538 }
1539 1539
1540 int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) 1540 int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
1541 { 1541 {
1542 struct inode *inode = dentry->d_inode; 1542 struct inode *inode = dentry->d_inode;
1543 struct task_struct *task; 1543 struct task_struct *task;
1544 const struct cred *cred; 1544 const struct cred *cred;
1545 struct pid_namespace *pid = dentry->d_sb->s_fs_info; 1545 struct pid_namespace *pid = dentry->d_sb->s_fs_info;
1546 1546
1547 generic_fillattr(inode, stat); 1547 generic_fillattr(inode, stat);
1548 1548
1549 rcu_read_lock(); 1549 rcu_read_lock();
1550 stat->uid = GLOBAL_ROOT_UID; 1550 stat->uid = GLOBAL_ROOT_UID;
1551 stat->gid = GLOBAL_ROOT_GID; 1551 stat->gid = GLOBAL_ROOT_GID;
1552 task = pid_task(proc_pid(inode), PIDTYPE_PID); 1552 task = pid_task(proc_pid(inode), PIDTYPE_PID);
1553 if (task) { 1553 if (task) {
1554 if (!has_pid_permissions(pid, task, 2)) { 1554 if (!has_pid_permissions(pid, task, 2)) {
1555 rcu_read_unlock(); 1555 rcu_read_unlock();
1556 /* 1556 /*
1557 * This doesn't prevent learning whether PID exists, 1557 * This doesn't prevent learning whether PID exists,
1558 * it only makes getattr() consistent with readdir(). 1558 * it only makes getattr() consistent with readdir().
1559 */ 1559 */
1560 return -ENOENT; 1560 return -ENOENT;
1561 } 1561 }
1562 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) || 1562 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
1563 task_dumpable(task)) { 1563 task_dumpable(task)) {
1564 cred = __task_cred(task); 1564 cred = __task_cred(task);
1565 stat->uid = cred->euid; 1565 stat->uid = cred->euid;
1566 stat->gid = cred->egid; 1566 stat->gid = cred->egid;
1567 } 1567 }
1568 } 1568 }
1569 rcu_read_unlock(); 1569 rcu_read_unlock();
1570 return 0; 1570 return 0;
1571 } 1571 }
1572 1572
1573 /* dentry stuff */ 1573 /* dentry stuff */
1574 1574
1575 /* 1575 /*
1576 * Exceptional case: normally we are not allowed to unhash a busy 1576 * Exceptional case: normally we are not allowed to unhash a busy
1577 * directory. In this case, however, we can do it - no aliasing problems 1577 * directory. In this case, however, we can do it - no aliasing problems
1578 * due to the way we treat inodes. 1578 * due to the way we treat inodes.
1579 * 1579 *
1580 * Rewrite the inode's ownerships here because the owning task may have 1580 * Rewrite the inode's ownerships here because the owning task may have
1581 * performed a setuid(), etc. 1581 * performed a setuid(), etc.
1582 * 1582 *
1583 * Before the /proc/pid/status file was created the only way to read 1583 * Before the /proc/pid/status file was created the only way to read
1584 * the effective uid of a process was to stat /proc/pid. Reading 1584 * the effective uid of a process was to stat /proc/pid. Reading
1585 * /proc/pid/status is slow enough that procps and other packages 1585 * /proc/pid/status is slow enough that procps and other packages
1586 * kept stating /proc/pid. To keep the rules in /proc simple I have 1586 * kept stating /proc/pid. To keep the rules in /proc simple I have
1587 * made this apply to all per process world readable and executable 1587 * made this apply to all per process world readable and executable
1588 * directories. 1588 * directories.
1589 */ 1589 */
1590 int pid_revalidate(struct dentry *dentry, unsigned int flags) 1590 int pid_revalidate(struct dentry *dentry, unsigned int flags)
1591 { 1591 {
1592 struct inode *inode; 1592 struct inode *inode;
1593 struct task_struct *task; 1593 struct task_struct *task;
1594 const struct cred *cred; 1594 const struct cred *cred;
1595 1595
1596 if (flags & LOOKUP_RCU) 1596 if (flags & LOOKUP_RCU)
1597 return -ECHILD; 1597 return -ECHILD;
1598 1598
1599 inode = dentry->d_inode; 1599 inode = dentry->d_inode;
1600 task = get_proc_task(inode); 1600 task = get_proc_task(inode);
1601 1601
1602 if (task) { 1602 if (task) {
1603 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) || 1603 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
1604 task_dumpable(task)) { 1604 task_dumpable(task)) {
1605 rcu_read_lock(); 1605 rcu_read_lock();
1606 cred = __task_cred(task); 1606 cred = __task_cred(task);
1607 inode->i_uid = cred->euid; 1607 inode->i_uid = cred->euid;
1608 inode->i_gid = cred->egid; 1608 inode->i_gid = cred->egid;
1609 rcu_read_unlock(); 1609 rcu_read_unlock();
1610 } else { 1610 } else {
1611 inode->i_uid = GLOBAL_ROOT_UID; 1611 inode->i_uid = GLOBAL_ROOT_UID;
1612 inode->i_gid = GLOBAL_ROOT_GID; 1612 inode->i_gid = GLOBAL_ROOT_GID;
1613 } 1613 }
1614 inode->i_mode &= ~(S_ISUID | S_ISGID); 1614 inode->i_mode &= ~(S_ISUID | S_ISGID);
1615 security_task_to_inode(task, inode); 1615 security_task_to_inode(task, inode);
1616 put_task_struct(task); 1616 put_task_struct(task);
1617 return 1; 1617 return 1;
1618 } 1618 }
1619 d_drop(dentry); 1619 d_drop(dentry);
1620 return 0; 1620 return 0;
1621 } 1621 }
1622 1622
1623 const struct dentry_operations pid_dentry_operations = 1623 const struct dentry_operations pid_dentry_operations =
1624 { 1624 {
1625 .d_revalidate = pid_revalidate, 1625 .d_revalidate = pid_revalidate,
1626 .d_delete = pid_delete_dentry, 1626 .d_delete = pid_delete_dentry,
1627 }; 1627 };
1628 1628
1629 /* Lookups */ 1629 /* Lookups */
1630 1630
1631 /* 1631 /*
1632 * Fill a directory entry. 1632 * Fill a directory entry.
1633 * 1633 *
1634 * If possible create the dcache entry and derive our inode number and 1634 * If possible create the dcache entry and derive our inode number and
1635 * file type from dcache entry. 1635 * file type from dcache entry.
1636 * 1636 *
1637 * Since all of the proc inode numbers are dynamically generated, the inode 1637 * Since all of the proc inode numbers are dynamically generated, the inode
1638 * numbers do not exist until the inode is cached. This means creating 1638 * numbers do not exist until the inode is cached. This means creating
1639 * the dcache entry in readdir is necessary to keep the inode numbers 1639 * the dcache entry in readdir is necessary to keep the inode numbers
1640 * reported by readdir in sync with the inode numbers reported 1640 * reported by readdir in sync with the inode numbers reported
1641 * by stat. 1641 * by stat.
1642 */ 1642 */
1643 int proc_fill_cache(struct file *filp, void *dirent, filldir_t filldir, 1643 int proc_fill_cache(struct file *filp, void *dirent, filldir_t filldir,
1644 const char *name, int len, 1644 const char *name, int len,
1645 instantiate_t instantiate, struct task_struct *task, const void *ptr) 1645 instantiate_t instantiate, struct task_struct *task, const void *ptr)
1646 { 1646 {
1647 struct dentry *child, *dir = filp->f_path.dentry; 1647 struct dentry *child, *dir = filp->f_path.dentry;
1648 struct inode *inode; 1648 struct inode *inode;
1649 struct qstr qname; 1649 struct qstr qname;
1650 ino_t ino = 0; 1650 ino_t ino = 0;
1651 unsigned type = DT_UNKNOWN; 1651 unsigned type = DT_UNKNOWN;
1652 1652
1653 qname.name = name; 1653 qname.name = name;
1654 qname.len = len; 1654 qname.len = len;
1655 qname.hash = full_name_hash(name, len); 1655 qname.hash = full_name_hash(name, len);
1656 1656
1657 child = d_lookup(dir, &qname); 1657 child = d_lookup(dir, &qname);
1658 if (!child) { 1658 if (!child) {
1659 struct dentry *new; 1659 struct dentry *new;
1660 new = d_alloc(dir, &qname); 1660 new = d_alloc(dir, &qname);
1661 if (new) { 1661 if (new) {
1662 child = instantiate(dir->d_inode, new, task, ptr); 1662 child = instantiate(dir->d_inode, new, task, ptr);
1663 if (child) 1663 if (child)
1664 dput(new); 1664 dput(new);
1665 else 1665 else
1666 child = new; 1666 child = new;
1667 } 1667 }
1668 } 1668 }
1669 if (!child || IS_ERR(child) || !child->d_inode) 1669 if (!child || IS_ERR(child) || !child->d_inode)
1670 goto end_instantiate; 1670 goto end_instantiate;
1671 inode = child->d_inode; 1671 inode = child->d_inode;
1672 if (inode) { 1672 if (inode) {
1673 ino = inode->i_ino; 1673 ino = inode->i_ino;
1674 type = inode->i_mode >> 12; 1674 type = inode->i_mode >> 12;
1675 } 1675 }
1676 dput(child); 1676 dput(child);
1677 end_instantiate: 1677 end_instantiate:
1678 if (!ino) 1678 if (!ino)
1679 ino = find_inode_number(dir, &qname); 1679 ino = find_inode_number(dir, &qname);
1680 if (!ino) 1680 if (!ino)
1681 ino = 1; 1681 ino = 1;
1682 return filldir(dirent, name, len, filp->f_pos, ino, type); 1682 return filldir(dirent, name, len, filp->f_pos, ino, type);
1683 } 1683 }
1684 1684
1685 #ifdef CONFIG_CHECKPOINT_RESTORE 1685 #ifdef CONFIG_CHECKPOINT_RESTORE
1686 1686
1687 /* 1687 /*
1688 * dname_to_vma_addr - maps a dentry name into two unsigned longs 1688 * dname_to_vma_addr - maps a dentry name into two unsigned longs
1689 * which represent vma start and end addresses. 1689 * which represent vma start and end addresses.
1690 */ 1690 */
1691 static int dname_to_vma_addr(struct dentry *dentry, 1691 static int dname_to_vma_addr(struct dentry *dentry,
1692 unsigned long *start, unsigned long *end) 1692 unsigned long *start, unsigned long *end)
1693 { 1693 {
1694 if (sscanf(dentry->d_name.name, "%lx-%lx", start, end) != 2) 1694 if (sscanf(dentry->d_name.name, "%lx-%lx", start, end) != 2)
1695 return -EINVAL; 1695 return -EINVAL;
1696 1696
1697 return 0; 1697 return 0;
1698 } 1698 }
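/*
 * Editorial sketch, not part of this diff: intended usage of the helper
 * above, with a hypothetical entry name.  A map_files dentry named
 * "400000-403000" parses to *start == 0x400000 and *end == 0x403000;
 * any name that does not match "%lx-%lx" yields -EINVAL.
 *
 *	unsigned long start, end;
 *
 *	if (!dname_to_vma_addr(dentry, &start, &end))
 *		pr_debug("entry spans %lx-%lx\n", start, end);
 */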
1699 1699
1700 static int map_files_d_revalidate(struct dentry *dentry, unsigned int flags) 1700 static int map_files_d_revalidate(struct dentry *dentry, unsigned int flags)
1701 { 1701 {
1702 unsigned long vm_start, vm_end; 1702 unsigned long vm_start, vm_end;
1703 bool exact_vma_exists = false; 1703 bool exact_vma_exists = false;
1704 struct mm_struct *mm = NULL; 1704 struct mm_struct *mm = NULL;
1705 struct task_struct *task; 1705 struct task_struct *task;
1706 const struct cred *cred; 1706 const struct cred *cred;
1707 struct inode *inode; 1707 struct inode *inode;
1708 int status = 0; 1708 int status = 0;
1709 1709
1710 if (flags & LOOKUP_RCU) 1710 if (flags & LOOKUP_RCU)
1711 return -ECHILD; 1711 return -ECHILD;
1712 1712
1713 if (!capable(CAP_SYS_ADMIN)) { 1713 if (!capable(CAP_SYS_ADMIN)) {
1714 status = -EACCES; 1714 status = -EPERM;
1715 goto out_notask; 1715 goto out_notask;
1716 } 1716 }
1717 1717
1718 inode = dentry->d_inode; 1718 inode = dentry->d_inode;
1719 task = get_proc_task(inode); 1719 task = get_proc_task(inode);
1720 if (!task) 1720 if (!task)
1721 goto out_notask; 1721 goto out_notask;
1722 1722
1723 mm = mm_access(task, PTRACE_MODE_READ); 1723 mm = mm_access(task, PTRACE_MODE_READ);
1724 if (IS_ERR_OR_NULL(mm)) 1724 if (IS_ERR_OR_NULL(mm))
1725 goto out; 1725 goto out;
1726 1726
1727 if (!dname_to_vma_addr(dentry, &vm_start, &vm_end)) { 1727 if (!dname_to_vma_addr(dentry, &vm_start, &vm_end)) {
1728 down_read(&mm->mmap_sem); 1728 down_read(&mm->mmap_sem);
1729 exact_vma_exists = !!find_exact_vma(mm, vm_start, vm_end); 1729 exact_vma_exists = !!find_exact_vma(mm, vm_start, vm_end);
1730 up_read(&mm->mmap_sem); 1730 up_read(&mm->mmap_sem);
1731 } 1731 }
1732 1732
1733 mmput(mm); 1733 mmput(mm);
1734 1734
1735 if (exact_vma_exists) { 1735 if (exact_vma_exists) {
1736 if (task_dumpable(task)) { 1736 if (task_dumpable(task)) {
1737 rcu_read_lock(); 1737 rcu_read_lock();
1738 cred = __task_cred(task); 1738 cred = __task_cred(task);
1739 inode->i_uid = cred->euid; 1739 inode->i_uid = cred->euid;
1740 inode->i_gid = cred->egid; 1740 inode->i_gid = cred->egid;
1741 rcu_read_unlock(); 1741 rcu_read_unlock();
1742 } else { 1742 } else {
1743 inode->i_uid = GLOBAL_ROOT_UID; 1743 inode->i_uid = GLOBAL_ROOT_UID;
1744 inode->i_gid = GLOBAL_ROOT_GID; 1744 inode->i_gid = GLOBAL_ROOT_GID;
1745 } 1745 }
1746 security_task_to_inode(task, inode); 1746 security_task_to_inode(task, inode);
1747 status = 1; 1747 status = 1;
1748 } 1748 }
1749 1749
1750 out: 1750 out:
1751 put_task_struct(task); 1751 put_task_struct(task);
1752 1752
1753 out_notask: 1753 out_notask:
1754 if (status <= 0) 1754 if (status <= 0)
1755 d_drop(dentry); 1755 d_drop(dentry);
1756 1756
1757 return status; 1757 return status;
1758 } 1758 }
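/*
 * Editorial sketch, not part of this diff: the error-code convention the
 * hunks in this file now follow -- a failed capability check reports
 * -EPERM, while a failed ptrace access check still reports -EACCES.
 * The function name below is hypothetical and for illustration only.
 */
static int example_map_files_check(struct task_struct *task)
{
	if (!capable(CAP_SYS_ADMIN))				/* privilege check */
		return -EPERM;
	if (!ptrace_may_access(task, PTRACE_MODE_READ))		/* per-task access check */
		return -EACCES;
	return 0;
}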
1759 1759
1760 static const struct dentry_operations tid_map_files_dentry_operations = { 1760 static const struct dentry_operations tid_map_files_dentry_operations = {
1761 .d_revalidate = map_files_d_revalidate, 1761 .d_revalidate = map_files_d_revalidate,
1762 .d_delete = pid_delete_dentry, 1762 .d_delete = pid_delete_dentry,
1763 }; 1763 };
1764 1764
1765 static int proc_map_files_get_link(struct dentry *dentry, struct path *path) 1765 static int proc_map_files_get_link(struct dentry *dentry, struct path *path)
1766 { 1766 {
1767 unsigned long vm_start, vm_end; 1767 unsigned long vm_start, vm_end;
1768 struct vm_area_struct *vma; 1768 struct vm_area_struct *vma;
1769 struct task_struct *task; 1769 struct task_struct *task;
1770 struct mm_struct *mm; 1770 struct mm_struct *mm;
1771 int rc; 1771 int rc;
1772 1772
1773 rc = -ENOENT; 1773 rc = -ENOENT;
1774 task = get_proc_task(dentry->d_inode); 1774 task = get_proc_task(dentry->d_inode);
1775 if (!task) 1775 if (!task)
1776 goto out; 1776 goto out;
1777 1777
1778 mm = get_task_mm(task); 1778 mm = get_task_mm(task);
1779 put_task_struct(task); 1779 put_task_struct(task);
1780 if (!mm) 1780 if (!mm)
1781 goto out; 1781 goto out;
1782 1782
1783 rc = dname_to_vma_addr(dentry, &vm_start, &vm_end); 1783 rc = dname_to_vma_addr(dentry, &vm_start, &vm_end);
1784 if (rc) 1784 if (rc)
1785 goto out_mmput; 1785 goto out_mmput;
1786 1786
1787 down_read(&mm->mmap_sem); 1787 down_read(&mm->mmap_sem);
1788 vma = find_exact_vma(mm, vm_start, vm_end); 1788 vma = find_exact_vma(mm, vm_start, vm_end);
1789 if (vma && vma->vm_file) { 1789 if (vma && vma->vm_file) {
1790 *path = vma->vm_file->f_path; 1790 *path = vma->vm_file->f_path;
1791 path_get(path); 1791 path_get(path);
1792 rc = 0; 1792 rc = 0;
1793 } 1793 }
1794 up_read(&mm->mmap_sem); 1794 up_read(&mm->mmap_sem);
1795 1795
1796 out_mmput: 1796 out_mmput:
1797 mmput(mm); 1797 mmput(mm);
1798 out: 1798 out:
1799 return rc; 1799 return rc;
1800 } 1800 }
1801 1801
1802 struct map_files_info { 1802 struct map_files_info {
1803 fmode_t mode; 1803 fmode_t mode;
1804 unsigned long len; 1804 unsigned long len;
1805 unsigned char name[4*sizeof(long)+2]; /* max: %lx-%lx\0 */ 1805 unsigned char name[4*sizeof(long)+2]; /* max: %lx-%lx\0 */
1806 }; 1806 };
1807 1807
1808 static struct dentry * 1808 static struct dentry *
1809 proc_map_files_instantiate(struct inode *dir, struct dentry *dentry, 1809 proc_map_files_instantiate(struct inode *dir, struct dentry *dentry,
1810 struct task_struct *task, const void *ptr) 1810 struct task_struct *task, const void *ptr)
1811 { 1811 {
1812 fmode_t mode = (fmode_t)(unsigned long)ptr; 1812 fmode_t mode = (fmode_t)(unsigned long)ptr;
1813 struct proc_inode *ei; 1813 struct proc_inode *ei;
1814 struct inode *inode; 1814 struct inode *inode;
1815 1815
1816 inode = proc_pid_make_inode(dir->i_sb, task); 1816 inode = proc_pid_make_inode(dir->i_sb, task);
1817 if (!inode) 1817 if (!inode)
1818 return ERR_PTR(-ENOENT); 1818 return ERR_PTR(-ENOENT);
1819 1819
1820 ei = PROC_I(inode); 1820 ei = PROC_I(inode);
1821 ei->op.proc_get_link = proc_map_files_get_link; 1821 ei->op.proc_get_link = proc_map_files_get_link;
1822 1822
1823 inode->i_op = &proc_pid_link_inode_operations; 1823 inode->i_op = &proc_pid_link_inode_operations;
1824 inode->i_size = 64; 1824 inode->i_size = 64;
1825 inode->i_mode = S_IFLNK; 1825 inode->i_mode = S_IFLNK;
1826 1826
1827 if (mode & FMODE_READ) 1827 if (mode & FMODE_READ)
1828 inode->i_mode |= S_IRUSR; 1828 inode->i_mode |= S_IRUSR;
1829 if (mode & FMODE_WRITE) 1829 if (mode & FMODE_WRITE)
1830 inode->i_mode |= S_IWUSR; 1830 inode->i_mode |= S_IWUSR;
1831 1831
1832 d_set_d_op(dentry, &tid_map_files_dentry_operations); 1832 d_set_d_op(dentry, &tid_map_files_dentry_operations);
1833 d_add(dentry, inode); 1833 d_add(dentry, inode);
1834 1834
1835 return NULL; 1835 return NULL;
1836 } 1836 }
1837 1837
1838 static struct dentry *proc_map_files_lookup(struct inode *dir, 1838 static struct dentry *proc_map_files_lookup(struct inode *dir,
1839 struct dentry *dentry, unsigned int flags) 1839 struct dentry *dentry, unsigned int flags)
1840 { 1840 {
1841 unsigned long vm_start, vm_end; 1841 unsigned long vm_start, vm_end;
1842 struct vm_area_struct *vma; 1842 struct vm_area_struct *vma;
1843 struct task_struct *task; 1843 struct task_struct *task;
1844 struct dentry *result; 1844 struct dentry *result;
1845 struct mm_struct *mm; 1845 struct mm_struct *mm;
1846 1846
1847 result = ERR_PTR(-EACCES); 1847 result = ERR_PTR(-EPERM);
1848 if (!capable(CAP_SYS_ADMIN)) 1848 if (!capable(CAP_SYS_ADMIN))
1849 goto out; 1849 goto out;
1850 1850
1851 result = ERR_PTR(-ENOENT); 1851 result = ERR_PTR(-ENOENT);
1852 task = get_proc_task(dir); 1852 task = get_proc_task(dir);
1853 if (!task) 1853 if (!task)
1854 goto out; 1854 goto out;
1855 1855
1856 result = ERR_PTR(-EACCES); 1856 result = ERR_PTR(-EACCES);
1857 if (!ptrace_may_access(task, PTRACE_MODE_READ)) 1857 if (!ptrace_may_access(task, PTRACE_MODE_READ))
1858 goto out_put_task; 1858 goto out_put_task;
1859 1859
1860 result = ERR_PTR(-ENOENT); 1860 result = ERR_PTR(-ENOENT);
1861 if (dname_to_vma_addr(dentry, &vm_start, &vm_end)) 1861 if (dname_to_vma_addr(dentry, &vm_start, &vm_end))
1862 goto out_put_task; 1862 goto out_put_task;
1863 1863
1864 mm = get_task_mm(task); 1864 mm = get_task_mm(task);
1865 if (!mm) 1865 if (!mm)
1866 goto out_put_task; 1866 goto out_put_task;
1867 1867
1868 down_read(&mm->mmap_sem); 1868 down_read(&mm->mmap_sem);
1869 vma = find_exact_vma(mm, vm_start, vm_end); 1869 vma = find_exact_vma(mm, vm_start, vm_end);
1870 if (!vma) 1870 if (!vma)
1871 goto out_no_vma; 1871 goto out_no_vma;
1872 1872
1873 if (vma->vm_file) 1873 if (vma->vm_file)
1874 result = proc_map_files_instantiate(dir, dentry, task, 1874 result = proc_map_files_instantiate(dir, dentry, task,
1875 (void *)(unsigned long)vma->vm_file->f_mode); 1875 (void *)(unsigned long)vma->vm_file->f_mode);
1876 1876
1877 out_no_vma: 1877 out_no_vma:
1878 up_read(&mm->mmap_sem); 1878 up_read(&mm->mmap_sem);
1879 mmput(mm); 1879 mmput(mm);
1880 out_put_task: 1880 out_put_task:
1881 put_task_struct(task); 1881 put_task_struct(task);
1882 out: 1882 out:
1883 return result; 1883 return result;
1884 } 1884 }
1885 1885
1886 static const struct inode_operations proc_map_files_inode_operations = { 1886 static const struct inode_operations proc_map_files_inode_operations = {
1887 .lookup = proc_map_files_lookup, 1887 .lookup = proc_map_files_lookup,
1888 .permission = proc_fd_permission, 1888 .permission = proc_fd_permission,
1889 .setattr = proc_setattr, 1889 .setattr = proc_setattr,
1890 }; 1890 };
1891 1891
1892 static int 1892 static int
1893 proc_map_files_readdir(struct file *filp, void *dirent, filldir_t filldir) 1893 proc_map_files_readdir(struct file *filp, void *dirent, filldir_t filldir)
1894 { 1894 {
1895 struct dentry *dentry = filp->f_path.dentry; 1895 struct dentry *dentry = filp->f_path.dentry;
1896 struct inode *inode = dentry->d_inode; 1896 struct inode *inode = dentry->d_inode;
1897 struct vm_area_struct *vma; 1897 struct vm_area_struct *vma;
1898 struct task_struct *task; 1898 struct task_struct *task;
1899 struct mm_struct *mm; 1899 struct mm_struct *mm;
1900 ino_t ino; 1900 ino_t ino;
1901 int ret; 1901 int ret;
1902 1902
1903 ret = -EACCES; 1903 ret = -EPERM;
1904 if (!capable(CAP_SYS_ADMIN)) 1904 if (!capable(CAP_SYS_ADMIN))
1905 goto out; 1905 goto out;
1906 1906
1907 ret = -ENOENT; 1907 ret = -ENOENT;
1908 task = get_proc_task(inode); 1908 task = get_proc_task(inode);
1909 if (!task) 1909 if (!task)
1910 goto out; 1910 goto out;
1911 1911
1912 ret = -EACCES; 1912 ret = -EACCES;
1913 if (!ptrace_may_access(task, PTRACE_MODE_READ)) 1913 if (!ptrace_may_access(task, PTRACE_MODE_READ))
1914 goto out_put_task; 1914 goto out_put_task;
1915 1915
1916 ret = 0; 1916 ret = 0;
1917 switch (filp->f_pos) { 1917 switch (filp->f_pos) {
1918 case 0: 1918 case 0:
1919 ino = inode->i_ino; 1919 ino = inode->i_ino;
1920 if (filldir(dirent, ".", 1, 0, ino, DT_DIR) < 0) 1920 if (filldir(dirent, ".", 1, 0, ino, DT_DIR) < 0)
1921 goto out_put_task; 1921 goto out_put_task;
1922 filp->f_pos++; 1922 filp->f_pos++;
1923 case 1: 1923 case 1:
1924 ino = parent_ino(dentry); 1924 ino = parent_ino(dentry);
1925 if (filldir(dirent, "..", 2, 1, ino, DT_DIR) < 0) 1925 if (filldir(dirent, "..", 2, 1, ino, DT_DIR) < 0)
1926 goto out_put_task; 1926 goto out_put_task;
1927 filp->f_pos++; 1927 filp->f_pos++;
1928 default: 1928 default:
1929 { 1929 {
1930 unsigned long nr_files, pos, i; 1930 unsigned long nr_files, pos, i;
1931 struct flex_array *fa = NULL; 1931 struct flex_array *fa = NULL;
1932 struct map_files_info info; 1932 struct map_files_info info;
1933 struct map_files_info *p; 1933 struct map_files_info *p;
1934 1934
1935 mm = get_task_mm(task); 1935 mm = get_task_mm(task);
1936 if (!mm) 1936 if (!mm)
1937 goto out_put_task; 1937 goto out_put_task;
1938 down_read(&mm->mmap_sem); 1938 down_read(&mm->mmap_sem);
1939 1939
1940 nr_files = 0; 1940 nr_files = 0;
1941 1941
1942 /* 1942 /*
1943 * We need two passes here: 1943 * We need two passes here:
1944 * 1944 *
1945 * 1) Collect vmas of mapped files with mmap_sem taken 1945 * 1) Collect vmas of mapped files with mmap_sem taken
1946 * 2) Release mmap_sem and instantiate entries 1946 * 2) Release mmap_sem and instantiate entries
1947 * 1947 *
1948 * otherwise lockdep complains, since the filldir() 1948 * otherwise lockdep complains, since the filldir()
1949 * routine might require mmap_sem to be taken in might_fault(). 1949 * routine might require mmap_sem to be taken in might_fault().
1950 */ 1950 */
1951 1951
1952 for (vma = mm->mmap, pos = 2; vma; vma = vma->vm_next) { 1952 for (vma = mm->mmap, pos = 2; vma; vma = vma->vm_next) {
1953 if (vma->vm_file && ++pos > filp->f_pos) 1953 if (vma->vm_file && ++pos > filp->f_pos)
1954 nr_files++; 1954 nr_files++;
1955 } 1955 }
1956 1956
1957 if (nr_files) { 1957 if (nr_files) {
1958 fa = flex_array_alloc(sizeof(info), nr_files, 1958 fa = flex_array_alloc(sizeof(info), nr_files,
1959 GFP_KERNEL); 1959 GFP_KERNEL);
1960 if (!fa || flex_array_prealloc(fa, 0, nr_files, 1960 if (!fa || flex_array_prealloc(fa, 0, nr_files,
1961 GFP_KERNEL)) { 1961 GFP_KERNEL)) {
1962 ret = -ENOMEM; 1962 ret = -ENOMEM;
1963 if (fa) 1963 if (fa)
1964 flex_array_free(fa); 1964 flex_array_free(fa);
1965 up_read(&mm->mmap_sem); 1965 up_read(&mm->mmap_sem);
1966 mmput(mm); 1966 mmput(mm);
1967 goto out_put_task; 1967 goto out_put_task;
1968 } 1968 }
1969 for (i = 0, vma = mm->mmap, pos = 2; vma; 1969 for (i = 0, vma = mm->mmap, pos = 2; vma;
1970 vma = vma->vm_next) { 1970 vma = vma->vm_next) {
1971 if (!vma->vm_file) 1971 if (!vma->vm_file)
1972 continue; 1972 continue;
1973 if (++pos <= filp->f_pos) 1973 if (++pos <= filp->f_pos)
1974 continue; 1974 continue;
1975 1975
1976 info.mode = vma->vm_file->f_mode; 1976 info.mode = vma->vm_file->f_mode;
1977 info.len = snprintf(info.name, 1977 info.len = snprintf(info.name,
1978 sizeof(info.name), "%lx-%lx", 1978 sizeof(info.name), "%lx-%lx",
1979 vma->vm_start, vma->vm_end); 1979 vma->vm_start, vma->vm_end);
1980 if (flex_array_put(fa, i++, &info, GFP_KERNEL)) 1980 if (flex_array_put(fa, i++, &info, GFP_KERNEL))
1981 BUG(); 1981 BUG();
1982 } 1982 }
1983 } 1983 }
1984 up_read(&mm->mmap_sem); 1984 up_read(&mm->mmap_sem);
1985 1985
1986 for (i = 0; i < nr_files; i++) { 1986 for (i = 0; i < nr_files; i++) {
1987 p = flex_array_get(fa, i); 1987 p = flex_array_get(fa, i);
1988 ret = proc_fill_cache(filp, dirent, filldir, 1988 ret = proc_fill_cache(filp, dirent, filldir,
1989 p->name, p->len, 1989 p->name, p->len,
1990 proc_map_files_instantiate, 1990 proc_map_files_instantiate,
1991 task, 1991 task,
1992 (void *)(unsigned long)p->mode); 1992 (void *)(unsigned long)p->mode);
1993 if (ret) 1993 if (ret)
1994 break; 1994 break;
1995 filp->f_pos++; 1995 filp->f_pos++;
1996 } 1996 }
1997 if (fa) 1997 if (fa)
1998 flex_array_free(fa); 1998 flex_array_free(fa);
1999 mmput(mm); 1999 mmput(mm);
2000 } 2000 }
2001 } 2001 }
2002 2002
2003 out_put_task: 2003 out_put_task:
2004 put_task_struct(task); 2004 put_task_struct(task);
2005 out: 2005 out:
2006 return ret; 2006 return ret;
2007 } 2007 }
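/*
 * Editorial sketch, not part of this diff: the two-pass shape used by
 * proc_map_files_readdir() above.  Pass 1 snapshots the mapped-file VMAs
 * while mmap_sem is held; pass 2 (emitting directory entries) runs after
 * the lock is dropped, because filldir() may fault and take mmap_sem
 * itself.  The names below ("struct vma_snap", "snapshot_file_vmas") are
 * hypothetical.
 */
struct vma_snap {
	unsigned long start;
	unsigned long end;
};

static int snapshot_file_vmas(struct mm_struct *mm, struct vma_snap *buf, int max)
{
	struct vm_area_struct *vma;
	int n = 0;

	down_read(&mm->mmap_sem);		/* pass 1: collect under the lock */
	for (vma = mm->mmap; vma && n < max; vma = vma->vm_next) {
		if (!vma->vm_file)
			continue;
		buf[n].start = vma->vm_start;
		buf[n].end = vma->vm_end;
		n++;
	}
	up_read(&mm->mmap_sem);			/* pass 2 happens with the lock dropped */

	return n;
}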
2008 2008
2009 static const struct file_operations proc_map_files_operations = { 2009 static const struct file_operations proc_map_files_operations = {
2010 .read = generic_read_dir, 2010 .read = generic_read_dir,
2011 .readdir = proc_map_files_readdir, 2011 .readdir = proc_map_files_readdir,
2012 .llseek = default_llseek, 2012 .llseek = default_llseek,
2013 }; 2013 };
2014 2014
2015 #endif /* CONFIG_CHECKPOINT_RESTORE */ 2015 #endif /* CONFIG_CHECKPOINT_RESTORE */
2016 2016
2017 static struct dentry *proc_pident_instantiate(struct inode *dir, 2017 static struct dentry *proc_pident_instantiate(struct inode *dir,
2018 struct dentry *dentry, struct task_struct *task, const void *ptr) 2018 struct dentry *dentry, struct task_struct *task, const void *ptr)
2019 { 2019 {
2020 const struct pid_entry *p = ptr; 2020 const struct pid_entry *p = ptr;
2021 struct inode *inode; 2021 struct inode *inode;
2022 struct proc_inode *ei; 2022 struct proc_inode *ei;
2023 struct dentry *error = ERR_PTR(-ENOENT); 2023 struct dentry *error = ERR_PTR(-ENOENT);
2024 2024
2025 inode = proc_pid_make_inode(dir->i_sb, task); 2025 inode = proc_pid_make_inode(dir->i_sb, task);
2026 if (!inode) 2026 if (!inode)
2027 goto out; 2027 goto out;
2028 2028
2029 ei = PROC_I(inode); 2029 ei = PROC_I(inode);
2030 inode->i_mode = p->mode; 2030 inode->i_mode = p->mode;
2031 if (S_ISDIR(inode->i_mode)) 2031 if (S_ISDIR(inode->i_mode))
2032 set_nlink(inode, 2); /* Use getattr to fix if necessary */ 2032 set_nlink(inode, 2); /* Use getattr to fix if necessary */
2033 if (p->iop) 2033 if (p->iop)
2034 inode->i_op = p->iop; 2034 inode->i_op = p->iop;
2035 if (p->fop) 2035 if (p->fop)
2036 inode->i_fop = p->fop; 2036 inode->i_fop = p->fop;
2037 ei->op = p->op; 2037 ei->op = p->op;
2038 d_set_d_op(dentry, &pid_dentry_operations); 2038 d_set_d_op(dentry, &pid_dentry_operations);
2039 d_add(dentry, inode); 2039 d_add(dentry, inode);
2040 /* Close the race of the process dying before we return the dentry */ 2040 /* Close the race of the process dying before we return the dentry */
2041 if (pid_revalidate(dentry, 0)) 2041 if (pid_revalidate(dentry, 0))
2042 error = NULL; 2042 error = NULL;
2043 out: 2043 out:
2044 return error; 2044 return error;
2045 } 2045 }
2046 2046
2047 static struct dentry *proc_pident_lookup(struct inode *dir, 2047 static struct dentry *proc_pident_lookup(struct inode *dir,
2048 struct dentry *dentry, 2048 struct dentry *dentry,
2049 const struct pid_entry *ents, 2049 const struct pid_entry *ents,
2050 unsigned int nents) 2050 unsigned int nents)
2051 { 2051 {
2052 struct dentry *error; 2052 struct dentry *error;
2053 struct task_struct *task = get_proc_task(dir); 2053 struct task_struct *task = get_proc_task(dir);
2054 const struct pid_entry *p, *last; 2054 const struct pid_entry *p, *last;
2055 2055
2056 error = ERR_PTR(-ENOENT); 2056 error = ERR_PTR(-ENOENT);
2057 2057
2058 if (!task) 2058 if (!task)
2059 goto out_no_task; 2059 goto out_no_task;
2060 2060
2061 /* 2061 /*
2062 * Yes, it does not scale. And it should not. Don't add 2062 * Yes, it does not scale. And it should not. Don't add
2063 * new entries into /proc/<tgid>/ without very good reasons. 2063 * new entries into /proc/<tgid>/ without very good reasons.
2064 */ 2064 */
2065 last = &ents[nents - 1]; 2065 last = &ents[nents - 1];
2066 for (p = ents; p <= last; p++) { 2066 for (p = ents; p <= last; p++) {
2067 if (p->len != dentry->d_name.len) 2067 if (p->len != dentry->d_name.len)
2068 continue; 2068 continue;
2069 if (!memcmp(dentry->d_name.name, p->name, p->len)) 2069 if (!memcmp(dentry->d_name.name, p->name, p->len))
2070 break; 2070 break;
2071 } 2071 }
2072 if (p > last) 2072 if (p > last)
2073 goto out; 2073 goto out;
2074 2074
2075 error = proc_pident_instantiate(dir, dentry, task, p); 2075 error = proc_pident_instantiate(dir, dentry, task, p);
2076 out: 2076 out:
2077 put_task_struct(task); 2077 put_task_struct(task);
2078 out_no_task: 2078 out_no_task:
2079 return error; 2079 return error;
2080 } 2080 }
2081 2081
2082 static int proc_pident_fill_cache(struct file *filp, void *dirent, 2082 static int proc_pident_fill_cache(struct file *filp, void *dirent,
2083 filldir_t filldir, struct task_struct *task, const struct pid_entry *p) 2083 filldir_t filldir, struct task_struct *task, const struct pid_entry *p)
2084 { 2084 {
2085 return proc_fill_cache(filp, dirent, filldir, p->name, p->len, 2085 return proc_fill_cache(filp, dirent, filldir, p->name, p->len,
2086 proc_pident_instantiate, task, p); 2086 proc_pident_instantiate, task, p);
2087 } 2087 }
2088 2088
2089 static int proc_pident_readdir(struct file *filp, 2089 static int proc_pident_readdir(struct file *filp,
2090 void *dirent, filldir_t filldir, 2090 void *dirent, filldir_t filldir,
2091 const struct pid_entry *ents, unsigned int nents) 2091 const struct pid_entry *ents, unsigned int nents)
2092 { 2092 {
2093 int i; 2093 int i;
2094 struct dentry *dentry = filp->f_path.dentry; 2094 struct dentry *dentry = filp->f_path.dentry;
2095 struct inode *inode = dentry->d_inode; 2095 struct inode *inode = dentry->d_inode;
2096 struct task_struct *task = get_proc_task(inode); 2096 struct task_struct *task = get_proc_task(inode);
2097 const struct pid_entry *p, *last; 2097 const struct pid_entry *p, *last;
2098 ino_t ino; 2098 ino_t ino;
2099 int ret; 2099 int ret;
2100 2100
2101 ret = -ENOENT; 2101 ret = -ENOENT;
2102 if (!task) 2102 if (!task)
2103 goto out_no_task; 2103 goto out_no_task;
2104 2104
2105 ret = 0; 2105 ret = 0;
2106 i = filp->f_pos; 2106 i = filp->f_pos;
2107 switch (i) { 2107 switch (i) {
2108 case 0: 2108 case 0:
2109 ino = inode->i_ino; 2109 ino = inode->i_ino;
2110 if (filldir(dirent, ".", 1, i, ino, DT_DIR) < 0) 2110 if (filldir(dirent, ".", 1, i, ino, DT_DIR) < 0)
2111 goto out; 2111 goto out;
2112 i++; 2112 i++;
2113 filp->f_pos++; 2113 filp->f_pos++;
2114 /* fall through */ 2114 /* fall through */
2115 case 1: 2115 case 1:
2116 ino = parent_ino(dentry); 2116 ino = parent_ino(dentry);
2117 if (filldir(dirent, "..", 2, i, ino, DT_DIR) < 0) 2117 if (filldir(dirent, "..", 2, i, ino, DT_DIR) < 0)
2118 goto out; 2118 goto out;
2119 i++; 2119 i++;
2120 filp->f_pos++; 2120 filp->f_pos++;
2121 /* fall through */ 2121 /* fall through */
2122 default: 2122 default:
2123 i -= 2; 2123 i -= 2;
2124 if (i >= nents) { 2124 if (i >= nents) {
2125 ret = 1; 2125 ret = 1;
2126 goto out; 2126 goto out;
2127 } 2127 }
2128 p = ents + i; 2128 p = ents + i;
2129 last = &ents[nents - 1]; 2129 last = &ents[nents - 1];
2130 while (p <= last) { 2130 while (p <= last) {
2131 if (proc_pident_fill_cache(filp, dirent, filldir, task, p) < 0) 2131 if (proc_pident_fill_cache(filp, dirent, filldir, task, p) < 0)
2132 goto out; 2132 goto out;
2133 filp->f_pos++; 2133 filp->f_pos++;
2134 p++; 2134 p++;
2135 } 2135 }
2136 } 2136 }
2137 2137
2138 ret = 1; 2138 ret = 1;
2139 out: 2139 out:
2140 put_task_struct(task); 2140 put_task_struct(task);
2141 out_no_task: 2141 out_no_task:
2142 return ret; 2142 return ret;
2143 } 2143 }
2144 2144
2145 #ifdef CONFIG_SECURITY 2145 #ifdef CONFIG_SECURITY
2146 static ssize_t proc_pid_attr_read(struct file * file, char __user * buf, 2146 static ssize_t proc_pid_attr_read(struct file * file, char __user * buf,
2147 size_t count, loff_t *ppos) 2147 size_t count, loff_t *ppos)
2148 { 2148 {
2149 struct inode * inode = file_inode(file); 2149 struct inode * inode = file_inode(file);
2150 char *p = NULL; 2150 char *p = NULL;
2151 ssize_t length; 2151 ssize_t length;
2152 struct task_struct *task = get_proc_task(inode); 2152 struct task_struct *task = get_proc_task(inode);
2153 2153
2154 if (!task) 2154 if (!task)
2155 return -ESRCH; 2155 return -ESRCH;
2156 2156
2157 length = security_getprocattr(task, 2157 length = security_getprocattr(task,
2158 (char*)file->f_path.dentry->d_name.name, 2158 (char*)file->f_path.dentry->d_name.name,
2159 &p); 2159 &p);
2160 put_task_struct(task); 2160 put_task_struct(task);
2161 if (length > 0) 2161 if (length > 0)
2162 length = simple_read_from_buffer(buf, count, ppos, p, length); 2162 length = simple_read_from_buffer(buf, count, ppos, p, length);
2163 kfree(p); 2163 kfree(p);
2164 return length; 2164 return length;
2165 } 2165 }
2166 2166
2167 static ssize_t proc_pid_attr_write(struct file * file, const char __user * buf, 2167 static ssize_t proc_pid_attr_write(struct file * file, const char __user * buf,
2168 size_t count, loff_t *ppos) 2168 size_t count, loff_t *ppos)
2169 { 2169 {
2170 struct inode * inode = file_inode(file); 2170 struct inode * inode = file_inode(file);
2171 char *page; 2171 char *page;
2172 ssize_t length; 2172 ssize_t length;
2173 struct task_struct *task = get_proc_task(inode); 2173 struct task_struct *task = get_proc_task(inode);
2174 2174
2175 length = -ESRCH; 2175 length = -ESRCH;
2176 if (!task) 2176 if (!task)
2177 goto out_no_task; 2177 goto out_no_task;
2178 if (count > PAGE_SIZE) 2178 if (count > PAGE_SIZE)
2179 count = PAGE_SIZE; 2179 count = PAGE_SIZE;
2180 2180
2181 /* No partial writes. */ 2181 /* No partial writes. */
2182 length = -EINVAL; 2182 length = -EINVAL;
2183 if (*ppos != 0) 2183 if (*ppos != 0)
2184 goto out; 2184 goto out;
2185 2185
2186 length = -ENOMEM; 2186 length = -ENOMEM;
2187 page = (char*)__get_free_page(GFP_TEMPORARY); 2187 page = (char*)__get_free_page(GFP_TEMPORARY);
2188 if (!page) 2188 if (!page)
2189 goto out; 2189 goto out;
2190 2190
2191 length = -EFAULT; 2191 length = -EFAULT;
2192 if (copy_from_user(page, buf, count)) 2192 if (copy_from_user(page, buf, count))
2193 goto out_free; 2193 goto out_free;
2194 2194
2195 /* Guard against adverse ptrace interaction */ 2195 /* Guard against adverse ptrace interaction */
2196 length = mutex_lock_interruptible(&task->signal->cred_guard_mutex); 2196 length = mutex_lock_interruptible(&task->signal->cred_guard_mutex);
2197 if (length < 0) 2197 if (length < 0)
2198 goto out_free; 2198 goto out_free;
2199 2199
2200 length = security_setprocattr(task, 2200 length = security_setprocattr(task,
2201 (char*)file->f_path.dentry->d_name.name, 2201 (char*)file->f_path.dentry->d_name.name,
2202 (void*)page, count); 2202 (void*)page, count);
2203 mutex_unlock(&task->signal->cred_guard_mutex); 2203 mutex_unlock(&task->signal->cred_guard_mutex);
2204 out_free: 2204 out_free:
2205 free_page((unsigned long) page); 2205 free_page((unsigned long) page);
2206 out: 2206 out:
2207 put_task_struct(task); 2207 put_task_struct(task);
2208 out_no_task: 2208 out_no_task:
2209 return length; 2209 return length;
2210 } 2210 }
2211 2211
2212 static const struct file_operations proc_pid_attr_operations = { 2212 static const struct file_operations proc_pid_attr_operations = {
2213 .read = proc_pid_attr_read, 2213 .read = proc_pid_attr_read,
2214 .write = proc_pid_attr_write, 2214 .write = proc_pid_attr_write,
2215 .llseek = generic_file_llseek, 2215 .llseek = generic_file_llseek,
2216 }; 2216 };
2217 2217
2218 static const struct pid_entry attr_dir_stuff[] = { 2218 static const struct pid_entry attr_dir_stuff[] = {
2219 REG("current", S_IRUGO|S_IWUGO, proc_pid_attr_operations), 2219 REG("current", S_IRUGO|S_IWUGO, proc_pid_attr_operations),
2220 REG("prev", S_IRUGO, proc_pid_attr_operations), 2220 REG("prev", S_IRUGO, proc_pid_attr_operations),
2221 REG("exec", S_IRUGO|S_IWUGO, proc_pid_attr_operations), 2221 REG("exec", S_IRUGO|S_IWUGO, proc_pid_attr_operations),
2222 REG("fscreate", S_IRUGO|S_IWUGO, proc_pid_attr_operations), 2222 REG("fscreate", S_IRUGO|S_IWUGO, proc_pid_attr_operations),
2223 REG("keycreate", S_IRUGO|S_IWUGO, proc_pid_attr_operations), 2223 REG("keycreate", S_IRUGO|S_IWUGO, proc_pid_attr_operations),
2224 REG("sockcreate", S_IRUGO|S_IWUGO, proc_pid_attr_operations), 2224 REG("sockcreate", S_IRUGO|S_IWUGO, proc_pid_attr_operations),
2225 }; 2225 };
2226 2226
2227 static int proc_attr_dir_readdir(struct file * filp, 2227 static int proc_attr_dir_readdir(struct file * filp,
2228 void * dirent, filldir_t filldir) 2228 void * dirent, filldir_t filldir)
2229 { 2229 {
2230 return proc_pident_readdir(filp,dirent,filldir, 2230 return proc_pident_readdir(filp,dirent,filldir,
2231 attr_dir_stuff,ARRAY_SIZE(attr_dir_stuff)); 2231 attr_dir_stuff,ARRAY_SIZE(attr_dir_stuff));
2232 } 2232 }
2233 2233
2234 static const struct file_operations proc_attr_dir_operations = { 2234 static const struct file_operations proc_attr_dir_operations = {
2235 .read = generic_read_dir, 2235 .read = generic_read_dir,
2236 .readdir = proc_attr_dir_readdir, 2236 .readdir = proc_attr_dir_readdir,
2237 .llseek = default_llseek, 2237 .llseek = default_llseek,
2238 }; 2238 };
2239 2239
2240 static struct dentry *proc_attr_dir_lookup(struct inode *dir, 2240 static struct dentry *proc_attr_dir_lookup(struct inode *dir,
2241 struct dentry *dentry, unsigned int flags) 2241 struct dentry *dentry, unsigned int flags)
2242 { 2242 {
2243 return proc_pident_lookup(dir, dentry, 2243 return proc_pident_lookup(dir, dentry,
2244 attr_dir_stuff, ARRAY_SIZE(attr_dir_stuff)); 2244 attr_dir_stuff, ARRAY_SIZE(attr_dir_stuff));
2245 } 2245 }
2246 2246
2247 static const struct inode_operations proc_attr_dir_inode_operations = { 2247 static const struct inode_operations proc_attr_dir_inode_operations = {
2248 .lookup = proc_attr_dir_lookup, 2248 .lookup = proc_attr_dir_lookup,
2249 .getattr = pid_getattr, 2249 .getattr = pid_getattr,
2250 .setattr = proc_setattr, 2250 .setattr = proc_setattr,
2251 }; 2251 };
2252 2252
2253 #endif 2253 #endif
2254 2254
2255 #ifdef CONFIG_ELF_CORE 2255 #ifdef CONFIG_ELF_CORE
2256 static ssize_t proc_coredump_filter_read(struct file *file, char __user *buf, 2256 static ssize_t proc_coredump_filter_read(struct file *file, char __user *buf,
2257 size_t count, loff_t *ppos) 2257 size_t count, loff_t *ppos)
2258 { 2258 {
2259 struct task_struct *task = get_proc_task(file_inode(file)); 2259 struct task_struct *task = get_proc_task(file_inode(file));
2260 struct mm_struct *mm; 2260 struct mm_struct *mm;
2261 char buffer[PROC_NUMBUF]; 2261 char buffer[PROC_NUMBUF];
2262 size_t len; 2262 size_t len;
2263 int ret; 2263 int ret;
2264 2264
2265 if (!task) 2265 if (!task)
2266 return -ESRCH; 2266 return -ESRCH;
2267 2267
2268 ret = 0; 2268 ret = 0;
2269 mm = get_task_mm(task); 2269 mm = get_task_mm(task);
2270 if (mm) { 2270 if (mm) {
2271 len = snprintf(buffer, sizeof(buffer), "%08lx\n", 2271 len = snprintf(buffer, sizeof(buffer), "%08lx\n",
2272 ((mm->flags & MMF_DUMP_FILTER_MASK) >> 2272 ((mm->flags & MMF_DUMP_FILTER_MASK) >>
2273 MMF_DUMP_FILTER_SHIFT)); 2273 MMF_DUMP_FILTER_SHIFT));
2274 mmput(mm); 2274 mmput(mm);
2275 ret = simple_read_from_buffer(buf, count, ppos, buffer, len); 2275 ret = simple_read_from_buffer(buf, count, ppos, buffer, len);
2276 } 2276 }
2277 2277
2278 put_task_struct(task); 2278 put_task_struct(task);
2279 2279
2280 return ret; 2280 return ret;
2281 } 2281 }
2282 2282
2283 static ssize_t proc_coredump_filter_write(struct file *file, 2283 static ssize_t proc_coredump_filter_write(struct file *file,
2284 const char __user *buf, 2284 const char __user *buf,
2285 size_t count, 2285 size_t count,
2286 loff_t *ppos) 2286 loff_t *ppos)
2287 { 2287 {
2288 struct task_struct *task; 2288 struct task_struct *task;
2289 struct mm_struct *mm; 2289 struct mm_struct *mm;
2290 char buffer[PROC_NUMBUF], *end; 2290 char buffer[PROC_NUMBUF], *end;
2291 unsigned int val; 2291 unsigned int val;
2292 int ret; 2292 int ret;
2293 int i; 2293 int i;
2294 unsigned long mask; 2294 unsigned long mask;
2295 2295
2296 ret = -EFAULT; 2296 ret = -EFAULT;
2297 memset(buffer, 0, sizeof(buffer)); 2297 memset(buffer, 0, sizeof(buffer));
2298 if (count > sizeof(buffer) - 1) 2298 if (count > sizeof(buffer) - 1)
2299 count = sizeof(buffer) - 1; 2299 count = sizeof(buffer) - 1;
2300 if (copy_from_user(buffer, buf, count)) 2300 if (copy_from_user(buffer, buf, count))
2301 goto out_no_task; 2301 goto out_no_task;
2302 2302
2303 ret = -EINVAL; 2303 ret = -EINVAL;
2304 val = (unsigned int)simple_strtoul(buffer, &end, 0); 2304 val = (unsigned int)simple_strtoul(buffer, &end, 0);
2305 if (*end == '\n') 2305 if (*end == '\n')
2306 end++; 2306 end++;
2307 if (end - buffer == 0) 2307 if (end - buffer == 0)
2308 goto out_no_task; 2308 goto out_no_task;
2309 2309
2310 ret = -ESRCH; 2310 ret = -ESRCH;
2311 task = get_proc_task(file_inode(file)); 2311 task = get_proc_task(file_inode(file));
2312 if (!task) 2312 if (!task)
2313 goto out_no_task; 2313 goto out_no_task;
2314 2314
2315 ret = end - buffer; 2315 ret = end - buffer;
2316 mm = get_task_mm(task); 2316 mm = get_task_mm(task);
2317 if (!mm) 2317 if (!mm)
2318 goto out_no_mm; 2318 goto out_no_mm;
2319 2319
2320 for (i = 0, mask = 1; i < MMF_DUMP_FILTER_BITS; i++, mask <<= 1) { 2320 for (i = 0, mask = 1; i < MMF_DUMP_FILTER_BITS; i++, mask <<= 1) {
2321 if (val & mask) 2321 if (val & mask)
2322 set_bit(i + MMF_DUMP_FILTER_SHIFT, &mm->flags); 2322 set_bit(i + MMF_DUMP_FILTER_SHIFT, &mm->flags);
2323 else 2323 else
2324 clear_bit(i + MMF_DUMP_FILTER_SHIFT, &mm->flags); 2324 clear_bit(i + MMF_DUMP_FILTER_SHIFT, &mm->flags);
2325 } 2325 }
2326 2326
2327 mmput(mm); 2327 mmput(mm);
2328 out_no_mm: 2328 out_no_mm:
2329 put_task_struct(task); 2329 put_task_struct(task);
2330 out_no_task: 2330 out_no_task:
2331 return ret; 2331 return ret;
2332 } 2332 }
2333 2333
2334 static const struct file_operations proc_coredump_filter_operations = { 2334 static const struct file_operations proc_coredump_filter_operations = {
2335 .read = proc_coredump_filter_read, 2335 .read = proc_coredump_filter_read,
2336 .write = proc_coredump_filter_write, 2336 .write = proc_coredump_filter_write,
2337 .llseek = generic_file_llseek, 2337 .llseek = generic_file_llseek,
2338 }; 2338 };
2339 #endif 2339 #endif
2340 2340
2341 #ifdef CONFIG_TASK_IO_ACCOUNTING 2341 #ifdef CONFIG_TASK_IO_ACCOUNTING
2342 static int do_io_accounting(struct task_struct *task, char *buffer, int whole) 2342 static int do_io_accounting(struct task_struct *task, char *buffer, int whole)
2343 { 2343 {
2344 struct task_io_accounting acct = task->ioac; 2344 struct task_io_accounting acct = task->ioac;
2345 unsigned long flags; 2345 unsigned long flags;
2346 int result; 2346 int result;
2347 2347
2348 result = mutex_lock_killable(&task->signal->cred_guard_mutex); 2348 result = mutex_lock_killable(&task->signal->cred_guard_mutex);
2349 if (result) 2349 if (result)
2350 return result; 2350 return result;
2351 2351
2352 if (!ptrace_may_access(task, PTRACE_MODE_READ)) { 2352 if (!ptrace_may_access(task, PTRACE_MODE_READ)) {
2353 result = -EACCES; 2353 result = -EACCES;
2354 goto out_unlock; 2354 goto out_unlock;
2355 } 2355 }
2356 2356
2357 if (whole && lock_task_sighand(task, &flags)) { 2357 if (whole && lock_task_sighand(task, &flags)) {
2358 struct task_struct *t = task; 2358 struct task_struct *t = task;
2359 2359
2360 task_io_accounting_add(&acct, &task->signal->ioac); 2360 task_io_accounting_add(&acct, &task->signal->ioac);
2361 while_each_thread(task, t) 2361 while_each_thread(task, t)
2362 task_io_accounting_add(&acct, &t->ioac); 2362 task_io_accounting_add(&acct, &t->ioac);
2363 2363
2364 unlock_task_sighand(task, &flags); 2364 unlock_task_sighand(task, &flags);
2365 } 2365 }
2366 result = sprintf(buffer, 2366 result = sprintf(buffer,
2367 "rchar: %llu\n" 2367 "rchar: %llu\n"
2368 "wchar: %llu\n" 2368 "wchar: %llu\n"
2369 "syscr: %llu\n" 2369 "syscr: %llu\n"
2370 "syscw: %llu\n" 2370 "syscw: %llu\n"
2371 "read_bytes: %llu\n" 2371 "read_bytes: %llu\n"
2372 "write_bytes: %llu\n" 2372 "write_bytes: %llu\n"
2373 "cancelled_write_bytes: %llu\n", 2373 "cancelled_write_bytes: %llu\n",
2374 (unsigned long long)acct.rchar, 2374 (unsigned long long)acct.rchar,
2375 (unsigned long long)acct.wchar, 2375 (unsigned long long)acct.wchar,
2376 (unsigned long long)acct.syscr, 2376 (unsigned long long)acct.syscr,
2377 (unsigned long long)acct.syscw, 2377 (unsigned long long)acct.syscw,
2378 (unsigned long long)acct.read_bytes, 2378 (unsigned long long)acct.read_bytes,
2379 (unsigned long long)acct.write_bytes, 2379 (unsigned long long)acct.write_bytes,
2380 (unsigned long long)acct.cancelled_write_bytes); 2380 (unsigned long long)acct.cancelled_write_bytes);
2381 out_unlock: 2381 out_unlock:
2382 mutex_unlock(&task->signal->cred_guard_mutex); 2382 mutex_unlock(&task->signal->cred_guard_mutex);
2383 return result; 2383 return result;
2384 } 2384 }
2385 2385
2386 static int proc_tid_io_accounting(struct task_struct *task, char *buffer) 2386 static int proc_tid_io_accounting(struct task_struct *task, char *buffer)
2387 { 2387 {
2388 return do_io_accounting(task, buffer, 0); 2388 return do_io_accounting(task, buffer, 0);
2389 } 2389 }
2390 2390
2391 static int proc_tgid_io_accounting(struct task_struct *task, char *buffer) 2391 static int proc_tgid_io_accounting(struct task_struct *task, char *buffer)
2392 { 2392 {
2393 return do_io_accounting(task, buffer, 1); 2393 return do_io_accounting(task, buffer, 1);
2394 } 2394 }
2395 #endif /* CONFIG_TASK_IO_ACCOUNTING */ 2395 #endif /* CONFIG_TASK_IO_ACCOUNTING */
2396 2396
2397 #ifdef CONFIG_USER_NS 2397 #ifdef CONFIG_USER_NS
2398 static int proc_id_map_open(struct inode *inode, struct file *file, 2398 static int proc_id_map_open(struct inode *inode, struct file *file,
2399 struct seq_operations *seq_ops) 2399 struct seq_operations *seq_ops)
2400 { 2400 {
2401 struct user_namespace *ns = NULL; 2401 struct user_namespace *ns = NULL;
2402 struct task_struct *task; 2402 struct task_struct *task;
2403 struct seq_file *seq; 2403 struct seq_file *seq;
2404 int ret = -EINVAL; 2404 int ret = -EINVAL;
2405 2405
2406 task = get_proc_task(inode); 2406 task = get_proc_task(inode);
2407 if (task) { 2407 if (task) {
2408 rcu_read_lock(); 2408 rcu_read_lock();
2409 ns = get_user_ns(task_cred_xxx(task, user_ns)); 2409 ns = get_user_ns(task_cred_xxx(task, user_ns));
2410 rcu_read_unlock(); 2410 rcu_read_unlock();
2411 put_task_struct(task); 2411 put_task_struct(task);
2412 } 2412 }
2413 if (!ns) 2413 if (!ns)
2414 goto err; 2414 goto err;
2415 2415
2416 ret = seq_open(file, seq_ops); 2416 ret = seq_open(file, seq_ops);
2417 if (ret) 2417 if (ret)
2418 goto err_put_ns; 2418 goto err_put_ns;
2419 2419
2420 seq = file->private_data; 2420 seq = file->private_data;
2421 seq->private = ns; 2421 seq->private = ns;
2422 2422
2423 return 0; 2423 return 0;
2424 err_put_ns: 2424 err_put_ns:
2425 put_user_ns(ns); 2425 put_user_ns(ns);
2426 err: 2426 err:
2427 return ret; 2427 return ret;
2428 } 2428 }
2429 2429
2430 static int proc_id_map_release(struct inode *inode, struct file *file) 2430 static int proc_id_map_release(struct inode *inode, struct file *file)
2431 { 2431 {
2432 struct seq_file *seq = file->private_data; 2432 struct seq_file *seq = file->private_data;
2433 struct user_namespace *ns = seq->private; 2433 struct user_namespace *ns = seq->private;
2434 put_user_ns(ns); 2434 put_user_ns(ns);
2435 return seq_release(inode, file); 2435 return seq_release(inode, file);
2436 } 2436 }
2437 2437
2438 static int proc_uid_map_open(struct inode *inode, struct file *file) 2438 static int proc_uid_map_open(struct inode *inode, struct file *file)
2439 { 2439 {
2440 return proc_id_map_open(inode, file, &proc_uid_seq_operations); 2440 return proc_id_map_open(inode, file, &proc_uid_seq_operations);
2441 } 2441 }
2442 2442
2443 static int proc_gid_map_open(struct inode *inode, struct file *file) 2443 static int proc_gid_map_open(struct inode *inode, struct file *file)
2444 { 2444 {
2445 return proc_id_map_open(inode, file, &proc_gid_seq_operations); 2445 return proc_id_map_open(inode, file, &proc_gid_seq_operations);
2446 } 2446 }
2447 2447
2448 static int proc_projid_map_open(struct inode *inode, struct file *file) 2448 static int proc_projid_map_open(struct inode *inode, struct file *file)
2449 { 2449 {
2450 return proc_id_map_open(inode, file, &proc_projid_seq_operations); 2450 return proc_id_map_open(inode, file, &proc_projid_seq_operations);
2451 } 2451 }
2452 2452
2453 static const struct file_operations proc_uid_map_operations = { 2453 static const struct file_operations proc_uid_map_operations = {
2454 .open = proc_uid_map_open, 2454 .open = proc_uid_map_open,
2455 .write = proc_uid_map_write, 2455 .write = proc_uid_map_write,
2456 .read = seq_read, 2456 .read = seq_read,
2457 .llseek = seq_lseek, 2457 .llseek = seq_lseek,
2458 .release = proc_id_map_release, 2458 .release = proc_id_map_release,
2459 }; 2459 };
2460 2460
2461 static const struct file_operations proc_gid_map_operations = { 2461 static const struct file_operations proc_gid_map_operations = {
2462 .open = proc_gid_map_open, 2462 .open = proc_gid_map_open,
2463 .write = proc_gid_map_write, 2463 .write = proc_gid_map_write,
2464 .read = seq_read, 2464 .read = seq_read,
2465 .llseek = seq_lseek, 2465 .llseek = seq_lseek,
2466 .release = proc_id_map_release, 2466 .release = proc_id_map_release,
2467 }; 2467 };
2468 2468
2469 static const struct file_operations proc_projid_map_operations = { 2469 static const struct file_operations proc_projid_map_operations = {
2470 .open = proc_projid_map_open, 2470 .open = proc_projid_map_open,
2471 .write = proc_projid_map_write, 2471 .write = proc_projid_map_write,
2472 .read = seq_read, 2472 .read = seq_read,
2473 .llseek = seq_lseek, 2473 .llseek = seq_lseek,
2474 .release = proc_id_map_release, 2474 .release = proc_id_map_release,
2475 }; 2475 };
2476 #endif /* CONFIG_USER_NS */ 2476 #endif /* CONFIG_USER_NS */
2477 2477
2478 static int proc_pid_personality(struct seq_file *m, struct pid_namespace *ns, 2478 static int proc_pid_personality(struct seq_file *m, struct pid_namespace *ns,
2479 struct pid *pid, struct task_struct *task) 2479 struct pid *pid, struct task_struct *task)
2480 { 2480 {
2481 int err = lock_trace(task); 2481 int err = lock_trace(task);
2482 if (!err) { 2482 if (!err) {
2483 seq_printf(m, "%08x\n", task->personality); 2483 seq_printf(m, "%08x\n", task->personality);
2484 unlock_trace(task); 2484 unlock_trace(task);
2485 } 2485 }
2486 return err; 2486 return err;
2487 } 2487 }
2488 2488
2489 /* 2489 /*
2490 * Thread groups 2490 * Thread groups
2491 */ 2491 */
2492 static const struct file_operations proc_task_operations; 2492 static const struct file_operations proc_task_operations;
2493 static const struct inode_operations proc_task_inode_operations; 2493 static const struct inode_operations proc_task_inode_operations;
2494 2494
2495 static const struct pid_entry tgid_base_stuff[] = { 2495 static const struct pid_entry tgid_base_stuff[] = {
2496 DIR("task", S_IRUGO|S_IXUGO, proc_task_inode_operations, proc_task_operations), 2496 DIR("task", S_IRUGO|S_IXUGO, proc_task_inode_operations, proc_task_operations),
2497 DIR("fd", S_IRUSR|S_IXUSR, proc_fd_inode_operations, proc_fd_operations), 2497 DIR("fd", S_IRUSR|S_IXUSR, proc_fd_inode_operations, proc_fd_operations),
2498 #ifdef CONFIG_CHECKPOINT_RESTORE 2498 #ifdef CONFIG_CHECKPOINT_RESTORE
2499 DIR("map_files", S_IRUSR|S_IXUSR, proc_map_files_inode_operations, proc_map_files_operations), 2499 DIR("map_files", S_IRUSR|S_IXUSR, proc_map_files_inode_operations, proc_map_files_operations),
2500 #endif 2500 #endif
2501 DIR("fdinfo", S_IRUSR|S_IXUSR, proc_fdinfo_inode_operations, proc_fdinfo_operations), 2501 DIR("fdinfo", S_IRUSR|S_IXUSR, proc_fdinfo_inode_operations, proc_fdinfo_operations),
2502 DIR("ns", S_IRUSR|S_IXUGO, proc_ns_dir_inode_operations, proc_ns_dir_operations), 2502 DIR("ns", S_IRUSR|S_IXUGO, proc_ns_dir_inode_operations, proc_ns_dir_operations),
2503 #ifdef CONFIG_NET 2503 #ifdef CONFIG_NET
2504 DIR("net", S_IRUGO|S_IXUGO, proc_net_inode_operations, proc_net_operations), 2504 DIR("net", S_IRUGO|S_IXUGO, proc_net_inode_operations, proc_net_operations),
2505 #endif 2505 #endif
2506 REG("environ", S_IRUSR, proc_environ_operations), 2506 REG("environ", S_IRUSR, proc_environ_operations),
2507 INF("auxv", S_IRUSR, proc_pid_auxv), 2507 INF("auxv", S_IRUSR, proc_pid_auxv),
2508 ONE("status", S_IRUGO, proc_pid_status), 2508 ONE("status", S_IRUGO, proc_pid_status),
2509 ONE("personality", S_IRUGO, proc_pid_personality), 2509 ONE("personality", S_IRUGO, proc_pid_personality),
2510 INF("limits", S_IRUGO, proc_pid_limits), 2510 INF("limits", S_IRUGO, proc_pid_limits),
2511 #ifdef CONFIG_SCHED_DEBUG 2511 #ifdef CONFIG_SCHED_DEBUG
2512 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations), 2512 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
2513 #endif 2513 #endif
2514 #ifdef CONFIG_SCHED_AUTOGROUP 2514 #ifdef CONFIG_SCHED_AUTOGROUP
2515 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations), 2515 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
2516 #endif 2516 #endif
2517 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations), 2517 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
2518 #ifdef CONFIG_HAVE_ARCH_TRACEHOOK 2518 #ifdef CONFIG_HAVE_ARCH_TRACEHOOK
2519 INF("syscall", S_IRUGO, proc_pid_syscall), 2519 INF("syscall", S_IRUGO, proc_pid_syscall),
2520 #endif 2520 #endif
2521 INF("cmdline", S_IRUGO, proc_pid_cmdline), 2521 INF("cmdline", S_IRUGO, proc_pid_cmdline),
2522 ONE("stat", S_IRUGO, proc_tgid_stat), 2522 ONE("stat", S_IRUGO, proc_tgid_stat),
2523 ONE("statm", S_IRUGO, proc_pid_statm), 2523 ONE("statm", S_IRUGO, proc_pid_statm),
2524 REG("maps", S_IRUGO, proc_pid_maps_operations), 2524 REG("maps", S_IRUGO, proc_pid_maps_operations),
2525 #ifdef CONFIG_NUMA 2525 #ifdef CONFIG_NUMA
2526 REG("numa_maps", S_IRUGO, proc_pid_numa_maps_operations), 2526 REG("numa_maps", S_IRUGO, proc_pid_numa_maps_operations),
2527 #endif 2527 #endif
2528 REG("mem", S_IRUSR|S_IWUSR, proc_mem_operations), 2528 REG("mem", S_IRUSR|S_IWUSR, proc_mem_operations),
2529 LNK("cwd", proc_cwd_link), 2529 LNK("cwd", proc_cwd_link),
2530 LNK("root", proc_root_link), 2530 LNK("root", proc_root_link),
2531 LNK("exe", proc_exe_link), 2531 LNK("exe", proc_exe_link),
2532 REG("mounts", S_IRUGO, proc_mounts_operations), 2532 REG("mounts", S_IRUGO, proc_mounts_operations),
2533 REG("mountinfo", S_IRUGO, proc_mountinfo_operations), 2533 REG("mountinfo", S_IRUGO, proc_mountinfo_operations),
2534 REG("mountstats", S_IRUSR, proc_mountstats_operations), 2534 REG("mountstats", S_IRUSR, proc_mountstats_operations),
2535 #ifdef CONFIG_PROC_PAGE_MONITOR 2535 #ifdef CONFIG_PROC_PAGE_MONITOR
2536 REG("clear_refs", S_IWUSR, proc_clear_refs_operations), 2536 REG("clear_refs", S_IWUSR, proc_clear_refs_operations),
2537 REG("smaps", S_IRUGO, proc_pid_smaps_operations), 2537 REG("smaps", S_IRUGO, proc_pid_smaps_operations),
2538 REG("pagemap", S_IRUGO, proc_pagemap_operations), 2538 REG("pagemap", S_IRUGO, proc_pagemap_operations),
2539 #endif 2539 #endif
2540 #ifdef CONFIG_SECURITY 2540 #ifdef CONFIG_SECURITY
2541 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations), 2541 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
2542 #endif 2542 #endif
2543 #ifdef CONFIG_KALLSYMS 2543 #ifdef CONFIG_KALLSYMS
2544 INF("wchan", S_IRUGO, proc_pid_wchan), 2544 INF("wchan", S_IRUGO, proc_pid_wchan),
2545 #endif 2545 #endif
2546 #ifdef CONFIG_STACKTRACE 2546 #ifdef CONFIG_STACKTRACE
2547 ONE("stack", S_IRUGO, proc_pid_stack), 2547 ONE("stack", S_IRUGO, proc_pid_stack),
2548 #endif 2548 #endif
2549 #ifdef CONFIG_SCHEDSTATS 2549 #ifdef CONFIG_SCHEDSTATS
2550 INF("schedstat", S_IRUGO, proc_pid_schedstat), 2550 INF("schedstat", S_IRUGO, proc_pid_schedstat),
2551 #endif 2551 #endif
2552 #ifdef CONFIG_LATENCYTOP 2552 #ifdef CONFIG_LATENCYTOP
2553 REG("latency", S_IRUGO, proc_lstats_operations), 2553 REG("latency", S_IRUGO, proc_lstats_operations),
2554 #endif 2554 #endif
2555 #ifdef CONFIG_PROC_PID_CPUSET 2555 #ifdef CONFIG_PROC_PID_CPUSET
2556 REG("cpuset", S_IRUGO, proc_cpuset_operations), 2556 REG("cpuset", S_IRUGO, proc_cpuset_operations),
2557 #endif 2557 #endif
2558 #ifdef CONFIG_CGROUPS 2558 #ifdef CONFIG_CGROUPS
2559 REG("cgroup", S_IRUGO, proc_cgroup_operations), 2559 REG("cgroup", S_IRUGO, proc_cgroup_operations),
2560 #endif 2560 #endif
2561 INF("oom_score", S_IRUGO, proc_oom_score), 2561 INF("oom_score", S_IRUGO, proc_oom_score),
2562 REG("oom_adj", S_IRUGO|S_IWUSR, proc_oom_adj_operations), 2562 REG("oom_adj", S_IRUGO|S_IWUSR, proc_oom_adj_operations),
2563 REG("oom_score_adj", S_IRUGO|S_IWUSR, proc_oom_score_adj_operations), 2563 REG("oom_score_adj", S_IRUGO|S_IWUSR, proc_oom_score_adj_operations),
2564 #ifdef CONFIG_AUDITSYSCALL 2564 #ifdef CONFIG_AUDITSYSCALL
2565 REG("loginuid", S_IWUSR|S_IRUGO, proc_loginuid_operations), 2565 REG("loginuid", S_IWUSR|S_IRUGO, proc_loginuid_operations),
2566 REG("sessionid", S_IRUGO, proc_sessionid_operations), 2566 REG("sessionid", S_IRUGO, proc_sessionid_operations),
2567 #endif 2567 #endif
2568 #ifdef CONFIG_FAULT_INJECTION 2568 #ifdef CONFIG_FAULT_INJECTION
2569 REG("make-it-fail", S_IRUGO|S_IWUSR, proc_fault_inject_operations), 2569 REG("make-it-fail", S_IRUGO|S_IWUSR, proc_fault_inject_operations),
2570 #endif 2570 #endif
2571 #ifdef CONFIG_ELF_CORE 2571 #ifdef CONFIG_ELF_CORE
2572 REG("coredump_filter", S_IRUGO|S_IWUSR, proc_coredump_filter_operations), 2572 REG("coredump_filter", S_IRUGO|S_IWUSR, proc_coredump_filter_operations),
2573 #endif 2573 #endif
2574 #ifdef CONFIG_TASK_IO_ACCOUNTING 2574 #ifdef CONFIG_TASK_IO_ACCOUNTING
2575 INF("io", S_IRUSR, proc_tgid_io_accounting), 2575 INF("io", S_IRUSR, proc_tgid_io_accounting),
2576 #endif 2576 #endif
2577 #ifdef CONFIG_HARDWALL 2577 #ifdef CONFIG_HARDWALL
2578 INF("hardwall", S_IRUGO, proc_pid_hardwall), 2578 INF("hardwall", S_IRUGO, proc_pid_hardwall),
2579 #endif 2579 #endif
2580 #ifdef CONFIG_USER_NS 2580 #ifdef CONFIG_USER_NS
2581 REG("uid_map", S_IRUGO|S_IWUSR, proc_uid_map_operations), 2581 REG("uid_map", S_IRUGO|S_IWUSR, proc_uid_map_operations),
2582 REG("gid_map", S_IRUGO|S_IWUSR, proc_gid_map_operations), 2582 REG("gid_map", S_IRUGO|S_IWUSR, proc_gid_map_operations),
2583 REG("projid_map", S_IRUGO|S_IWUSR, proc_projid_map_operations), 2583 REG("projid_map", S_IRUGO|S_IWUSR, proc_projid_map_operations),
2584 #endif 2584 #endif
2585 }; 2585 };
2586 2586
2587 static int proc_tgid_base_readdir(struct file * filp, 2587 static int proc_tgid_base_readdir(struct file * filp,
2588 void * dirent, filldir_t filldir) 2588 void * dirent, filldir_t filldir)
2589 { 2589 {
2590 return proc_pident_readdir(filp,dirent,filldir, 2590 return proc_pident_readdir(filp,dirent,filldir,
2591 tgid_base_stuff,ARRAY_SIZE(tgid_base_stuff)); 2591 tgid_base_stuff,ARRAY_SIZE(tgid_base_stuff));
2592 } 2592 }
2593 2593
2594 static const struct file_operations proc_tgid_base_operations = { 2594 static const struct file_operations proc_tgid_base_operations = {
2595 .read = generic_read_dir, 2595 .read = generic_read_dir,
2596 .readdir = proc_tgid_base_readdir, 2596 .readdir = proc_tgid_base_readdir,
2597 .llseek = default_llseek, 2597 .llseek = default_llseek,
2598 }; 2598 };
2599 2599
2600 static struct dentry *proc_tgid_base_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) 2600 static struct dentry *proc_tgid_base_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
2601 { 2601 {
2602 return proc_pident_lookup(dir, dentry, 2602 return proc_pident_lookup(dir, dentry,
2603 tgid_base_stuff, ARRAY_SIZE(tgid_base_stuff)); 2603 tgid_base_stuff, ARRAY_SIZE(tgid_base_stuff));
2604 } 2604 }
2605 2605
2606 static const struct inode_operations proc_tgid_base_inode_operations = { 2606 static const struct inode_operations proc_tgid_base_inode_operations = {
2607 .lookup = proc_tgid_base_lookup, 2607 .lookup = proc_tgid_base_lookup,
2608 .getattr = pid_getattr, 2608 .getattr = pid_getattr,
2609 .setattr = proc_setattr, 2609 .setattr = proc_setattr,
2610 .permission = proc_pid_permission, 2610 .permission = proc_pid_permission,
2611 }; 2611 };
2612 2612
2613 static void proc_flush_task_mnt(struct vfsmount *mnt, pid_t pid, pid_t tgid) 2613 static void proc_flush_task_mnt(struct vfsmount *mnt, pid_t pid, pid_t tgid)
2614 { 2614 {
2615 struct dentry *dentry, *leader, *dir; 2615 struct dentry *dentry, *leader, *dir;
2616 char buf[PROC_NUMBUF]; 2616 char buf[PROC_NUMBUF];
2617 struct qstr name; 2617 struct qstr name;
2618 2618
2619 name.name = buf; 2619 name.name = buf;
2620 name.len = snprintf(buf, sizeof(buf), "%d", pid); 2620 name.len = snprintf(buf, sizeof(buf), "%d", pid);
2621 /* no ->d_hash() rejects on procfs */ 2621 /* no ->d_hash() rejects on procfs */
2622 dentry = d_hash_and_lookup(mnt->mnt_root, &name); 2622 dentry = d_hash_and_lookup(mnt->mnt_root, &name);
2623 if (dentry) { 2623 if (dentry) {
2624 shrink_dcache_parent(dentry); 2624 shrink_dcache_parent(dentry);
2625 d_drop(dentry); 2625 d_drop(dentry);
2626 dput(dentry); 2626 dput(dentry);
2627 } 2627 }
2628 2628
2629 name.name = buf; 2629 name.name = buf;
2630 name.len = snprintf(buf, sizeof(buf), "%d", tgid); 2630 name.len = snprintf(buf, sizeof(buf), "%d", tgid);
2631 leader = d_hash_and_lookup(mnt->mnt_root, &name); 2631 leader = d_hash_and_lookup(mnt->mnt_root, &name);
2632 if (!leader) 2632 if (!leader)
2633 goto out; 2633 goto out;
2634 2634
2635 name.name = "task"; 2635 name.name = "task";
2636 name.len = strlen(name.name); 2636 name.len = strlen(name.name);
2637 dir = d_hash_and_lookup(leader, &name); 2637 dir = d_hash_and_lookup(leader, &name);
2638 if (!dir) 2638 if (!dir)
2639 goto out_put_leader; 2639 goto out_put_leader;
2640 2640
2641 name.name = buf; 2641 name.name = buf;
2642 name.len = snprintf(buf, sizeof(buf), "%d", pid); 2642 name.len = snprintf(buf, sizeof(buf), "%d", pid);
2643 dentry = d_hash_and_lookup(dir, &name); 2643 dentry = d_hash_and_lookup(dir, &name);
2644 if (dentry) { 2644 if (dentry) {
2645 shrink_dcache_parent(dentry); 2645 shrink_dcache_parent(dentry);
2646 d_drop(dentry); 2646 d_drop(dentry);
2647 dput(dentry); 2647 dput(dentry);
2648 } 2648 }
2649 2649
2650 dput(dir); 2650 dput(dir);
2651 out_put_leader: 2651 out_put_leader:
2652 dput(leader); 2652 dput(leader);
2653 out: 2653 out:
2654 return; 2654 return;
2655 } 2655 }
2656 2656
2657 /** 2657 /**
2658 * proc_flush_task - Remove dcache entries for @task from the /proc dcache. 2658 * proc_flush_task - Remove dcache entries for @task from the /proc dcache.
2659 * @task: task that should be flushed. 2659 * @task: task that should be flushed.
2660 * 2660 *
2661 * When flushing dentries from proc, one needs to flush them from global 2661 * When flushing dentries from proc, one needs to flush them from global
2662 * proc (proc_mnt) and from all the namespaces' procs this task was seen 2662 * proc (proc_mnt) and from all the namespaces' procs this task was seen
2663 * in. This call is supposed to do all of this work. 2663 * in. This call is supposed to do all of this work.
2664 * 2664 *
2665 * Looks in the dcache for 2665 * Looks in the dcache for
2666 * /proc/@pid 2666 * /proc/@pid
2667 * /proc/@tgid/task/@pid 2667 * /proc/@tgid/task/@pid
2668 * if either directory is present, flushes it and all of its children 2668 * if either directory is present, flushes it and all of its children
2669 * from the dcache. 2669 * from the dcache.
2670 * 2670 *
2671 * It is safe and reasonable to cache /proc entries for a task until 2671 * It is safe and reasonable to cache /proc entries for a task until
2672 * that task exits. After that they just clog up the dcache with 2672 * that task exits. After that they just clog up the dcache with
2673 * useless entries, possibly causing useful dcache entries to be 2673 * useless entries, possibly causing useful dcache entries to be
2674 * flushed instead. This routine is provided to flush those useless 2674 * flushed instead. This routine is provided to flush those useless
2675 * dcache entries at process exit time. 2675 * dcache entries at process exit time.
2676 * 2676 *
2677 * NOTE: This routine is just an optimization so it does not guarantee 2677 * NOTE: This routine is just an optimization so it does not guarantee
2678 * that no dcache entries will exist at process exit time; it 2678 * that no dcache entries will exist at process exit time; it
2679 * just makes it very unlikely that any will persist. 2679 * just makes it very unlikely that any will persist.
2680 */ 2680 */
2681 2681
2682 void proc_flush_task(struct task_struct *task) 2682 void proc_flush_task(struct task_struct *task)
2683 { 2683 {
2684 int i; 2684 int i;
2685 struct pid *pid, *tgid; 2685 struct pid *pid, *tgid;
2686 struct upid *upid; 2686 struct upid *upid;
2687 2687
2688 pid = task_pid(task); 2688 pid = task_pid(task);
2689 tgid = task_tgid(task); 2689 tgid = task_tgid(task);
2690 2690
2691 for (i = 0; i <= pid->level; i++) { 2691 for (i = 0; i <= pid->level; i++) {
2692 upid = &pid->numbers[i]; 2692 upid = &pid->numbers[i];
2693 proc_flush_task_mnt(upid->ns->proc_mnt, upid->nr, 2693 proc_flush_task_mnt(upid->ns->proc_mnt, upid->nr,
2694 tgid->numbers[i].nr); 2694 tgid->numbers[i].nr);
2695 } 2695 }
2696 } 2696 }
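
The flush above is what makes a reaped task's /proc directories disappear promptly from every pid namespace the task was visible in. As a hedged userspace illustration (not part of this patch; it only observes the behavior described in the comment above), stat()ing the child's /proc directory after waitpid() should report ENOENT:

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/wait.h>

int main(void)
{
	char path[64];
	struct stat st;
	pid_t pid = fork();

	if (pid < 0)
		return 1;
	if (pid == 0)
		_exit(0);			/* child exits immediately */

	snprintf(path, sizeof(path), "/proc/%d", pid);
	waitpid(pid, NULL, 0);			/* reap the child; its proc dentries get flushed */

	if (stat(path, &st) < 0)
		printf("%s: %s\n", path, strerror(errno));	/* expect ENOENT */
	return 0;
}
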
2697 2697
2698 static struct dentry *proc_pid_instantiate(struct inode *dir, 2698 static struct dentry *proc_pid_instantiate(struct inode *dir,
2699 struct dentry * dentry, 2699 struct dentry * dentry,
2700 struct task_struct *task, const void *ptr) 2700 struct task_struct *task, const void *ptr)
2701 { 2701 {
2702 struct dentry *error = ERR_PTR(-ENOENT); 2702 struct dentry *error = ERR_PTR(-ENOENT);
2703 struct inode *inode; 2703 struct inode *inode;
2704 2704
2705 inode = proc_pid_make_inode(dir->i_sb, task); 2705 inode = proc_pid_make_inode(dir->i_sb, task);
2706 if (!inode) 2706 if (!inode)
2707 goto out; 2707 goto out;
2708 2708
2709 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO; 2709 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
2710 inode->i_op = &proc_tgid_base_inode_operations; 2710 inode->i_op = &proc_tgid_base_inode_operations;
2711 inode->i_fop = &proc_tgid_base_operations; 2711 inode->i_fop = &proc_tgid_base_operations;
2712 inode->i_flags|=S_IMMUTABLE; 2712 inode->i_flags|=S_IMMUTABLE;
2713 2713
2714 set_nlink(inode, 2 + pid_entry_count_dirs(tgid_base_stuff, 2714 set_nlink(inode, 2 + pid_entry_count_dirs(tgid_base_stuff,
2715 ARRAY_SIZE(tgid_base_stuff))); 2715 ARRAY_SIZE(tgid_base_stuff)));
2716 2716
2717 d_set_d_op(dentry, &pid_dentry_operations); 2717 d_set_d_op(dentry, &pid_dentry_operations);
2718 2718
2719 d_add(dentry, inode); 2719 d_add(dentry, inode);
2720 /* Close the race of the process dying before we return the dentry */ 2720 /* Close the race of the process dying before we return the dentry */
2721 if (pid_revalidate(dentry, 0)) 2721 if (pid_revalidate(dentry, 0))
2722 error = NULL; 2722 error = NULL;
2723 out: 2723 out:
2724 return error; 2724 return error;
2725 } 2725 }
2726 2726
2727 struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, unsigned int flags) 2727 struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, unsigned int flags)
2728 { 2728 {
2729 struct dentry *result = NULL; 2729 struct dentry *result = NULL;
2730 struct task_struct *task; 2730 struct task_struct *task;
2731 unsigned tgid; 2731 unsigned tgid;
2732 struct pid_namespace *ns; 2732 struct pid_namespace *ns;
2733 2733
2734 tgid = name_to_int(dentry); 2734 tgid = name_to_int(dentry);
2735 if (tgid == ~0U) 2735 if (tgid == ~0U)
2736 goto out; 2736 goto out;
2737 2737
2738 ns = dentry->d_sb->s_fs_info; 2738 ns = dentry->d_sb->s_fs_info;
2739 rcu_read_lock(); 2739 rcu_read_lock();
2740 task = find_task_by_pid_ns(tgid, ns); 2740 task = find_task_by_pid_ns(tgid, ns);
2741 if (task) 2741 if (task)
2742 get_task_struct(task); 2742 get_task_struct(task);
2743 rcu_read_unlock(); 2743 rcu_read_unlock();
2744 if (!task) 2744 if (!task)
2745 goto out; 2745 goto out;
2746 2746
2747 result = proc_pid_instantiate(dir, dentry, task, NULL); 2747 result = proc_pid_instantiate(dir, dentry, task, NULL);
2748 put_task_struct(task); 2748 put_task_struct(task);
2749 out: 2749 out:
2750 return result; 2750 return result;
2751 } 2751 }
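
proc_pid_lookup() only proceeds when name_to_int() turns the dentry name into a valid number; any other name falls through and the lookup returns no result. A minimal userspace sketch of that parsing idea, where name_to_pid() is a hypothetical stand-in and not the kernel helper:

#include <stdio.h>

/* Hypothetical stand-in for the kernel's numeric-name check: accept only
 * canonical decimal strings, return ~0U for anything else. */
static unsigned name_to_pid(const char *name)
{
	unsigned n = 0;

	if (!*name || (*name == '0' && name[1]))	/* reject "" and leading zeros */
		return ~0U;
	for (; *name; name++) {
		if (*name < '0' || *name > '9')
			return ~0U;
		n = n * 10 + (unsigned)(*name - '0');
	}
	return n;
}

int main(void)
{
	/* "1234" parses; "01" and "self" do not, so they would not reach the pid lookup */
	printf("%u %u %u\n", name_to_pid("1234"), name_to_pid("01"), name_to_pid("self"));
	return 0;
}
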
2752 2752
2753 /* 2753 /*
2754 * Find the first task with tgid >= tgid 2754 * Find the first task with tgid >= tgid
2755 * 2755 *
2756 */ 2756 */
2757 struct tgid_iter { 2757 struct tgid_iter {
2758 unsigned int tgid; 2758 unsigned int tgid;
2759 struct task_struct *task; 2759 struct task_struct *task;
2760 }; 2760 };
2761 static struct tgid_iter next_tgid(struct pid_namespace *ns, struct tgid_iter iter) 2761 static struct tgid_iter next_tgid(struct pid_namespace *ns, struct tgid_iter iter)
2762 { 2762 {
2763 struct pid *pid; 2763 struct pid *pid;
2764 2764
2765 if (iter.task) 2765 if (iter.task)
2766 put_task_struct(iter.task); 2766 put_task_struct(iter.task);
2767 rcu_read_lock(); 2767 rcu_read_lock();
2768 retry: 2768 retry:
2769 iter.task = NULL; 2769 iter.task = NULL;
2770 pid = find_ge_pid(iter.tgid, ns); 2770 pid = find_ge_pid(iter.tgid, ns);
2771 if (pid) { 2771 if (pid) {
2772 iter.tgid = pid_nr_ns(pid, ns); 2772 iter.tgid = pid_nr_ns(pid, ns);
2773 iter.task = pid_task(pid, PIDTYPE_PID); 2773 iter.task = pid_task(pid, PIDTYPE_PID);
2774 /* What we want to know is if the pid we have found is the 2774 /* What we want to know is if the pid we have found is the
2775 * pid of a thread_group_leader. Testing for task 2775 * pid of a thread_group_leader. Testing for task
2776 * being a thread_group_leader is the obvious thing 2776 * being a thread_group_leader is the obvious thing
2777 * to do, but there is a window when it fails, due to 2777 * to do, but there is a window when it fails, due to
2778 * the pid transfer logic in de_thread. 2778 * the pid transfer logic in de_thread.
2779 * 2779 *
2780 * So we perform the straightforward test of seeing 2780 * So we perform the straightforward test of seeing
2781 * if the pid we have found is the pid of a thread 2781 * if the pid we have found is the pid of a thread
2782 * group leader, and don't worry if the task we have 2782 * group leader, and don't worry if the task we have
2783 * found doesn't happen to be a thread group leader. 2783 * found doesn't happen to be a thread group leader.
2784 * As we don't care in the case of readdir. 2784 * As we don't care in the case of readdir.
2785 */ 2785 */
2786 if (!iter.task || !has_group_leader_pid(iter.task)) { 2786 if (!iter.task || !has_group_leader_pid(iter.task)) {
2787 iter.tgid += 1; 2787 iter.tgid += 1;
2788 goto retry; 2788 goto retry;
2789 } 2789 }
2790 get_task_struct(iter.task); 2790 get_task_struct(iter.task);
2791 } 2791 }
2792 rcu_read_unlock(); 2792 rcu_read_unlock();
2793 return iter; 2793 return iter;
2794 } 2794 }
2795 2795
2796 #define TGID_OFFSET (FIRST_PROCESS_ENTRY) 2796 #define TGID_OFFSET (FIRST_PROCESS_ENTRY)
2797 2797
2798 static int proc_pid_fill_cache(struct file *filp, void *dirent, filldir_t filldir, 2798 static int proc_pid_fill_cache(struct file *filp, void *dirent, filldir_t filldir,
2799 struct tgid_iter iter) 2799 struct tgid_iter iter)
2800 { 2800 {
2801 char name[PROC_NUMBUF]; 2801 char name[PROC_NUMBUF];
2802 int len = snprintf(name, sizeof(name), "%d", iter.tgid); 2802 int len = snprintf(name, sizeof(name), "%d", iter.tgid);
2803 return proc_fill_cache(filp, dirent, filldir, name, len, 2803 return proc_fill_cache(filp, dirent, filldir, name, len,
2804 proc_pid_instantiate, iter.task, NULL); 2804 proc_pid_instantiate, iter.task, NULL);
2805 } 2805 }
2806 2806
2807 static int fake_filldir(void *buf, const char *name, int namelen, 2807 static int fake_filldir(void *buf, const char *name, int namelen,
2808 loff_t offset, u64 ino, unsigned d_type) 2808 loff_t offset, u64 ino, unsigned d_type)
2809 { 2809 {
2810 return 0; 2810 return 0;
2811 } 2811 }
2812 2812
2813 /* for the /proc/ directory itself, after non-process stuff has been done */ 2813 /* for the /proc/ directory itself, after non-process stuff has been done */
2814 int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir) 2814 int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
2815 { 2815 {
2816 struct tgid_iter iter; 2816 struct tgid_iter iter;
2817 struct pid_namespace *ns; 2817 struct pid_namespace *ns;
2818 filldir_t __filldir; 2818 filldir_t __filldir;
2819 2819
2820 if (filp->f_pos >= PID_MAX_LIMIT + TGID_OFFSET) 2820 if (filp->f_pos >= PID_MAX_LIMIT + TGID_OFFSET)
2821 goto out; 2821 goto out;
2822 2822
2823 ns = filp->f_dentry->d_sb->s_fs_info; 2823 ns = filp->f_dentry->d_sb->s_fs_info;
2824 iter.task = NULL; 2824 iter.task = NULL;
2825 iter.tgid = filp->f_pos - TGID_OFFSET; 2825 iter.tgid = filp->f_pos - TGID_OFFSET;
2826 for (iter = next_tgid(ns, iter); 2826 for (iter = next_tgid(ns, iter);
2827 iter.task; 2827 iter.task;
2828 iter.tgid += 1, iter = next_tgid(ns, iter)) { 2828 iter.tgid += 1, iter = next_tgid(ns, iter)) {
2829 if (has_pid_permissions(ns, iter.task, 2)) 2829 if (has_pid_permissions(ns, iter.task, 2))
2830 __filldir = filldir; 2830 __filldir = filldir;
2831 else 2831 else
2832 __filldir = fake_filldir; 2832 __filldir = fake_filldir;
2833 2833
2834 filp->f_pos = iter.tgid + TGID_OFFSET; 2834 filp->f_pos = iter.tgid + TGID_OFFSET;
2835 if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) { 2835 if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
2836 put_task_struct(iter.task); 2836 put_task_struct(iter.task);
2837 goto out; 2837 goto out;
2838 } 2838 }
2839 } 2839 }
2840 filp->f_pos = PID_MAX_LIMIT + TGID_OFFSET; 2840 filp->f_pos = PID_MAX_LIMIT + TGID_OFFSET;
2841 out: 2841 out:
2842 return 0; 2842 return 0;
2843 } 2843 }
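
The loop above emits one directory entry per visible thread-group leader and silently swaps in fake_filldir() for tasks the caller is not allowed to inspect (for example under hidepid mounts), so from userspace a restricted /proc simply looks like a shorter listing. A small hedged check, assuming a readable /proc mount:

#include <stdio.h>
#include <ctype.h>
#include <dirent.h>

int main(void)
{
	DIR *d = opendir("/proc");
	struct dirent *de;
	int count = 0;

	if (!d)
		return 1;
	while ((de = readdir(d)) != NULL)
		if (isdigit((unsigned char)de->d_name[0]))
			count++;		/* one numeric entry per visible tgid */
	closedir(d);
	printf("visible process directories: %d\n", count);
	return 0;
}
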
2844 2844
2845 /* 2845 /*
2846 * Tasks 2846 * Tasks
2847 */ 2847 */
2848 static const struct pid_entry tid_base_stuff[] = { 2848 static const struct pid_entry tid_base_stuff[] = {
2849 DIR("fd", S_IRUSR|S_IXUSR, proc_fd_inode_operations, proc_fd_operations), 2849 DIR("fd", S_IRUSR|S_IXUSR, proc_fd_inode_operations, proc_fd_operations),
2850 DIR("fdinfo", S_IRUSR|S_IXUSR, proc_fdinfo_inode_operations, proc_fdinfo_operations), 2850 DIR("fdinfo", S_IRUSR|S_IXUSR, proc_fdinfo_inode_operations, proc_fdinfo_operations),
2851 DIR("ns", S_IRUSR|S_IXUGO, proc_ns_dir_inode_operations, proc_ns_dir_operations), 2851 DIR("ns", S_IRUSR|S_IXUGO, proc_ns_dir_inode_operations, proc_ns_dir_operations),
2852 REG("environ", S_IRUSR, proc_environ_operations), 2852 REG("environ", S_IRUSR, proc_environ_operations),
2853 INF("auxv", S_IRUSR, proc_pid_auxv), 2853 INF("auxv", S_IRUSR, proc_pid_auxv),
2854 ONE("status", S_IRUGO, proc_pid_status), 2854 ONE("status", S_IRUGO, proc_pid_status),
2855 ONE("personality", S_IRUGO, proc_pid_personality), 2855 ONE("personality", S_IRUGO, proc_pid_personality),
2856 INF("limits", S_IRUGO, proc_pid_limits), 2856 INF("limits", S_IRUGO, proc_pid_limits),
2857 #ifdef CONFIG_SCHED_DEBUG 2857 #ifdef CONFIG_SCHED_DEBUG
2858 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations), 2858 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
2859 #endif 2859 #endif
2860 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations), 2860 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
2861 #ifdef CONFIG_HAVE_ARCH_TRACEHOOK 2861 #ifdef CONFIG_HAVE_ARCH_TRACEHOOK
2862 INF("syscall", S_IRUGO, proc_pid_syscall), 2862 INF("syscall", S_IRUGO, proc_pid_syscall),
2863 #endif 2863 #endif
2864 INF("cmdline", S_IRUGO, proc_pid_cmdline), 2864 INF("cmdline", S_IRUGO, proc_pid_cmdline),
2865 ONE("stat", S_IRUGO, proc_tid_stat), 2865 ONE("stat", S_IRUGO, proc_tid_stat),
2866 ONE("statm", S_IRUGO, proc_pid_statm), 2866 ONE("statm", S_IRUGO, proc_pid_statm),
2867 REG("maps", S_IRUGO, proc_tid_maps_operations), 2867 REG("maps", S_IRUGO, proc_tid_maps_operations),
2868 #ifdef CONFIG_CHECKPOINT_RESTORE 2868 #ifdef CONFIG_CHECKPOINT_RESTORE
2869 REG("children", S_IRUGO, proc_tid_children_operations), 2869 REG("children", S_IRUGO, proc_tid_children_operations),
2870 #endif 2870 #endif
2871 #ifdef CONFIG_NUMA 2871 #ifdef CONFIG_NUMA
2872 REG("numa_maps", S_IRUGO, proc_tid_numa_maps_operations), 2872 REG("numa_maps", S_IRUGO, proc_tid_numa_maps_operations),
2873 #endif 2873 #endif
2874 REG("mem", S_IRUSR|S_IWUSR, proc_mem_operations), 2874 REG("mem", S_IRUSR|S_IWUSR, proc_mem_operations),
2875 LNK("cwd", proc_cwd_link), 2875 LNK("cwd", proc_cwd_link),
2876 LNK("root", proc_root_link), 2876 LNK("root", proc_root_link),
2877 LNK("exe", proc_exe_link), 2877 LNK("exe", proc_exe_link),
2878 REG("mounts", S_IRUGO, proc_mounts_operations), 2878 REG("mounts", S_IRUGO, proc_mounts_operations),
2879 REG("mountinfo", S_IRUGO, proc_mountinfo_operations), 2879 REG("mountinfo", S_IRUGO, proc_mountinfo_operations),
2880 #ifdef CONFIG_PROC_PAGE_MONITOR 2880 #ifdef CONFIG_PROC_PAGE_MONITOR
2881 REG("clear_refs", S_IWUSR, proc_clear_refs_operations), 2881 REG("clear_refs", S_IWUSR, proc_clear_refs_operations),
2882 REG("smaps", S_IRUGO, proc_tid_smaps_operations), 2882 REG("smaps", S_IRUGO, proc_tid_smaps_operations),
2883 REG("pagemap", S_IRUGO, proc_pagemap_operations), 2883 REG("pagemap", S_IRUGO, proc_pagemap_operations),
2884 #endif 2884 #endif
2885 #ifdef CONFIG_SECURITY 2885 #ifdef CONFIG_SECURITY
2886 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations), 2886 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
2887 #endif 2887 #endif
2888 #ifdef CONFIG_KALLSYMS 2888 #ifdef CONFIG_KALLSYMS
2889 INF("wchan", S_IRUGO, proc_pid_wchan), 2889 INF("wchan", S_IRUGO, proc_pid_wchan),
2890 #endif 2890 #endif
2891 #ifdef CONFIG_STACKTRACE 2891 #ifdef CONFIG_STACKTRACE
2892 ONE("stack", S_IRUGO, proc_pid_stack), 2892 ONE("stack", S_IRUGO, proc_pid_stack),
2893 #endif 2893 #endif
2894 #ifdef CONFIG_SCHEDSTATS 2894 #ifdef CONFIG_SCHEDSTATS
2895 INF("schedstat", S_IRUGO, proc_pid_schedstat), 2895 INF("schedstat", S_IRUGO, proc_pid_schedstat),
2896 #endif 2896 #endif
2897 #ifdef CONFIG_LATENCYTOP 2897 #ifdef CONFIG_LATENCYTOP
2898 REG("latency", S_IRUGO, proc_lstats_operations), 2898 REG("latency", S_IRUGO, proc_lstats_operations),
2899 #endif 2899 #endif
2900 #ifdef CONFIG_PROC_PID_CPUSET 2900 #ifdef CONFIG_PROC_PID_CPUSET
2901 REG("cpuset", S_IRUGO, proc_cpuset_operations), 2901 REG("cpuset", S_IRUGO, proc_cpuset_operations),
2902 #endif 2902 #endif
2903 #ifdef CONFIG_CGROUPS 2903 #ifdef CONFIG_CGROUPS
2904 REG("cgroup", S_IRUGO, proc_cgroup_operations), 2904 REG("cgroup", S_IRUGO, proc_cgroup_operations),
2905 #endif 2905 #endif
2906 INF("oom_score", S_IRUGO, proc_oom_score), 2906 INF("oom_score", S_IRUGO, proc_oom_score),
2907 REG("oom_adj", S_IRUGO|S_IWUSR, proc_oom_adj_operations), 2907 REG("oom_adj", S_IRUGO|S_IWUSR, proc_oom_adj_operations),
2908 REG("oom_score_adj", S_IRUGO|S_IWUSR, proc_oom_score_adj_operations), 2908 REG("oom_score_adj", S_IRUGO|S_IWUSR, proc_oom_score_adj_operations),
2909 #ifdef CONFIG_AUDITSYSCALL 2909 #ifdef CONFIG_AUDITSYSCALL
2910 REG("loginuid", S_IWUSR|S_IRUGO, proc_loginuid_operations), 2910 REG("loginuid", S_IWUSR|S_IRUGO, proc_loginuid_operations),
2911 REG("sessionid", S_IRUGO, proc_sessionid_operations), 2911 REG("sessionid", S_IRUGO, proc_sessionid_operations),
2912 #endif 2912 #endif
2913 #ifdef CONFIG_FAULT_INJECTION 2913 #ifdef CONFIG_FAULT_INJECTION
2914 REG("make-it-fail", S_IRUGO|S_IWUSR, proc_fault_inject_operations), 2914 REG("make-it-fail", S_IRUGO|S_IWUSR, proc_fault_inject_operations),
2915 #endif 2915 #endif
2916 #ifdef CONFIG_TASK_IO_ACCOUNTING 2916 #ifdef CONFIG_TASK_IO_ACCOUNTING
2917 INF("io", S_IRUSR, proc_tid_io_accounting), 2917 INF("io", S_IRUSR, proc_tid_io_accounting),
2918 #endif 2918 #endif
2919 #ifdef CONFIG_HARDWALL 2919 #ifdef CONFIG_HARDWALL
2920 INF("hardwall", S_IRUGO, proc_pid_hardwall), 2920 INF("hardwall", S_IRUGO, proc_pid_hardwall),
2921 #endif 2921 #endif
2922 #ifdef CONFIG_USER_NS 2922 #ifdef CONFIG_USER_NS
2923 REG("uid_map", S_IRUGO|S_IWUSR, proc_uid_map_operations), 2923 REG("uid_map", S_IRUGO|S_IWUSR, proc_uid_map_operations),
2924 REG("gid_map", S_IRUGO|S_IWUSR, proc_gid_map_operations), 2924 REG("gid_map", S_IRUGO|S_IWUSR, proc_gid_map_operations),
2925 REG("projid_map", S_IRUGO|S_IWUSR, proc_projid_map_operations), 2925 REG("projid_map", S_IRUGO|S_IWUSR, proc_projid_map_operations),
2926 #endif 2926 #endif
2927 }; 2927 };
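
The table above is what populates each /proc/<tgid>/task/<tid>/ directory. As a quick, hedged illustration, the per-thread "comm" entry from that table can be read back for the calling thread; the path layout is taken from the table, and gettid via syscall(2) is assumed because older glibc versions lack a wrapper:

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	char path[64], comm[64];
	FILE *f;

	snprintf(path, sizeof(path), "/proc/self/task/%ld/comm",
		 (long)syscall(SYS_gettid));
	f = fopen(path, "r");
	if (!f)
		return 1;
	if (fgets(comm, sizeof(comm), f))
		printf("comm: %s", comm);	/* value includes a trailing newline */
	fclose(f);
	return 0;
}
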
2928 2928
2929 static int proc_tid_base_readdir(struct file * filp, 2929 static int proc_tid_base_readdir(struct file * filp,
2930 void * dirent, filldir_t filldir) 2930 void * dirent, filldir_t filldir)
2931 { 2931 {
2932 return proc_pident_readdir(filp,dirent,filldir, 2932 return proc_pident_readdir(filp,dirent,filldir,
2933 tid_base_stuff,ARRAY_SIZE(tid_base_stuff)); 2933 tid_base_stuff,ARRAY_SIZE(tid_base_stuff));
2934 } 2934 }
2935 2935
2936 static struct dentry *proc_tid_base_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) 2936 static struct dentry *proc_tid_base_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
2937 { 2937 {
2938 return proc_pident_lookup(dir, dentry, 2938 return proc_pident_lookup(dir, dentry,
2939 tid_base_stuff, ARRAY_SIZE(tid_base_stuff)); 2939 tid_base_stuff, ARRAY_SIZE(tid_base_stuff));
2940 } 2940 }
2941 2941
2942 static const struct file_operations proc_tid_base_operations = { 2942 static const struct file_operations proc_tid_base_operations = {
2943 .read = generic_read_dir, 2943 .read = generic_read_dir,
2944 .readdir = proc_tid_base_readdir, 2944 .readdir = proc_tid_base_readdir,
2945 .llseek = default_llseek, 2945 .llseek = default_llseek,
2946 }; 2946 };
2947 2947
2948 static const struct inode_operations proc_tid_base_inode_operations = { 2948 static const struct inode_operations proc_tid_base_inode_operations = {
2949 .lookup = proc_tid_base_lookup, 2949 .lookup = proc_tid_base_lookup,
2950 .getattr = pid_getattr, 2950 .getattr = pid_getattr,
2951 .setattr = proc_setattr, 2951 .setattr = proc_setattr,
2952 }; 2952 };
2953 2953
2954 static struct dentry *proc_task_instantiate(struct inode *dir, 2954 static struct dentry *proc_task_instantiate(struct inode *dir,
2955 struct dentry *dentry, struct task_struct *task, const void *ptr) 2955 struct dentry *dentry, struct task_struct *task, const void *ptr)
2956 { 2956 {
2957 struct dentry *error = ERR_PTR(-ENOENT); 2957 struct dentry *error = ERR_PTR(-ENOENT);
2958 struct inode *inode; 2958 struct inode *inode;
2959 inode = proc_pid_make_inode(dir->i_sb, task); 2959 inode = proc_pid_make_inode(dir->i_sb, task);
2960 2960
2961 if (!inode) 2961 if (!inode)
2962 goto out; 2962 goto out;
2963 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO; 2963 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
2964 inode->i_op = &proc_tid_base_inode_operations; 2964 inode->i_op = &proc_tid_base_inode_operations;
2965 inode->i_fop = &proc_tid_base_operations; 2965 inode->i_fop = &proc_tid_base_operations;
2966 inode->i_flags|=S_IMMUTABLE; 2966 inode->i_flags|=S_IMMUTABLE;
2967 2967
2968 set_nlink(inode, 2 + pid_entry_count_dirs(tid_base_stuff, 2968 set_nlink(inode, 2 + pid_entry_count_dirs(tid_base_stuff,
2969 ARRAY_SIZE(tid_base_stuff))); 2969 ARRAY_SIZE(tid_base_stuff)));
2970 2970
2971 d_set_d_op(dentry, &pid_dentry_operations); 2971 d_set_d_op(dentry, &pid_dentry_operations);
2972 2972
2973 d_add(dentry, inode); 2973 d_add(dentry, inode);
2974 /* Close the race of the process dying before we return the dentry */ 2974 /* Close the race of the process dying before we return the dentry */
2975 if (pid_revalidate(dentry, 0)) 2975 if (pid_revalidate(dentry, 0))
2976 error = NULL; 2976 error = NULL;
2977 out: 2977 out:
2978 return error; 2978 return error;
2979 } 2979 }
2980 2980
2981 static struct dentry *proc_task_lookup(struct inode *dir, struct dentry * dentry, unsigned int flags) 2981 static struct dentry *proc_task_lookup(struct inode *dir, struct dentry * dentry, unsigned int flags)
2982 { 2982 {
2983 struct dentry *result = ERR_PTR(-ENOENT); 2983 struct dentry *result = ERR_PTR(-ENOENT);
2984 struct task_struct *task; 2984 struct task_struct *task;
2985 struct task_struct *leader = get_proc_task(dir); 2985 struct task_struct *leader = get_proc_task(dir);
2986 unsigned tid; 2986 unsigned tid;
2987 struct pid_namespace *ns; 2987 struct pid_namespace *ns;
2988 2988
2989 if (!leader) 2989 if (!leader)
2990 goto out_no_task; 2990 goto out_no_task;
2991 2991
2992 tid = name_to_int(dentry); 2992 tid = name_to_int(dentry);
2993 if (tid == ~0U) 2993 if (tid == ~0U)
2994 goto out; 2994 goto out;
2995 2995
2996 ns = dentry->d_sb->s_fs_info; 2996 ns = dentry->d_sb->s_fs_info;
2997 rcu_read_lock(); 2997 rcu_read_lock();
2998 task = find_task_by_pid_ns(tid, ns); 2998 task = find_task_by_pid_ns(tid, ns);
2999 if (task) 2999 if (task)
3000 get_task_struct(task); 3000 get_task_struct(task);
3001 rcu_read_unlock(); 3001 rcu_read_unlock();
3002 if (!task) 3002 if (!task)
3003 goto out; 3003 goto out;
3004 if (!same_thread_group(leader, task)) 3004 if (!same_thread_group(leader, task))
3005 goto out_drop_task; 3005 goto out_drop_task;
3006 3006
3007 result = proc_task_instantiate(dir, dentry, task, NULL); 3007 result = proc_task_instantiate(dir, dentry, task, NULL);
3008 out_drop_task: 3008 out_drop_task:
3009 put_task_struct(task); 3009 put_task_struct(task);
3010 out: 3010 out:
3011 put_task_struct(leader); 3011 put_task_struct(leader);
3012 out_no_task: 3012 out_no_task:
3013 return result; 3013 return result;
3014 } 3014 }
3015 3015
3016 /* 3016 /*
3017 * Find the first tid of a thread group to return to user space. 3017 * Find the first tid of a thread group to return to user space.
3018 * 3018 *
3019 * Usually this is just the thread group leader, but if the user's 3019 * Usually this is just the thread group leader, but if the user's
3020 * buffer was too small or there was a seek into the middle of the 3020 * buffer was too small or there was a seek into the middle of the
3021 * directory, we have more work to do. 3021 * directory, we have more work to do.
3022 * 3022 *
3023 * In the case of a short read we start with find_task_by_pid. 3023 * In the case of a short read we start with find_task_by_pid.
3024 * 3024 *
3025 * In the case of a seek we start with the leader and walk nr 3025 * In the case of a seek we start with the leader and walk nr
3026 * threads past it. 3026 * threads past it.
3027 */ 3027 */
3028 static struct task_struct *first_tid(struct task_struct *leader, 3028 static struct task_struct *first_tid(struct task_struct *leader,
3029 int tid, int nr, struct pid_namespace *ns) 3029 int tid, int nr, struct pid_namespace *ns)
3030 { 3030 {
3031 struct task_struct *pos; 3031 struct task_struct *pos;
3032 3032
3033 rcu_read_lock(); 3033 rcu_read_lock();
3034 /* Attempt to start with the pid of a thread */ 3034 /* Attempt to start with the pid of a thread */
3035 if (tid && (nr > 0)) { 3035 if (tid && (nr > 0)) {
3036 pos = find_task_by_pid_ns(tid, ns); 3036 pos = find_task_by_pid_ns(tid, ns);
3037 if (pos && (pos->group_leader == leader)) 3037 if (pos && (pos->group_leader == leader))
3038 goto found; 3038 goto found;
3039 } 3039 }
3040 3040
3041 /* If nr exceeds the number of threads there is nothing to do */ 3041 /* If nr exceeds the number of threads there is nothing to do */
3042 pos = NULL; 3042 pos = NULL;
3043 if (nr && nr >= get_nr_threads(leader)) 3043 if (nr && nr >= get_nr_threads(leader))
3044 goto out; 3044 goto out;
3045 3045
3046 /* If we haven't found our starting place yet start 3046 /* If we haven't found our starting place yet start
3047 * with the leader and walk nr threads forward. 3047 * with the leader and walk nr threads forward.
3048 */ 3048 */
3049 for (pos = leader; nr > 0; --nr) { 3049 for (pos = leader; nr > 0; --nr) {
3050 pos = next_thread(pos); 3050 pos = next_thread(pos);
3051 if (pos == leader) { 3051 if (pos == leader) {
3052 pos = NULL; 3052 pos = NULL;
3053 goto out; 3053 goto out;
3054 } 3054 }
3055 } 3055 }
3056 found: 3056 found:
3057 get_task_struct(pos); 3057 get_task_struct(pos);
3058 out: 3058 out:
3059 rcu_read_unlock(); 3059 rcu_read_unlock();
3060 return pos; 3060 return pos;
3061 } 3061 }
3062 3062
3063 /* 3063 /*
3064 * Find the next thread in the thread list. 3064 * Find the next thread in the thread list.
3065 * Return NULL if there is an error or no next thread. 3065 * Return NULL if there is an error or no next thread.
3066 * 3066 *
3067 * The reference to the input task_struct is released. 3067 * The reference to the input task_struct is released.
3068 */ 3068 */
3069 static struct task_struct *next_tid(struct task_struct *start) 3069 static struct task_struct *next_tid(struct task_struct *start)
3070 { 3070 {
3071 struct task_struct *pos = NULL; 3071 struct task_struct *pos = NULL;
3072 rcu_read_lock(); 3072 rcu_read_lock();
3073 if (pid_alive(start)) { 3073 if (pid_alive(start)) {
3074 pos = next_thread(start); 3074 pos = next_thread(start);
3075 if (thread_group_leader(pos)) 3075 if (thread_group_leader(pos))
3076 pos = NULL; 3076 pos = NULL;
3077 else 3077 else
3078 get_task_struct(pos); 3078 get_task_struct(pos);
3079 } 3079 }
3080 rcu_read_unlock(); 3080 rcu_read_unlock();
3081 put_task_struct(start); 3081 put_task_struct(start);
3082 return pos; 3082 return pos;
3083 } 3083 }
3084 3084
3085 static int proc_task_fill_cache(struct file *filp, void *dirent, filldir_t filldir, 3085 static int proc_task_fill_cache(struct file *filp, void *dirent, filldir_t filldir,
3086 struct task_struct *task, int tid) 3086 struct task_struct *task, int tid)
3087 { 3087 {
3088 char name[PROC_NUMBUF]; 3088 char name[PROC_NUMBUF];
3089 int len = snprintf(name, sizeof(name), "%d", tid); 3089 int len = snprintf(name, sizeof(name), "%d", tid);
3090 return proc_fill_cache(filp, dirent, filldir, name, len, 3090 return proc_fill_cache(filp, dirent, filldir, name, len,
3091 proc_task_instantiate, task, NULL); 3091 proc_task_instantiate, task, NULL);
3092 } 3092 }
3093 3093
3094 /* for the /proc/TGID/task/ directories */ 3094 /* for the /proc/TGID/task/ directories */
3095 static int proc_task_readdir(struct file * filp, void * dirent, filldir_t filldir) 3095 static int proc_task_readdir(struct file * filp, void * dirent, filldir_t filldir)
3096 { 3096 {
3097 struct dentry *dentry = filp->f_path.dentry; 3097 struct dentry *dentry = filp->f_path.dentry;
3098 struct inode *inode = dentry->d_inode; 3098 struct inode *inode = dentry->d_inode;
3099 struct task_struct *leader = NULL; 3099 struct task_struct *leader = NULL;
3100 struct task_struct *task; 3100 struct task_struct *task;
3101 int retval = -ENOENT; 3101 int retval = -ENOENT;
3102 ino_t ino; 3102 ino_t ino;
3103 int tid; 3103 int tid;
3104 struct pid_namespace *ns; 3104 struct pid_namespace *ns;
3105 3105
3106 task = get_proc_task(inode); 3106 task = get_proc_task(inode);
3107 if (!task) 3107 if (!task)
3108 goto out_no_task; 3108 goto out_no_task;
3109 rcu_read_lock(); 3109 rcu_read_lock();
3110 if (pid_alive(task)) { 3110 if (pid_alive(task)) {
3111 leader = task->group_leader; 3111 leader = task->group_leader;
3112 get_task_struct(leader); 3112 get_task_struct(leader);
3113 } 3113 }
3114 rcu_read_unlock(); 3114 rcu_read_unlock();
3115 put_task_struct(task); 3115 put_task_struct(task);
3116 if (!leader) 3116 if (!leader)
3117 goto out_no_task; 3117 goto out_no_task;
3118 retval = 0; 3118 retval = 0;
3119 3119
3120 switch ((unsigned long)filp->f_pos) { 3120 switch ((unsigned long)filp->f_pos) {
3121 case 0: 3121 case 0:
3122 ino = inode->i_ino; 3122 ino = inode->i_ino;
3123 if (filldir(dirent, ".", 1, filp->f_pos, ino, DT_DIR) < 0) 3123 if (filldir(dirent, ".", 1, filp->f_pos, ino, DT_DIR) < 0)
3124 goto out; 3124 goto out;
3125 filp->f_pos++; 3125 filp->f_pos++;
3126 /* fall through */ 3126 /* fall through */
3127 case 1: 3127 case 1:
3128 ino = parent_ino(dentry); 3128 ino = parent_ino(dentry);
3129 if (filldir(dirent, "..", 2, filp->f_pos, ino, DT_DIR) < 0) 3129 if (filldir(dirent, "..", 2, filp->f_pos, ino, DT_DIR) < 0)
3130 goto out; 3130 goto out;
3131 filp->f_pos++; 3131 filp->f_pos++;
3132 /* fall through */ 3132 /* fall through */
3133 } 3133 }
3134 3134
3135 /* f_version caches the tgid value that the last readdir call couldn't 3135 /* f_version caches the tgid value that the last readdir call couldn't
3136 * return. lseek aka telldir automagically resets f_version to 0. 3136 * return. lseek aka telldir automagically resets f_version to 0.
3137 */ 3137 */
3138 ns = filp->f_dentry->d_sb->s_fs_info; 3138 ns = filp->f_dentry->d_sb->s_fs_info;
3139 tid = (int)filp->f_version; 3139 tid = (int)filp->f_version;
3140 filp->f_version = 0; 3140 filp->f_version = 0;
3141 for (task = first_tid(leader, tid, filp->f_pos - 2, ns); 3141 for (task = first_tid(leader, tid, filp->f_pos - 2, ns);
3142 task; 3142 task;
3143 task = next_tid(task), filp->f_pos++) { 3143 task = next_tid(task), filp->f_pos++) {
3144 tid = task_pid_nr_ns(task, ns); 3144 tid = task_pid_nr_ns(task, ns);
3145 if (proc_task_fill_cache(filp, dirent, filldir, task, tid) < 0) { 3145 if (proc_task_fill_cache(filp, dirent, filldir, task, tid) < 0) {
3146 /* returning this tgid failed, save it as the first 3146 /* returning this tgid failed, save it as the first
3147 * pid for the next readdir call */ 3147 * pid for the next readdir call */
3148 filp->f_version = (u64)tid; 3148 filp->f_version = (u64)tid;
3149 put_task_struct(task); 3149 put_task_struct(task);
3150 break; 3150 break;
3151 } 3151 }
3152 } 3152 }
3153 out: 3153 out:
3154 put_task_struct(leader); 3154 put_task_struct(leader);
3155 out_no_task: 3155 out_no_task:
3156 return retval; 3156 return retval;
3157 } 3157 }
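
Seen from userspace, the readdir above is what fills /proc/<tgid>/task/ with one numeric entry per thread of the group. A minimal, hedged listing of the current process's threads, assuming a standard /proc mount:

#include <stdio.h>
#include <ctype.h>
#include <dirent.h>
#include <unistd.h>

int main(void)
{
	char path[64];
	DIR *d;
	struct dirent *de;

	snprintf(path, sizeof(path), "/proc/%d/task", getpid());
	d = opendir(path);
	if (!d)
		return 1;
	while ((de = readdir(d)) != NULL)
		if (isdigit((unsigned char)de->d_name[0]))
			printf("tid %s\n", de->d_name);	/* one line per thread */
	closedir(d);
	return 0;
}
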
3158 3158
3159 static int proc_task_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) 3159 static int proc_task_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
3160 { 3160 {
3161 struct inode *inode = dentry->d_inode; 3161 struct inode *inode = dentry->d_inode;
3162 struct task_struct *p = get_proc_task(inode); 3162 struct task_struct *p = get_proc_task(inode);
3163 generic_fillattr(inode, stat); 3163 generic_fillattr(inode, stat);
3164 3164
3165 if (p) { 3165 if (p) {
3166 stat->nlink += get_nr_threads(p); 3166 stat->nlink += get_nr_threads(p);
3167 put_task_struct(p); 3167 put_task_struct(p);
3168 } 3168 }
3169 3169
3170 return 0; 3170 return 0;
3171 } 3171 }
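
Because the getattr above adds get_nr_threads() to nlink, the link count reported for the task/ directory grows with the thread count. A hedged way to observe it (a single-threaded program should see a small constant; the exact base value is left unspecified here):

#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	struct stat st;

	if (stat("/proc/self/task", &st) != 0)
		return 1;
	printf("nlink of /proc/self/task: %lu\n", (unsigned long)st.st_nlink);
	return 0;
}
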
3172 3172
3173 static const struct inode_operations proc_task_inode_operations = { 3173 static const struct inode_operations proc_task_inode_operations = {
3174 .lookup = proc_task_lookup, 3174 .lookup = proc_task_lookup,
3175 .getattr = proc_task_getattr, 3175 .getattr = proc_task_getattr,
3176 .setattr = proc_setattr, 3176 .setattr = proc_setattr,
3177 .permission = proc_pid_permission, 3177 .permission = proc_pid_permission,
3178 }; 3178 };
3179 3179
3180 static const struct file_operations proc_task_operations = { 3180 static const struct file_operations proc_task_operations = {
3181 .read = generic_read_dir, 3181 .read = generic_read_dir,
3182 .readdir = proc_task_readdir, 3182 .readdir = proc_task_readdir,
3183 .llseek = default_llseek, 3183 .llseek = default_llseek,
3184 }; 3184 };
3185 3185
1 /* 1 /*
2 * file.c 2 * file.c
3 * 3 *
4 * PURPOSE 4 * PURPOSE
5 * File handling routines for the OSTA-UDF(tm) filesystem. 5 * File handling routines for the OSTA-UDF(tm) filesystem.
6 * 6 *
7 * COPYRIGHT 7 * COPYRIGHT
8 * This file is distributed under the terms of the GNU General Public 8 * This file is distributed under the terms of the GNU General Public
9 * License (GPL). Copies of the GPL can be obtained from: 9 * License (GPL). Copies of the GPL can be obtained from:
10 * ftp://prep.ai.mit.edu/pub/gnu/GPL 10 * ftp://prep.ai.mit.edu/pub/gnu/GPL
11 * Each contributing author retains all rights to their own work. 11 * Each contributing author retains all rights to their own work.
12 * 12 *
13 * (C) 1998-1999 Dave Boynton 13 * (C) 1998-1999 Dave Boynton
14 * (C) 1998-2004 Ben Fennema 14 * (C) 1998-2004 Ben Fennema
15 * (C) 1999-2000 Stelias Computing Inc 15 * (C) 1999-2000 Stelias Computing Inc
16 * 16 *
17 * HISTORY 17 * HISTORY
18 * 18 *
19 * 10/02/98 dgb Attempt to integrate into udf.o 19 * 10/02/98 dgb Attempt to integrate into udf.o
20 * 10/07/98 Switched to using generic_readpage, etc., like isofs 20 * 10/07/98 Switched to using generic_readpage, etc., like isofs
21 * And it works! 21 * And it works!
22 * 12/06/98 blf Added udf_file_read. uses generic_file_read for all cases but 22 * 12/06/98 blf Added udf_file_read. uses generic_file_read for all cases but
23 * ICBTAG_FLAG_AD_IN_ICB. 23 * ICBTAG_FLAG_AD_IN_ICB.
24 * 04/06/99 64 bit file handling on 32 bit systems taken from ext2 file.c 24 * 04/06/99 64 bit file handling on 32 bit systems taken from ext2 file.c
25 * 05/12/99 Preliminary file write support 25 * 05/12/99 Preliminary file write support
26 */ 26 */
27 27
28 #include "udfdecl.h" 28 #include "udfdecl.h"
29 #include <linux/fs.h> 29 #include <linux/fs.h>
30 #include <asm/uaccess.h> 30 #include <asm/uaccess.h>
31 #include <linux/kernel.h> 31 #include <linux/kernel.h>
32 #include <linux/string.h> /* memset */ 32 #include <linux/string.h> /* memset */
33 #include <linux/capability.h> 33 #include <linux/capability.h>
34 #include <linux/errno.h> 34 #include <linux/errno.h>
35 #include <linux/pagemap.h> 35 #include <linux/pagemap.h>
36 #include <linux/buffer_head.h> 36 #include <linux/buffer_head.h>
37 #include <linux/aio.h> 37 #include <linux/aio.h>
38 38
39 #include "udf_i.h" 39 #include "udf_i.h"
40 #include "udf_sb.h" 40 #include "udf_sb.h"
41 41
42 static void __udf_adinicb_readpage(struct page *page) 42 static void __udf_adinicb_readpage(struct page *page)
43 { 43 {
44 struct inode *inode = page->mapping->host; 44 struct inode *inode = page->mapping->host;
45 char *kaddr; 45 char *kaddr;
46 struct udf_inode_info *iinfo = UDF_I(inode); 46 struct udf_inode_info *iinfo = UDF_I(inode);
47 47
48 kaddr = kmap(page); 48 kaddr = kmap(page);
49 memcpy(kaddr, iinfo->i_ext.i_data + iinfo->i_lenEAttr, inode->i_size); 49 memcpy(kaddr, iinfo->i_ext.i_data + iinfo->i_lenEAttr, inode->i_size);
50 memset(kaddr + inode->i_size, 0, PAGE_CACHE_SIZE - inode->i_size); 50 memset(kaddr + inode->i_size, 0, PAGE_CACHE_SIZE - inode->i_size);
51 flush_dcache_page(page); 51 flush_dcache_page(page);
52 SetPageUptodate(page); 52 SetPageUptodate(page);
53 kunmap(page); 53 kunmap(page);
54 } 54 }
55 55
56 static int udf_adinicb_readpage(struct file *file, struct page *page) 56 static int udf_adinicb_readpage(struct file *file, struct page *page)
57 { 57 {
58 BUG_ON(!PageLocked(page)); 58 BUG_ON(!PageLocked(page));
59 __udf_adinicb_readpage(page); 59 __udf_adinicb_readpage(page);
60 unlock_page(page); 60 unlock_page(page);
61 61
62 return 0; 62 return 0;
63 } 63 }
64 64
65 static int udf_adinicb_writepage(struct page *page, 65 static int udf_adinicb_writepage(struct page *page,
66 struct writeback_control *wbc) 66 struct writeback_control *wbc)
67 { 67 {
68 struct inode *inode = page->mapping->host; 68 struct inode *inode = page->mapping->host;
69 char *kaddr; 69 char *kaddr;
70 struct udf_inode_info *iinfo = UDF_I(inode); 70 struct udf_inode_info *iinfo = UDF_I(inode);
71 71
72 BUG_ON(!PageLocked(page)); 72 BUG_ON(!PageLocked(page));
73 73
74 kaddr = kmap(page); 74 kaddr = kmap(page);
75 memcpy(iinfo->i_ext.i_data + iinfo->i_lenEAttr, kaddr, inode->i_size); 75 memcpy(iinfo->i_ext.i_data + iinfo->i_lenEAttr, kaddr, inode->i_size);
76 mark_inode_dirty(inode); 76 mark_inode_dirty(inode);
77 SetPageUptodate(page); 77 SetPageUptodate(page);
78 kunmap(page); 78 kunmap(page);
79 unlock_page(page); 79 unlock_page(page);
80 80
81 return 0; 81 return 0;
82 } 82 }
83 83
84 static int udf_adinicb_write_begin(struct file *file, 84 static int udf_adinicb_write_begin(struct file *file,
85 struct address_space *mapping, loff_t pos, 85 struct address_space *mapping, loff_t pos,
86 unsigned len, unsigned flags, struct page **pagep, 86 unsigned len, unsigned flags, struct page **pagep,
87 void **fsdata) 87 void **fsdata)
88 { 88 {
89 struct page *page; 89 struct page *page;
90 90
91 if (WARN_ON_ONCE(pos >= PAGE_CACHE_SIZE)) 91 if (WARN_ON_ONCE(pos >= PAGE_CACHE_SIZE))
92 return -EIO; 92 return -EIO;
93 page = grab_cache_page_write_begin(mapping, 0, flags); 93 page = grab_cache_page_write_begin(mapping, 0, flags);
94 if (!page) 94 if (!page)
95 return -ENOMEM; 95 return -ENOMEM;
96 *pagep = page; 96 *pagep = page;
97 97
98 if (!PageUptodate(page) && len != PAGE_CACHE_SIZE) 98 if (!PageUptodate(page) && len != PAGE_CACHE_SIZE)
99 __udf_adinicb_readpage(page); 99 __udf_adinicb_readpage(page);
100 return 0; 100 return 0;
101 } 101 }
102 102
103 static int udf_adinicb_write_end(struct file *file, 103 static int udf_adinicb_write_end(struct file *file,
104 struct address_space *mapping, 104 struct address_space *mapping,
105 loff_t pos, unsigned len, unsigned copied, 105 loff_t pos, unsigned len, unsigned copied,
106 struct page *page, void *fsdata) 106 struct page *page, void *fsdata)
107 { 107 {
108 struct inode *inode = mapping->host; 108 struct inode *inode = mapping->host;
109 unsigned offset = pos & (PAGE_CACHE_SIZE - 1); 109 unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
110 char *kaddr; 110 char *kaddr;
111 struct udf_inode_info *iinfo = UDF_I(inode); 111 struct udf_inode_info *iinfo = UDF_I(inode);
112 112
113 kaddr = kmap_atomic(page); 113 kaddr = kmap_atomic(page);
114 memcpy(iinfo->i_ext.i_data + iinfo->i_lenEAttr + offset, 114 memcpy(iinfo->i_ext.i_data + iinfo->i_lenEAttr + offset,
115 kaddr + offset, copied); 115 kaddr + offset, copied);
116 kunmap_atomic(kaddr); 116 kunmap_atomic(kaddr);
117 117
118 return simple_write_end(file, mapping, pos, len, copied, page, fsdata); 118 return simple_write_end(file, mapping, pos, len, copied, page, fsdata);
119 } 119 }
120 120
121 static ssize_t udf_adinicb_direct_IO(int rw, struct kiocb *iocb, 121 static ssize_t udf_adinicb_direct_IO(int rw, struct kiocb *iocb,
122 const struct iovec *iov, 122 const struct iovec *iov,
123 loff_t offset, unsigned long nr_segs) 123 loff_t offset, unsigned long nr_segs)
124 { 124 {
125 /* Fallback to buffered I/O. */ 125 /* Fallback to buffered I/O. */
126 return 0; 126 return 0;
127 } 127 }
128 128
129 const struct address_space_operations udf_adinicb_aops = { 129 const struct address_space_operations udf_adinicb_aops = {
130 .readpage = udf_adinicb_readpage, 130 .readpage = udf_adinicb_readpage,
131 .writepage = udf_adinicb_writepage, 131 .writepage = udf_adinicb_writepage,
132 .write_begin = udf_adinicb_write_begin, 132 .write_begin = udf_adinicb_write_begin,
133 .write_end = udf_adinicb_write_end, 133 .write_end = udf_adinicb_write_end,
134 .direct_IO = udf_adinicb_direct_IO, 134 .direct_IO = udf_adinicb_direct_IO,
135 }; 135 };
136 136
137 static ssize_t udf_file_aio_write(struct kiocb *iocb, const struct iovec *iov, 137 static ssize_t udf_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
138 unsigned long nr_segs, loff_t ppos) 138 unsigned long nr_segs, loff_t ppos)
139 { 139 {
140 ssize_t retval; 140 ssize_t retval;
141 struct file *file = iocb->ki_filp; 141 struct file *file = iocb->ki_filp;
142 struct inode *inode = file_inode(file); 142 struct inode *inode = file_inode(file);
143 int err, pos; 143 int err, pos;
144 size_t count = iocb->ki_left; 144 size_t count = iocb->ki_left;
145 struct udf_inode_info *iinfo = UDF_I(inode); 145 struct udf_inode_info *iinfo = UDF_I(inode);
146 146
147 down_write(&iinfo->i_data_sem); 147 down_write(&iinfo->i_data_sem);
148 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) { 148 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
149 if (file->f_flags & O_APPEND) 149 if (file->f_flags & O_APPEND)
150 pos = inode->i_size; 150 pos = inode->i_size;
151 else 151 else
152 pos = ppos; 152 pos = ppos;
153 153
154 if (inode->i_sb->s_blocksize < 154 if (inode->i_sb->s_blocksize <
155 (udf_file_entry_alloc_offset(inode) + 155 (udf_file_entry_alloc_offset(inode) +
156 pos + count)) { 156 pos + count)) {
157 err = udf_expand_file_adinicb(inode); 157 err = udf_expand_file_adinicb(inode);
158 if (err) { 158 if (err) {
159 udf_debug("udf_expand_adinicb: err=%d\n", err); 159 udf_debug("udf_expand_adinicb: err=%d\n", err);
160 return err; 160 return err;
161 } 161 }
162 } else { 162 } else {
163 if (pos + count > inode->i_size) 163 if (pos + count > inode->i_size)
164 iinfo->i_lenAlloc = pos + count; 164 iinfo->i_lenAlloc = pos + count;
165 else 165 else
166 iinfo->i_lenAlloc = inode->i_size; 166 iinfo->i_lenAlloc = inode->i_size;
167 up_write(&iinfo->i_data_sem); 167 up_write(&iinfo->i_data_sem);
168 } 168 }
169 } else 169 } else
170 up_write(&iinfo->i_data_sem); 170 up_write(&iinfo->i_data_sem);
171 171
172 retval = generic_file_aio_write(iocb, iov, nr_segs, ppos); 172 retval = generic_file_aio_write(iocb, iov, nr_segs, ppos);
173 if (retval > 0) 173 if (retval > 0)
174 mark_inode_dirty(inode); 174 mark_inode_dirty(inode);
175 175
176 return retval; 176 return retval;
177 } 177 }
178 178
179 long udf_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) 179 long udf_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
180 { 180 {
181 struct inode *inode = file_inode(filp); 181 struct inode *inode = file_inode(filp);
182 long old_block, new_block; 182 long old_block, new_block;
183 int result = -EINVAL; 183 int result = -EINVAL;
184 184
185 if (inode_permission(inode, MAY_READ) != 0) { 185 if (inode_permission(inode, MAY_READ) != 0) {
186 udf_debug("no permission to access inode %lu\n", inode->i_ino); 186 udf_debug("no permission to access inode %lu\n", inode->i_ino);
187 result = -EPERM; 187 result = -EPERM;
188 goto out; 188 goto out;
189 } 189 }
190 190
191 if (!arg) { 191 if (!arg) {
192 udf_debug("invalid argument to udf_ioctl\n"); 192 udf_debug("invalid argument to udf_ioctl\n");
193 result = -EINVAL; 193 result = -EINVAL;
194 goto out; 194 goto out;
195 } 195 }
196 196
197 switch (cmd) { 197 switch (cmd) {
198 case UDF_GETVOLIDENT: 198 case UDF_GETVOLIDENT:
199 if (copy_to_user((char __user *)arg, 199 if (copy_to_user((char __user *)arg,
200 UDF_SB(inode->i_sb)->s_volume_ident, 32)) 200 UDF_SB(inode->i_sb)->s_volume_ident, 32))
201 result = -EFAULT; 201 result = -EFAULT;
202 else 202 else
203 result = 0; 203 result = 0;
204 goto out; 204 goto out;
205 case UDF_RELOCATE_BLOCKS: 205 case UDF_RELOCATE_BLOCKS:
206 if (!capable(CAP_SYS_ADMIN)) { 206 if (!capable(CAP_SYS_ADMIN)) {
207 result = -EACCES; 207 result = -EPERM;
208 goto out; 208 goto out;
209 } 209 }
210 if (get_user(old_block, (long __user *)arg)) { 210 if (get_user(old_block, (long __user *)arg)) {
211 result = -EFAULT; 211 result = -EFAULT;
212 goto out; 212 goto out;
213 } 213 }
214 result = udf_relocate_blocks(inode->i_sb, 214 result = udf_relocate_blocks(inode->i_sb,
215 old_block, &new_block); 215 old_block, &new_block);
216 if (result == 0) 216 if (result == 0)
217 result = put_user(new_block, (long __user *)arg); 217 result = put_user(new_block, (long __user *)arg);
218 goto out; 218 goto out;
219 case UDF_GETEASIZE: 219 case UDF_GETEASIZE:
220 result = put_user(UDF_I(inode)->i_lenEAttr, (int __user *)arg); 220 result = put_user(UDF_I(inode)->i_lenEAttr, (int __user *)arg);
221 goto out; 221 goto out;
222 case UDF_GETEABLOCK: 222 case UDF_GETEABLOCK:
223 result = copy_to_user((char __user *)arg, 223 result = copy_to_user((char __user *)arg,
224 UDF_I(inode)->i_ext.i_data, 224 UDF_I(inode)->i_ext.i_data,
225 UDF_I(inode)->i_lenEAttr) ? -EFAULT : 0; 225 UDF_I(inode)->i_lenEAttr) ? -EFAULT : 0;
226 goto out; 226 goto out;
227 } 227 }
228 228
229 out: 229 out:
230 return result; 230 return result;
231 } 231 }
232 232
233 static int udf_release_file(struct inode *inode, struct file *filp) 233 static int udf_release_file(struct inode *inode, struct file *filp)
234 { 234 {
235 if (filp->f_mode & FMODE_WRITE) { 235 if (filp->f_mode & FMODE_WRITE) {
236 down_write(&UDF_I(inode)->i_data_sem); 236 down_write(&UDF_I(inode)->i_data_sem);
237 udf_discard_prealloc(inode); 237 udf_discard_prealloc(inode);
238 udf_truncate_tail_extent(inode); 238 udf_truncate_tail_extent(inode);
239 up_write(&UDF_I(inode)->i_data_sem); 239 up_write(&UDF_I(inode)->i_data_sem);
240 } 240 }
241 return 0; 241 return 0;
242 } 242 }
243 243
244 const struct file_operations udf_file_operations = { 244 const struct file_operations udf_file_operations = {
245 .read = do_sync_read, 245 .read = do_sync_read,
246 .aio_read = generic_file_aio_read, 246 .aio_read = generic_file_aio_read,
247 .unlocked_ioctl = udf_ioctl, 247 .unlocked_ioctl = udf_ioctl,
248 .open = generic_file_open, 248 .open = generic_file_open,
249 .mmap = generic_file_mmap, 249 .mmap = generic_file_mmap,
250 .write = do_sync_write, 250 .write = do_sync_write,
251 .aio_write = udf_file_aio_write, 251 .aio_write = udf_file_aio_write,
252 .release = udf_release_file, 252 .release = udf_release_file,
253 .fsync = generic_file_fsync, 253 .fsync = generic_file_fsync,
254 .splice_read = generic_file_splice_read, 254 .splice_read = generic_file_splice_read,
255 .llseek = generic_file_llseek, 255 .llseek = generic_file_llseek,
256 }; 256 };
257 257
258 static int udf_setattr(struct dentry *dentry, struct iattr *attr) 258 static int udf_setattr(struct dentry *dentry, struct iattr *attr)
259 { 259 {
260 struct inode *inode = dentry->d_inode; 260 struct inode *inode = dentry->d_inode;
261 int error; 261 int error;
262 262
263 error = inode_change_ok(inode, attr); 263 error = inode_change_ok(inode, attr);
264 if (error) 264 if (error)
265 return error; 265 return error;
266 266
267 if ((attr->ia_valid & ATTR_SIZE) && 267 if ((attr->ia_valid & ATTR_SIZE) &&
268 attr->ia_size != i_size_read(inode)) { 268 attr->ia_size != i_size_read(inode)) {
269 error = udf_setsize(inode, attr->ia_size); 269 error = udf_setsize(inode, attr->ia_size);
270 if (error) 270 if (error)
271 return error; 271 return error;
272 } 272 }
273 273
274 setattr_copy(inode, attr); 274 setattr_copy(inode, attr);
275 mark_inode_dirty(inode); 275 mark_inode_dirty(inode);
276 return 0; 276 return 0;
277 } 277 }
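
For completeness, the size-changing branch above is what a plain truncate(2) or ftruncate(2) on a UDF file ends up exercising via udf_setsize(). A trivial, hedged userspace trigger (the path is an assumption):

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : "/mnt/udf/file";	/* assumed location */

	if (truncate(path, 4096) != 0)
		printf("truncate: %s\n", strerror(errno));
	return 0;
}
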
278 278
279 const struct inode_operations udf_file_inode_operations = { 279 const struct inode_operations udf_file_inode_operations = {
280 .setattr = udf_setattr, 280 .setattr = udf_setattr,
281 }; 281 };
282 282