Commit 92562927826fceb2f8e69c89e28161b8c1e0b125
Committed by James Morris
1 parent 93db628658
Exists in master and in 7 other branches
integrity: special fs magic
Discussion on the mailing list questioned the use of these magic values in userspace, concluding these values are already exported to userspace via statfs and their correct/incorrect usage is left up to the userspace application.

- Move special fs magic number definitions to magic.h
- Add magic.h include

Signed-off-by: Mimi Zohar <zohar@us.ibm.com>
Reviewed-by: James Morris <jmorris@namei.org>
Signed-off-by: James Morris <jmorris@namei.org>
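The message points out that these magic values already reach userspace through statfs(2), where they show up in the f_type field. As an illustration only (the mount point, the program itself, and the locally repeated DEBUGFS_MAGIC value are assumptions, not part of this commit), a userspace check might look like this:

/*
 * Illustrative userspace check of the filesystem magic reported by
 * statfs(2). f_type carries the same value as the *_MAGIC constants
 * this commit consolidates in <linux/magic.h>.
 */
#include <stdio.h>
#include <sys/vfs.h>

#define DEBUGFS_MAGIC 0x64626720	/* same value as in linux/magic.h */

int main(void)
{
	struct statfs st;

	/* /sys/kernel/debug is the conventional debugfs mount point (assumption). */
	if (statfs("/sys/kernel/debug", &st) != 0) {
		perror("statfs");
		return 1;
	}
	if (st.f_type == DEBUGFS_MAGIC)
		printf("debugfs is mounted at /sys/kernel/debug\n");
	else
		printf("unexpected f_type: 0x%lx\n", (unsigned long)st.f_type);
	return 0;
}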
Showing 4 changed files with 7 additions and 7 deletions

Inline Diff
fs/debugfs/inode.c
1 | /* | 1 | /* |
2 | * file.c - part of debugfs, a tiny little debug file system | 2 | * file.c - part of debugfs, a tiny little debug file system |
3 | * | 3 | * |
4 | * Copyright (C) 2004 Greg Kroah-Hartman <greg@kroah.com> | 4 | * Copyright (C) 2004 Greg Kroah-Hartman <greg@kroah.com> |
5 | * Copyright (C) 2004 IBM Inc. | 5 | * Copyright (C) 2004 IBM Inc. |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or | 7 | * This program is free software; you can redistribute it and/or |
8 | * modify it under the terms of the GNU General Public License version | 8 | * modify it under the terms of the GNU General Public License version |
9 | * 2 as published by the Free Software Foundation. | 9 | * 2 as published by the Free Software Foundation. |
10 | * | 10 | * |
11 | * debugfs is for people to use instead of /proc or /sys. | 11 | * debugfs is for people to use instead of /proc or /sys. |
12 | * See Documentation/DocBook/kernel-api for more details. | 12 | * See Documentation/DocBook/kernel-api for more details. |
13 | * | 13 | * |
14 | */ | 14 | */ |
15 | 15 | ||
16 | /* uncomment to get debug messages from the debug filesystem, ah the irony. */ | 16 | /* uncomment to get debug messages from the debug filesystem, ah the irony. */ |
17 | /* #define DEBUG */ | 17 | /* #define DEBUG */ |
18 | 18 | ||
19 | #include <linux/module.h> | 19 | #include <linux/module.h> |
20 | #include <linux/fs.h> | 20 | #include <linux/fs.h> |
21 | #include <linux/mount.h> | 21 | #include <linux/mount.h> |
22 | #include <linux/pagemap.h> | 22 | #include <linux/pagemap.h> |
23 | #include <linux/init.h> | 23 | #include <linux/init.h> |
24 | #include <linux/kobject.h> | 24 | #include <linux/kobject.h> |
25 | #include <linux/namei.h> | 25 | #include <linux/namei.h> |
26 | #include <linux/debugfs.h> | 26 | #include <linux/debugfs.h> |
27 | #include <linux/fsnotify.h> | 27 | #include <linux/fsnotify.h> |
28 | #include <linux/string.h> | 28 | #include <linux/string.h> |
29 | 29 | #include <linux/magic.h> | |
30 | #define DEBUGFS_MAGIC 0x64626720 | ||
31 | 30 | ||
32 | static struct vfsmount *debugfs_mount; | 31 | static struct vfsmount *debugfs_mount; |
33 | static int debugfs_mount_count; | 32 | static int debugfs_mount_count; |
34 | 33 | ||
35 | static struct inode *debugfs_get_inode(struct super_block *sb, int mode, dev_t dev) | 34 | static struct inode *debugfs_get_inode(struct super_block *sb, int mode, dev_t dev) |
36 | { | 35 | { |
37 | struct inode *inode = new_inode(sb); | 36 | struct inode *inode = new_inode(sb); |
38 | 37 | ||
39 | if (inode) { | 38 | if (inode) { |
40 | inode->i_mode = mode; | 39 | inode->i_mode = mode; |
41 | inode->i_uid = 0; | 40 | inode->i_uid = 0; |
42 | inode->i_gid = 0; | 41 | inode->i_gid = 0; |
43 | inode->i_blocks = 0; | 42 | inode->i_blocks = 0; |
44 | inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; | 43 | inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; |
45 | switch (mode & S_IFMT) { | 44 | switch (mode & S_IFMT) { |
46 | default: | 45 | default: |
47 | init_special_inode(inode, mode, dev); | 46 | init_special_inode(inode, mode, dev); |
48 | break; | 47 | break; |
49 | case S_IFREG: | 48 | case S_IFREG: |
50 | inode->i_fop = &debugfs_file_operations; | 49 | inode->i_fop = &debugfs_file_operations; |
51 | break; | 50 | break; |
52 | case S_IFLNK: | 51 | case S_IFLNK: |
53 | inode->i_op = &debugfs_link_operations; | 52 | inode->i_op = &debugfs_link_operations; |
54 | break; | 53 | break; |
55 | case S_IFDIR: | 54 | case S_IFDIR: |
56 | inode->i_op = &simple_dir_inode_operations; | 55 | inode->i_op = &simple_dir_inode_operations; |
57 | inode->i_fop = &simple_dir_operations; | 56 | inode->i_fop = &simple_dir_operations; |
58 | 57 | ||
59 | /* directory inodes start off with i_nlink == 2 | 58 | /* directory inodes start off with i_nlink == 2 |
60 | * (for "." entry) */ | 59 | * (for "." entry) */ |
61 | inc_nlink(inode); | 60 | inc_nlink(inode); |
62 | break; | 61 | break; |
63 | } | 62 | } |
64 | } | 63 | } |
65 | return inode; | 64 | return inode; |
66 | } | 65 | } |
67 | 66 | ||
68 | /* SMP-safe */ | 67 | /* SMP-safe */ |
69 | static int debugfs_mknod(struct inode *dir, struct dentry *dentry, | 68 | static int debugfs_mknod(struct inode *dir, struct dentry *dentry, |
70 | int mode, dev_t dev) | 69 | int mode, dev_t dev) |
71 | { | 70 | { |
72 | struct inode *inode; | 71 | struct inode *inode; |
73 | int error = -EPERM; | 72 | int error = -EPERM; |
74 | 73 | ||
75 | if (dentry->d_inode) | 74 | if (dentry->d_inode) |
76 | return -EEXIST; | 75 | return -EEXIST; |
77 | 76 | ||
78 | inode = debugfs_get_inode(dir->i_sb, mode, dev); | 77 | inode = debugfs_get_inode(dir->i_sb, mode, dev); |
79 | if (inode) { | 78 | if (inode) { |
80 | d_instantiate(dentry, inode); | 79 | d_instantiate(dentry, inode); |
81 | dget(dentry); | 80 | dget(dentry); |
82 | error = 0; | 81 | error = 0; |
83 | } | 82 | } |
84 | return error; | 83 | return error; |
85 | } | 84 | } |
86 | 85 | ||
87 | static int debugfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) | 86 | static int debugfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) |
88 | { | 87 | { |
89 | int res; | 88 | int res; |
90 | 89 | ||
91 | mode = (mode & (S_IRWXUGO | S_ISVTX)) | S_IFDIR; | 90 | mode = (mode & (S_IRWXUGO | S_ISVTX)) | S_IFDIR; |
92 | res = debugfs_mknod(dir, dentry, mode, 0); | 91 | res = debugfs_mknod(dir, dentry, mode, 0); |
93 | if (!res) { | 92 | if (!res) { |
94 | inc_nlink(dir); | 93 | inc_nlink(dir); |
95 | fsnotify_mkdir(dir, dentry); | 94 | fsnotify_mkdir(dir, dentry); |
96 | } | 95 | } |
97 | return res; | 96 | return res; |
98 | } | 97 | } |
99 | 98 | ||
100 | static int debugfs_link(struct inode *dir, struct dentry *dentry, int mode) | 99 | static int debugfs_link(struct inode *dir, struct dentry *dentry, int mode) |
101 | { | 100 | { |
102 | mode = (mode & S_IALLUGO) | S_IFLNK; | 101 | mode = (mode & S_IALLUGO) | S_IFLNK; |
103 | return debugfs_mknod(dir, dentry, mode, 0); | 102 | return debugfs_mknod(dir, dentry, mode, 0); |
104 | } | 103 | } |
105 | 104 | ||
106 | static int debugfs_create(struct inode *dir, struct dentry *dentry, int mode) | 105 | static int debugfs_create(struct inode *dir, struct dentry *dentry, int mode) |
107 | { | 106 | { |
108 | int res; | 107 | int res; |
109 | 108 | ||
110 | mode = (mode & S_IALLUGO) | S_IFREG; | 109 | mode = (mode & S_IALLUGO) | S_IFREG; |
111 | res = debugfs_mknod(dir, dentry, mode, 0); | 110 | res = debugfs_mknod(dir, dentry, mode, 0); |
112 | if (!res) | 111 | if (!res) |
113 | fsnotify_create(dir, dentry); | 112 | fsnotify_create(dir, dentry); |
114 | return res; | 113 | return res; |
115 | } | 114 | } |
116 | 115 | ||
117 | static inline int debugfs_positive(struct dentry *dentry) | 116 | static inline int debugfs_positive(struct dentry *dentry) |
118 | { | 117 | { |
119 | return dentry->d_inode && !d_unhashed(dentry); | 118 | return dentry->d_inode && !d_unhashed(dentry); |
120 | } | 119 | } |
121 | 120 | ||
122 | static int debug_fill_super(struct super_block *sb, void *data, int silent) | 121 | static int debug_fill_super(struct super_block *sb, void *data, int silent) |
123 | { | 122 | { |
124 | static struct tree_descr debug_files[] = {{""}}; | 123 | static struct tree_descr debug_files[] = {{""}}; |
125 | 124 | ||
126 | return simple_fill_super(sb, DEBUGFS_MAGIC, debug_files); | 125 | return simple_fill_super(sb, DEBUGFS_MAGIC, debug_files); |
127 | } | 126 | } |
128 | 127 | ||
129 | static int debug_get_sb(struct file_system_type *fs_type, | 128 | static int debug_get_sb(struct file_system_type *fs_type, |
130 | int flags, const char *dev_name, | 129 | int flags, const char *dev_name, |
131 | void *data, struct vfsmount *mnt) | 130 | void *data, struct vfsmount *mnt) |
132 | { | 131 | { |
133 | return get_sb_single(fs_type, flags, data, debug_fill_super, mnt); | 132 | return get_sb_single(fs_type, flags, data, debug_fill_super, mnt); |
134 | } | 133 | } |
135 | 134 | ||
136 | static struct file_system_type debug_fs_type = { | 135 | static struct file_system_type debug_fs_type = { |
137 | .owner = THIS_MODULE, | 136 | .owner = THIS_MODULE, |
138 | .name = "debugfs", | 137 | .name = "debugfs", |
139 | .get_sb = debug_get_sb, | 138 | .get_sb = debug_get_sb, |
140 | .kill_sb = kill_litter_super, | 139 | .kill_sb = kill_litter_super, |
141 | }; | 140 | }; |
142 | 141 | ||
143 | static int debugfs_create_by_name(const char *name, mode_t mode, | 142 | static int debugfs_create_by_name(const char *name, mode_t mode, |
144 | struct dentry *parent, | 143 | struct dentry *parent, |
145 | struct dentry **dentry) | 144 | struct dentry **dentry) |
146 | { | 145 | { |
147 | int error = 0; | 146 | int error = 0; |
148 | 147 | ||
149 | /* If the parent is not specified, we create it in the root. | 148 | /* If the parent is not specified, we create it in the root. |
150 | * We need the root dentry to do this, which is in the super | 149 | * We need the root dentry to do this, which is in the super |
151 | * block. A pointer to that is in the struct vfsmount that we | 150 | * block. A pointer to that is in the struct vfsmount that we |
152 | * have around. | 151 | * have around. |
153 | */ | 152 | */ |
154 | if (!parent) { | 153 | if (!parent) { |
155 | if (debugfs_mount && debugfs_mount->mnt_sb) { | 154 | if (debugfs_mount && debugfs_mount->mnt_sb) { |
156 | parent = debugfs_mount->mnt_sb->s_root; | 155 | parent = debugfs_mount->mnt_sb->s_root; |
157 | } | 156 | } |
158 | } | 157 | } |
159 | if (!parent) { | 158 | if (!parent) { |
160 | pr_debug("debugfs: Ah! can not find a parent!\n"); | 159 | pr_debug("debugfs: Ah! can not find a parent!\n"); |
161 | return -EFAULT; | 160 | return -EFAULT; |
162 | } | 161 | } |
163 | 162 | ||
164 | *dentry = NULL; | 163 | *dentry = NULL; |
165 | mutex_lock(&parent->d_inode->i_mutex); | 164 | mutex_lock(&parent->d_inode->i_mutex); |
166 | *dentry = lookup_one_len(name, parent, strlen(name)); | 165 | *dentry = lookup_one_len(name, parent, strlen(name)); |
167 | if (!IS_ERR(*dentry)) { | 166 | if (!IS_ERR(*dentry)) { |
168 | switch (mode & S_IFMT) { | 167 | switch (mode & S_IFMT) { |
169 | case S_IFDIR: | 168 | case S_IFDIR: |
170 | error = debugfs_mkdir(parent->d_inode, *dentry, mode); | 169 | error = debugfs_mkdir(parent->d_inode, *dentry, mode); |
171 | break; | 170 | break; |
172 | case S_IFLNK: | 171 | case S_IFLNK: |
173 | error = debugfs_link(parent->d_inode, *dentry, mode); | 172 | error = debugfs_link(parent->d_inode, *dentry, mode); |
174 | break; | 173 | break; |
175 | default: | 174 | default: |
176 | error = debugfs_create(parent->d_inode, *dentry, mode); | 175 | error = debugfs_create(parent->d_inode, *dentry, mode); |
177 | break; | 176 | break; |
178 | } | 177 | } |
179 | dput(*dentry); | 178 | dput(*dentry); |
180 | } else | 179 | } else |
181 | error = PTR_ERR(*dentry); | 180 | error = PTR_ERR(*dentry); |
182 | mutex_unlock(&parent->d_inode->i_mutex); | 181 | mutex_unlock(&parent->d_inode->i_mutex); |
183 | 182 | ||
184 | return error; | 183 | return error; |
185 | } | 184 | } |
186 | 185 | ||
187 | /** | 186 | /** |
188 | * debugfs_create_file - create a file in the debugfs filesystem | 187 | * debugfs_create_file - create a file in the debugfs filesystem |
189 | * @name: a pointer to a string containing the name of the file to create. | 188 | * @name: a pointer to a string containing the name of the file to create. |
190 | * @mode: the permission that the file should have | 189 | * @mode: the permission that the file should have |
191 | * @parent: a pointer to the parent dentry for this file. This should be a | 190 | * @parent: a pointer to the parent dentry for this file. This should be a |
192 | * directory dentry if set. If this paramater is NULL, then the | 191 | * directory dentry if set. If this paramater is NULL, then the |
193 | * file will be created in the root of the debugfs filesystem. | 192 | * file will be created in the root of the debugfs filesystem. |
194 | * @data: a pointer to something that the caller will want to get to later | 193 | * @data: a pointer to something that the caller will want to get to later |
195 | * on. The inode.i_private pointer will point to this value on | 194 | * on. The inode.i_private pointer will point to this value on |
196 | * the open() call. | 195 | * the open() call. |
197 | * @fops: a pointer to a struct file_operations that should be used for | 196 | * @fops: a pointer to a struct file_operations that should be used for |
198 | * this file. | 197 | * this file. |
199 | * | 198 | * |
200 | * This is the basic "create a file" function for debugfs. It allows for a | 199 | * This is the basic "create a file" function for debugfs. It allows for a |
201 | * wide range of flexibility in createing a file, or a directory (if you | 200 | * wide range of flexibility in createing a file, or a directory (if you |
202 | * want to create a directory, the debugfs_create_dir() function is | 201 | * want to create a directory, the debugfs_create_dir() function is |
203 | * recommended to be used instead.) | 202 | * recommended to be used instead.) |
204 | * | 203 | * |
205 | * This function will return a pointer to a dentry if it succeeds. This | 204 | * This function will return a pointer to a dentry if it succeeds. This |
206 | * pointer must be passed to the debugfs_remove() function when the file is | 205 | * pointer must be passed to the debugfs_remove() function when the file is |
207 | * to be removed (no automatic cleanup happens if your module is unloaded, | 206 | * to be removed (no automatic cleanup happens if your module is unloaded, |
208 | * you are responsible here.) If an error occurs, %NULL will be returned. | 207 | * you are responsible here.) If an error occurs, %NULL will be returned. |
209 | * | 208 | * |
210 | * If debugfs is not enabled in the kernel, the value -%ENODEV will be | 209 | * If debugfs is not enabled in the kernel, the value -%ENODEV will be |
211 | * returned. | 210 | * returned. |
212 | */ | 211 | */ |
213 | struct dentry *debugfs_create_file(const char *name, mode_t mode, | 212 | struct dentry *debugfs_create_file(const char *name, mode_t mode, |
214 | struct dentry *parent, void *data, | 213 | struct dentry *parent, void *data, |
215 | const struct file_operations *fops) | 214 | const struct file_operations *fops) |
216 | { | 215 | { |
217 | struct dentry *dentry = NULL; | 216 | struct dentry *dentry = NULL; |
218 | int error; | 217 | int error; |
219 | 218 | ||
220 | pr_debug("debugfs: creating file '%s'\n",name); | 219 | pr_debug("debugfs: creating file '%s'\n",name); |
221 | 220 | ||
222 | error = simple_pin_fs(&debug_fs_type, &debugfs_mount, | 221 | error = simple_pin_fs(&debug_fs_type, &debugfs_mount, |
223 | &debugfs_mount_count); | 222 | &debugfs_mount_count); |
224 | if (error) | 223 | if (error) |
225 | goto exit; | 224 | goto exit; |
226 | 225 | ||
227 | error = debugfs_create_by_name(name, mode, parent, &dentry); | 226 | error = debugfs_create_by_name(name, mode, parent, &dentry); |
228 | if (error) { | 227 | if (error) { |
229 | dentry = NULL; | 228 | dentry = NULL; |
230 | simple_release_fs(&debugfs_mount, &debugfs_mount_count); | 229 | simple_release_fs(&debugfs_mount, &debugfs_mount_count); |
231 | goto exit; | 230 | goto exit; |
232 | } | 231 | } |
233 | 232 | ||
234 | if (dentry->d_inode) { | 233 | if (dentry->d_inode) { |
235 | if (data) | 234 | if (data) |
236 | dentry->d_inode->i_private = data; | 235 | dentry->d_inode->i_private = data; |
237 | if (fops) | 236 | if (fops) |
238 | dentry->d_inode->i_fop = fops; | 237 | dentry->d_inode->i_fop = fops; |
239 | } | 238 | } |
240 | exit: | 239 | exit: |
241 | return dentry; | 240 | return dentry; |
242 | } | 241 | } |
243 | EXPORT_SYMBOL_GPL(debugfs_create_file); | 242 | EXPORT_SYMBOL_GPL(debugfs_create_file); |
244 | 243 | ||
245 | /** | 244 | /** |
246 | * debugfs_create_dir - create a directory in the debugfs filesystem | 245 | * debugfs_create_dir - create a directory in the debugfs filesystem |
247 | * @name: a pointer to a string containing the name of the directory to | 246 | * @name: a pointer to a string containing the name of the directory to |
248 | * create. | 247 | * create. |
249 | * @parent: a pointer to the parent dentry for this file. This should be a | 248 | * @parent: a pointer to the parent dentry for this file. This should be a |
250 | * directory dentry if set. If this paramater is NULL, then the | 249 | * directory dentry if set. If this paramater is NULL, then the |
251 | * directory will be created in the root of the debugfs filesystem. | 250 | * directory will be created in the root of the debugfs filesystem. |
252 | * | 251 | * |
253 | * This function creates a directory in debugfs with the given name. | 252 | * This function creates a directory in debugfs with the given name. |
254 | * | 253 | * |
255 | * This function will return a pointer to a dentry if it succeeds. This | 254 | * This function will return a pointer to a dentry if it succeeds. This |
256 | * pointer must be passed to the debugfs_remove() function when the file is | 255 | * pointer must be passed to the debugfs_remove() function when the file is |
257 | * to be removed (no automatic cleanup happens if your module is unloaded, | 256 | * to be removed (no automatic cleanup happens if your module is unloaded, |
258 | * you are responsible here.) If an error occurs, %NULL will be returned. | 257 | * you are responsible here.) If an error occurs, %NULL will be returned. |
259 | * | 258 | * |
260 | * If debugfs is not enabled in the kernel, the value -%ENODEV will be | 259 | * If debugfs is not enabled in the kernel, the value -%ENODEV will be |
261 | * returned. | 260 | * returned. |
262 | */ | 261 | */ |
263 | struct dentry *debugfs_create_dir(const char *name, struct dentry *parent) | 262 | struct dentry *debugfs_create_dir(const char *name, struct dentry *parent) |
264 | { | 263 | { |
265 | return debugfs_create_file(name, | 264 | return debugfs_create_file(name, |
266 | S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO, | 265 | S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO, |
267 | parent, NULL, NULL); | 266 | parent, NULL, NULL); |
268 | } | 267 | } |
269 | EXPORT_SYMBOL_GPL(debugfs_create_dir); | 268 | EXPORT_SYMBOL_GPL(debugfs_create_dir); |
270 | 269 | ||
271 | /** | 270 | /** |
272 | * debugfs_create_symlink- create a symbolic link in the debugfs filesystem | 271 | * debugfs_create_symlink- create a symbolic link in the debugfs filesystem |
273 | * @name: a pointer to a string containing the name of the symbolic link to | 272 | * @name: a pointer to a string containing the name of the symbolic link to |
274 | * create. | 273 | * create. |
275 | * @parent: a pointer to the parent dentry for this symbolic link. This | 274 | * @parent: a pointer to the parent dentry for this symbolic link. This |
276 | * should be a directory dentry if set. If this paramater is NULL, | 275 | * should be a directory dentry if set. If this paramater is NULL, |
277 | * then the symbolic link will be created in the root of the debugfs | 276 | * then the symbolic link will be created in the root of the debugfs |
278 | * filesystem. | 277 | * filesystem. |
279 | * @target: a pointer to a string containing the path to the target of the | 278 | * @target: a pointer to a string containing the path to the target of the |
280 | * symbolic link. | 279 | * symbolic link. |
281 | * | 280 | * |
282 | * This function creates a symbolic link with the given name in debugfs that | 281 | * This function creates a symbolic link with the given name in debugfs that |
283 | * links to the given target path. | 282 | * links to the given target path. |
284 | * | 283 | * |
285 | * This function will return a pointer to a dentry if it succeeds. This | 284 | * This function will return a pointer to a dentry if it succeeds. This |
286 | * pointer must be passed to the debugfs_remove() function when the symbolic | 285 | * pointer must be passed to the debugfs_remove() function when the symbolic |
287 | * link is to be removed (no automatic cleanup happens if your module is | 286 | * link is to be removed (no automatic cleanup happens if your module is |
288 | * unloaded, you are responsible here.) If an error occurs, %NULL will be | 287 | * unloaded, you are responsible here.) If an error occurs, %NULL will be |
289 | * returned. | 288 | * returned. |
290 | * | 289 | * |
291 | * If debugfs is not enabled in the kernel, the value -%ENODEV will be | 290 | * If debugfs is not enabled in the kernel, the value -%ENODEV will be |
292 | * returned. | 291 | * returned. |
293 | */ | 292 | */ |
294 | struct dentry *debugfs_create_symlink(const char *name, struct dentry *parent, | 293 | struct dentry *debugfs_create_symlink(const char *name, struct dentry *parent, |
295 | const char *target) | 294 | const char *target) |
296 | { | 295 | { |
297 | struct dentry *result; | 296 | struct dentry *result; |
298 | char *link; | 297 | char *link; |
299 | 298 | ||
300 | link = kstrdup(target, GFP_KERNEL); | 299 | link = kstrdup(target, GFP_KERNEL); |
301 | if (!link) | 300 | if (!link) |
302 | return NULL; | 301 | return NULL; |
303 | 302 | ||
304 | result = debugfs_create_file(name, S_IFLNK | S_IRWXUGO, parent, link, | 303 | result = debugfs_create_file(name, S_IFLNK | S_IRWXUGO, parent, link, |
305 | NULL); | 304 | NULL); |
306 | if (!result) | 305 | if (!result) |
307 | kfree(link); | 306 | kfree(link); |
308 | return result; | 307 | return result; |
309 | } | 308 | } |
310 | EXPORT_SYMBOL_GPL(debugfs_create_symlink); | 309 | EXPORT_SYMBOL_GPL(debugfs_create_symlink); |
311 | 310 | ||
312 | static void __debugfs_remove(struct dentry *dentry, struct dentry *parent) | 311 | static void __debugfs_remove(struct dentry *dentry, struct dentry *parent) |
313 | { | 312 | { |
314 | int ret = 0; | 313 | int ret = 0; |
315 | 314 | ||
316 | if (debugfs_positive(dentry)) { | 315 | if (debugfs_positive(dentry)) { |
317 | if (dentry->d_inode) { | 316 | if (dentry->d_inode) { |
318 | dget(dentry); | 317 | dget(dentry); |
319 | switch (dentry->d_inode->i_mode & S_IFMT) { | 318 | switch (dentry->d_inode->i_mode & S_IFMT) { |
320 | case S_IFDIR: | 319 | case S_IFDIR: |
321 | ret = simple_rmdir(parent->d_inode, dentry); | 320 | ret = simple_rmdir(parent->d_inode, dentry); |
322 | break; | 321 | break; |
323 | case S_IFLNK: | 322 | case S_IFLNK: |
324 | kfree(dentry->d_inode->i_private); | 323 | kfree(dentry->d_inode->i_private); |
325 | /* fall through */ | 324 | /* fall through */ |
326 | default: | 325 | default: |
327 | simple_unlink(parent->d_inode, dentry); | 326 | simple_unlink(parent->d_inode, dentry); |
328 | break; | 327 | break; |
329 | } | 328 | } |
330 | if (!ret) | 329 | if (!ret) |
331 | d_delete(dentry); | 330 | d_delete(dentry); |
332 | dput(dentry); | 331 | dput(dentry); |
333 | } | 332 | } |
334 | } | 333 | } |
335 | } | 334 | } |
336 | 335 | ||
337 | /** | 336 | /** |
338 | * debugfs_remove - removes a file or directory from the debugfs filesystem | 337 | * debugfs_remove - removes a file or directory from the debugfs filesystem |
339 | * @dentry: a pointer to a the dentry of the file or directory to be | 338 | * @dentry: a pointer to a the dentry of the file or directory to be |
340 | * removed. | 339 | * removed. |
341 | * | 340 | * |
342 | * This function removes a file or directory in debugfs that was previously | 341 | * This function removes a file or directory in debugfs that was previously |
343 | * created with a call to another debugfs function (like | 342 | * created with a call to another debugfs function (like |
344 | * debugfs_create_file() or variants thereof.) | 343 | * debugfs_create_file() or variants thereof.) |
345 | * | 344 | * |
346 | * This function is required to be called in order for the file to be | 345 | * This function is required to be called in order for the file to be |
347 | * removed, no automatic cleanup of files will happen when a module is | 346 | * removed, no automatic cleanup of files will happen when a module is |
348 | * removed, you are responsible here. | 347 | * removed, you are responsible here. |
349 | */ | 348 | */ |
350 | void debugfs_remove(struct dentry *dentry) | 349 | void debugfs_remove(struct dentry *dentry) |
351 | { | 350 | { |
352 | struct dentry *parent; | 351 | struct dentry *parent; |
353 | 352 | ||
354 | if (!dentry) | 353 | if (!dentry) |
355 | return; | 354 | return; |
356 | 355 | ||
357 | parent = dentry->d_parent; | 356 | parent = dentry->d_parent; |
358 | if (!parent || !parent->d_inode) | 357 | if (!parent || !parent->d_inode) |
359 | return; | 358 | return; |
360 | 359 | ||
361 | mutex_lock(&parent->d_inode->i_mutex); | 360 | mutex_lock(&parent->d_inode->i_mutex); |
362 | __debugfs_remove(dentry, parent); | 361 | __debugfs_remove(dentry, parent); |
363 | mutex_unlock(&parent->d_inode->i_mutex); | 362 | mutex_unlock(&parent->d_inode->i_mutex); |
364 | simple_release_fs(&debugfs_mount, &debugfs_mount_count); | 363 | simple_release_fs(&debugfs_mount, &debugfs_mount_count); |
365 | } | 364 | } |
366 | EXPORT_SYMBOL_GPL(debugfs_remove); | 365 | EXPORT_SYMBOL_GPL(debugfs_remove); |
367 | 366 | ||
368 | /** | 367 | /** |
369 | * debugfs_remove_recursive - recursively removes a directory | 368 | * debugfs_remove_recursive - recursively removes a directory |
370 | * @dentry: a pointer to a the dentry of the directory to be removed. | 369 | * @dentry: a pointer to a the dentry of the directory to be removed. |
371 | * | 370 | * |
372 | * This function recursively removes a directory tree in debugfs that | 371 | * This function recursively removes a directory tree in debugfs that |
373 | * was previously created with a call to another debugfs function | 372 | * was previously created with a call to another debugfs function |
374 | * (like debugfs_create_file() or variants thereof.) | 373 | * (like debugfs_create_file() or variants thereof.) |
375 | * | 374 | * |
376 | * This function is required to be called in order for the file to be | 375 | * This function is required to be called in order for the file to be |
377 | * removed, no automatic cleanup of files will happen when a module is | 376 | * removed, no automatic cleanup of files will happen when a module is |
378 | * removed, you are responsible here. | 377 | * removed, you are responsible here. |
379 | */ | 378 | */ |
380 | void debugfs_remove_recursive(struct dentry *dentry) | 379 | void debugfs_remove_recursive(struct dentry *dentry) |
381 | { | 380 | { |
382 | struct dentry *child; | 381 | struct dentry *child; |
383 | struct dentry *parent; | 382 | struct dentry *parent; |
384 | 383 | ||
385 | if (!dentry) | 384 | if (!dentry) |
386 | return; | 385 | return; |
387 | 386 | ||
388 | parent = dentry->d_parent; | 387 | parent = dentry->d_parent; |
389 | if (!parent || !parent->d_inode) | 388 | if (!parent || !parent->d_inode) |
390 | return; | 389 | return; |
391 | 390 | ||
392 | parent = dentry; | 391 | parent = dentry; |
393 | mutex_lock(&parent->d_inode->i_mutex); | 392 | mutex_lock(&parent->d_inode->i_mutex); |
394 | 393 | ||
395 | while (1) { | 394 | while (1) { |
396 | /* | 395 | /* |
397 | * When all dentries under "parent" has been removed, | 396 | * When all dentries under "parent" has been removed, |
398 | * walk up the tree until we reach our starting point. | 397 | * walk up the tree until we reach our starting point. |
399 | */ | 398 | */ |
400 | if (list_empty(&parent->d_subdirs)) { | 399 | if (list_empty(&parent->d_subdirs)) { |
401 | mutex_unlock(&parent->d_inode->i_mutex); | 400 | mutex_unlock(&parent->d_inode->i_mutex); |
402 | if (parent == dentry) | 401 | if (parent == dentry) |
403 | break; | 402 | break; |
404 | parent = parent->d_parent; | 403 | parent = parent->d_parent; |
405 | mutex_lock(&parent->d_inode->i_mutex); | 404 | mutex_lock(&parent->d_inode->i_mutex); |
406 | } | 405 | } |
407 | child = list_entry(parent->d_subdirs.next, struct dentry, | 406 | child = list_entry(parent->d_subdirs.next, struct dentry, |
408 | d_u.d_child); | 407 | d_u.d_child); |
409 | 408 | ||
410 | /* | 409 | /* |
411 | * If "child" isn't empty, walk down the tree and | 410 | * If "child" isn't empty, walk down the tree and |
412 | * remove all its descendants first. | 411 | * remove all its descendants first. |
413 | */ | 412 | */ |
414 | if (!list_empty(&child->d_subdirs)) { | 413 | if (!list_empty(&child->d_subdirs)) { |
415 | mutex_unlock(&parent->d_inode->i_mutex); | 414 | mutex_unlock(&parent->d_inode->i_mutex); |
416 | parent = child; | 415 | parent = child; |
417 | mutex_lock(&parent->d_inode->i_mutex); | 416 | mutex_lock(&parent->d_inode->i_mutex); |
418 | continue; | 417 | continue; |
419 | } | 418 | } |
420 | __debugfs_remove(child, parent); | 419 | __debugfs_remove(child, parent); |
421 | if (parent->d_subdirs.next == &child->d_u.d_child) { | 420 | if (parent->d_subdirs.next == &child->d_u.d_child) { |
422 | /* | 421 | /* |
423 | * Avoid infinite loop if we fail to remove | 422 | * Avoid infinite loop if we fail to remove |
424 | * one dentry. | 423 | * one dentry. |
425 | */ | 424 | */ |
426 | mutex_unlock(&parent->d_inode->i_mutex); | 425 | mutex_unlock(&parent->d_inode->i_mutex); |
427 | break; | 426 | break; |
428 | } | 427 | } |
429 | simple_release_fs(&debugfs_mount, &debugfs_mount_count); | 428 | simple_release_fs(&debugfs_mount, &debugfs_mount_count); |
430 | } | 429 | } |
431 | 430 | ||
432 | parent = dentry->d_parent; | 431 | parent = dentry->d_parent; |
433 | mutex_lock(&parent->d_inode->i_mutex); | 432 | mutex_lock(&parent->d_inode->i_mutex); |
434 | __debugfs_remove(dentry, parent); | 433 | __debugfs_remove(dentry, parent); |
435 | mutex_unlock(&parent->d_inode->i_mutex); | 434 | mutex_unlock(&parent->d_inode->i_mutex); |
436 | simple_release_fs(&debugfs_mount, &debugfs_mount_count); | 435 | simple_release_fs(&debugfs_mount, &debugfs_mount_count); |
437 | } | 436 | } |
438 | EXPORT_SYMBOL_GPL(debugfs_remove_recursive); | 437 | EXPORT_SYMBOL_GPL(debugfs_remove_recursive); |
439 | 438 | ||
440 | /** | 439 | /** |
441 | * debugfs_rename - rename a file/directory in the debugfs filesystem | 440 | * debugfs_rename - rename a file/directory in the debugfs filesystem |
442 | * @old_dir: a pointer to the parent dentry for the renamed object. This | 441 | * @old_dir: a pointer to the parent dentry for the renamed object. This |
443 | * should be a directory dentry. | 442 | * should be a directory dentry. |
444 | * @old_dentry: dentry of an object to be renamed. | 443 | * @old_dentry: dentry of an object to be renamed. |
445 | * @new_dir: a pointer to the parent dentry where the object should be | 444 | * @new_dir: a pointer to the parent dentry where the object should be |
446 | * moved. This should be a directory dentry. | 445 | * moved. This should be a directory dentry. |
447 | * @new_name: a pointer to a string containing the target name. | 446 | * @new_name: a pointer to a string containing the target name. |
448 | * | 447 | * |
449 | * This function renames a file/directory in debugfs. The target must not | 448 | * This function renames a file/directory in debugfs. The target must not |
450 | * exist for rename to succeed. | 449 | * exist for rename to succeed. |
451 | * | 450 | * |
452 | * This function will return a pointer to old_dentry (which is updated to | 451 | * This function will return a pointer to old_dentry (which is updated to |
453 | * reflect renaming) if it succeeds. If an error occurs, %NULL will be | 452 | * reflect renaming) if it succeeds. If an error occurs, %NULL will be |
454 | * returned. | 453 | * returned. |
455 | * | 454 | * |
456 | * If debugfs is not enabled in the kernel, the value -%ENODEV will be | 455 | * If debugfs is not enabled in the kernel, the value -%ENODEV will be |
457 | * returned. | 456 | * returned. |
458 | */ | 457 | */ |
459 | struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry, | 458 | struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry, |
460 | struct dentry *new_dir, const char *new_name) | 459 | struct dentry *new_dir, const char *new_name) |
461 | { | 460 | { |
462 | int error; | 461 | int error; |
463 | struct dentry *dentry = NULL, *trap; | 462 | struct dentry *dentry = NULL, *trap; |
464 | const char *old_name; | 463 | const char *old_name; |
465 | 464 | ||
466 | trap = lock_rename(new_dir, old_dir); | 465 | trap = lock_rename(new_dir, old_dir); |
467 | /* Source or destination directories don't exist? */ | 466 | /* Source or destination directories don't exist? */ |
468 | if (!old_dir->d_inode || !new_dir->d_inode) | 467 | if (!old_dir->d_inode || !new_dir->d_inode) |
469 | goto exit; | 468 | goto exit; |
470 | /* Source does not exist, cyclic rename, or mountpoint? */ | 469 | /* Source does not exist, cyclic rename, or mountpoint? */ |
471 | if (!old_dentry->d_inode || old_dentry == trap || | 470 | if (!old_dentry->d_inode || old_dentry == trap || |
472 | d_mountpoint(old_dentry)) | 471 | d_mountpoint(old_dentry)) |
473 | goto exit; | 472 | goto exit; |
474 | dentry = lookup_one_len(new_name, new_dir, strlen(new_name)); | 473 | dentry = lookup_one_len(new_name, new_dir, strlen(new_name)); |
475 | /* Lookup failed, cyclic rename or target exists? */ | 474 | /* Lookup failed, cyclic rename or target exists? */ |
476 | if (IS_ERR(dentry) || dentry == trap || dentry->d_inode) | 475 | if (IS_ERR(dentry) || dentry == trap || dentry->d_inode) |
477 | goto exit; | 476 | goto exit; |
478 | 477 | ||
479 | old_name = fsnotify_oldname_init(old_dentry->d_name.name); | 478 | old_name = fsnotify_oldname_init(old_dentry->d_name.name); |
480 | 479 | ||
481 | error = simple_rename(old_dir->d_inode, old_dentry, new_dir->d_inode, | 480 | error = simple_rename(old_dir->d_inode, old_dentry, new_dir->d_inode, |
482 | dentry); | 481 | dentry); |
483 | if (error) { | 482 | if (error) { |
484 | fsnotify_oldname_free(old_name); | 483 | fsnotify_oldname_free(old_name); |
485 | goto exit; | 484 | goto exit; |
486 | } | 485 | } |
487 | d_move(old_dentry, dentry); | 486 | d_move(old_dentry, dentry); |
488 | fsnotify_move(old_dir->d_inode, new_dir->d_inode, old_name, | 487 | fsnotify_move(old_dir->d_inode, new_dir->d_inode, old_name, |
489 | old_dentry->d_name.name, S_ISDIR(old_dentry->d_inode->i_mode), | 488 | old_dentry->d_name.name, S_ISDIR(old_dentry->d_inode->i_mode), |
490 | NULL, old_dentry); | 489 | NULL, old_dentry); |
491 | fsnotify_oldname_free(old_name); | 490 | fsnotify_oldname_free(old_name); |
492 | unlock_rename(new_dir, old_dir); | 491 | unlock_rename(new_dir, old_dir); |
493 | dput(dentry); | 492 | dput(dentry); |
494 | return old_dentry; | 493 | return old_dentry; |
495 | exit: | 494 | exit: |
496 | if (dentry && !IS_ERR(dentry)) | 495 | if (dentry && !IS_ERR(dentry)) |
497 | dput(dentry); | 496 | dput(dentry); |
498 | unlock_rename(new_dir, old_dir); | 497 | unlock_rename(new_dir, old_dir); |
499 | return NULL; | 498 | return NULL; |
500 | } | 499 | } |
501 | EXPORT_SYMBOL_GPL(debugfs_rename); | 500 | EXPORT_SYMBOL_GPL(debugfs_rename); |
502 | 501 | ||
503 | static struct kobject *debug_kobj; | 502 | static struct kobject *debug_kobj; |
504 | 503 | ||
505 | static int __init debugfs_init(void) | 504 | static int __init debugfs_init(void) |
506 | { | 505 | { |
507 | int retval; | 506 | int retval; |
508 | 507 | ||
509 | debug_kobj = kobject_create_and_add("debug", kernel_kobj); | 508 | debug_kobj = kobject_create_and_add("debug", kernel_kobj); |
510 | if (!debug_kobj) | 509 | if (!debug_kobj) |
511 | return -EINVAL; | 510 | return -EINVAL; |
512 | 511 | ||
513 | retval = register_filesystem(&debug_fs_type); | 512 | retval = register_filesystem(&debug_fs_type); |
514 | if (retval) | 513 | if (retval) |
515 | kobject_put(debug_kobj); | 514 | kobject_put(debug_kobj); |
516 | return retval; | 515 | return retval; |
517 | } | 516 | } |
518 | 517 | ||
519 | static void __exit debugfs_exit(void) | 518 | static void __exit debugfs_exit(void) |
520 | { | 519 | { |
521 | simple_release_fs(&debugfs_mount, &debugfs_mount_count); | 520 | simple_release_fs(&debugfs_mount, &debugfs_mount_count); |
522 | unregister_filesystem(&debug_fs_type); | 521 | unregister_filesystem(&debug_fs_type); |
523 | kobject_put(debug_kobj); | 522 | kobject_put(debug_kobj); |
524 | } | 523 | } |
525 | 524 | ||
526 | core_initcall(debugfs_init); | 525 | core_initcall(debugfs_init); |
527 | module_exit(debugfs_exit); | 526 | module_exit(debugfs_exit); |
528 | MODULE_LICENSE("GPL"); | 527 | MODULE_LICENSE("GPL"); |
529 | 528 | ||
530 | 529 |
include/linux/magic.h
1 | #ifndef __LINUX_MAGIC_H__ | 1 | #ifndef __LINUX_MAGIC_H__ |
2 | #define __LINUX_MAGIC_H__ | 2 | #define __LINUX_MAGIC_H__ |
3 | 3 | ||
4 | #define ADFS_SUPER_MAGIC 0xadf5 | 4 | #define ADFS_SUPER_MAGIC 0xadf5 |
5 | #define AFFS_SUPER_MAGIC 0xadff | 5 | #define AFFS_SUPER_MAGIC 0xadff |
6 | #define AFS_SUPER_MAGIC 0x5346414F | 6 | #define AFS_SUPER_MAGIC 0x5346414F |
7 | #define AUTOFS_SUPER_MAGIC 0x0187 | 7 | #define AUTOFS_SUPER_MAGIC 0x0187 |
8 | #define CODA_SUPER_MAGIC 0x73757245 | 8 | #define CODA_SUPER_MAGIC 0x73757245 |
9 | #define DEBUGFS_MAGIC 0x64626720 | ||
10 | #define SYSFS_MAGIC 0x62656572 | ||
11 | #define SECURITYFS_MAGIC 0x73636673 | ||
12 | #define TMPFS_MAGIC 0x01021994 | ||
9 | #define EFS_SUPER_MAGIC 0x414A53 | 13 | #define EFS_SUPER_MAGIC 0x414A53 |
10 | #define EXT2_SUPER_MAGIC 0xEF53 | 14 | #define EXT2_SUPER_MAGIC 0xEF53 |
11 | #define EXT3_SUPER_MAGIC 0xEF53 | 15 | #define EXT3_SUPER_MAGIC 0xEF53 |
12 | #define EXT4_SUPER_MAGIC 0xEF53 | 16 | #define EXT4_SUPER_MAGIC 0xEF53 |
13 | #define HPFS_SUPER_MAGIC 0xf995e849 | 17 | #define HPFS_SUPER_MAGIC 0xf995e849 |
14 | #define ISOFS_SUPER_MAGIC 0x9660 | 18 | #define ISOFS_SUPER_MAGIC 0x9660 |
15 | #define JFFS2_SUPER_MAGIC 0x72b6 | 19 | #define JFFS2_SUPER_MAGIC 0x72b6 |
16 | #define ANON_INODE_FS_MAGIC 0x09041934 | 20 | #define ANON_INODE_FS_MAGIC 0x09041934 |
17 | 21 | ||
18 | #define MINIX_SUPER_MAGIC 0x137F /* original minix fs */ | 22 | #define MINIX_SUPER_MAGIC 0x137F /* original minix fs */ |
19 | #define MINIX_SUPER_MAGIC2 0x138F /* minix fs, 30 char names */ | 23 | #define MINIX_SUPER_MAGIC2 0x138F /* minix fs, 30 char names */ |
20 | #define MINIX2_SUPER_MAGIC 0x2468 /* minix V2 fs */ | 24 | #define MINIX2_SUPER_MAGIC 0x2468 /* minix V2 fs */ |
21 | #define MINIX2_SUPER_MAGIC2 0x2478 /* minix V2 fs, 30 char names */ | 25 | #define MINIX2_SUPER_MAGIC2 0x2478 /* minix V2 fs, 30 char names */ |
22 | #define MINIX3_SUPER_MAGIC 0x4d5a /* minix V3 fs */ | 26 | #define MINIX3_SUPER_MAGIC 0x4d5a /* minix V3 fs */ |
23 | 27 | ||
24 | #define MSDOS_SUPER_MAGIC 0x4d44 /* MD */ | 28 | #define MSDOS_SUPER_MAGIC 0x4d44 /* MD */ |
25 | #define NCP_SUPER_MAGIC 0x564c /* Guess, what 0x564c is :-) */ | 29 | #define NCP_SUPER_MAGIC 0x564c /* Guess, what 0x564c is :-) */ |
26 | #define NFS_SUPER_MAGIC 0x6969 | 30 | #define NFS_SUPER_MAGIC 0x6969 |
27 | #define OPENPROM_SUPER_MAGIC 0x9fa1 | 31 | #define OPENPROM_SUPER_MAGIC 0x9fa1 |
28 | #define PROC_SUPER_MAGIC 0x9fa0 | 32 | #define PROC_SUPER_MAGIC 0x9fa0 |
29 | #define QNX4_SUPER_MAGIC 0x002f /* qnx4 fs detection */ | 33 | #define QNX4_SUPER_MAGIC 0x002f /* qnx4 fs detection */ |
30 | 34 | ||
31 | #define REISERFS_SUPER_MAGIC 0x52654973 /* used by gcc */ | 35 | #define REISERFS_SUPER_MAGIC 0x52654973 /* used by gcc */ |
32 | /* used by file system utilities that | 36 | /* used by file system utilities that |
33 | look at the superblock, etc. */ | 37 | look at the superblock, etc. */ |
34 | #define REISERFS_SUPER_MAGIC_STRING "ReIsErFs" | 38 | #define REISERFS_SUPER_MAGIC_STRING "ReIsErFs" |
35 | #define REISER2FS_SUPER_MAGIC_STRING "ReIsEr2Fs" | 39 | #define REISER2FS_SUPER_MAGIC_STRING "ReIsEr2Fs" |
36 | #define REISER2FS_JR_SUPER_MAGIC_STRING "ReIsEr3Fs" | 40 | #define REISER2FS_JR_SUPER_MAGIC_STRING "ReIsEr3Fs" |
37 | 41 | ||
38 | #define SMB_SUPER_MAGIC 0x517B | 42 | #define SMB_SUPER_MAGIC 0x517B |
39 | #define USBDEVICE_SUPER_MAGIC 0x9fa2 | 43 | #define USBDEVICE_SUPER_MAGIC 0x9fa2 |
40 | #define CGROUP_SUPER_MAGIC 0x27e0eb | 44 | #define CGROUP_SUPER_MAGIC 0x27e0eb |
41 | 45 | ||
42 | #define FUTEXFS_SUPER_MAGIC 0xBAD1DEA | 46 | #define FUTEXFS_SUPER_MAGIC 0xBAD1DEA |
43 | #define INOTIFYFS_SUPER_MAGIC 0x2BAD1DEA | 47 | #define INOTIFYFS_SUPER_MAGIC 0x2BAD1DEA |
44 | 48 | ||
45 | #endif /* __LINUX_MAGIC_H__ */ | 49 | #endif /* __LINUX_MAGIC_H__ */ |
46 | 50 |
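With the definitions consolidated in <linux/magic.h>, kernel code elsewhere (such as the integrity code this commit prepares for) can recognize these pseudo filesystems by comparing a superblock's s_magic against the shared constants. The helper below is a minimal, hypothetical sketch under that assumption; it is not code from this commit, and the function name is invented for illustration.

#include <linux/fs.h>
#include <linux/magic.h>

/*
 * Hypothetical helper, for illustration only: simple_fill_super() (or a
 * filesystem's own fill_super) stores the magic in sb->s_magic, so a
 * caller can test it against the constants now shared in magic.h.
 */
static bool example_is_special_fs(const struct inode *inode)
{
	switch (inode->i_sb->s_magic) {
	case DEBUGFS_MAGIC:
	case SYSFS_MAGIC:
	case SECURITYFS_MAGIC:
	case TMPFS_MAGIC:
		return true;
	default:
		return false;
	}
}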
mm/shmem.c
1 | /* | 1 | /* |
2 | * Resizable virtual memory filesystem for Linux. | 2 | * Resizable virtual memory filesystem for Linux. |
3 | * | 3 | * |
4 | * Copyright (C) 2000 Linus Torvalds. | 4 | * Copyright (C) 2000 Linus Torvalds. |
5 | * 2000 Transmeta Corp. | 5 | * 2000 Transmeta Corp. |
6 | * 2000-2001 Christoph Rohland | 6 | * 2000-2001 Christoph Rohland |
7 | * 2000-2001 SAP AG | 7 | * 2000-2001 SAP AG |
8 | * 2002 Red Hat Inc. | 8 | * 2002 Red Hat Inc. |
9 | * Copyright (C) 2002-2005 Hugh Dickins. | 9 | * Copyright (C) 2002-2005 Hugh Dickins. |
10 | * Copyright (C) 2002-2005 VERITAS Software Corporation. | 10 | * Copyright (C) 2002-2005 VERITAS Software Corporation. |
11 | * Copyright (C) 2004 Andi Kleen, SuSE Labs | 11 | * Copyright (C) 2004 Andi Kleen, SuSE Labs |
12 | * | 12 | * |
13 | * Extended attribute support for tmpfs: | 13 | * Extended attribute support for tmpfs: |
14 | * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net> | 14 | * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net> |
15 | * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com> | 15 | * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com> |
16 | * | 16 | * |
17 | * This file is released under the GPL. | 17 | * This file is released under the GPL. |
18 | */ | 18 | */ |
19 | 19 | ||
20 | /* | 20 | /* |
21 | * This virtual memory filesystem is heavily based on the ramfs. It | 21 | * This virtual memory filesystem is heavily based on the ramfs. It |
22 | * extends ramfs by the ability to use swap and honor resource limits | 22 | * extends ramfs by the ability to use swap and honor resource limits |
23 | * which makes it a completely usable filesystem. | 23 | * which makes it a completely usable filesystem. |
24 | */ | 24 | */ |
25 | 25 | ||
26 | #include <linux/module.h> | 26 | #include <linux/module.h> |
27 | #include <linux/init.h> | 27 | #include <linux/init.h> |
28 | #include <linux/fs.h> | 28 | #include <linux/fs.h> |
29 | #include <linux/xattr.h> | 29 | #include <linux/xattr.h> |
30 | #include <linux/exportfs.h> | 30 | #include <linux/exportfs.h> |
31 | #include <linux/generic_acl.h> | 31 | #include <linux/generic_acl.h> |
32 | #include <linux/mm.h> | 32 | #include <linux/mm.h> |
33 | #include <linux/mman.h> | 33 | #include <linux/mman.h> |
34 | #include <linux/file.h> | 34 | #include <linux/file.h> |
35 | #include <linux/swap.h> | 35 | #include <linux/swap.h> |
36 | #include <linux/pagemap.h> | 36 | #include <linux/pagemap.h> |
37 | #include <linux/string.h> | 37 | #include <linux/string.h> |
38 | #include <linux/slab.h> | 38 | #include <linux/slab.h> |
39 | #include <linux/backing-dev.h> | 39 | #include <linux/backing-dev.h> |
40 | #include <linux/shmem_fs.h> | 40 | #include <linux/shmem_fs.h> |
41 | #include <linux/mount.h> | 41 | #include <linux/mount.h> |
42 | #include <linux/writeback.h> | 42 | #include <linux/writeback.h> |
43 | #include <linux/vfs.h> | 43 | #include <linux/vfs.h> |
44 | #include <linux/blkdev.h> | 44 | #include <linux/blkdev.h> |
45 | #include <linux/security.h> | 45 | #include <linux/security.h> |
46 | #include <linux/swapops.h> | 46 | #include <linux/swapops.h> |
47 | #include <linux/mempolicy.h> | 47 | #include <linux/mempolicy.h> |
48 | #include <linux/namei.h> | 48 | #include <linux/namei.h> |
49 | #include <linux/ctype.h> | 49 | #include <linux/ctype.h> |
50 | #include <linux/migrate.h> | 50 | #include <linux/migrate.h> |
51 | #include <linux/highmem.h> | 51 | #include <linux/highmem.h> |
52 | #include <linux/seq_file.h> | 52 | #include <linux/seq_file.h> |
53 | #include <linux/magic.h> | ||
53 | 54 | ||
54 | #include <asm/uaccess.h> | 55 | #include <asm/uaccess.h> |
55 | #include <asm/div64.h> | 56 | #include <asm/div64.h> |
56 | #include <asm/pgtable.h> | 57 | #include <asm/pgtable.h> |
57 | |||
58 | /* This magic number is used in glibc for posix shared memory */ | ||
59 | #define TMPFS_MAGIC 0x01021994 | ||
60 | 58 | ||
61 | #define ENTRIES_PER_PAGE (PAGE_CACHE_SIZE/sizeof(unsigned long)) | 59 | #define ENTRIES_PER_PAGE (PAGE_CACHE_SIZE/sizeof(unsigned long)) |
62 | #define ENTRIES_PER_PAGEPAGE (ENTRIES_PER_PAGE*ENTRIES_PER_PAGE) | 60 | #define ENTRIES_PER_PAGEPAGE (ENTRIES_PER_PAGE*ENTRIES_PER_PAGE) |
63 | #define BLOCKS_PER_PAGE (PAGE_CACHE_SIZE/512) | 61 | #define BLOCKS_PER_PAGE (PAGE_CACHE_SIZE/512) |
64 | 62 | ||
65 | #define SHMEM_MAX_INDEX (SHMEM_NR_DIRECT + (ENTRIES_PER_PAGEPAGE/2) * (ENTRIES_PER_PAGE+1)) | 63 | #define SHMEM_MAX_INDEX (SHMEM_NR_DIRECT + (ENTRIES_PER_PAGEPAGE/2) * (ENTRIES_PER_PAGE+1)) |
66 | #define SHMEM_MAX_BYTES ((unsigned long long)SHMEM_MAX_INDEX << PAGE_CACHE_SHIFT) | 64 | #define SHMEM_MAX_BYTES ((unsigned long long)SHMEM_MAX_INDEX << PAGE_CACHE_SHIFT) |
67 | 65 | ||
68 | #define VM_ACCT(size) (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT) | 66 | #define VM_ACCT(size) (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT) |
69 | 67 | ||
70 | /* info->flags needs VM_flags to handle pagein/truncate races efficiently */ | 68 | /* info->flags needs VM_flags to handle pagein/truncate races efficiently */ |
71 | #define SHMEM_PAGEIN VM_READ | 69 | #define SHMEM_PAGEIN VM_READ |
72 | #define SHMEM_TRUNCATE VM_WRITE | 70 | #define SHMEM_TRUNCATE VM_WRITE |
73 | 71 | ||
74 | /* Definition to limit shmem_truncate's steps between cond_rescheds */ | 72 | /* Definition to limit shmem_truncate's steps between cond_rescheds */ |
75 | #define LATENCY_LIMIT 64 | 73 | #define LATENCY_LIMIT 64 |
76 | 74 | ||
77 | /* Pretend that each entry is of this size in directory's i_size */ | 75 | /* Pretend that each entry is of this size in directory's i_size */ |
78 | #define BOGO_DIRENT_SIZE 20 | 76 | #define BOGO_DIRENT_SIZE 20 |
79 | 77 | ||
80 | /* Flag allocation requirements to shmem_getpage and shmem_swp_alloc */ | 78 | /* Flag allocation requirements to shmem_getpage and shmem_swp_alloc */ |
81 | enum sgp_type { | 79 | enum sgp_type { |
82 | SGP_READ, /* don't exceed i_size, don't allocate page */ | 80 | SGP_READ, /* don't exceed i_size, don't allocate page */ |
83 | SGP_CACHE, /* don't exceed i_size, may allocate page */ | 81 | SGP_CACHE, /* don't exceed i_size, may allocate page */ |
84 | SGP_DIRTY, /* like SGP_CACHE, but set new page dirty */ | 82 | SGP_DIRTY, /* like SGP_CACHE, but set new page dirty */ |
85 | SGP_WRITE, /* may exceed i_size, may allocate page */ | 83 | SGP_WRITE, /* may exceed i_size, may allocate page */ |
86 | }; | 84 | }; |
87 | 85 | ||
88 | #ifdef CONFIG_TMPFS | 86 | #ifdef CONFIG_TMPFS |
89 | static unsigned long shmem_default_max_blocks(void) | 87 | static unsigned long shmem_default_max_blocks(void) |
90 | { | 88 | { |
91 | return totalram_pages / 2; | 89 | return totalram_pages / 2; |
92 | } | 90 | } |
93 | 91 | ||
94 | static unsigned long shmem_default_max_inodes(void) | 92 | static unsigned long shmem_default_max_inodes(void) |
95 | { | 93 | { |
96 | return min(totalram_pages - totalhigh_pages, totalram_pages / 2); | 94 | return min(totalram_pages - totalhigh_pages, totalram_pages / 2); |
97 | } | 95 | } |
98 | #endif | 96 | #endif |
99 | 97 | ||
100 | static int shmem_getpage(struct inode *inode, unsigned long idx, | 98 | static int shmem_getpage(struct inode *inode, unsigned long idx, |
101 | struct page **pagep, enum sgp_type sgp, int *type); | 99 | struct page **pagep, enum sgp_type sgp, int *type); |
102 | 100 | ||
103 | static inline struct page *shmem_dir_alloc(gfp_t gfp_mask) | 101 | static inline struct page *shmem_dir_alloc(gfp_t gfp_mask) |
104 | { | 102 | { |
105 | /* | 103 | /* |
106 | * The above definition of ENTRIES_PER_PAGE, and the use of | 104 | * The above definition of ENTRIES_PER_PAGE, and the use of |
107 | * BLOCKS_PER_PAGE on indirect pages, assume PAGE_CACHE_SIZE: | 105 | * BLOCKS_PER_PAGE on indirect pages, assume PAGE_CACHE_SIZE: |
108 | * might be reconsidered if it ever diverges from PAGE_SIZE. | 106 | * might be reconsidered if it ever diverges from PAGE_SIZE. |
109 | * | 107 | * |
110 | * Mobility flags are masked out as swap vectors cannot move | 108 | * Mobility flags are masked out as swap vectors cannot move |
111 | */ | 109 | */ |
112 | return alloc_pages((gfp_mask & ~GFP_MOVABLE_MASK) | __GFP_ZERO, | 110 | return alloc_pages((gfp_mask & ~GFP_MOVABLE_MASK) | __GFP_ZERO, |
113 | PAGE_CACHE_SHIFT-PAGE_SHIFT); | 111 | PAGE_CACHE_SHIFT-PAGE_SHIFT); |
114 | } | 112 | } |
115 | 113 | ||
116 | static inline void shmem_dir_free(struct page *page) | 114 | static inline void shmem_dir_free(struct page *page) |
117 | { | 115 | { |
118 | __free_pages(page, PAGE_CACHE_SHIFT-PAGE_SHIFT); | 116 | __free_pages(page, PAGE_CACHE_SHIFT-PAGE_SHIFT); |
119 | } | 117 | } |
120 | 118 | ||
121 | static struct page **shmem_dir_map(struct page *page) | 119 | static struct page **shmem_dir_map(struct page *page) |
122 | { | 120 | { |
123 | return (struct page **)kmap_atomic(page, KM_USER0); | 121 | return (struct page **)kmap_atomic(page, KM_USER0); |
124 | } | 122 | } |
125 | 123 | ||
126 | static inline void shmem_dir_unmap(struct page **dir) | 124 | static inline void shmem_dir_unmap(struct page **dir) |
127 | { | 125 | { |
128 | kunmap_atomic(dir, KM_USER0); | 126 | kunmap_atomic(dir, KM_USER0); |
129 | } | 127 | } |
130 | 128 | ||
131 | static swp_entry_t *shmem_swp_map(struct page *page) | 129 | static swp_entry_t *shmem_swp_map(struct page *page) |
132 | { | 130 | { |
133 | return (swp_entry_t *)kmap_atomic(page, KM_USER1); | 131 | return (swp_entry_t *)kmap_atomic(page, KM_USER1); |
134 | } | 132 | } |
135 | 133 | ||
136 | static inline void shmem_swp_balance_unmap(void) | 134 | static inline void shmem_swp_balance_unmap(void) |
137 | { | 135 | { |
138 | /* | 136 | /* |
139 | * When passing a pointer to an i_direct entry, to code which | 137 | * When passing a pointer to an i_direct entry, to code which |
140 | * also handles indirect entries and so will shmem_swp_unmap, | 138 | * also handles indirect entries and so will shmem_swp_unmap, |
141 | * we must arrange for the preempt count to remain in balance. | 139 | * we must arrange for the preempt count to remain in balance. |
142 | * What kmap_atomic of a lowmem page does depends on config | 140 | * What kmap_atomic of a lowmem page does depends on config |
143 | * and architecture, so pretend to kmap_atomic some lowmem page. | 141 | * and architecture, so pretend to kmap_atomic some lowmem page. |
144 | */ | 142 | */ |
145 | (void) kmap_atomic(ZERO_PAGE(0), KM_USER1); | 143 | (void) kmap_atomic(ZERO_PAGE(0), KM_USER1); |
146 | } | 144 | } |
147 | 145 | ||
148 | static inline void shmem_swp_unmap(swp_entry_t *entry) | 146 | static inline void shmem_swp_unmap(swp_entry_t *entry) |
149 | { | 147 | { |
150 | kunmap_atomic(entry, KM_USER1); | 148 | kunmap_atomic(entry, KM_USER1); |
151 | } | 149 | } |
152 | 150 | ||
153 | static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb) | 151 | static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb) |
154 | { | 152 | { |
155 | return sb->s_fs_info; | 153 | return sb->s_fs_info; |
156 | } | 154 | } |
157 | 155 | ||
158 | /* | 156 | /* |
159 | * shmem_file_setup pre-accounts the whole fixed size of a VM object, | 157 | * shmem_file_setup pre-accounts the whole fixed size of a VM object, |
160 | * for shared memory and for shared anonymous (/dev/zero) mappings | 158 | * for shared memory and for shared anonymous (/dev/zero) mappings |
161 | * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1), | 159 | * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1), |
162 | * consistent with the pre-accounting of private mappings ... | 160 | * consistent with the pre-accounting of private mappings ... |
163 | */ | 161 | */ |
164 | static inline int shmem_acct_size(unsigned long flags, loff_t size) | 162 | static inline int shmem_acct_size(unsigned long flags, loff_t size) |
165 | { | 163 | { |
166 | return (flags & VM_ACCOUNT)? | 164 | return (flags & VM_ACCOUNT)? |
167 | security_vm_enough_memory(VM_ACCT(size)): 0; | 165 | security_vm_enough_memory(VM_ACCT(size)): 0; |
168 | } | 166 | } |
169 | 167 | ||
170 | static inline void shmem_unacct_size(unsigned long flags, loff_t size) | 168 | static inline void shmem_unacct_size(unsigned long flags, loff_t size) |
171 | { | 169 | { |
172 | if (flags & VM_ACCOUNT) | 170 | if (flags & VM_ACCOUNT) |
173 | vm_unacct_memory(VM_ACCT(size)); | 171 | vm_unacct_memory(VM_ACCT(size)); |
174 | } | 172 | } |
175 | 173 | ||
176 | /* | 174 | /* |
177 | * ... whereas tmpfs objects are accounted incrementally as | 175 | * ... whereas tmpfs objects are accounted incrementally as |
178 | * pages are allocated, in order to allow huge sparse files. | 176 | * pages are allocated, in order to allow huge sparse files. |
179 | * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM, | 177 | * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM, |
180 | * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM. | 178 | * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM. |
181 | */ | 179 | */ |
182 | static inline int shmem_acct_block(unsigned long flags) | 180 | static inline int shmem_acct_block(unsigned long flags) |
183 | { | 181 | { |
184 | return (flags & VM_ACCOUNT)? | 182 | return (flags & VM_ACCOUNT)? |
185 | 0: security_vm_enough_memory(VM_ACCT(PAGE_CACHE_SIZE)); | 183 | 0: security_vm_enough_memory(VM_ACCT(PAGE_CACHE_SIZE)); |
186 | } | 184 | } |
187 | 185 | ||
188 | static inline void shmem_unacct_blocks(unsigned long flags, long pages) | 186 | static inline void shmem_unacct_blocks(unsigned long flags, long pages) |
189 | { | 187 | { |
190 | if (!(flags & VM_ACCOUNT)) | 188 | if (!(flags & VM_ACCOUNT)) |
191 | vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE)); | 189 | vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE)); |
192 | } | 190 | } |
193 | 191 | ||
194 | static const struct super_operations shmem_ops; | 192 | static const struct super_operations shmem_ops; |
195 | static const struct address_space_operations shmem_aops; | 193 | static const struct address_space_operations shmem_aops; |
196 | static const struct file_operations shmem_file_operations; | 194 | static const struct file_operations shmem_file_operations; |
197 | static const struct inode_operations shmem_inode_operations; | 195 | static const struct inode_operations shmem_inode_operations; |
198 | static const struct inode_operations shmem_dir_inode_operations; | 196 | static const struct inode_operations shmem_dir_inode_operations; |
199 | static const struct inode_operations shmem_special_inode_operations; | 197 | static const struct inode_operations shmem_special_inode_operations; |
200 | static struct vm_operations_struct shmem_vm_ops; | 198 | static struct vm_operations_struct shmem_vm_ops; |
201 | 199 | ||
202 | static struct backing_dev_info shmem_backing_dev_info __read_mostly = { | 200 | static struct backing_dev_info shmem_backing_dev_info __read_mostly = { |
203 | .ra_pages = 0, /* No readahead */ | 201 | .ra_pages = 0, /* No readahead */ |
204 | .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK, | 202 | .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK, |
205 | .unplug_io_fn = default_unplug_io_fn, | 203 | .unplug_io_fn = default_unplug_io_fn, |
206 | }; | 204 | }; |
207 | 205 | ||
208 | static LIST_HEAD(shmem_swaplist); | 206 | static LIST_HEAD(shmem_swaplist); |
209 | static DEFINE_MUTEX(shmem_swaplist_mutex); | 207 | static DEFINE_MUTEX(shmem_swaplist_mutex); |
210 | 208 | ||
211 | static void shmem_free_blocks(struct inode *inode, long pages) | 209 | static void shmem_free_blocks(struct inode *inode, long pages) |
212 | { | 210 | { |
213 | struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); | 211 | struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); |
214 | if (sbinfo->max_blocks) { | 212 | if (sbinfo->max_blocks) { |
215 | spin_lock(&sbinfo->stat_lock); | 213 | spin_lock(&sbinfo->stat_lock); |
216 | sbinfo->free_blocks += pages; | 214 | sbinfo->free_blocks += pages; |
217 | inode->i_blocks -= pages*BLOCKS_PER_PAGE; | 215 | inode->i_blocks -= pages*BLOCKS_PER_PAGE; |
218 | spin_unlock(&sbinfo->stat_lock); | 216 | spin_unlock(&sbinfo->stat_lock); |
219 | } | 217 | } |
220 | } | 218 | } |
221 | 219 | ||
222 | static int shmem_reserve_inode(struct super_block *sb) | 220 | static int shmem_reserve_inode(struct super_block *sb) |
223 | { | 221 | { |
224 | struct shmem_sb_info *sbinfo = SHMEM_SB(sb); | 222 | struct shmem_sb_info *sbinfo = SHMEM_SB(sb); |
225 | if (sbinfo->max_inodes) { | 223 | if (sbinfo->max_inodes) { |
226 | spin_lock(&sbinfo->stat_lock); | 224 | spin_lock(&sbinfo->stat_lock); |
227 | if (!sbinfo->free_inodes) { | 225 | if (!sbinfo->free_inodes) { |
228 | spin_unlock(&sbinfo->stat_lock); | 226 | spin_unlock(&sbinfo->stat_lock); |
229 | return -ENOSPC; | 227 | return -ENOSPC; |
230 | } | 228 | } |
231 | sbinfo->free_inodes--; | 229 | sbinfo->free_inodes--; |
232 | spin_unlock(&sbinfo->stat_lock); | 230 | spin_unlock(&sbinfo->stat_lock); |
233 | } | 231 | } |
234 | return 0; | 232 | return 0; |
235 | } | 233 | } |
236 | 234 | ||
237 | static void shmem_free_inode(struct super_block *sb) | 235 | static void shmem_free_inode(struct super_block *sb) |
238 | { | 236 | { |
239 | struct shmem_sb_info *sbinfo = SHMEM_SB(sb); | 237 | struct shmem_sb_info *sbinfo = SHMEM_SB(sb); |
240 | if (sbinfo->max_inodes) { | 238 | if (sbinfo->max_inodes) { |
241 | spin_lock(&sbinfo->stat_lock); | 239 | spin_lock(&sbinfo->stat_lock); |
242 | sbinfo->free_inodes++; | 240 | sbinfo->free_inodes++; |
243 | spin_unlock(&sbinfo->stat_lock); | 241 | spin_unlock(&sbinfo->stat_lock); |
244 | } | 242 | } |
245 | } | 243 | } |
246 | 244 | ||
247 | /** | 245 | /** |
248 | * shmem_recalc_inode - recalculate the size of an inode | 246 | * shmem_recalc_inode - recalculate the size of an inode |
249 | * @inode: inode to recalc | 247 | * @inode: inode to recalc |
250 | * | 248 | * |
251 | * We have to calculate the free blocks since the mm can drop | 249 | * We have to calculate the free blocks since the mm can drop |
252 | * undirtied hole pages behind our back. | 250 | * undirtied hole pages behind our back. |
253 | * | 251 | * |
254 | * But normally info->alloced == inode->i_mapping->nrpages + info->swapped | 252 | * But normally info->alloced == inode->i_mapping->nrpages + info->swapped |
255 | * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped) | 253 | * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped) |
256 | * | 254 | * |
257 | * It has to be called with the spinlock held. | 255 | * It has to be called with the spinlock held. |
258 | */ | 256 | */ |
259 | static void shmem_recalc_inode(struct inode *inode) | 257 | static void shmem_recalc_inode(struct inode *inode) |
260 | { | 258 | { |
261 | struct shmem_inode_info *info = SHMEM_I(inode); | 259 | struct shmem_inode_info *info = SHMEM_I(inode); |
262 | long freed; | 260 | long freed; |
263 | 261 | ||
264 | freed = info->alloced - info->swapped - inode->i_mapping->nrpages; | 262 | freed = info->alloced - info->swapped - inode->i_mapping->nrpages; |
265 | if (freed > 0) { | 263 | if (freed > 0) { |
266 | info->alloced -= freed; | 264 | info->alloced -= freed; |
267 | shmem_unacct_blocks(info->flags, freed); | 265 | shmem_unacct_blocks(info->flags, freed); |
268 | shmem_free_blocks(inode, freed); | 266 | shmem_free_blocks(inode, freed); |
269 | } | 267 | } |
270 | } | 268 | } |
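For illustration only (numbers are arbitrary, not from the patch), the bookkeeping above works out as:

/*
 * Worked example for shmem_recalc_inode():
 *   info->alloced = 10   pages ever accounted to this inode
 *   info->swapped =  2   of them currently live in swap
 *   nrpages       =  5   of them currently live in the page cache
 *   freed = 10 - 2 - 5 = 3 pages the mm reclaimed behind our back,
 * so 3 accounted pages and 3 blocks are returned via
 * shmem_unacct_blocks() and shmem_free_blocks().
 */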
271 | 269 | ||
272 | /** | 270 | /** |
273 | * shmem_swp_entry - find the swap vector position in the info structure | 271 | * shmem_swp_entry - find the swap vector position in the info structure |
274 | * @info: info structure for the inode | 272 | * @info: info structure for the inode |
275 | * @index: index of the page to find | 273 | * @index: index of the page to find |
276 | * @page: optional page to add to the structure. Has to be preset to | 274 | * @page: optional page to add to the structure. Has to be preset to |
277 | * all zeros | 275 | * all zeros |
278 | * | 276 | * |
279 | * If there is no space allocated yet it will return NULL when | 277 | * If there is no space allocated yet it will return NULL when |
280 | * page is NULL, else it will use the page for the needed block, | 278 | * page is NULL, else it will use the page for the needed block, |
281 | * setting it to NULL on return to indicate that it has been used. | 279 | * setting it to NULL on return to indicate that it has been used. |
282 | * | 280 | * |
283 | * The swap vector is organized as follows: | 281 | * The swap vector is organized as follows: |
284 | * | 282 | * |
285 | * There are SHMEM_NR_DIRECT entries directly stored in the | 283 | * There are SHMEM_NR_DIRECT entries directly stored in the |
286 | * shmem_inode_info structure. So small files do not need an additional | 284 | * shmem_inode_info structure. So small files do not need an additional |
287 | * allocation. | 285 | * allocation. |
288 | * | 286 | * |
289 | * For pages with index > SHMEM_NR_DIRECT there is the pointer | 287 | * For pages with index > SHMEM_NR_DIRECT there is the pointer |
290 | * i_indirect which points to a page which holds in the first half | 288 | * i_indirect which points to a page which holds in the first half |
291 | * doubly indirect blocks, in the second half triple indirect blocks: | 289 | * doubly indirect blocks, in the second half triple indirect blocks: |
292 | * | 290 | * |
293 | * For an artificial ENTRIES_PER_PAGE = 4 this would lead to the | 291 | * For an artificial ENTRIES_PER_PAGE = 4 this would lead to the |
294 | * following layout (for SHMEM_NR_DIRECT == 16): | 292 | * following layout (for SHMEM_NR_DIRECT == 16): |
295 | * | 293 | * |
296 | * i_indirect -> dir --> 16-19 | 294 | * i_indirect -> dir --> 16-19 |
297 | * | +-> 20-23 | 295 | * | +-> 20-23 |
298 | * | | 296 | * | |
299 | * +-->dir2 --> 24-27 | 297 | * +-->dir2 --> 24-27 |
300 | * | +-> 28-31 | 298 | * | +-> 28-31 |
301 | * | +-> 32-35 | 299 | * | +-> 32-35 |
302 | * | +-> 36-39 | 300 | * | +-> 36-39 |
303 | * | | 301 | * | |
304 | * +-->dir3 --> 40-43 | 302 | * +-->dir3 --> 40-43 |
305 | * +-> 44-47 | 303 | * +-> 44-47 |
306 | * +-> 48-51 | 304 | * +-> 48-51 |
307 | * +-> 52-55 | 305 | * +-> 52-55 |
308 | */ | 306 | */ |
309 | static swp_entry_t *shmem_swp_entry(struct shmem_inode_info *info, unsigned long index, struct page **page) | 307 | static swp_entry_t *shmem_swp_entry(struct shmem_inode_info *info, unsigned long index, struct page **page) |
310 | { | 308 | { |
311 | unsigned long offset; | 309 | unsigned long offset; |
312 | struct page **dir; | 310 | struct page **dir; |
313 | struct page *subdir; | 311 | struct page *subdir; |
314 | 312 | ||
315 | if (index < SHMEM_NR_DIRECT) { | 313 | if (index < SHMEM_NR_DIRECT) { |
316 | shmem_swp_balance_unmap(); | 314 | shmem_swp_balance_unmap(); |
317 | return info->i_direct+index; | 315 | return info->i_direct+index; |
318 | } | 316 | } |
319 | if (!info->i_indirect) { | 317 | if (!info->i_indirect) { |
320 | if (page) { | 318 | if (page) { |
321 | info->i_indirect = *page; | 319 | info->i_indirect = *page; |
322 | *page = NULL; | 320 | *page = NULL; |
323 | } | 321 | } |
324 | return NULL; /* need another page */ | 322 | return NULL; /* need another page */ |
325 | } | 323 | } |
326 | 324 | ||
327 | index -= SHMEM_NR_DIRECT; | 325 | index -= SHMEM_NR_DIRECT; |
328 | offset = index % ENTRIES_PER_PAGE; | 326 | offset = index % ENTRIES_PER_PAGE; |
329 | index /= ENTRIES_PER_PAGE; | 327 | index /= ENTRIES_PER_PAGE; |
330 | dir = shmem_dir_map(info->i_indirect); | 328 | dir = shmem_dir_map(info->i_indirect); |
331 | 329 | ||
332 | if (index >= ENTRIES_PER_PAGE/2) { | 330 | if (index >= ENTRIES_PER_PAGE/2) { |
333 | index -= ENTRIES_PER_PAGE/2; | 331 | index -= ENTRIES_PER_PAGE/2; |
334 | dir += ENTRIES_PER_PAGE/2 + index/ENTRIES_PER_PAGE; | 332 | dir += ENTRIES_PER_PAGE/2 + index/ENTRIES_PER_PAGE; |
335 | index %= ENTRIES_PER_PAGE; | 333 | index %= ENTRIES_PER_PAGE; |
336 | subdir = *dir; | 334 | subdir = *dir; |
337 | if (!subdir) { | 335 | if (!subdir) { |
338 | if (page) { | 336 | if (page) { |
339 | *dir = *page; | 337 | *dir = *page; |
340 | *page = NULL; | 338 | *page = NULL; |
341 | } | 339 | } |
342 | shmem_dir_unmap(dir); | 340 | shmem_dir_unmap(dir); |
343 | return NULL; /* need another page */ | 341 | return NULL; /* need another page */ |
344 | } | 342 | } |
345 | shmem_dir_unmap(dir); | 343 | shmem_dir_unmap(dir); |
346 | dir = shmem_dir_map(subdir); | 344 | dir = shmem_dir_map(subdir); |
347 | } | 345 | } |
348 | 346 | ||
349 | dir += index; | 347 | dir += index; |
350 | subdir = *dir; | 348 | subdir = *dir; |
351 | if (!subdir) { | 349 | if (!subdir) { |
352 | if (!page || !(subdir = *page)) { | 350 | if (!page || !(subdir = *page)) { |
353 | shmem_dir_unmap(dir); | 351 | shmem_dir_unmap(dir); |
354 | return NULL; /* need a page */ | 352 | return NULL; /* need a page */ |
355 | } | 353 | } |
356 | *dir = subdir; | 354 | *dir = subdir; |
357 | *page = NULL; | 355 | *page = NULL; |
358 | } | 356 | } |
359 | shmem_dir_unmap(dir); | 357 | shmem_dir_unmap(dir); |
360 | return shmem_swp_map(subdir) + offset; | 358 | return shmem_swp_map(subdir) + offset; |
361 | } | 359 | } |
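For illustration only (not part of the patched file): one index traced through the lookup above, using the same artificial ENTRIES_PER_PAGE == 4 and SHMEM_NR_DIRECT == 16 as the layout diagram in the comment.

/*
 * Trace of shmem_swp_entry(info, 29, NULL) with ENTRIES_PER_PAGE == 4
 * and SHMEM_NR_DIRECT == 16:
 *
 *   29 >= 16                 -> not in i_direct[]
 *   index  = 29 - 16 = 13
 *   offset = 13 % 4  = 1
 *   index  = 13 / 4  = 3     -> 3 >= 4/2, so the triple-indirect half
 *   index  = 3 - 2   = 1
 *   dir   += 2 + 1/4 = 2     -> topdir slot holding dir2
 *   index %= 4       = 1     -> second leaf under dir2, i.e. pages 28-31
 *   return shmem_swp_map(that leaf) + 1, the entry for page 29
 */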
362 | 360 | ||
363 | static void shmem_swp_set(struct shmem_inode_info *info, swp_entry_t *entry, unsigned long value) | 361 | static void shmem_swp_set(struct shmem_inode_info *info, swp_entry_t *entry, unsigned long value) |
364 | { | 362 | { |
365 | long incdec = value? 1: -1; | 363 | long incdec = value? 1: -1; |
366 | 364 | ||
367 | entry->val = value; | 365 | entry->val = value; |
368 | info->swapped += incdec; | 366 | info->swapped += incdec; |
369 | if ((unsigned long)(entry - info->i_direct) >= SHMEM_NR_DIRECT) { | 367 | if ((unsigned long)(entry - info->i_direct) >= SHMEM_NR_DIRECT) { |
370 | struct page *page = kmap_atomic_to_page(entry); | 368 | struct page *page = kmap_atomic_to_page(entry); |
371 | set_page_private(page, page_private(page) + incdec); | 369 | set_page_private(page, page_private(page) + incdec); |
372 | } | 370 | } |
373 | } | 371 | } |
374 | 372 | ||
375 | /** | 373 | /** |
376 | * shmem_swp_alloc - get the position of the swap entry for the page. | 374 | * shmem_swp_alloc - get the position of the swap entry for the page. |
377 | * @info: info structure for the inode | 375 | * @info: info structure for the inode |
378 | * @index: index of the page to find | 376 | * @index: index of the page to find |
379 | * @sgp: check and recheck i_size? skip allocation? | 377 | * @sgp: check and recheck i_size? skip allocation? |
380 | * | 378 | * |
381 | * If the entry does not exist, allocate it. | 379 | * If the entry does not exist, allocate it. |
382 | */ | 380 | */ |
383 | static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long index, enum sgp_type sgp) | 381 | static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long index, enum sgp_type sgp) |
384 | { | 382 | { |
385 | struct inode *inode = &info->vfs_inode; | 383 | struct inode *inode = &info->vfs_inode; |
386 | struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); | 384 | struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); |
387 | struct page *page = NULL; | 385 | struct page *page = NULL; |
388 | swp_entry_t *entry; | 386 | swp_entry_t *entry; |
389 | 387 | ||
390 | if (sgp != SGP_WRITE && | 388 | if (sgp != SGP_WRITE && |
391 | ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) | 389 | ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) |
392 | return ERR_PTR(-EINVAL); | 390 | return ERR_PTR(-EINVAL); |
393 | 391 | ||
394 | while (!(entry = shmem_swp_entry(info, index, &page))) { | 392 | while (!(entry = shmem_swp_entry(info, index, &page))) { |
395 | if (sgp == SGP_READ) | 393 | if (sgp == SGP_READ) |
396 | return shmem_swp_map(ZERO_PAGE(0)); | 394 | return shmem_swp_map(ZERO_PAGE(0)); |
397 | /* | 395 | /* |
398 | * Test free_blocks against 1 not 0, since we have 1 data | 396 | * Test free_blocks against 1 not 0, since we have 1 data |
399 | * page (and perhaps indirect index pages) yet to allocate: | 397 | * page (and perhaps indirect index pages) yet to allocate: |
400 | * a waste to allocate index if we cannot allocate data. | 398 | * a waste to allocate index if we cannot allocate data. |
401 | */ | 399 | */ |
402 | if (sbinfo->max_blocks) { | 400 | if (sbinfo->max_blocks) { |
403 | spin_lock(&sbinfo->stat_lock); | 401 | spin_lock(&sbinfo->stat_lock); |
404 | if (sbinfo->free_blocks <= 1) { | 402 | if (sbinfo->free_blocks <= 1) { |
405 | spin_unlock(&sbinfo->stat_lock); | 403 | spin_unlock(&sbinfo->stat_lock); |
406 | return ERR_PTR(-ENOSPC); | 404 | return ERR_PTR(-ENOSPC); |
407 | } | 405 | } |
408 | sbinfo->free_blocks--; | 406 | sbinfo->free_blocks--; |
409 | inode->i_blocks += BLOCKS_PER_PAGE; | 407 | inode->i_blocks += BLOCKS_PER_PAGE; |
410 | spin_unlock(&sbinfo->stat_lock); | 408 | spin_unlock(&sbinfo->stat_lock); |
411 | } | 409 | } |
412 | 410 | ||
413 | spin_unlock(&info->lock); | 411 | spin_unlock(&info->lock); |
414 | page = shmem_dir_alloc(mapping_gfp_mask(inode->i_mapping)); | 412 | page = shmem_dir_alloc(mapping_gfp_mask(inode->i_mapping)); |
415 | if (page) | 413 | if (page) |
416 | set_page_private(page, 0); | 414 | set_page_private(page, 0); |
417 | spin_lock(&info->lock); | 415 | spin_lock(&info->lock); |
418 | 416 | ||
419 | if (!page) { | 417 | if (!page) { |
420 | shmem_free_blocks(inode, 1); | 418 | shmem_free_blocks(inode, 1); |
421 | return ERR_PTR(-ENOMEM); | 419 | return ERR_PTR(-ENOMEM); |
422 | } | 420 | } |
423 | if (sgp != SGP_WRITE && | 421 | if (sgp != SGP_WRITE && |
424 | ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) { | 422 | ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) { |
425 | entry = ERR_PTR(-EINVAL); | 423 | entry = ERR_PTR(-EINVAL); |
426 | break; | 424 | break; |
427 | } | 425 | } |
428 | if (info->next_index <= index) | 426 | if (info->next_index <= index) |
429 | info->next_index = index + 1; | 427 | info->next_index = index + 1; |
430 | } | 428 | } |
431 | if (page) { | 429 | if (page) { |
432 | /* another task gave its page, or truncated the file */ | 430 | /* another task gave its page, or truncated the file */ |
433 | shmem_free_blocks(inode, 1); | 431 | shmem_free_blocks(inode, 1); |
434 | shmem_dir_free(page); | 432 | shmem_dir_free(page); |
435 | } | 433 | } |
436 | if (info->next_index <= index && !IS_ERR(entry)) | 434 | if (info->next_index <= index && !IS_ERR(entry)) |
437 | info->next_index = index + 1; | 435 | info->next_index = index + 1; |
438 | return entry; | 436 | return entry; |
439 | } | 437 | } |
440 | 438 | ||
441 | /** | 439 | /** |
442 | * shmem_free_swp - free some swap entries in a directory | 440 | * shmem_free_swp - free some swap entries in a directory |
443 | * @dir: pointer to the directory | 441 | * @dir: pointer to the directory |
444 | * @edir: pointer after last entry of the directory | 442 | * @edir: pointer after last entry of the directory |
445 | * @punch_lock: pointer to spinlock when needed for the holepunch case | 443 | * @punch_lock: pointer to spinlock when needed for the holepunch case |
446 | */ | 444 | */ |
447 | static int shmem_free_swp(swp_entry_t *dir, swp_entry_t *edir, | 445 | static int shmem_free_swp(swp_entry_t *dir, swp_entry_t *edir, |
448 | spinlock_t *punch_lock) | 446 | spinlock_t *punch_lock) |
449 | { | 447 | { |
450 | spinlock_t *punch_unlock = NULL; | 448 | spinlock_t *punch_unlock = NULL; |
451 | swp_entry_t *ptr; | 449 | swp_entry_t *ptr; |
452 | int freed = 0; | 450 | int freed = 0; |
453 | 451 | ||
454 | for (ptr = dir; ptr < edir; ptr++) { | 452 | for (ptr = dir; ptr < edir; ptr++) { |
455 | if (ptr->val) { | 453 | if (ptr->val) { |
456 | if (unlikely(punch_lock)) { | 454 | if (unlikely(punch_lock)) { |
457 | punch_unlock = punch_lock; | 455 | punch_unlock = punch_lock; |
458 | punch_lock = NULL; | 456 | punch_lock = NULL; |
459 | spin_lock(punch_unlock); | 457 | spin_lock(punch_unlock); |
460 | if (!ptr->val) | 458 | if (!ptr->val) |
461 | continue; | 459 | continue; |
462 | } | 460 | } |
463 | free_swap_and_cache(*ptr); | 461 | free_swap_and_cache(*ptr); |
464 | *ptr = (swp_entry_t){0}; | 462 | *ptr = (swp_entry_t){0}; |
465 | freed++; | 463 | freed++; |
466 | } | 464 | } |
467 | } | 465 | } |
468 | if (punch_unlock) | 466 | if (punch_unlock) |
469 | spin_unlock(punch_unlock); | 467 | spin_unlock(punch_unlock); |
470 | return freed; | 468 | return freed; |
471 | } | 469 | } |
472 | 470 | ||
473 | static int shmem_map_and_free_swp(struct page *subdir, int offset, | 471 | static int shmem_map_and_free_swp(struct page *subdir, int offset, |
474 | int limit, struct page ***dir, spinlock_t *punch_lock) | 472 | int limit, struct page ***dir, spinlock_t *punch_lock) |
475 | { | 473 | { |
476 | swp_entry_t *ptr; | 474 | swp_entry_t *ptr; |
477 | int freed = 0; | 475 | int freed = 0; |
478 | 476 | ||
479 | ptr = shmem_swp_map(subdir); | 477 | ptr = shmem_swp_map(subdir); |
480 | for (; offset < limit; offset += LATENCY_LIMIT) { | 478 | for (; offset < limit; offset += LATENCY_LIMIT) { |
481 | int size = limit - offset; | 479 | int size = limit - offset; |
482 | if (size > LATENCY_LIMIT) | 480 | if (size > LATENCY_LIMIT) |
483 | size = LATENCY_LIMIT; | 481 | size = LATENCY_LIMIT; |
484 | freed += shmem_free_swp(ptr+offset, ptr+offset+size, | 482 | freed += shmem_free_swp(ptr+offset, ptr+offset+size, |
485 | punch_lock); | 483 | punch_lock); |
486 | if (need_resched()) { | 484 | if (need_resched()) { |
487 | shmem_swp_unmap(ptr); | 485 | shmem_swp_unmap(ptr); |
488 | if (*dir) { | 486 | if (*dir) { |
489 | shmem_dir_unmap(*dir); | 487 | shmem_dir_unmap(*dir); |
490 | *dir = NULL; | 488 | *dir = NULL; |
491 | } | 489 | } |
492 | cond_resched(); | 490 | cond_resched(); |
493 | ptr = shmem_swp_map(subdir); | 491 | ptr = shmem_swp_map(subdir); |
494 | } | 492 | } |
495 | } | 493 | } |
496 | shmem_swp_unmap(ptr); | 494 | shmem_swp_unmap(ptr); |
497 | return freed; | 495 | return freed; |
498 | } | 496 | } |
499 | 497 | ||
500 | static void shmem_free_pages(struct list_head *next) | 498 | static void shmem_free_pages(struct list_head *next) |
501 | { | 499 | { |
502 | struct page *page; | 500 | struct page *page; |
503 | int freed = 0; | 501 | int freed = 0; |
504 | 502 | ||
505 | do { | 503 | do { |
506 | page = container_of(next, struct page, lru); | 504 | page = container_of(next, struct page, lru); |
507 | next = next->next; | 505 | next = next->next; |
508 | shmem_dir_free(page); | 506 | shmem_dir_free(page); |
509 | freed++; | 507 | freed++; |
510 | if (freed >= LATENCY_LIMIT) { | 508 | if (freed >= LATENCY_LIMIT) { |
511 | cond_resched(); | 509 | cond_resched(); |
512 | freed = 0; | 510 | freed = 0; |
513 | } | 511 | } |
514 | } while (next); | 512 | } while (next); |
515 | } | 513 | } |
516 | 514 | ||
517 | static void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end) | 515 | static void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end) |
518 | { | 516 | { |
519 | struct shmem_inode_info *info = SHMEM_I(inode); | 517 | struct shmem_inode_info *info = SHMEM_I(inode); |
520 | unsigned long idx; | 518 | unsigned long idx; |
521 | unsigned long size; | 519 | unsigned long size; |
522 | unsigned long limit; | 520 | unsigned long limit; |
523 | unsigned long stage; | 521 | unsigned long stage; |
524 | unsigned long diroff; | 522 | unsigned long diroff; |
525 | struct page **dir; | 523 | struct page **dir; |
526 | struct page *topdir; | 524 | struct page *topdir; |
527 | struct page *middir; | 525 | struct page *middir; |
528 | struct page *subdir; | 526 | struct page *subdir; |
529 | swp_entry_t *ptr; | 527 | swp_entry_t *ptr; |
530 | LIST_HEAD(pages_to_free); | 528 | LIST_HEAD(pages_to_free); |
531 | long nr_pages_to_free = 0; | 529 | long nr_pages_to_free = 0; |
532 | long nr_swaps_freed = 0; | 530 | long nr_swaps_freed = 0; |
533 | int offset; | 531 | int offset; |
534 | int freed; | 532 | int freed; |
535 | int punch_hole; | 533 | int punch_hole; |
536 | spinlock_t *needs_lock; | 534 | spinlock_t *needs_lock; |
537 | spinlock_t *punch_lock; | 535 | spinlock_t *punch_lock; |
538 | unsigned long upper_limit; | 536 | unsigned long upper_limit; |
539 | 537 | ||
540 | inode->i_ctime = inode->i_mtime = CURRENT_TIME; | 538 | inode->i_ctime = inode->i_mtime = CURRENT_TIME; |
541 | idx = (start + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; | 539 | idx = (start + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; |
542 | if (idx >= info->next_index) | 540 | if (idx >= info->next_index) |
543 | return; | 541 | return; |
544 | 542 | ||
545 | spin_lock(&info->lock); | 543 | spin_lock(&info->lock); |
546 | info->flags |= SHMEM_TRUNCATE; | 544 | info->flags |= SHMEM_TRUNCATE; |
547 | if (likely(end == (loff_t) -1)) { | 545 | if (likely(end == (loff_t) -1)) { |
548 | limit = info->next_index; | 546 | limit = info->next_index; |
549 | upper_limit = SHMEM_MAX_INDEX; | 547 | upper_limit = SHMEM_MAX_INDEX; |
550 | info->next_index = idx; | 548 | info->next_index = idx; |
551 | needs_lock = NULL; | 549 | needs_lock = NULL; |
552 | punch_hole = 0; | 550 | punch_hole = 0; |
553 | } else { | 551 | } else { |
554 | if (end + 1 >= inode->i_size) { /* we may free a little more */ | 552 | if (end + 1 >= inode->i_size) { /* we may free a little more */ |
555 | limit = (inode->i_size + PAGE_CACHE_SIZE - 1) >> | 553 | limit = (inode->i_size + PAGE_CACHE_SIZE - 1) >> |
556 | PAGE_CACHE_SHIFT; | 554 | PAGE_CACHE_SHIFT; |
557 | upper_limit = SHMEM_MAX_INDEX; | 555 | upper_limit = SHMEM_MAX_INDEX; |
558 | } else { | 556 | } else { |
559 | limit = (end + 1) >> PAGE_CACHE_SHIFT; | 557 | limit = (end + 1) >> PAGE_CACHE_SHIFT; |
560 | upper_limit = limit; | 558 | upper_limit = limit; |
561 | } | 559 | } |
562 | needs_lock = &info->lock; | 560 | needs_lock = &info->lock; |
563 | punch_hole = 1; | 561 | punch_hole = 1; |
564 | } | 562 | } |
565 | 563 | ||
566 | topdir = info->i_indirect; | 564 | topdir = info->i_indirect; |
567 | if (topdir && idx <= SHMEM_NR_DIRECT && !punch_hole) { | 565 | if (topdir && idx <= SHMEM_NR_DIRECT && !punch_hole) { |
568 | info->i_indirect = NULL; | 566 | info->i_indirect = NULL; |
569 | nr_pages_to_free++; | 567 | nr_pages_to_free++; |
570 | list_add(&topdir->lru, &pages_to_free); | 568 | list_add(&topdir->lru, &pages_to_free); |
571 | } | 569 | } |
572 | spin_unlock(&info->lock); | 570 | spin_unlock(&info->lock); |
573 | 571 | ||
574 | if (info->swapped && idx < SHMEM_NR_DIRECT) { | 572 | if (info->swapped && idx < SHMEM_NR_DIRECT) { |
575 | ptr = info->i_direct; | 573 | ptr = info->i_direct; |
576 | size = limit; | 574 | size = limit; |
577 | if (size > SHMEM_NR_DIRECT) | 575 | if (size > SHMEM_NR_DIRECT) |
578 | size = SHMEM_NR_DIRECT; | 576 | size = SHMEM_NR_DIRECT; |
579 | nr_swaps_freed = shmem_free_swp(ptr+idx, ptr+size, needs_lock); | 577 | nr_swaps_freed = shmem_free_swp(ptr+idx, ptr+size, needs_lock); |
580 | } | 578 | } |
581 | 579 | ||
582 | /* | 580 | /* |
583 | * If there are no indirect blocks or we are punching a hole | 581 | * If there are no indirect blocks or we are punching a hole |
584 | * below indirect blocks, nothing to be done. | 582 | * below indirect blocks, nothing to be done. |
585 | */ | 583 | */ |
586 | if (!topdir || limit <= SHMEM_NR_DIRECT) | 584 | if (!topdir || limit <= SHMEM_NR_DIRECT) |
587 | goto done2; | 585 | goto done2; |
588 | 586 | ||
589 | /* | 587 | /* |
590 | * The truncation case has already dropped info->lock, and we're safe | 588 | * The truncation case has already dropped info->lock, and we're safe |
591 | * because i_size and next_index have already been lowered, preventing | 589 | * because i_size and next_index have already been lowered, preventing |
592 | * access beyond. But in the punch_hole case, we still need to take | 590 | * access beyond. But in the punch_hole case, we still need to take |
593 | * the lock when updating the swap directory, because there might be | 591 | * the lock when updating the swap directory, because there might be |
594 | * racing accesses by shmem_getpage(SGP_CACHE), shmem_unuse_inode or | 592 | * racing accesses by shmem_getpage(SGP_CACHE), shmem_unuse_inode or |
595 | * shmem_writepage. However, whenever we find we can remove a whole | 593 | * shmem_writepage. However, whenever we find we can remove a whole |
596 | * directory page (not at the misaligned start or end of the range), | 594 | * directory page (not at the misaligned start or end of the range), |
597 | * we first NULLify its pointer in the level above, and then have no | 595 | * we first NULLify its pointer in the level above, and then have no |
598 | * need to take the lock when updating its contents: needs_lock and | 596 | * need to take the lock when updating its contents: needs_lock and |
599 | * punch_lock (either pointing to info->lock or NULL) manage this. | 597 | * punch_lock (either pointing to info->lock or NULL) manage this. |
600 | */ | 598 | */ |
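For illustration only, the ordering the comment above relies on, reduced to a sketch:

/*
 * Detach-then-free ordering (editor's sketch, not code from the patch):
 *
 *	spin_lock(needs_lock);
 *	*dir = NULL;		<- no racing lookup can reach middir now
 *	spin_unlock(needs_lock);
 *	needs_lock = NULL;	<- its contents may be swept without the lock
 *
 * Only the misaligned first and last directory pages of a punched hole
 * keep punch_lock set, so shmem_free_swp() rechecks those entries under it.
 */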
601 | 599 | ||
602 | upper_limit -= SHMEM_NR_DIRECT; | 600 | upper_limit -= SHMEM_NR_DIRECT; |
603 | limit -= SHMEM_NR_DIRECT; | 601 | limit -= SHMEM_NR_DIRECT; |
604 | idx = (idx > SHMEM_NR_DIRECT)? (idx - SHMEM_NR_DIRECT): 0; | 602 | idx = (idx > SHMEM_NR_DIRECT)? (idx - SHMEM_NR_DIRECT): 0; |
605 | offset = idx % ENTRIES_PER_PAGE; | 603 | offset = idx % ENTRIES_PER_PAGE; |
606 | idx -= offset; | 604 | idx -= offset; |
607 | 605 | ||
608 | dir = shmem_dir_map(topdir); | 606 | dir = shmem_dir_map(topdir); |
609 | stage = ENTRIES_PER_PAGEPAGE/2; | 607 | stage = ENTRIES_PER_PAGEPAGE/2; |
610 | if (idx < ENTRIES_PER_PAGEPAGE/2) { | 608 | if (idx < ENTRIES_PER_PAGEPAGE/2) { |
611 | middir = topdir; | 609 | middir = topdir; |
612 | diroff = idx/ENTRIES_PER_PAGE; | 610 | diroff = idx/ENTRIES_PER_PAGE; |
613 | } else { | 611 | } else { |
614 | dir += ENTRIES_PER_PAGE/2; | 612 | dir += ENTRIES_PER_PAGE/2; |
615 | dir += (idx - ENTRIES_PER_PAGEPAGE/2)/ENTRIES_PER_PAGEPAGE; | 613 | dir += (idx - ENTRIES_PER_PAGEPAGE/2)/ENTRIES_PER_PAGEPAGE; |
616 | while (stage <= idx) | 614 | while (stage <= idx) |
617 | stage += ENTRIES_PER_PAGEPAGE; | 615 | stage += ENTRIES_PER_PAGEPAGE; |
618 | middir = *dir; | 616 | middir = *dir; |
619 | if (*dir) { | 617 | if (*dir) { |
620 | diroff = ((idx - ENTRIES_PER_PAGEPAGE/2) % | 618 | diroff = ((idx - ENTRIES_PER_PAGEPAGE/2) % |
621 | ENTRIES_PER_PAGEPAGE) / ENTRIES_PER_PAGE; | 619 | ENTRIES_PER_PAGEPAGE) / ENTRIES_PER_PAGE; |
622 | if (!diroff && !offset && upper_limit >= stage) { | 620 | if (!diroff && !offset && upper_limit >= stage) { |
623 | if (needs_lock) { | 621 | if (needs_lock) { |
624 | spin_lock(needs_lock); | 622 | spin_lock(needs_lock); |
625 | *dir = NULL; | 623 | *dir = NULL; |
626 | spin_unlock(needs_lock); | 624 | spin_unlock(needs_lock); |
627 | needs_lock = NULL; | 625 | needs_lock = NULL; |
628 | } else | 626 | } else |
629 | *dir = NULL; | 627 | *dir = NULL; |
630 | nr_pages_to_free++; | 628 | nr_pages_to_free++; |
631 | list_add(&middir->lru, &pages_to_free); | 629 | list_add(&middir->lru, &pages_to_free); |
632 | } | 630 | } |
633 | shmem_dir_unmap(dir); | 631 | shmem_dir_unmap(dir); |
634 | dir = shmem_dir_map(middir); | 632 | dir = shmem_dir_map(middir); |
635 | } else { | 633 | } else { |
636 | diroff = 0; | 634 | diroff = 0; |
637 | offset = 0; | 635 | offset = 0; |
638 | idx = stage; | 636 | idx = stage; |
639 | } | 637 | } |
640 | } | 638 | } |
641 | 639 | ||
642 | for (; idx < limit; idx += ENTRIES_PER_PAGE, diroff++) { | 640 | for (; idx < limit; idx += ENTRIES_PER_PAGE, diroff++) { |
643 | if (unlikely(idx == stage)) { | 641 | if (unlikely(idx == stage)) { |
644 | shmem_dir_unmap(dir); | 642 | shmem_dir_unmap(dir); |
645 | dir = shmem_dir_map(topdir) + | 643 | dir = shmem_dir_map(topdir) + |
646 | ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE; | 644 | ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE; |
647 | while (!*dir) { | 645 | while (!*dir) { |
648 | dir++; | 646 | dir++; |
649 | idx += ENTRIES_PER_PAGEPAGE; | 647 | idx += ENTRIES_PER_PAGEPAGE; |
650 | if (idx >= limit) | 648 | if (idx >= limit) |
651 | goto done1; | 649 | goto done1; |
652 | } | 650 | } |
653 | stage = idx + ENTRIES_PER_PAGEPAGE; | 651 | stage = idx + ENTRIES_PER_PAGEPAGE; |
654 | middir = *dir; | 652 | middir = *dir; |
655 | if (punch_hole) | 653 | if (punch_hole) |
656 | needs_lock = &info->lock; | 654 | needs_lock = &info->lock; |
657 | if (upper_limit >= stage) { | 655 | if (upper_limit >= stage) { |
658 | if (needs_lock) { | 656 | if (needs_lock) { |
659 | spin_lock(needs_lock); | 657 | spin_lock(needs_lock); |
660 | *dir = NULL; | 658 | *dir = NULL; |
661 | spin_unlock(needs_lock); | 659 | spin_unlock(needs_lock); |
662 | needs_lock = NULL; | 660 | needs_lock = NULL; |
663 | } else | 661 | } else |
664 | *dir = NULL; | 662 | *dir = NULL; |
665 | nr_pages_to_free++; | 663 | nr_pages_to_free++; |
666 | list_add(&middir->lru, &pages_to_free); | 664 | list_add(&middir->lru, &pages_to_free); |
667 | } | 665 | } |
668 | shmem_dir_unmap(dir); | 666 | shmem_dir_unmap(dir); |
669 | cond_resched(); | 667 | cond_resched(); |
670 | dir = shmem_dir_map(middir); | 668 | dir = shmem_dir_map(middir); |
671 | diroff = 0; | 669 | diroff = 0; |
672 | } | 670 | } |
673 | punch_lock = needs_lock; | 671 | punch_lock = needs_lock; |
674 | subdir = dir[diroff]; | 672 | subdir = dir[diroff]; |
675 | if (subdir && !offset && upper_limit-idx >= ENTRIES_PER_PAGE) { | 673 | if (subdir && !offset && upper_limit-idx >= ENTRIES_PER_PAGE) { |
676 | if (needs_lock) { | 674 | if (needs_lock) { |
677 | spin_lock(needs_lock); | 675 | spin_lock(needs_lock); |
678 | dir[diroff] = NULL; | 676 | dir[diroff] = NULL; |
679 | spin_unlock(needs_lock); | 677 | spin_unlock(needs_lock); |
680 | punch_lock = NULL; | 678 | punch_lock = NULL; |
681 | } else | 679 | } else |
682 | dir[diroff] = NULL; | 680 | dir[diroff] = NULL; |
683 | nr_pages_to_free++; | 681 | nr_pages_to_free++; |
684 | list_add(&subdir->lru, &pages_to_free); | 682 | list_add(&subdir->lru, &pages_to_free); |
685 | } | 683 | } |
686 | if (subdir && page_private(subdir) /* has swap entries */) { | 684 | if (subdir && page_private(subdir) /* has swap entries */) { |
687 | size = limit - idx; | 685 | size = limit - idx; |
688 | if (size > ENTRIES_PER_PAGE) | 686 | if (size > ENTRIES_PER_PAGE) |
689 | size = ENTRIES_PER_PAGE; | 687 | size = ENTRIES_PER_PAGE; |
690 | freed = shmem_map_and_free_swp(subdir, | 688 | freed = shmem_map_and_free_swp(subdir, |
691 | offset, size, &dir, punch_lock); | 689 | offset, size, &dir, punch_lock); |
692 | if (!dir) | 690 | if (!dir) |
693 | dir = shmem_dir_map(middir); | 691 | dir = shmem_dir_map(middir); |
694 | nr_swaps_freed += freed; | 692 | nr_swaps_freed += freed; |
695 | if (offset || punch_lock) { | 693 | if (offset || punch_lock) { |
696 | spin_lock(&info->lock); | 694 | spin_lock(&info->lock); |
697 | set_page_private(subdir, | 695 | set_page_private(subdir, |
698 | page_private(subdir) - freed); | 696 | page_private(subdir) - freed); |
699 | spin_unlock(&info->lock); | 697 | spin_unlock(&info->lock); |
700 | } else | 698 | } else |
701 | BUG_ON(page_private(subdir) != freed); | 699 | BUG_ON(page_private(subdir) != freed); |
702 | } | 700 | } |
703 | offset = 0; | 701 | offset = 0; |
704 | } | 702 | } |
705 | done1: | 703 | done1: |
706 | shmem_dir_unmap(dir); | 704 | shmem_dir_unmap(dir); |
707 | done2: | 705 | done2: |
708 | if (inode->i_mapping->nrpages && (info->flags & SHMEM_PAGEIN)) { | 706 | if (inode->i_mapping->nrpages && (info->flags & SHMEM_PAGEIN)) { |
709 | /* | 707 | /* |
710 | * Call truncate_inode_pages again: racing shmem_unuse_inode | 708 | * Call truncate_inode_pages again: racing shmem_unuse_inode |
711 | * may have swizzled a page in from swap since vmtruncate or | 709 | * may have swizzled a page in from swap since vmtruncate or |
712 | * generic_delete_inode did it, before we lowered next_index. | 710 | * generic_delete_inode did it, before we lowered next_index. |
713 | * Also, though shmem_getpage checks i_size before adding to | 711 | * Also, though shmem_getpage checks i_size before adding to |
714 | * cache, no recheck after: so fix the narrow window there too. | 712 | * cache, no recheck after: so fix the narrow window there too. |
715 | * | 713 | * |
716 | * Recalling truncate_inode_pages_range and unmap_mapping_range | 714 | * Recalling truncate_inode_pages_range and unmap_mapping_range |
717 | * every time for punch_hole (which never got a chance to clear | 715 | * every time for punch_hole (which never got a chance to clear |
718 | * SHMEM_PAGEIN at the start of vmtruncate_range) is expensive, | 716 | * SHMEM_PAGEIN at the start of vmtruncate_range) is expensive, |
719 | * yet hardly ever necessary: try to optimize them out later. | 717 | * yet hardly ever necessary: try to optimize them out later. |
720 | */ | 718 | */ |
721 | truncate_inode_pages_range(inode->i_mapping, start, end); | 719 | truncate_inode_pages_range(inode->i_mapping, start, end); |
722 | if (punch_hole) | 720 | if (punch_hole) |
723 | unmap_mapping_range(inode->i_mapping, start, | 721 | unmap_mapping_range(inode->i_mapping, start, |
724 | end - start, 1); | 722 | end - start, 1); |
725 | } | 723 | } |
726 | 724 | ||
727 | spin_lock(&info->lock); | 725 | spin_lock(&info->lock); |
728 | info->flags &= ~SHMEM_TRUNCATE; | 726 | info->flags &= ~SHMEM_TRUNCATE; |
729 | info->swapped -= nr_swaps_freed; | 727 | info->swapped -= nr_swaps_freed; |
730 | if (nr_pages_to_free) | 728 | if (nr_pages_to_free) |
731 | shmem_free_blocks(inode, nr_pages_to_free); | 729 | shmem_free_blocks(inode, nr_pages_to_free); |
732 | shmem_recalc_inode(inode); | 730 | shmem_recalc_inode(inode); |
733 | spin_unlock(&info->lock); | 731 | spin_unlock(&info->lock); |
734 | 732 | ||
735 | /* | 733 | /* |
736 | * Empty swap vector directory pages to be freed? | 734 | * Empty swap vector directory pages to be freed? |
737 | */ | 735 | */ |
738 | if (!list_empty(&pages_to_free)) { | 736 | if (!list_empty(&pages_to_free)) { |
739 | pages_to_free.prev->next = NULL; | 737 | pages_to_free.prev->next = NULL; |
740 | shmem_free_pages(pages_to_free.next); | 738 | shmem_free_pages(pages_to_free.next); |
741 | } | 739 | } |
742 | } | 740 | } |
743 | 741 | ||
744 | static void shmem_truncate(struct inode *inode) | 742 | static void shmem_truncate(struct inode *inode) |
745 | { | 743 | { |
746 | shmem_truncate_range(inode, inode->i_size, (loff_t)-1); | 744 | shmem_truncate_range(inode, inode->i_size, (loff_t)-1); |
747 | } | 745 | } |
748 | 746 | ||
749 | static int shmem_notify_change(struct dentry *dentry, struct iattr *attr) | 747 | static int shmem_notify_change(struct dentry *dentry, struct iattr *attr) |
750 | { | 748 | { |
751 | struct inode *inode = dentry->d_inode; | 749 | struct inode *inode = dentry->d_inode; |
752 | struct page *page = NULL; | 750 | struct page *page = NULL; |
753 | int error; | 751 | int error; |
754 | 752 | ||
755 | if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) { | 753 | if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) { |
756 | if (attr->ia_size < inode->i_size) { | 754 | if (attr->ia_size < inode->i_size) { |
757 | /* | 755 | /* |
758 | * If truncating down to a partial page, then | 756 | * If truncating down to a partial page, then |
759 | * if that page is already allocated, hold it | 757 | * if that page is already allocated, hold it |
760 | * in memory until the truncation is over, so | 758 | * in memory until the truncation is over, so |
761 | * truncate_partial_page cannot miss it were | 759 | * truncate_partial_page cannot miss it were |
762 | * it assigned to swap. | 760 | * it assigned to swap. |
763 | */ | 761 | */ |
764 | if (attr->ia_size & (PAGE_CACHE_SIZE-1)) { | 762 | if (attr->ia_size & (PAGE_CACHE_SIZE-1)) { |
765 | (void) shmem_getpage(inode, | 763 | (void) shmem_getpage(inode, |
766 | attr->ia_size>>PAGE_CACHE_SHIFT, | 764 | attr->ia_size>>PAGE_CACHE_SHIFT, |
767 | &page, SGP_READ, NULL); | 765 | &page, SGP_READ, NULL); |
768 | if (page) | 766 | if (page) |
769 | unlock_page(page); | 767 | unlock_page(page); |
770 | } | 768 | } |
771 | /* | 769 | /* |
772 | * Reset SHMEM_PAGEIN flag so that shmem_truncate can | 770 | * Reset SHMEM_PAGEIN flag so that shmem_truncate can |
773 | * detect if any pages might have been added to cache | 771 | * detect if any pages might have been added to cache |
774 | * after truncate_inode_pages. But we needn't bother | 772 | * after truncate_inode_pages. But we needn't bother |
775 | * if it's being fully truncated to zero-length: the | 773 | * if it's being fully truncated to zero-length: the |
776 | * nrpages check is efficient enough in that case. | 774 | * nrpages check is efficient enough in that case. |
777 | */ | 775 | */ |
778 | if (attr->ia_size) { | 776 | if (attr->ia_size) { |
779 | struct shmem_inode_info *info = SHMEM_I(inode); | 777 | struct shmem_inode_info *info = SHMEM_I(inode); |
780 | spin_lock(&info->lock); | 778 | spin_lock(&info->lock); |
781 | info->flags &= ~SHMEM_PAGEIN; | 779 | info->flags &= ~SHMEM_PAGEIN; |
782 | spin_unlock(&info->lock); | 780 | spin_unlock(&info->lock); |
783 | } | 781 | } |
784 | } | 782 | } |
785 | } | 783 | } |
786 | 784 | ||
787 | error = inode_change_ok(inode, attr); | 785 | error = inode_change_ok(inode, attr); |
788 | if (!error) | 786 | if (!error) |
789 | error = inode_setattr(inode, attr); | 787 | error = inode_setattr(inode, attr); |
790 | #ifdef CONFIG_TMPFS_POSIX_ACL | 788 | #ifdef CONFIG_TMPFS_POSIX_ACL |
791 | if (!error && (attr->ia_valid & ATTR_MODE)) | 789 | if (!error && (attr->ia_valid & ATTR_MODE)) |
792 | error = generic_acl_chmod(inode, &shmem_acl_ops); | 790 | error = generic_acl_chmod(inode, &shmem_acl_ops); |
793 | #endif | 791 | #endif |
794 | if (page) | 792 | if (page) |
795 | page_cache_release(page); | 793 | page_cache_release(page); |
796 | return error; | 794 | return error; |
797 | } | 795 | } |
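For illustration only (assuming PAGE_CACHE_SIZE == 4096, i.e. PAGE_CACHE_SHIFT == 12), the partial-page check above behaves as follows:

/*
 * Worked example for the partial-page hold in shmem_notify_change():
 *   ftruncate() to ia_size = 10000:
 *     10000 & 4095 = 1808 != 0        -> truncating mid-page
 *     page index   = 10000 >> 12 = 2  -> that page is read in via SGP_READ
 *                                        and held until the truncate is done
 *   ftruncate() to ia_size = 8192:
 *     8192 & 4095 = 0                 -> no partial page to protect
 */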
798 | 796 | ||
799 | static void shmem_delete_inode(struct inode *inode) | 797 | static void shmem_delete_inode(struct inode *inode) |
800 | { | 798 | { |
801 | struct shmem_inode_info *info = SHMEM_I(inode); | 799 | struct shmem_inode_info *info = SHMEM_I(inode); |
802 | 800 | ||
803 | if (inode->i_op->truncate == shmem_truncate) { | 801 | if (inode->i_op->truncate == shmem_truncate) { |
804 | truncate_inode_pages(inode->i_mapping, 0); | 802 | truncate_inode_pages(inode->i_mapping, 0); |
805 | shmem_unacct_size(info->flags, inode->i_size); | 803 | shmem_unacct_size(info->flags, inode->i_size); |
806 | inode->i_size = 0; | 804 | inode->i_size = 0; |
807 | shmem_truncate(inode); | 805 | shmem_truncate(inode); |
808 | if (!list_empty(&info->swaplist)) { | 806 | if (!list_empty(&info->swaplist)) { |
809 | mutex_lock(&shmem_swaplist_mutex); | 807 | mutex_lock(&shmem_swaplist_mutex); |
810 | list_del_init(&info->swaplist); | 808 | list_del_init(&info->swaplist); |
811 | mutex_unlock(&shmem_swaplist_mutex); | 809 | mutex_unlock(&shmem_swaplist_mutex); |
812 | } | 810 | } |
813 | } | 811 | } |
814 | BUG_ON(inode->i_blocks); | 812 | BUG_ON(inode->i_blocks); |
815 | shmem_free_inode(inode->i_sb); | 813 | shmem_free_inode(inode->i_sb); |
816 | clear_inode(inode); | 814 | clear_inode(inode); |
817 | } | 815 | } |
818 | 816 | ||
819 | static inline int shmem_find_swp(swp_entry_t entry, swp_entry_t *dir, swp_entry_t *edir) | 817 | static inline int shmem_find_swp(swp_entry_t entry, swp_entry_t *dir, swp_entry_t *edir) |
820 | { | 818 | { |
821 | swp_entry_t *ptr; | 819 | swp_entry_t *ptr; |
822 | 820 | ||
823 | for (ptr = dir; ptr < edir; ptr++) { | 821 | for (ptr = dir; ptr < edir; ptr++) { |
824 | if (ptr->val == entry.val) | 822 | if (ptr->val == entry.val) |
825 | return ptr - dir; | 823 | return ptr - dir; |
826 | } | 824 | } |
827 | return -1; | 825 | return -1; |
828 | } | 826 | } |
829 | 827 | ||
830 | static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, struct page *page) | 828 | static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, struct page *page) |
831 | { | 829 | { |
832 | struct inode *inode; | 830 | struct inode *inode; |
833 | unsigned long idx; | 831 | unsigned long idx; |
834 | unsigned long size; | 832 | unsigned long size; |
835 | unsigned long limit; | 833 | unsigned long limit; |
836 | unsigned long stage; | 834 | unsigned long stage; |
837 | struct page **dir; | 835 | struct page **dir; |
838 | struct page *subdir; | 836 | struct page *subdir; |
839 | swp_entry_t *ptr; | 837 | swp_entry_t *ptr; |
840 | int offset; | 838 | int offset; |
841 | int error; | 839 | int error; |
842 | 840 | ||
843 | idx = 0; | 841 | idx = 0; |
844 | ptr = info->i_direct; | 842 | ptr = info->i_direct; |
845 | spin_lock(&info->lock); | 843 | spin_lock(&info->lock); |
846 | if (!info->swapped) { | 844 | if (!info->swapped) { |
847 | list_del_init(&info->swaplist); | 845 | list_del_init(&info->swaplist); |
848 | goto lost2; | 846 | goto lost2; |
849 | } | 847 | } |
850 | limit = info->next_index; | 848 | limit = info->next_index; |
851 | size = limit; | 849 | size = limit; |
852 | if (size > SHMEM_NR_DIRECT) | 850 | if (size > SHMEM_NR_DIRECT) |
853 | size = SHMEM_NR_DIRECT; | 851 | size = SHMEM_NR_DIRECT; |
854 | offset = shmem_find_swp(entry, ptr, ptr+size); | 852 | offset = shmem_find_swp(entry, ptr, ptr+size); |
855 | if (offset >= 0) | 853 | if (offset >= 0) |
856 | goto found; | 854 | goto found; |
857 | if (!info->i_indirect) | 855 | if (!info->i_indirect) |
858 | goto lost2; | 856 | goto lost2; |
859 | 857 | ||
860 | dir = shmem_dir_map(info->i_indirect); | 858 | dir = shmem_dir_map(info->i_indirect); |
861 | stage = SHMEM_NR_DIRECT + ENTRIES_PER_PAGEPAGE/2; | 859 | stage = SHMEM_NR_DIRECT + ENTRIES_PER_PAGEPAGE/2; |
862 | 860 | ||
863 | for (idx = SHMEM_NR_DIRECT; idx < limit; idx += ENTRIES_PER_PAGE, dir++) { | 861 | for (idx = SHMEM_NR_DIRECT; idx < limit; idx += ENTRIES_PER_PAGE, dir++) { |
864 | if (unlikely(idx == stage)) { | 862 | if (unlikely(idx == stage)) { |
865 | shmem_dir_unmap(dir-1); | 863 | shmem_dir_unmap(dir-1); |
866 | if (cond_resched_lock(&info->lock)) { | 864 | if (cond_resched_lock(&info->lock)) { |
867 | /* check it has not been truncated */ | 865 | /* check it has not been truncated */ |
868 | if (limit > info->next_index) { | 866 | if (limit > info->next_index) { |
869 | limit = info->next_index; | 867 | limit = info->next_index; |
870 | if (idx >= limit) | 868 | if (idx >= limit) |
871 | goto lost2; | 869 | goto lost2; |
872 | } | 870 | } |
873 | } | 871 | } |
874 | dir = shmem_dir_map(info->i_indirect) + | 872 | dir = shmem_dir_map(info->i_indirect) + |
875 | ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE; | 873 | ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE; |
876 | while (!*dir) { | 874 | while (!*dir) { |
877 | dir++; | 875 | dir++; |
878 | idx += ENTRIES_PER_PAGEPAGE; | 876 | idx += ENTRIES_PER_PAGEPAGE; |
879 | if (idx >= limit) | 877 | if (idx >= limit) |
880 | goto lost1; | 878 | goto lost1; |
881 | } | 879 | } |
882 | stage = idx + ENTRIES_PER_PAGEPAGE; | 880 | stage = idx + ENTRIES_PER_PAGEPAGE; |
883 | subdir = *dir; | 881 | subdir = *dir; |
884 | shmem_dir_unmap(dir); | 882 | shmem_dir_unmap(dir); |
885 | dir = shmem_dir_map(subdir); | 883 | dir = shmem_dir_map(subdir); |
886 | } | 884 | } |
887 | subdir = *dir; | 885 | subdir = *dir; |
888 | if (subdir && page_private(subdir)) { | 886 | if (subdir && page_private(subdir)) { |
889 | ptr = shmem_swp_map(subdir); | 887 | ptr = shmem_swp_map(subdir); |
890 | size = limit - idx; | 888 | size = limit - idx; |
891 | if (size > ENTRIES_PER_PAGE) | 889 | if (size > ENTRIES_PER_PAGE) |
892 | size = ENTRIES_PER_PAGE; | 890 | size = ENTRIES_PER_PAGE; |
893 | offset = shmem_find_swp(entry, ptr, ptr+size); | 891 | offset = shmem_find_swp(entry, ptr, ptr+size); |
894 | shmem_swp_unmap(ptr); | 892 | shmem_swp_unmap(ptr); |
895 | if (offset >= 0) { | 893 | if (offset >= 0) { |
896 | shmem_dir_unmap(dir); | 894 | shmem_dir_unmap(dir); |
897 | goto found; | 895 | goto found; |
898 | } | 896 | } |
899 | } | 897 | } |
900 | } | 898 | } |
901 | lost1: | 899 | lost1: |
902 | shmem_dir_unmap(dir-1); | 900 | shmem_dir_unmap(dir-1); |
903 | lost2: | 901 | lost2: |
904 | spin_unlock(&info->lock); | 902 | spin_unlock(&info->lock); |
905 | return 0; | 903 | return 0; |
906 | found: | 904 | found: |
907 | idx += offset; | 905 | idx += offset; |
908 | inode = igrab(&info->vfs_inode); | 906 | inode = igrab(&info->vfs_inode); |
909 | spin_unlock(&info->lock); | 907 | spin_unlock(&info->lock); |
910 | 908 | ||
911 | /* | 909 | /* |
912 | * Move _head_ to start search for next from here. | 910 | * Move _head_ to start search for next from here. |
913 | * But be careful: shmem_delete_inode checks list_empty without taking | 911 | * But be careful: shmem_delete_inode checks list_empty without taking |
914 | * mutex, and there's an instant in list_move_tail when info->swaplist | 912 | * mutex, and there's an instant in list_move_tail when info->swaplist |
915 | * would appear empty, if it were the only one on shmem_swaplist. We | 913 | * would appear empty, if it were the only one on shmem_swaplist. We |
916 | * could avoid doing it if inode NULL; or use this minor optimization. | 914 | * could avoid doing it if inode NULL; or use this minor optimization. |
917 | */ | 915 | */ |
918 | if (shmem_swaplist.next != &info->swaplist) | 916 | if (shmem_swaplist.next != &info->swaplist) |
919 | list_move_tail(&shmem_swaplist, &info->swaplist); | 917 | list_move_tail(&shmem_swaplist, &info->swaplist); |
920 | mutex_unlock(&shmem_swaplist_mutex); | 918 | mutex_unlock(&shmem_swaplist_mutex); |
921 | 919 | ||
922 | error = 1; | 920 | error = 1; |
923 | if (!inode) | 921 | if (!inode) |
924 | goto out; | 922 | goto out; |
925 | /* Precharge page using GFP_KERNEL while we can wait */ | 923 | /* Precharge page using GFP_KERNEL while we can wait */ |
926 | error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL); | 924 | error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL); |
927 | if (error) | 925 | if (error) |
928 | goto out; | 926 | goto out; |
929 | error = radix_tree_preload(GFP_KERNEL); | 927 | error = radix_tree_preload(GFP_KERNEL); |
930 | if (error) { | 928 | if (error) { |
931 | mem_cgroup_uncharge_cache_page(page); | 929 | mem_cgroup_uncharge_cache_page(page); |
932 | goto out; | 930 | goto out; |
933 | } | 931 | } |
934 | error = 1; | 932 | error = 1; |
935 | 933 | ||
936 | spin_lock(&info->lock); | 934 | spin_lock(&info->lock); |
937 | ptr = shmem_swp_entry(info, idx, NULL); | 935 | ptr = shmem_swp_entry(info, idx, NULL); |
938 | if (ptr && ptr->val == entry.val) { | 936 | if (ptr && ptr->val == entry.val) { |
939 | error = add_to_page_cache_locked(page, inode->i_mapping, | 937 | error = add_to_page_cache_locked(page, inode->i_mapping, |
940 | idx, GFP_NOWAIT); | 938 | idx, GFP_NOWAIT); |
941 | /* does mem_cgroup_uncharge_cache_page on error */ | 939 | /* does mem_cgroup_uncharge_cache_page on error */ |
942 | } else /* we must compensate for our precharge above */ | 940 | } else /* we must compensate for our precharge above */ |
943 | mem_cgroup_uncharge_cache_page(page); | 941 | mem_cgroup_uncharge_cache_page(page); |
944 | 942 | ||
945 | if (error == -EEXIST) { | 943 | if (error == -EEXIST) { |
946 | struct page *filepage = find_get_page(inode->i_mapping, idx); | 944 | struct page *filepage = find_get_page(inode->i_mapping, idx); |
947 | error = 1; | 945 | error = 1; |
948 | if (filepage) { | 946 | if (filepage) { |
949 | /* | 947 | /* |
950 | * There might be a more uptodate page coming down | 948 | * There might be a more uptodate page coming down |
951 | * from a stacked writepage: forget our swappage if so. | 949 | * from a stacked writepage: forget our swappage if so. |
952 | */ | 950 | */ |
953 | if (PageUptodate(filepage)) | 951 | if (PageUptodate(filepage)) |
954 | error = 0; | 952 | error = 0; |
955 | page_cache_release(filepage); | 953 | page_cache_release(filepage); |
956 | } | 954 | } |
957 | } | 955 | } |
958 | if (!error) { | 956 | if (!error) { |
959 | delete_from_swap_cache(page); | 957 | delete_from_swap_cache(page); |
960 | set_page_dirty(page); | 958 | set_page_dirty(page); |
961 | info->flags |= SHMEM_PAGEIN; | 959 | info->flags |= SHMEM_PAGEIN; |
962 | shmem_swp_set(info, ptr, 0); | 960 | shmem_swp_set(info, ptr, 0); |
963 | swap_free(entry); | 961 | swap_free(entry); |
964 | error = 1; /* not an error, but entry was found */ | 962 | error = 1; /* not an error, but entry was found */ |
965 | } | 963 | } |
966 | if (ptr) | 964 | if (ptr) |
967 | shmem_swp_unmap(ptr); | 965 | shmem_swp_unmap(ptr); |
968 | spin_unlock(&info->lock); | 966 | spin_unlock(&info->lock); |
969 | radix_tree_preload_end(); | 967 | radix_tree_preload_end(); |
970 | out: | 968 | out: |
971 | unlock_page(page); | 969 | unlock_page(page); |
972 | page_cache_release(page); | 970 | page_cache_release(page); |
973 | iput(inode); /* allows for NULL */ | 971 | iput(inode); /* allows for NULL */ |
974 | return error; | 972 | return error; |
975 | } | 973 | } |
976 | 974 | ||
977 | /* | 975 | /* |
978 | * shmem_unuse() searches for a possibly swapped-out shmem page. | 976 | * shmem_unuse() searches for a possibly swapped-out shmem page. |
979 | */ | 977 | */ |
980 | int shmem_unuse(swp_entry_t entry, struct page *page) | 978 | int shmem_unuse(swp_entry_t entry, struct page *page) |
981 | { | 979 | { |
982 | struct list_head *p, *next; | 980 | struct list_head *p, *next; |
983 | struct shmem_inode_info *info; | 981 | struct shmem_inode_info *info; |
984 | int found = 0; | 982 | int found = 0; |
985 | 983 | ||
986 | mutex_lock(&shmem_swaplist_mutex); | 984 | mutex_lock(&shmem_swaplist_mutex); |
987 | list_for_each_safe(p, next, &shmem_swaplist) { | 985 | list_for_each_safe(p, next, &shmem_swaplist) { |
988 | info = list_entry(p, struct shmem_inode_info, swaplist); | 986 | info = list_entry(p, struct shmem_inode_info, swaplist); |
989 | found = shmem_unuse_inode(info, entry, page); | 987 | found = shmem_unuse_inode(info, entry, page); |
990 | cond_resched(); | 988 | cond_resched(); |
991 | if (found) | 989 | if (found) |
992 | goto out; | 990 | goto out; |
993 | } | 991 | } |
994 | mutex_unlock(&shmem_swaplist_mutex); | 992 | mutex_unlock(&shmem_swaplist_mutex); |
995 | out: return found; /* 0 or 1 or -ENOMEM */ | 993 | out: return found; /* 0 or 1 or -ENOMEM */ |
996 | } | 994 | } |
997 | 995 | ||
998 | /* | 996 | /* |
999 | * Move the page from the page cache to the swap cache. | 997 | * Move the page from the page cache to the swap cache. |
1000 | */ | 998 | */ |
1001 | static int shmem_writepage(struct page *page, struct writeback_control *wbc) | 999 | static int shmem_writepage(struct page *page, struct writeback_control *wbc) |
1002 | { | 1000 | { |
1003 | struct shmem_inode_info *info; | 1001 | struct shmem_inode_info *info; |
1004 | swp_entry_t *entry, swap; | 1002 | swp_entry_t *entry, swap; |
1005 | struct address_space *mapping; | 1003 | struct address_space *mapping; |
1006 | unsigned long index; | 1004 | unsigned long index; |
1007 | struct inode *inode; | 1005 | struct inode *inode; |
1008 | 1006 | ||
1009 | BUG_ON(!PageLocked(page)); | 1007 | BUG_ON(!PageLocked(page)); |
1010 | mapping = page->mapping; | 1008 | mapping = page->mapping; |
1011 | index = page->index; | 1009 | index = page->index; |
1012 | inode = mapping->host; | 1010 | inode = mapping->host; |
1013 | info = SHMEM_I(inode); | 1011 | info = SHMEM_I(inode); |
1014 | if (info->flags & VM_LOCKED) | 1012 | if (info->flags & VM_LOCKED) |
1015 | goto redirty; | 1013 | goto redirty; |
1016 | if (!total_swap_pages) | 1014 | if (!total_swap_pages) |
1017 | goto redirty; | 1015 | goto redirty; |
1018 | 1016 | ||
1019 | /* | 1017 | /* |
1020 | * shmem_backing_dev_info's capabilities prevent regular writeback or | 1018 | * shmem_backing_dev_info's capabilities prevent regular writeback or |
1021 | * sync from ever calling shmem_writepage; but a stacking filesystem | 1019 | * sync from ever calling shmem_writepage; but a stacking filesystem |
1022 | * may use the ->writepage of its underlying filesystem, in which case | 1020 | * may use the ->writepage of its underlying filesystem, in which case |
1023 | * tmpfs should write out to swap only in response to memory pressure, | 1021 | * tmpfs should write out to swap only in response to memory pressure, |
1024 | * and not for pdflush or sync. However, in those cases, we do still | 1022 | * and not for pdflush or sync. However, in those cases, we do still |
1025 | * want to check if there's a redundant swappage to be discarded. | 1023 | * want to check if there's a redundant swappage to be discarded. |
1026 | */ | 1024 | */ |
1027 | if (wbc->for_reclaim) | 1025 | if (wbc->for_reclaim) |
1028 | swap = get_swap_page(); | 1026 | swap = get_swap_page(); |
1029 | else | 1027 | else |
1030 | swap.val = 0; | 1028 | swap.val = 0; |
1031 | 1029 | ||
1032 | spin_lock(&info->lock); | 1030 | spin_lock(&info->lock); |
1033 | if (index >= info->next_index) { | 1031 | if (index >= info->next_index) { |
1034 | BUG_ON(!(info->flags & SHMEM_TRUNCATE)); | 1032 | BUG_ON(!(info->flags & SHMEM_TRUNCATE)); |
1035 | goto unlock; | 1033 | goto unlock; |
1036 | } | 1034 | } |
1037 | entry = shmem_swp_entry(info, index, NULL); | 1035 | entry = shmem_swp_entry(info, index, NULL); |
1038 | if (entry->val) { | 1036 | if (entry->val) { |
1039 | /* | 1037 | /* |
1040 | * The more uptodate page coming down from a stacked | 1038 | * The more uptodate page coming down from a stacked |
1041 | * writepage should replace our old swappage. | 1039 | * writepage should replace our old swappage. |
1042 | */ | 1040 | */ |
1043 | free_swap_and_cache(*entry); | 1041 | free_swap_and_cache(*entry); |
1044 | shmem_swp_set(info, entry, 0); | 1042 | shmem_swp_set(info, entry, 0); |
1045 | } | 1043 | } |
1046 | shmem_recalc_inode(inode); | 1044 | shmem_recalc_inode(inode); |
1047 | 1045 | ||
1048 | if (swap.val && add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) { | 1046 | if (swap.val && add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) { |
1049 | remove_from_page_cache(page); | 1047 | remove_from_page_cache(page); |
1050 | shmem_swp_set(info, entry, swap.val); | 1048 | shmem_swp_set(info, entry, swap.val); |
1051 | shmem_swp_unmap(entry); | 1049 | shmem_swp_unmap(entry); |
1052 | if (list_empty(&info->swaplist)) | 1050 | if (list_empty(&info->swaplist)) |
1053 | inode = igrab(inode); | 1051 | inode = igrab(inode); |
1054 | else | 1052 | else |
1055 | inode = NULL; | 1053 | inode = NULL; |
1056 | spin_unlock(&info->lock); | 1054 | spin_unlock(&info->lock); |
1057 | swap_duplicate(swap); | 1055 | swap_duplicate(swap); |
1058 | BUG_ON(page_mapped(page)); | 1056 | BUG_ON(page_mapped(page)); |
1059 | page_cache_release(page); /* pagecache ref */ | 1057 | page_cache_release(page); /* pagecache ref */ |
1060 | set_page_dirty(page); | 1058 | set_page_dirty(page); |
1061 | unlock_page(page); | 1059 | unlock_page(page); |
1062 | if (inode) { | 1060 | if (inode) { |
1063 | mutex_lock(&shmem_swaplist_mutex); | 1061 | mutex_lock(&shmem_swaplist_mutex); |
1064 | /* move instead of add in case we're racing */ | 1062 | /* move instead of add in case we're racing */ |
1065 | list_move_tail(&info->swaplist, &shmem_swaplist); | 1063 | list_move_tail(&info->swaplist, &shmem_swaplist); |
1066 | mutex_unlock(&shmem_swaplist_mutex); | 1064 | mutex_unlock(&shmem_swaplist_mutex); |
1067 | iput(inode); | 1065 | iput(inode); |
1068 | } | 1066 | } |
1069 | return 0; | 1067 | return 0; |
1070 | } | 1068 | } |
1071 | 1069 | ||
1072 | shmem_swp_unmap(entry); | 1070 | shmem_swp_unmap(entry); |
1073 | unlock: | 1071 | unlock: |
1074 | spin_unlock(&info->lock); | 1072 | spin_unlock(&info->lock); |
1075 | swap_free(swap); | 1073 | swap_free(swap); |
1076 | redirty: | 1074 | redirty: |
1077 | set_page_dirty(page); | 1075 | set_page_dirty(page); |
1078 | if (wbc->for_reclaim) | 1076 | if (wbc->for_reclaim) |
1079 | return AOP_WRITEPAGE_ACTIVATE; /* Return with page locked */ | 1077 | return AOP_WRITEPAGE_ACTIVATE; /* Return with page locked */ |
1080 | unlock_page(page); | 1078 | unlock_page(page); |
1081 | return 0; | 1079 | return 0; |
1082 | } | 1080 | } |
1083 | 1081 | ||
1084 | #ifdef CONFIG_NUMA | 1082 | #ifdef CONFIG_NUMA |
1085 | #ifdef CONFIG_TMPFS | 1083 | #ifdef CONFIG_TMPFS |
1086 | static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol) | 1084 | static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol) |
1087 | { | 1085 | { |
1088 | char buffer[64]; | 1086 | char buffer[64]; |
1089 | 1087 | ||
1090 | if (!mpol || mpol->mode == MPOL_DEFAULT) | 1088 | if (!mpol || mpol->mode == MPOL_DEFAULT) |
1091 | return; /* show nothing */ | 1089 | return; /* show nothing */ |
1092 | 1090 | ||
1093 | mpol_to_str(buffer, sizeof(buffer), mpol, 1); | 1091 | mpol_to_str(buffer, sizeof(buffer), mpol, 1); |
1094 | 1092 | ||
1095 | seq_printf(seq, ",mpol=%s", buffer); | 1093 | seq_printf(seq, ",mpol=%s", buffer); |
1096 | } | 1094 | } |
1097 | 1095 | ||
1098 | static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo) | 1096 | static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo) |
1099 | { | 1097 | { |
1100 | struct mempolicy *mpol = NULL; | 1098 | struct mempolicy *mpol = NULL; |
1101 | if (sbinfo->mpol) { | 1099 | if (sbinfo->mpol) { |
1102 | spin_lock(&sbinfo->stat_lock); /* prevent replace/use races */ | 1100 | spin_lock(&sbinfo->stat_lock); /* prevent replace/use races */ |
1103 | mpol = sbinfo->mpol; | 1101 | mpol = sbinfo->mpol; |
1104 | mpol_get(mpol); | 1102 | mpol_get(mpol); |
1105 | spin_unlock(&sbinfo->stat_lock); | 1103 | spin_unlock(&sbinfo->stat_lock); |
1106 | } | 1104 | } |
1107 | return mpol; | 1105 | return mpol; |
1108 | } | 1106 | } |
1109 | #endif /* CONFIG_TMPFS */ | 1107 | #endif /* CONFIG_TMPFS */ |
1110 | 1108 | ||
1111 | static struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp, | 1109 | static struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp, |
1112 | struct shmem_inode_info *info, unsigned long idx) | 1110 | struct shmem_inode_info *info, unsigned long idx) |
1113 | { | 1111 | { |
1114 | struct mempolicy mpol, *spol; | 1112 | struct mempolicy mpol, *spol; |
1115 | struct vm_area_struct pvma; | 1113 | struct vm_area_struct pvma; |
1116 | struct page *page; | 1114 | struct page *page; |
1117 | 1115 | ||
1118 | spol = mpol_cond_copy(&mpol, | 1116 | spol = mpol_cond_copy(&mpol, |
1119 | mpol_shared_policy_lookup(&info->policy, idx)); | 1117 | mpol_shared_policy_lookup(&info->policy, idx)); |
1120 | 1118 | ||
1121 | /* Create a pseudo vma that just contains the policy */ | 1119 | /* Create a pseudo vma that just contains the policy */ |
1122 | pvma.vm_start = 0; | 1120 | pvma.vm_start = 0; |
1123 | pvma.vm_pgoff = idx; | 1121 | pvma.vm_pgoff = idx; |
1124 | pvma.vm_ops = NULL; | 1122 | pvma.vm_ops = NULL; |
1125 | pvma.vm_policy = spol; | 1123 | pvma.vm_policy = spol; |
1126 | page = swapin_readahead(entry, gfp, &pvma, 0); | 1124 | page = swapin_readahead(entry, gfp, &pvma, 0); |
1127 | return page; | 1125 | return page; |
1128 | } | 1126 | } |
1129 | 1127 | ||
1130 | static struct page *shmem_alloc_page(gfp_t gfp, | 1128 | static struct page *shmem_alloc_page(gfp_t gfp, |
1131 | struct shmem_inode_info *info, unsigned long idx) | 1129 | struct shmem_inode_info *info, unsigned long idx) |
1132 | { | 1130 | { |
1133 | struct vm_area_struct pvma; | 1131 | struct vm_area_struct pvma; |
1134 | 1132 | ||
1135 | /* Create a pseudo vma that just contains the policy */ | 1133 | /* Create a pseudo vma that just contains the policy */ |
1136 | pvma.vm_start = 0; | 1134 | pvma.vm_start = 0; |
1137 | pvma.vm_pgoff = idx; | 1135 | pvma.vm_pgoff = idx; |
1138 | pvma.vm_ops = NULL; | 1136 | pvma.vm_ops = NULL; |
1139 | pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx); | 1137 | pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx); |
1140 | 1138 | ||
1141 | /* | 1139 | /* |
1142 | * alloc_page_vma() will drop the shared policy reference | 1140 | * alloc_page_vma() will drop the shared policy reference |
1143 | */ | 1141 | */ |
1144 | return alloc_page_vma(gfp, &pvma, 0); | 1142 | return alloc_page_vma(gfp, &pvma, 0); |
1145 | } | 1143 | } |
1146 | #else /* !CONFIG_NUMA */ | 1144 | #else /* !CONFIG_NUMA */ |
1147 | #ifdef CONFIG_TMPFS | 1145 | #ifdef CONFIG_TMPFS |
1148 | static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *p) | 1146 | static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *p) |
1149 | { | 1147 | { |
1150 | } | 1148 | } |
1151 | #endif /* CONFIG_TMPFS */ | 1149 | #endif /* CONFIG_TMPFS */ |
1152 | 1150 | ||
1153 | static inline struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp, | 1151 | static inline struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp, |
1154 | struct shmem_inode_info *info, unsigned long idx) | 1152 | struct shmem_inode_info *info, unsigned long idx) |
1155 | { | 1153 | { |
1156 | return swapin_readahead(entry, gfp, NULL, 0); | 1154 | return swapin_readahead(entry, gfp, NULL, 0); |
1157 | } | 1155 | } |
1158 | 1156 | ||
1159 | static inline struct page *shmem_alloc_page(gfp_t gfp, | 1157 | static inline struct page *shmem_alloc_page(gfp_t gfp, |
1160 | struct shmem_inode_info *info, unsigned long idx) | 1158 | struct shmem_inode_info *info, unsigned long idx) |
1161 | { | 1159 | { |
1162 | return alloc_page(gfp); | 1160 | return alloc_page(gfp); |
1163 | } | 1161 | } |
1164 | #endif /* CONFIG_NUMA */ | 1162 | #endif /* CONFIG_NUMA */ |
1165 | 1163 | ||
1166 | #if !defined(CONFIG_NUMA) || !defined(CONFIG_TMPFS) | 1164 | #if !defined(CONFIG_NUMA) || !defined(CONFIG_TMPFS) |
1167 | static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo) | 1165 | static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo) |
1168 | { | 1166 | { |
1169 | return NULL; | 1167 | return NULL; |
1170 | } | 1168 | } |
1171 | #endif | 1169 | #endif |
1172 | 1170 | ||
1173 | /* | 1171 | /* |
1174 | * shmem_getpage - either get the page from swap or allocate a new one | 1172 | * shmem_getpage - either get the page from swap or allocate a new one |
1175 | * | 1173 | * |
1176 | * If we allocate a new one we do not mark it dirty. That's up to the | 1174 | * If we allocate a new one we do not mark it dirty. That's up to the |
1177 | * vm. If we swap it in we mark it dirty, since we also free the swap | 1175 | * vm. If we swap it in we mark it dirty, since we also free the swap |
1178 | * entry: a page cannot live in both the swap and page cache. | 1176 | * entry: a page cannot live in both the swap and page cache. |
1179 | */ | 1177 | */ |
1180 | static int shmem_getpage(struct inode *inode, unsigned long idx, | 1178 | static int shmem_getpage(struct inode *inode, unsigned long idx, |
1181 | struct page **pagep, enum sgp_type sgp, int *type) | 1179 | struct page **pagep, enum sgp_type sgp, int *type) |
1182 | { | 1180 | { |
1183 | struct address_space *mapping = inode->i_mapping; | 1181 | struct address_space *mapping = inode->i_mapping; |
1184 | struct shmem_inode_info *info = SHMEM_I(inode); | 1182 | struct shmem_inode_info *info = SHMEM_I(inode); |
1185 | struct shmem_sb_info *sbinfo; | 1183 | struct shmem_sb_info *sbinfo; |
1186 | struct page *filepage = *pagep; | 1184 | struct page *filepage = *pagep; |
1187 | struct page *swappage; | 1185 | struct page *swappage; |
1188 | swp_entry_t *entry; | 1186 | swp_entry_t *entry; |
1189 | swp_entry_t swap; | 1187 | swp_entry_t swap; |
1190 | gfp_t gfp; | 1188 | gfp_t gfp; |
1191 | int error; | 1189 | int error; |
1192 | 1190 | ||
1193 | if (idx >= SHMEM_MAX_INDEX) | 1191 | if (idx >= SHMEM_MAX_INDEX) |
1194 | return -EFBIG; | 1192 | return -EFBIG; |
1195 | 1193 | ||
1196 | if (type) | 1194 | if (type) |
1197 | *type = 0; | 1195 | *type = 0; |
1198 | 1196 | ||
1199 | /* | 1197 | /* |
1200 | * Normally, filepage is NULL on entry, and either found | 1198 | * Normally, filepage is NULL on entry, and either found |
1201 | * uptodate immediately, or allocated and zeroed, or read | 1199 | * uptodate immediately, or allocated and zeroed, or read |
1202 | * in under swappage, which is then assigned to filepage. | 1200 | * in under swappage, which is then assigned to filepage. |
1203 | * But shmem_readpage (required for splice) passes in a locked | 1201 | * But shmem_readpage (required for splice) passes in a locked |
1204 | * filepage, which may be found not uptodate by other callers | 1202 | * filepage, which may be found not uptodate by other callers |
1205 | * too, and may need to be copied from the swappage read in. | 1203 | * too, and may need to be copied from the swappage read in. |
1206 | */ | 1204 | */ |
1207 | repeat: | 1205 | repeat: |
1208 | if (!filepage) | 1206 | if (!filepage) |
1209 | filepage = find_lock_page(mapping, idx); | 1207 | filepage = find_lock_page(mapping, idx); |
1210 | if (filepage && PageUptodate(filepage)) | 1208 | if (filepage && PageUptodate(filepage)) |
1211 | goto done; | 1209 | goto done; |
1212 | error = 0; | 1210 | error = 0; |
1213 | gfp = mapping_gfp_mask(mapping); | 1211 | gfp = mapping_gfp_mask(mapping); |
1214 | if (!filepage) { | 1212 | if (!filepage) { |
1215 | /* | 1213 | /* |
1216 | * Try to preload while we can wait, to not make a habit of | 1214 | * Try to preload while we can wait, to not make a habit of |
1217 | * draining atomic reserves; but don't latch on to this cpu. | 1215 | * draining atomic reserves; but don't latch on to this cpu. |
1218 | */ | 1216 | */ |
1219 | error = radix_tree_preload(gfp & ~__GFP_HIGHMEM); | 1217 | error = radix_tree_preload(gfp & ~__GFP_HIGHMEM); |
1220 | if (error) | 1218 | if (error) |
1221 | goto failed; | 1219 | goto failed; |
1222 | radix_tree_preload_end(); | 1220 | radix_tree_preload_end(); |
1223 | } | 1221 | } |
1224 | 1222 | ||
1225 | spin_lock(&info->lock); | 1223 | spin_lock(&info->lock); |
1226 | shmem_recalc_inode(inode); | 1224 | shmem_recalc_inode(inode); |
1227 | entry = shmem_swp_alloc(info, idx, sgp); | 1225 | entry = shmem_swp_alloc(info, idx, sgp); |
1228 | if (IS_ERR(entry)) { | 1226 | if (IS_ERR(entry)) { |
1229 | spin_unlock(&info->lock); | 1227 | spin_unlock(&info->lock); |
1230 | error = PTR_ERR(entry); | 1228 | error = PTR_ERR(entry); |
1231 | goto failed; | 1229 | goto failed; |
1232 | } | 1230 | } |
1233 | swap = *entry; | 1231 | swap = *entry; |
1234 | 1232 | ||
1235 | if (swap.val) { | 1233 | if (swap.val) { |
1236 | /* Look it up and read it in.. */ | 1234 | /* Look it up and read it in.. */ |
1237 | swappage = lookup_swap_cache(swap); | 1235 | swappage = lookup_swap_cache(swap); |
1238 | if (!swappage) { | 1236 | if (!swappage) { |
1239 | shmem_swp_unmap(entry); | 1237 | shmem_swp_unmap(entry); |
1240 | /* here we actually do the io */ | 1238 | /* here we actually do the io */ |
1241 | if (type && !(*type & VM_FAULT_MAJOR)) { | 1239 | if (type && !(*type & VM_FAULT_MAJOR)) { |
1242 | __count_vm_event(PGMAJFAULT); | 1240 | __count_vm_event(PGMAJFAULT); |
1243 | *type |= VM_FAULT_MAJOR; | 1241 | *type |= VM_FAULT_MAJOR; |
1244 | } | 1242 | } |
1245 | spin_unlock(&info->lock); | 1243 | spin_unlock(&info->lock); |
1246 | swappage = shmem_swapin(swap, gfp, info, idx); | 1244 | swappage = shmem_swapin(swap, gfp, info, idx); |
1247 | if (!swappage) { | 1245 | if (!swappage) { |
1248 | spin_lock(&info->lock); | 1246 | spin_lock(&info->lock); |
1249 | entry = shmem_swp_alloc(info, idx, sgp); | 1247 | entry = shmem_swp_alloc(info, idx, sgp); |
1250 | if (IS_ERR(entry)) | 1248 | if (IS_ERR(entry)) |
1251 | error = PTR_ERR(entry); | 1249 | error = PTR_ERR(entry); |
1252 | else { | 1250 | else { |
1253 | if (entry->val == swap.val) | 1251 | if (entry->val == swap.val) |
1254 | error = -ENOMEM; | 1252 | error = -ENOMEM; |
1255 | shmem_swp_unmap(entry); | 1253 | shmem_swp_unmap(entry); |
1256 | } | 1254 | } |
1257 | spin_unlock(&info->lock); | 1255 | spin_unlock(&info->lock); |
1258 | if (error) | 1256 | if (error) |
1259 | goto failed; | 1257 | goto failed; |
1260 | goto repeat; | 1258 | goto repeat; |
1261 | } | 1259 | } |
1262 | wait_on_page_locked(swappage); | 1260 | wait_on_page_locked(swappage); |
1263 | page_cache_release(swappage); | 1261 | page_cache_release(swappage); |
1264 | goto repeat; | 1262 | goto repeat; |
1265 | } | 1263 | } |
1266 | 1264 | ||
1267 | /* We have to do this with page locked to prevent races */ | 1265 | /* We have to do this with page locked to prevent races */ |
1268 | if (!trylock_page(swappage)) { | 1266 | if (!trylock_page(swappage)) { |
1269 | shmem_swp_unmap(entry); | 1267 | shmem_swp_unmap(entry); |
1270 | spin_unlock(&info->lock); | 1268 | spin_unlock(&info->lock); |
1271 | wait_on_page_locked(swappage); | 1269 | wait_on_page_locked(swappage); |
1272 | page_cache_release(swappage); | 1270 | page_cache_release(swappage); |
1273 | goto repeat; | 1271 | goto repeat; |
1274 | } | 1272 | } |
1275 | if (PageWriteback(swappage)) { | 1273 | if (PageWriteback(swappage)) { |
1276 | shmem_swp_unmap(entry); | 1274 | shmem_swp_unmap(entry); |
1277 | spin_unlock(&info->lock); | 1275 | spin_unlock(&info->lock); |
1278 | wait_on_page_writeback(swappage); | 1276 | wait_on_page_writeback(swappage); |
1279 | unlock_page(swappage); | 1277 | unlock_page(swappage); |
1280 | page_cache_release(swappage); | 1278 | page_cache_release(swappage); |
1281 | goto repeat; | 1279 | goto repeat; |
1282 | } | 1280 | } |
1283 | if (!PageUptodate(swappage)) { | 1281 | if (!PageUptodate(swappage)) { |
1284 | shmem_swp_unmap(entry); | 1282 | shmem_swp_unmap(entry); |
1285 | spin_unlock(&info->lock); | 1283 | spin_unlock(&info->lock); |
1286 | unlock_page(swappage); | 1284 | unlock_page(swappage); |
1287 | page_cache_release(swappage); | 1285 | page_cache_release(swappage); |
1288 | error = -EIO; | 1286 | error = -EIO; |
1289 | goto failed; | 1287 | goto failed; |
1290 | } | 1288 | } |
1291 | 1289 | ||
1292 | if (filepage) { | 1290 | if (filepage) { |
1293 | shmem_swp_set(info, entry, 0); | 1291 | shmem_swp_set(info, entry, 0); |
1294 | shmem_swp_unmap(entry); | 1292 | shmem_swp_unmap(entry); |
1295 | delete_from_swap_cache(swappage); | 1293 | delete_from_swap_cache(swappage); |
1296 | spin_unlock(&info->lock); | 1294 | spin_unlock(&info->lock); |
1297 | copy_highpage(filepage, swappage); | 1295 | copy_highpage(filepage, swappage); |
1298 | unlock_page(swappage); | 1296 | unlock_page(swappage); |
1299 | page_cache_release(swappage); | 1297 | page_cache_release(swappage); |
1300 | flush_dcache_page(filepage); | 1298 | flush_dcache_page(filepage); |
1301 | SetPageUptodate(filepage); | 1299 | SetPageUptodate(filepage); |
1302 | set_page_dirty(filepage); | 1300 | set_page_dirty(filepage); |
1303 | swap_free(swap); | 1301 | swap_free(swap); |
1304 | } else if (!(error = add_to_page_cache_locked(swappage, mapping, | 1302 | } else if (!(error = add_to_page_cache_locked(swappage, mapping, |
1305 | idx, GFP_NOWAIT))) { | 1303 | idx, GFP_NOWAIT))) { |
1306 | info->flags |= SHMEM_PAGEIN; | 1304 | info->flags |= SHMEM_PAGEIN; |
1307 | shmem_swp_set(info, entry, 0); | 1305 | shmem_swp_set(info, entry, 0); |
1308 | shmem_swp_unmap(entry); | 1306 | shmem_swp_unmap(entry); |
1309 | delete_from_swap_cache(swappage); | 1307 | delete_from_swap_cache(swappage); |
1310 | spin_unlock(&info->lock); | 1308 | spin_unlock(&info->lock); |
1311 | filepage = swappage; | 1309 | filepage = swappage; |
1312 | set_page_dirty(filepage); | 1310 | set_page_dirty(filepage); |
1313 | swap_free(swap); | 1311 | swap_free(swap); |
1314 | } else { | 1312 | } else { |
1315 | shmem_swp_unmap(entry); | 1313 | shmem_swp_unmap(entry); |
1316 | spin_unlock(&info->lock); | 1314 | spin_unlock(&info->lock); |
1317 | unlock_page(swappage); | 1315 | unlock_page(swappage); |
1318 | page_cache_release(swappage); | 1316 | page_cache_release(swappage); |
1319 | if (error == -ENOMEM) { | 1317 | if (error == -ENOMEM) { |
1320 | /* allow reclaim from this memory cgroup */ | 1318 | /* allow reclaim from this memory cgroup */ |
1321 | error = mem_cgroup_shrink_usage(current->mm, | 1319 | error = mem_cgroup_shrink_usage(current->mm, |
1322 | gfp); | 1320 | gfp); |
1323 | if (error) | 1321 | if (error) |
1324 | goto failed; | 1322 | goto failed; |
1325 | } | 1323 | } |
1326 | goto repeat; | 1324 | goto repeat; |
1327 | } | 1325 | } |
1328 | } else if (sgp == SGP_READ && !filepage) { | 1326 | } else if (sgp == SGP_READ && !filepage) { |
1329 | shmem_swp_unmap(entry); | 1327 | shmem_swp_unmap(entry); |
1330 | filepage = find_get_page(mapping, idx); | 1328 | filepage = find_get_page(mapping, idx); |
1331 | if (filepage && | 1329 | if (filepage && |
1332 | (!PageUptodate(filepage) || !trylock_page(filepage))) { | 1330 | (!PageUptodate(filepage) || !trylock_page(filepage))) { |
1333 | spin_unlock(&info->lock); | 1331 | spin_unlock(&info->lock); |
1334 | wait_on_page_locked(filepage); | 1332 | wait_on_page_locked(filepage); |
1335 | page_cache_release(filepage); | 1333 | page_cache_release(filepage); |
1336 | filepage = NULL; | 1334 | filepage = NULL; |
1337 | goto repeat; | 1335 | goto repeat; |
1338 | } | 1336 | } |
1339 | spin_unlock(&info->lock); | 1337 | spin_unlock(&info->lock); |
1340 | } else { | 1338 | } else { |
1341 | shmem_swp_unmap(entry); | 1339 | shmem_swp_unmap(entry); |
1342 | sbinfo = SHMEM_SB(inode->i_sb); | 1340 | sbinfo = SHMEM_SB(inode->i_sb); |
1343 | if (sbinfo->max_blocks) { | 1341 | if (sbinfo->max_blocks) { |
1344 | spin_lock(&sbinfo->stat_lock); | 1342 | spin_lock(&sbinfo->stat_lock); |
1345 | if (sbinfo->free_blocks == 0 || | 1343 | if (sbinfo->free_blocks == 0 || |
1346 | shmem_acct_block(info->flags)) { | 1344 | shmem_acct_block(info->flags)) { |
1347 | spin_unlock(&sbinfo->stat_lock); | 1345 | spin_unlock(&sbinfo->stat_lock); |
1348 | spin_unlock(&info->lock); | 1346 | spin_unlock(&info->lock); |
1349 | error = -ENOSPC; | 1347 | error = -ENOSPC; |
1350 | goto failed; | 1348 | goto failed; |
1351 | } | 1349 | } |
1352 | sbinfo->free_blocks--; | 1350 | sbinfo->free_blocks--; |
1353 | inode->i_blocks += BLOCKS_PER_PAGE; | 1351 | inode->i_blocks += BLOCKS_PER_PAGE; |
1354 | spin_unlock(&sbinfo->stat_lock); | 1352 | spin_unlock(&sbinfo->stat_lock); |
1355 | } else if (shmem_acct_block(info->flags)) { | 1353 | } else if (shmem_acct_block(info->flags)) { |
1356 | spin_unlock(&info->lock); | 1354 | spin_unlock(&info->lock); |
1357 | error = -ENOSPC; | 1355 | error = -ENOSPC; |
1358 | goto failed; | 1356 | goto failed; |
1359 | } | 1357 | } |
1360 | 1358 | ||
1361 | if (!filepage) { | 1359 | if (!filepage) { |
1362 | int ret; | 1360 | int ret; |
1363 | 1361 | ||
1364 | spin_unlock(&info->lock); | 1362 | spin_unlock(&info->lock); |
1365 | filepage = shmem_alloc_page(gfp, info, idx); | 1363 | filepage = shmem_alloc_page(gfp, info, idx); |
1366 | if (!filepage) { | 1364 | if (!filepage) { |
1367 | shmem_unacct_blocks(info->flags, 1); | 1365 | shmem_unacct_blocks(info->flags, 1); |
1368 | shmem_free_blocks(inode, 1); | 1366 | shmem_free_blocks(inode, 1); |
1369 | error = -ENOMEM; | 1367 | error = -ENOMEM; |
1370 | goto failed; | 1368 | goto failed; |
1371 | } | 1369 | } |
1372 | 1370 | ||
1373 | /* Precharge page while we can wait, compensate after */ | 1371 | /* Precharge page while we can wait, compensate after */ |
1374 | error = mem_cgroup_cache_charge(filepage, current->mm, | 1372 | error = mem_cgroup_cache_charge(filepage, current->mm, |
1375 | gfp & ~__GFP_HIGHMEM); | 1373 | gfp & ~__GFP_HIGHMEM); |
1376 | if (error) { | 1374 | if (error) { |
1377 | page_cache_release(filepage); | 1375 | page_cache_release(filepage); |
1378 | shmem_unacct_blocks(info->flags, 1); | 1376 | shmem_unacct_blocks(info->flags, 1); |
1379 | shmem_free_blocks(inode, 1); | 1377 | shmem_free_blocks(inode, 1); |
1380 | filepage = NULL; | 1378 | filepage = NULL; |
1381 | goto failed; | 1379 | goto failed; |
1382 | } | 1380 | } |
1383 | 1381 | ||
1384 | spin_lock(&info->lock); | 1382 | spin_lock(&info->lock); |
1385 | entry = shmem_swp_alloc(info, idx, sgp); | 1383 | entry = shmem_swp_alloc(info, idx, sgp); |
1386 | if (IS_ERR(entry)) | 1384 | if (IS_ERR(entry)) |
1387 | error = PTR_ERR(entry); | 1385 | error = PTR_ERR(entry); |
1388 | else { | 1386 | else { |
1389 | swap = *entry; | 1387 | swap = *entry; |
1390 | shmem_swp_unmap(entry); | 1388 | shmem_swp_unmap(entry); |
1391 | } | 1389 | } |
1392 | ret = error || swap.val; | 1390 | ret = error || swap.val; |
1393 | if (ret) | 1391 | if (ret) |
1394 | mem_cgroup_uncharge_cache_page(filepage); | 1392 | mem_cgroup_uncharge_cache_page(filepage); |
1395 | else | 1393 | else |
1396 | ret = add_to_page_cache_lru(filepage, mapping, | 1394 | ret = add_to_page_cache_lru(filepage, mapping, |
1397 | idx, GFP_NOWAIT); | 1395 | idx, GFP_NOWAIT); |
1398 | /* | 1396 | /* |
1399 | * At add_to_page_cache_lru() failure, uncharge will | 1397 | * At add_to_page_cache_lru() failure, uncharge will |
1400 | * be done automatically. | 1398 | * be done automatically. |
1401 | */ | 1399 | */ |
1402 | if (ret) { | 1400 | if (ret) { |
1403 | spin_unlock(&info->lock); | 1401 | spin_unlock(&info->lock); |
1404 | page_cache_release(filepage); | 1402 | page_cache_release(filepage); |
1405 | shmem_unacct_blocks(info->flags, 1); | 1403 | shmem_unacct_blocks(info->flags, 1); |
1406 | shmem_free_blocks(inode, 1); | 1404 | shmem_free_blocks(inode, 1); |
1407 | filepage = NULL; | 1405 | filepage = NULL; |
1408 | if (error) | 1406 | if (error) |
1409 | goto failed; | 1407 | goto failed; |
1410 | goto repeat; | 1408 | goto repeat; |
1411 | } | 1409 | } |
1412 | info->flags |= SHMEM_PAGEIN; | 1410 | info->flags |= SHMEM_PAGEIN; |
1413 | } | 1411 | } |
1414 | 1412 | ||
1415 | info->alloced++; | 1413 | info->alloced++; |
1416 | spin_unlock(&info->lock); | 1414 | spin_unlock(&info->lock); |
1417 | clear_highpage(filepage); | 1415 | clear_highpage(filepage); |
1418 | flush_dcache_page(filepage); | 1416 | flush_dcache_page(filepage); |
1419 | SetPageUptodate(filepage); | 1417 | SetPageUptodate(filepage); |
1420 | if (sgp == SGP_DIRTY) | 1418 | if (sgp == SGP_DIRTY) |
1421 | set_page_dirty(filepage); | 1419 | set_page_dirty(filepage); |
1422 | } | 1420 | } |
1423 | done: | 1421 | done: |
1424 | *pagep = filepage; | 1422 | *pagep = filepage; |
1425 | return 0; | 1423 | return 0; |
1426 | 1424 | ||
1427 | failed: | 1425 | failed: |
1428 | if (*pagep != filepage) { | 1426 | if (*pagep != filepage) { |
1429 | unlock_page(filepage); | 1427 | unlock_page(filepage); |
1430 | page_cache_release(filepage); | 1428 | page_cache_release(filepage); |
1431 | } | 1429 | } |
1432 | return error; | 1430 | return error; |
1433 | } | 1431 | } |
1434 | 1432 | ||
1435 | static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | 1433 | static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) |
1436 | { | 1434 | { |
1437 | struct inode *inode = vma->vm_file->f_path.dentry->d_inode; | 1435 | struct inode *inode = vma->vm_file->f_path.dentry->d_inode; |
1438 | int error; | 1436 | int error; |
1439 | int ret; | 1437 | int ret; |
1440 | 1438 | ||
1441 | if (((loff_t)vmf->pgoff << PAGE_CACHE_SHIFT) >= i_size_read(inode)) | 1439 | if (((loff_t)vmf->pgoff << PAGE_CACHE_SHIFT) >= i_size_read(inode)) |
1442 | return VM_FAULT_SIGBUS; | 1440 | return VM_FAULT_SIGBUS; |
1443 | 1441 | ||
1444 | error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret); | 1442 | error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret); |
1445 | if (error) | 1443 | if (error) |
1446 | return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS); | 1444 | return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS); |
1447 | 1445 | ||
1448 | mark_page_accessed(vmf->page); | 1446 | mark_page_accessed(vmf->page); |
1449 | return ret | VM_FAULT_LOCKED; | 1447 | return ret | VM_FAULT_LOCKED; |
1450 | } | 1448 | } |
1451 | 1449 | ||
1452 | #ifdef CONFIG_NUMA | 1450 | #ifdef CONFIG_NUMA |
1453 | static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *new) | 1451 | static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *new) |
1454 | { | 1452 | { |
1455 | struct inode *i = vma->vm_file->f_path.dentry->d_inode; | 1453 | struct inode *i = vma->vm_file->f_path.dentry->d_inode; |
1456 | return mpol_set_shared_policy(&SHMEM_I(i)->policy, vma, new); | 1454 | return mpol_set_shared_policy(&SHMEM_I(i)->policy, vma, new); |
1457 | } | 1455 | } |
1458 | 1456 | ||
1459 | static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma, | 1457 | static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma, |
1460 | unsigned long addr) | 1458 | unsigned long addr) |
1461 | { | 1459 | { |
1462 | struct inode *i = vma->vm_file->f_path.dentry->d_inode; | 1460 | struct inode *i = vma->vm_file->f_path.dentry->d_inode; |
1463 | unsigned long idx; | 1461 | unsigned long idx; |
1464 | 1462 | ||
1465 | idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; | 1463 | idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; |
1466 | return mpol_shared_policy_lookup(&SHMEM_I(i)->policy, idx); | 1464 | return mpol_shared_policy_lookup(&SHMEM_I(i)->policy, idx); |
1467 | } | 1465 | } |
1468 | #endif | 1466 | #endif |
1469 | 1467 | ||
1470 | int shmem_lock(struct file *file, int lock, struct user_struct *user) | 1468 | int shmem_lock(struct file *file, int lock, struct user_struct *user) |
1471 | { | 1469 | { |
1472 | struct inode *inode = file->f_path.dentry->d_inode; | 1470 | struct inode *inode = file->f_path.dentry->d_inode; |
1473 | struct shmem_inode_info *info = SHMEM_I(inode); | 1471 | struct shmem_inode_info *info = SHMEM_I(inode); |
1474 | int retval = -ENOMEM; | 1472 | int retval = -ENOMEM; |
1475 | 1473 | ||
1476 | spin_lock(&info->lock); | 1474 | spin_lock(&info->lock); |
1477 | if (lock && !(info->flags & VM_LOCKED)) { | 1475 | if (lock && !(info->flags & VM_LOCKED)) { |
1478 | if (!user_shm_lock(inode->i_size, user)) | 1476 | if (!user_shm_lock(inode->i_size, user)) |
1479 | goto out_nomem; | 1477 | goto out_nomem; |
1480 | info->flags |= VM_LOCKED; | 1478 | info->flags |= VM_LOCKED; |
1481 | } | 1479 | } |
1482 | if (!lock && (info->flags & VM_LOCKED) && user) { | 1480 | if (!lock && (info->flags & VM_LOCKED) && user) { |
1483 | user_shm_unlock(inode->i_size, user); | 1481 | user_shm_unlock(inode->i_size, user); |
1484 | info->flags &= ~VM_LOCKED; | 1482 | info->flags &= ~VM_LOCKED; |
1485 | } | 1483 | } |
1486 | retval = 0; | 1484 | retval = 0; |
1487 | out_nomem: | 1485 | out_nomem: |
1488 | spin_unlock(&info->lock); | 1486 | spin_unlock(&info->lock); |
1489 | return retval; | 1487 | return retval; |
1490 | } | 1488 | } |
1491 | 1489 | ||
1492 | static int shmem_mmap(struct file *file, struct vm_area_struct *vma) | 1490 | static int shmem_mmap(struct file *file, struct vm_area_struct *vma) |
1493 | { | 1491 | { |
1494 | file_accessed(file); | 1492 | file_accessed(file); |
1495 | vma->vm_ops = &shmem_vm_ops; | 1493 | vma->vm_ops = &shmem_vm_ops; |
1496 | vma->vm_flags |= VM_CAN_NONLINEAR; | 1494 | vma->vm_flags |= VM_CAN_NONLINEAR; |
1497 | return 0; | 1495 | return 0; |
1498 | } | 1496 | } |
1499 | 1497 | ||
1500 | static struct inode * | 1498 | static struct inode * |
1501 | shmem_get_inode(struct super_block *sb, int mode, dev_t dev) | 1499 | shmem_get_inode(struct super_block *sb, int mode, dev_t dev) |
1502 | { | 1500 | { |
1503 | struct inode *inode; | 1501 | struct inode *inode; |
1504 | struct shmem_inode_info *info; | 1502 | struct shmem_inode_info *info; |
1505 | struct shmem_sb_info *sbinfo = SHMEM_SB(sb); | 1503 | struct shmem_sb_info *sbinfo = SHMEM_SB(sb); |
1506 | 1504 | ||
1507 | if (shmem_reserve_inode(sb)) | 1505 | if (shmem_reserve_inode(sb)) |
1508 | return NULL; | 1506 | return NULL; |
1509 | 1507 | ||
1510 | inode = new_inode(sb); | 1508 | inode = new_inode(sb); |
1511 | if (inode) { | 1509 | if (inode) { |
1512 | inode->i_mode = mode; | 1510 | inode->i_mode = mode; |
1513 | inode->i_uid = current->fsuid; | 1511 | inode->i_uid = current->fsuid; |
1514 | inode->i_gid = current->fsgid; | 1512 | inode->i_gid = current->fsgid; |
1515 | inode->i_blocks = 0; | 1513 | inode->i_blocks = 0; |
1516 | inode->i_mapping->backing_dev_info = &shmem_backing_dev_info; | 1514 | inode->i_mapping->backing_dev_info = &shmem_backing_dev_info; |
1517 | inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; | 1515 | inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; |
1518 | inode->i_generation = get_seconds(); | 1516 | inode->i_generation = get_seconds(); |
1519 | info = SHMEM_I(inode); | 1517 | info = SHMEM_I(inode); |
1520 | memset(info, 0, (char *)inode - (char *)info); | 1518 | memset(info, 0, (char *)inode - (char *)info); |
1521 | spin_lock_init(&info->lock); | 1519 | spin_lock_init(&info->lock); |
1522 | INIT_LIST_HEAD(&info->swaplist); | 1520 | INIT_LIST_HEAD(&info->swaplist); |
1523 | 1521 | ||
1524 | switch (mode & S_IFMT) { | 1522 | switch (mode & S_IFMT) { |
1525 | default: | 1523 | default: |
1526 | inode->i_op = &shmem_special_inode_operations; | 1524 | inode->i_op = &shmem_special_inode_operations; |
1527 | init_special_inode(inode, mode, dev); | 1525 | init_special_inode(inode, mode, dev); |
1528 | break; | 1526 | break; |
1529 | case S_IFREG: | 1527 | case S_IFREG: |
1530 | inode->i_mapping->a_ops = &shmem_aops; | 1528 | inode->i_mapping->a_ops = &shmem_aops; |
1531 | inode->i_op = &shmem_inode_operations; | 1529 | inode->i_op = &shmem_inode_operations; |
1532 | inode->i_fop = &shmem_file_operations; | 1530 | inode->i_fop = &shmem_file_operations; |
1533 | mpol_shared_policy_init(&info->policy, | 1531 | mpol_shared_policy_init(&info->policy, |
1534 | shmem_get_sbmpol(sbinfo)); | 1532 | shmem_get_sbmpol(sbinfo)); |
1535 | break; | 1533 | break; |
1536 | case S_IFDIR: | 1534 | case S_IFDIR: |
1537 | inc_nlink(inode); | 1535 | inc_nlink(inode); |
1538 | /* Some things misbehave if size == 0 on a directory */ | 1536 | /* Some things misbehave if size == 0 on a directory */ |
1539 | inode->i_size = 2 * BOGO_DIRENT_SIZE; | 1537 | inode->i_size = 2 * BOGO_DIRENT_SIZE; |
1540 | inode->i_op = &shmem_dir_inode_operations; | 1538 | inode->i_op = &shmem_dir_inode_operations; |
1541 | inode->i_fop = &simple_dir_operations; | 1539 | inode->i_fop = &simple_dir_operations; |
1542 | break; | 1540 | break; |
1543 | case S_IFLNK: | 1541 | case S_IFLNK: |
1544 | /* | 1542 | /* |
1545 | * Must not load anything in the rbtree, | 1543 | * Must not load anything in the rbtree, |
1546 | * mpol_free_shared_policy will not be called. | 1544 | * mpol_free_shared_policy will not be called. |
1547 | */ | 1545 | */ |
1548 | mpol_shared_policy_init(&info->policy, NULL); | 1546 | mpol_shared_policy_init(&info->policy, NULL); |
1549 | break; | 1547 | break; |
1550 | } | 1548 | } |
1551 | } else | 1549 | } else |
1552 | shmem_free_inode(sb); | 1550 | shmem_free_inode(sb); |
1553 | return inode; | 1551 | return inode; |
1554 | } | 1552 | } |
1555 | 1553 | ||
1556 | #ifdef CONFIG_TMPFS | 1554 | #ifdef CONFIG_TMPFS |
1557 | static const struct inode_operations shmem_symlink_inode_operations; | 1555 | static const struct inode_operations shmem_symlink_inode_operations; |
1558 | static const struct inode_operations shmem_symlink_inline_operations; | 1556 | static const struct inode_operations shmem_symlink_inline_operations; |
1559 | 1557 | ||
1560 | /* | 1558 | /* |
1561 | * Normally tmpfs avoids the use of shmem_readpage and shmem_write_begin; | 1559 | * Normally tmpfs avoids the use of shmem_readpage and shmem_write_begin; |
1562 | * but providing them allows a tmpfs file to be used for splice, sendfile, and | 1560 | * but providing them allows a tmpfs file to be used for splice, sendfile, and |
1563 | * below the loop driver, in the generic fashion that many filesystems support. | 1561 | * below the loop driver, in the generic fashion that many filesystems support. |
1564 | */ | 1562 | */ |
1565 | static int shmem_readpage(struct file *file, struct page *page) | 1563 | static int shmem_readpage(struct file *file, struct page *page) |
1566 | { | 1564 | { |
1567 | struct inode *inode = page->mapping->host; | 1565 | struct inode *inode = page->mapping->host; |
1568 | int error = shmem_getpage(inode, page->index, &page, SGP_CACHE, NULL); | 1566 | int error = shmem_getpage(inode, page->index, &page, SGP_CACHE, NULL); |
1569 | unlock_page(page); | 1567 | unlock_page(page); |
1570 | return error; | 1568 | return error; |
1571 | } | 1569 | } |
1572 | 1570 | ||
1573 | static int | 1571 | static int |
1574 | shmem_write_begin(struct file *file, struct address_space *mapping, | 1572 | shmem_write_begin(struct file *file, struct address_space *mapping, |
1575 | loff_t pos, unsigned len, unsigned flags, | 1573 | loff_t pos, unsigned len, unsigned flags, |
1576 | struct page **pagep, void **fsdata) | 1574 | struct page **pagep, void **fsdata) |
1577 | { | 1575 | { |
1578 | struct inode *inode = mapping->host; | 1576 | struct inode *inode = mapping->host; |
1579 | pgoff_t index = pos >> PAGE_CACHE_SHIFT; | 1577 | pgoff_t index = pos >> PAGE_CACHE_SHIFT; |
1580 | *pagep = NULL; | 1578 | *pagep = NULL; |
1581 | return shmem_getpage(inode, index, pagep, SGP_WRITE, NULL); | 1579 | return shmem_getpage(inode, index, pagep, SGP_WRITE, NULL); |
1582 | } | 1580 | } |
1583 | 1581 | ||
1584 | static int | 1582 | static int |
1585 | shmem_write_end(struct file *file, struct address_space *mapping, | 1583 | shmem_write_end(struct file *file, struct address_space *mapping, |
1586 | loff_t pos, unsigned len, unsigned copied, | 1584 | loff_t pos, unsigned len, unsigned copied, |
1587 | struct page *page, void *fsdata) | 1585 | struct page *page, void *fsdata) |
1588 | { | 1586 | { |
1589 | struct inode *inode = mapping->host; | 1587 | struct inode *inode = mapping->host; |
1590 | 1588 | ||
1591 | if (pos + copied > inode->i_size) | 1589 | if (pos + copied > inode->i_size) |
1592 | i_size_write(inode, pos + copied); | 1590 | i_size_write(inode, pos + copied); |
1593 | 1591 | ||
1594 | unlock_page(page); | 1592 | unlock_page(page); |
1595 | set_page_dirty(page); | 1593 | set_page_dirty(page); |
1596 | page_cache_release(page); | 1594 | page_cache_release(page); |
1597 | 1595 | ||
1598 | return copied; | 1596 | return copied; |
1599 | } | 1597 | } |
1600 | 1598 | ||
1601 | static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor) | 1599 | static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor) |
1602 | { | 1600 | { |
1603 | struct inode *inode = filp->f_path.dentry->d_inode; | 1601 | struct inode *inode = filp->f_path.dentry->d_inode; |
1604 | struct address_space *mapping = inode->i_mapping; | 1602 | struct address_space *mapping = inode->i_mapping; |
1605 | unsigned long index, offset; | 1603 | unsigned long index, offset; |
1606 | enum sgp_type sgp = SGP_READ; | 1604 | enum sgp_type sgp = SGP_READ; |
1607 | 1605 | ||
1608 | /* | 1606 | /* |
1609 | * Might this read be for a stacking filesystem? Then when reading | 1607 | * Might this read be for a stacking filesystem? Then when reading |
1610 | * holes of a sparse file, we actually need to allocate those pages, | 1608 | * holes of a sparse file, we actually need to allocate those pages, |
1611 | * and even mark them dirty, so it cannot exceed the max_blocks limit. | 1609 | * and even mark them dirty, so it cannot exceed the max_blocks limit. |
1612 | */ | 1610 | */ |
1613 | if (segment_eq(get_fs(), KERNEL_DS)) | 1611 | if (segment_eq(get_fs(), KERNEL_DS)) |
1614 | sgp = SGP_DIRTY; | 1612 | sgp = SGP_DIRTY; |
1615 | 1613 | ||
1616 | index = *ppos >> PAGE_CACHE_SHIFT; | 1614 | index = *ppos >> PAGE_CACHE_SHIFT; |
1617 | offset = *ppos & ~PAGE_CACHE_MASK; | 1615 | offset = *ppos & ~PAGE_CACHE_MASK; |
1618 | 1616 | ||
1619 | for (;;) { | 1617 | for (;;) { |
1620 | struct page *page = NULL; | 1618 | struct page *page = NULL; |
1621 | unsigned long end_index, nr, ret; | 1619 | unsigned long end_index, nr, ret; |
1622 | loff_t i_size = i_size_read(inode); | 1620 | loff_t i_size = i_size_read(inode); |
1623 | 1621 | ||
1624 | end_index = i_size >> PAGE_CACHE_SHIFT; | 1622 | end_index = i_size >> PAGE_CACHE_SHIFT; |
1625 | if (index > end_index) | 1623 | if (index > end_index) |
1626 | break; | 1624 | break; |
1627 | if (index == end_index) { | 1625 | if (index == end_index) { |
1628 | nr = i_size & ~PAGE_CACHE_MASK; | 1626 | nr = i_size & ~PAGE_CACHE_MASK; |
1629 | if (nr <= offset) | 1627 | if (nr <= offset) |
1630 | break; | 1628 | break; |
1631 | } | 1629 | } |
1632 | 1630 | ||
1633 | desc->error = shmem_getpage(inode, index, &page, sgp, NULL); | 1631 | desc->error = shmem_getpage(inode, index, &page, sgp, NULL); |
1634 | if (desc->error) { | 1632 | if (desc->error) { |
1635 | if (desc->error == -EINVAL) | 1633 | if (desc->error == -EINVAL) |
1636 | desc->error = 0; | 1634 | desc->error = 0; |
1637 | break; | 1635 | break; |
1638 | } | 1636 | } |
1639 | if (page) | 1637 | if (page) |
1640 | unlock_page(page); | 1638 | unlock_page(page); |
1641 | 1639 | ||
1642 | /* | 1640 | /* |
1643 | * We must evaluate after, since reads (unlike writes) | 1641 | * We must evaluate after, since reads (unlike writes) |
1644 | * are called without i_mutex protection against truncate | 1642 | * are called without i_mutex protection against truncate |
1645 | */ | 1643 | */ |
1646 | nr = PAGE_CACHE_SIZE; | 1644 | nr = PAGE_CACHE_SIZE; |
1647 | i_size = i_size_read(inode); | 1645 | i_size = i_size_read(inode); |
1648 | end_index = i_size >> PAGE_CACHE_SHIFT; | 1646 | end_index = i_size >> PAGE_CACHE_SHIFT; |
1649 | if (index == end_index) { | 1647 | if (index == end_index) { |
1650 | nr = i_size & ~PAGE_CACHE_MASK; | 1648 | nr = i_size & ~PAGE_CACHE_MASK; |
1651 | if (nr <= offset) { | 1649 | if (nr <= offset) { |
1652 | if (page) | 1650 | if (page) |
1653 | page_cache_release(page); | 1651 | page_cache_release(page); |
1654 | break; | 1652 | break; |
1655 | } | 1653 | } |
1656 | } | 1654 | } |
1657 | nr -= offset; | 1655 | nr -= offset; |
1658 | 1656 | ||
1659 | if (page) { | 1657 | if (page) { |
1660 | /* | 1658 | /* |
1661 | * If users can be writing to this page using arbitrary | 1659 | * If users can be writing to this page using arbitrary |
1662 | * virtual addresses, take care about potential aliasing | 1660 | * virtual addresses, take care about potential aliasing |
1663 | * before reading the page on the kernel side. | 1661 | * before reading the page on the kernel side. |
1664 | */ | 1662 | */ |
1665 | if (mapping_writably_mapped(mapping)) | 1663 | if (mapping_writably_mapped(mapping)) |
1666 | flush_dcache_page(page); | 1664 | flush_dcache_page(page); |
1667 | /* | 1665 | /* |
1668 | * Mark the page accessed if we read the beginning. | 1666 | * Mark the page accessed if we read the beginning. |
1669 | */ | 1667 | */ |
1670 | if (!offset) | 1668 | if (!offset) |
1671 | mark_page_accessed(page); | 1669 | mark_page_accessed(page); |
1672 | } else { | 1670 | } else { |
1673 | page = ZERO_PAGE(0); | 1671 | page = ZERO_PAGE(0); |
1674 | page_cache_get(page); | 1672 | page_cache_get(page); |
1675 | } | 1673 | } |
1676 | 1674 | ||
1677 | /* | 1675 | /* |
1678 | * Ok, we have the page, and it's up-to-date, so | 1676 | * Ok, we have the page, and it's up-to-date, so |
1679 | * now we can copy it to user space... | 1677 | * now we can copy it to user space... |
1680 | * | 1678 | * |
1681 | * The actor routine returns how many bytes were actually used.. | 1679 | * The actor routine returns how many bytes were actually used.. |
1682 | * NOTE! This may not be the same as how much of a user buffer | 1680 | * NOTE! This may not be the same as how much of a user buffer |
1683 | * we filled up (we may be padding etc), so we can only update | 1681 | * we filled up (we may be padding etc), so we can only update |
1684 | * "pos" here (the actor routine has to update the user buffer | 1682 | * "pos" here (the actor routine has to update the user buffer |
1685 | * pointers and the remaining count). | 1683 | * pointers and the remaining count). |
1686 | */ | 1684 | */ |
1687 | ret = actor(desc, page, offset, nr); | 1685 | ret = actor(desc, page, offset, nr); |
1688 | offset += ret; | 1686 | offset += ret; |
1689 | index += offset >> PAGE_CACHE_SHIFT; | 1687 | index += offset >> PAGE_CACHE_SHIFT; |
1690 | offset &= ~PAGE_CACHE_MASK; | 1688 | offset &= ~PAGE_CACHE_MASK; |
1691 | 1689 | ||
1692 | page_cache_release(page); | 1690 | page_cache_release(page); |
1693 | if (ret != nr || !desc->count) | 1691 | if (ret != nr || !desc->count) |
1694 | break; | 1692 | break; |
1695 | 1693 | ||
1696 | cond_resched(); | 1694 | cond_resched(); |
1697 | } | 1695 | } |
1698 | 1696 | ||
1699 | *ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset; | 1697 | *ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset; |
1700 | file_accessed(filp); | 1698 | file_accessed(filp); |
1701 | } | 1699 | } |
1702 | 1700 | ||
1703 | static ssize_t shmem_file_aio_read(struct kiocb *iocb, | 1701 | static ssize_t shmem_file_aio_read(struct kiocb *iocb, |
1704 | const struct iovec *iov, unsigned long nr_segs, loff_t pos) | 1702 | const struct iovec *iov, unsigned long nr_segs, loff_t pos) |
1705 | { | 1703 | { |
1706 | struct file *filp = iocb->ki_filp; | 1704 | struct file *filp = iocb->ki_filp; |
1707 | ssize_t retval; | 1705 | ssize_t retval; |
1708 | unsigned long seg; | 1706 | unsigned long seg; |
1709 | size_t count; | 1707 | size_t count; |
1710 | loff_t *ppos = &iocb->ki_pos; | 1708 | loff_t *ppos = &iocb->ki_pos; |
1711 | 1709 | ||
1712 | retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE); | 1710 | retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE); |
1713 | if (retval) | 1711 | if (retval) |
1714 | return retval; | 1712 | return retval; |
1715 | 1713 | ||
1716 | for (seg = 0; seg < nr_segs; seg++) { | 1714 | for (seg = 0; seg < nr_segs; seg++) { |
1717 | read_descriptor_t desc; | 1715 | read_descriptor_t desc; |
1718 | 1716 | ||
1719 | desc.written = 0; | 1717 | desc.written = 0; |
1720 | desc.arg.buf = iov[seg].iov_base; | 1718 | desc.arg.buf = iov[seg].iov_base; |
1721 | desc.count = iov[seg].iov_len; | 1719 | desc.count = iov[seg].iov_len; |
1722 | if (desc.count == 0) | 1720 | if (desc.count == 0) |
1723 | continue; | 1721 | continue; |
1724 | desc.error = 0; | 1722 | desc.error = 0; |
1725 | do_shmem_file_read(filp, ppos, &desc, file_read_actor); | 1723 | do_shmem_file_read(filp, ppos, &desc, file_read_actor); |
1726 | retval += desc.written; | 1724 | retval += desc.written; |
1727 | if (desc.error) { | 1725 | if (desc.error) { |
1728 | retval = retval ?: desc.error; | 1726 | retval = retval ?: desc.error; |
1729 | break; | 1727 | break; |
1730 | } | 1728 | } |
1731 | if (desc.count > 0) | 1729 | if (desc.count > 0) |
1732 | break; | 1730 | break; |
1733 | } | 1731 | } |
1734 | return retval; | 1732 | return retval; |
1735 | } | 1733 | } |
1736 | 1734 | ||
1737 | static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf) | 1735 | static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf) |
1738 | { | 1736 | { |
1739 | struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb); | 1737 | struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb); |
1740 | 1738 | ||
1741 | buf->f_type = TMPFS_MAGIC; | 1739 | buf->f_type = TMPFS_MAGIC; |
1742 | buf->f_bsize = PAGE_CACHE_SIZE; | 1740 | buf->f_bsize = PAGE_CACHE_SIZE; |
1743 | buf->f_namelen = NAME_MAX; | 1741 | buf->f_namelen = NAME_MAX; |
1744 | spin_lock(&sbinfo->stat_lock); | 1742 | spin_lock(&sbinfo->stat_lock); |
1745 | if (sbinfo->max_blocks) { | 1743 | if (sbinfo->max_blocks) { |
1746 | buf->f_blocks = sbinfo->max_blocks; | 1744 | buf->f_blocks = sbinfo->max_blocks; |
1747 | buf->f_bavail = buf->f_bfree = sbinfo->free_blocks; | 1745 | buf->f_bavail = buf->f_bfree = sbinfo->free_blocks; |
1748 | } | 1746 | } |
1749 | if (sbinfo->max_inodes) { | 1747 | if (sbinfo->max_inodes) { |
1750 | buf->f_files = sbinfo->max_inodes; | 1748 | buf->f_files = sbinfo->max_inodes; |
1751 | buf->f_ffree = sbinfo->free_inodes; | 1749 | buf->f_ffree = sbinfo->free_inodes; |
1752 | } | 1750 | } |
1753 | /* else leave those fields 0 like simple_statfs */ | 1751 | /* else leave those fields 0 like simple_statfs */ |
1754 | spin_unlock(&sbinfo->stat_lock); | 1752 | spin_unlock(&sbinfo->stat_lock); |
1755 | return 0; | 1753 | return 0; |
1756 | } | 1754 | } |
1757 | 1755 | ||
1758 | /* | 1756 | /* |
1759 | * File creation. Allocate an inode, and we're done.. | 1757 | * File creation. Allocate an inode, and we're done.. |
1760 | */ | 1758 | */ |
1761 | static int | 1759 | static int |
1762 | shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev) | 1760 | shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev) |
1763 | { | 1761 | { |
1764 | struct inode *inode = shmem_get_inode(dir->i_sb, mode, dev); | 1762 | struct inode *inode = shmem_get_inode(dir->i_sb, mode, dev); |
1765 | int error = -ENOSPC; | 1763 | int error = -ENOSPC; |
1766 | 1764 | ||
1767 | if (inode) { | 1765 | if (inode) { |
1768 | error = security_inode_init_security(inode, dir, NULL, NULL, | 1766 | error = security_inode_init_security(inode, dir, NULL, NULL, |
1769 | NULL); | 1767 | NULL); |
1770 | if (error) { | 1768 | if (error) { |
1771 | if (error != -EOPNOTSUPP) { | 1769 | if (error != -EOPNOTSUPP) { |
1772 | iput(inode); | 1770 | iput(inode); |
1773 | return error; | 1771 | return error; |
1774 | } | 1772 | } |
1775 | } | 1773 | } |
1776 | error = shmem_acl_init(inode, dir); | 1774 | error = shmem_acl_init(inode, dir); |
1777 | if (error) { | 1775 | if (error) { |
1778 | iput(inode); | 1776 | iput(inode); |
1779 | return error; | 1777 | return error; |
1780 | } | 1778 | } |
1781 | if (dir->i_mode & S_ISGID) { | 1779 | if (dir->i_mode & S_ISGID) { |
1782 | inode->i_gid = dir->i_gid; | 1780 | inode->i_gid = dir->i_gid; |
1783 | if (S_ISDIR(mode)) | 1781 | if (S_ISDIR(mode)) |
1784 | inode->i_mode |= S_ISGID; | 1782 | inode->i_mode |= S_ISGID; |
1785 | } | 1783 | } |
1786 | dir->i_size += BOGO_DIRENT_SIZE; | 1784 | dir->i_size += BOGO_DIRENT_SIZE; |
1787 | dir->i_ctime = dir->i_mtime = CURRENT_TIME; | 1785 | dir->i_ctime = dir->i_mtime = CURRENT_TIME; |
1788 | d_instantiate(dentry, inode); | 1786 | d_instantiate(dentry, inode); |
1789 | dget(dentry); /* Extra count - pin the dentry in core */ | 1787 | dget(dentry); /* Extra count - pin the dentry in core */ |
1790 | } | 1788 | } |
1791 | return error; | 1789 | return error; |
1792 | } | 1790 | } |
1793 | 1791 | ||
1794 | static int shmem_mkdir(struct inode *dir, struct dentry *dentry, int mode) | 1792 | static int shmem_mkdir(struct inode *dir, struct dentry *dentry, int mode) |
1795 | { | 1793 | { |
1796 | int error; | 1794 | int error; |
1797 | 1795 | ||
1798 | if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0))) | 1796 | if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0))) |
1799 | return error; | 1797 | return error; |
1800 | inc_nlink(dir); | 1798 | inc_nlink(dir); |
1801 | return 0; | 1799 | return 0; |
1802 | } | 1800 | } |
1803 | 1801 | ||
1804 | static int shmem_create(struct inode *dir, struct dentry *dentry, int mode, | 1802 | static int shmem_create(struct inode *dir, struct dentry *dentry, int mode, |
1805 | struct nameidata *nd) | 1803 | struct nameidata *nd) |
1806 | { | 1804 | { |
1807 | return shmem_mknod(dir, dentry, mode | S_IFREG, 0); | 1805 | return shmem_mknod(dir, dentry, mode | S_IFREG, 0); |
1808 | } | 1806 | } |
1809 | 1807 | ||
1810 | /* | 1808 | /* |
1811 | * Link a file.. | 1809 | * Link a file.. |
1812 | */ | 1810 | */ |
1813 | static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) | 1811 | static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) |
1814 | { | 1812 | { |
1815 | struct inode *inode = old_dentry->d_inode; | 1813 | struct inode *inode = old_dentry->d_inode; |
1816 | int ret; | 1814 | int ret; |
1817 | 1815 | ||
1818 | /* | 1816 | /* |
1819 | * No ordinary (disk based) filesystem counts links as inodes; | 1817 | * No ordinary (disk based) filesystem counts links as inodes; |
1820 | * but each new link needs a new dentry, pinning lowmem, and | 1818 | * but each new link needs a new dentry, pinning lowmem, and |
1821 | * tmpfs dentries cannot be pruned until they are unlinked. | 1819 | * tmpfs dentries cannot be pruned until they are unlinked. |
1822 | */ | 1820 | */ |
1823 | ret = shmem_reserve_inode(inode->i_sb); | 1821 | ret = shmem_reserve_inode(inode->i_sb); |
1824 | if (ret) | 1822 | if (ret) |
1825 | goto out; | 1823 | goto out; |
1826 | 1824 | ||
1827 | dir->i_size += BOGO_DIRENT_SIZE; | 1825 | dir->i_size += BOGO_DIRENT_SIZE; |
1828 | inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME; | 1826 | inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME; |
1829 | inc_nlink(inode); | 1827 | inc_nlink(inode); |
1830 | atomic_inc(&inode->i_count); /* New dentry reference */ | 1828 | atomic_inc(&inode->i_count); /* New dentry reference */ |
1831 | dget(dentry); /* Extra pinning count for the created dentry */ | 1829 | dget(dentry); /* Extra pinning count for the created dentry */ |
1832 | d_instantiate(dentry, inode); | 1830 | d_instantiate(dentry, inode); |
1833 | out: | 1831 | out: |
1834 | return ret; | 1832 | return ret; |
1835 | } | 1833 | } |
1836 | 1834 | ||
1837 | static int shmem_unlink(struct inode *dir, struct dentry *dentry) | 1835 | static int shmem_unlink(struct inode *dir, struct dentry *dentry) |
1838 | { | 1836 | { |
1839 | struct inode *inode = dentry->d_inode; | 1837 | struct inode *inode = dentry->d_inode; |
1840 | 1838 | ||
1841 | if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode)) | 1839 | if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode)) |
1842 | shmem_free_inode(inode->i_sb); | 1840 | shmem_free_inode(inode->i_sb); |
1843 | 1841 | ||
1844 | dir->i_size -= BOGO_DIRENT_SIZE; | 1842 | dir->i_size -= BOGO_DIRENT_SIZE; |
1845 | inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME; | 1843 | inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME; |
1846 | drop_nlink(inode); | 1844 | drop_nlink(inode); |
1847 | dput(dentry); /* Undo the count from "create" - this does all the work */ | 1845 | dput(dentry); /* Undo the count from "create" - this does all the work */ |
1848 | return 0; | 1846 | return 0; |
1849 | } | 1847 | } |
1850 | 1848 | ||
1851 | static int shmem_rmdir(struct inode *dir, struct dentry *dentry) | 1849 | static int shmem_rmdir(struct inode *dir, struct dentry *dentry) |
1852 | { | 1850 | { |
1853 | if (!simple_empty(dentry)) | 1851 | if (!simple_empty(dentry)) |
1854 | return -ENOTEMPTY; | 1852 | return -ENOTEMPTY; |
1855 | 1853 | ||
1856 | drop_nlink(dentry->d_inode); | 1854 | drop_nlink(dentry->d_inode); |
1857 | drop_nlink(dir); | 1855 | drop_nlink(dir); |
1858 | return shmem_unlink(dir, dentry); | 1856 | return shmem_unlink(dir, dentry); |
1859 | } | 1857 | } |
1860 | 1858 | ||
1861 | /* | 1859 | /* |
1862 | * The VFS layer already does all the dentry stuff for rename, | 1860 | * The VFS layer already does all the dentry stuff for rename, |
1863 | * we just have to decrement the usage count for the target if | 1861 | * we just have to decrement the usage count for the target if |
1864 | * it exists so that the VFS layer correctly frees it when it | 1862 | * it exists so that the VFS layer correctly frees it when it |
1865 | * gets overwritten. | 1863 | * gets overwritten. |
1866 | */ | 1864 | */ |
1867 | static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) | 1865 | static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) |
1868 | { | 1866 | { |
1869 | struct inode *inode = old_dentry->d_inode; | 1867 | struct inode *inode = old_dentry->d_inode; |
1870 | int they_are_dirs = S_ISDIR(inode->i_mode); | 1868 | int they_are_dirs = S_ISDIR(inode->i_mode); |
1871 | 1869 | ||
1872 | if (!simple_empty(new_dentry)) | 1870 | if (!simple_empty(new_dentry)) |
1873 | return -ENOTEMPTY; | 1871 | return -ENOTEMPTY; |
1874 | 1872 | ||
1875 | if (new_dentry->d_inode) { | 1873 | if (new_dentry->d_inode) { |
1876 | (void) shmem_unlink(new_dir, new_dentry); | 1874 | (void) shmem_unlink(new_dir, new_dentry); |
1877 | if (they_are_dirs) | 1875 | if (they_are_dirs) |
1878 | drop_nlink(old_dir); | 1876 | drop_nlink(old_dir); |
1879 | } else if (they_are_dirs) { | 1877 | } else if (they_are_dirs) { |
1880 | drop_nlink(old_dir); | 1878 | drop_nlink(old_dir); |
1881 | inc_nlink(new_dir); | 1879 | inc_nlink(new_dir); |
1882 | } | 1880 | } |
1883 | 1881 | ||
1884 | old_dir->i_size -= BOGO_DIRENT_SIZE; | 1882 | old_dir->i_size -= BOGO_DIRENT_SIZE; |
1885 | new_dir->i_size += BOGO_DIRENT_SIZE; | 1883 | new_dir->i_size += BOGO_DIRENT_SIZE; |
1886 | old_dir->i_ctime = old_dir->i_mtime = | 1884 | old_dir->i_ctime = old_dir->i_mtime = |
1887 | new_dir->i_ctime = new_dir->i_mtime = | 1885 | new_dir->i_ctime = new_dir->i_mtime = |
1888 | inode->i_ctime = CURRENT_TIME; | 1886 | inode->i_ctime = CURRENT_TIME; |
1889 | return 0; | 1887 | return 0; |
1890 | } | 1888 | } |
1891 | 1889 | ||
1892 | static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname) | 1890 | static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname) |
1893 | { | 1891 | { |
1894 | int error; | 1892 | int error; |
1895 | int len; | 1893 | int len; |
1896 | struct inode *inode; | 1894 | struct inode *inode; |
1897 | struct page *page = NULL; | 1895 | struct page *page = NULL; |
1898 | char *kaddr; | 1896 | char *kaddr; |
1899 | struct shmem_inode_info *info; | 1897 | struct shmem_inode_info *info; |
1900 | 1898 | ||
1901 | len = strlen(symname) + 1; | 1899 | len = strlen(symname) + 1; |
1902 | if (len > PAGE_CACHE_SIZE) | 1900 | if (len > PAGE_CACHE_SIZE) |
1903 | return -ENAMETOOLONG; | 1901 | return -ENAMETOOLONG; |
1904 | 1902 | ||
1905 | inode = shmem_get_inode(dir->i_sb, S_IFLNK|S_IRWXUGO, 0); | 1903 | inode = shmem_get_inode(dir->i_sb, S_IFLNK|S_IRWXUGO, 0); |
1906 | if (!inode) | 1904 | if (!inode) |
1907 | return -ENOSPC; | 1905 | return -ENOSPC; |
1908 | 1906 | ||
1909 | error = security_inode_init_security(inode, dir, NULL, NULL, | 1907 | error = security_inode_init_security(inode, dir, NULL, NULL, |
1910 | NULL); | 1908 | NULL); |
1911 | if (error) { | 1909 | if (error) { |
1912 | if (error != -EOPNOTSUPP) { | 1910 | if (error != -EOPNOTSUPP) { |
1913 | iput(inode); | 1911 | iput(inode); |
1914 | return error; | 1912 | return error; |
1915 | } | 1913 | } |
1916 | error = 0; | 1914 | error = 0; |
1917 | } | 1915 | } |
1918 | 1916 | ||
1919 | info = SHMEM_I(inode); | 1917 | info = SHMEM_I(inode); |
1920 | inode->i_size = len-1; | 1918 | inode->i_size = len-1; |
1921 | if (len <= (char *)inode - (char *)info) { | 1919 | if (len <= (char *)inode - (char *)info) { |
1922 | /* do it inline */ | 1920 | /* do it inline */ |
1923 | memcpy(info, symname, len); | 1921 | memcpy(info, symname, len); |
1924 | inode->i_op = &shmem_symlink_inline_operations; | 1922 | inode->i_op = &shmem_symlink_inline_operations; |
1925 | } else { | 1923 | } else { |
1926 | error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL); | 1924 | error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL); |
1927 | if (error) { | 1925 | if (error) { |
1928 | iput(inode); | 1926 | iput(inode); |
1929 | return error; | 1927 | return error; |
1930 | } | 1928 | } |
1931 | unlock_page(page); | 1929 | unlock_page(page); |
1932 | inode->i_mapping->a_ops = &shmem_aops; | 1930 | inode->i_mapping->a_ops = &shmem_aops; |
1933 | inode->i_op = &shmem_symlink_inode_operations; | 1931 | inode->i_op = &shmem_symlink_inode_operations; |
1934 | kaddr = kmap_atomic(page, KM_USER0); | 1932 | kaddr = kmap_atomic(page, KM_USER0); |
1935 | memcpy(kaddr, symname, len); | 1933 | memcpy(kaddr, symname, len); |
1936 | kunmap_atomic(kaddr, KM_USER0); | 1934 | kunmap_atomic(kaddr, KM_USER0); |
1937 | set_page_dirty(page); | 1935 | set_page_dirty(page); |
1938 | page_cache_release(page); | 1936 | page_cache_release(page); |
1939 | } | 1937 | } |
1940 | if (dir->i_mode & S_ISGID) | 1938 | if (dir->i_mode & S_ISGID) |
1941 | inode->i_gid = dir->i_gid; | 1939 | inode->i_gid = dir->i_gid; |
1942 | dir->i_size += BOGO_DIRENT_SIZE; | 1940 | dir->i_size += BOGO_DIRENT_SIZE; |
1943 | dir->i_ctime = dir->i_mtime = CURRENT_TIME; | 1941 | dir->i_ctime = dir->i_mtime = CURRENT_TIME; |
1944 | d_instantiate(dentry, inode); | 1942 | d_instantiate(dentry, inode); |
1945 | dget(dentry); | 1943 | dget(dentry); |
1946 | return 0; | 1944 | return 0; |
1947 | } | 1945 | } |
1948 | 1946 | ||
1949 | static void *shmem_follow_link_inline(struct dentry *dentry, struct nameidata *nd) | 1947 | static void *shmem_follow_link_inline(struct dentry *dentry, struct nameidata *nd) |
1950 | { | 1948 | { |
1951 | nd_set_link(nd, (char *)SHMEM_I(dentry->d_inode)); | 1949 | nd_set_link(nd, (char *)SHMEM_I(dentry->d_inode)); |
1952 | return NULL; | 1950 | return NULL; |
1953 | } | 1951 | } |
1954 | 1952 | ||
1955 | static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd) | 1953 | static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd) |
1956 | { | 1954 | { |
1957 | struct page *page = NULL; | 1955 | struct page *page = NULL; |
1958 | int res = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL); | 1956 | int res = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL); |
1959 | nd_set_link(nd, res ? ERR_PTR(res) : kmap(page)); | 1957 | nd_set_link(nd, res ? ERR_PTR(res) : kmap(page)); |
1960 | if (page) | 1958 | if (page) |
1961 | unlock_page(page); | 1959 | unlock_page(page); |
1962 | return page; | 1960 | return page; |
1963 | } | 1961 | } |
1964 | 1962 | ||
1965 | static void shmem_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie) | 1963 | static void shmem_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie) |
1966 | { | 1964 | { |
1967 | if (!IS_ERR(nd_get_link(nd))) { | 1965 | if (!IS_ERR(nd_get_link(nd))) { |
1968 | struct page *page = cookie; | 1966 | struct page *page = cookie; |
1969 | kunmap(page); | 1967 | kunmap(page); |
1970 | mark_page_accessed(page); | 1968 | mark_page_accessed(page); |
1971 | page_cache_release(page); | 1969 | page_cache_release(page); |
1972 | } | 1970 | } |
1973 | } | 1971 | } |
1974 | 1972 | ||
1975 | static const struct inode_operations shmem_symlink_inline_operations = { | 1973 | static const struct inode_operations shmem_symlink_inline_operations = { |
1976 | .readlink = generic_readlink, | 1974 | .readlink = generic_readlink, |
1977 | .follow_link = shmem_follow_link_inline, | 1975 | .follow_link = shmem_follow_link_inline, |
1978 | }; | 1976 | }; |
1979 | 1977 | ||
1980 | static const struct inode_operations shmem_symlink_inode_operations = { | 1978 | static const struct inode_operations shmem_symlink_inode_operations = { |
1981 | .truncate = shmem_truncate, | 1979 | .truncate = shmem_truncate, |
1982 | .readlink = generic_readlink, | 1980 | .readlink = generic_readlink, |
1983 | .follow_link = shmem_follow_link, | 1981 | .follow_link = shmem_follow_link, |
1984 | .put_link = shmem_put_link, | 1982 | .put_link = shmem_put_link, |
1985 | }; | 1983 | }; |
1986 | 1984 | ||
1987 | #ifdef CONFIG_TMPFS_POSIX_ACL | 1985 | #ifdef CONFIG_TMPFS_POSIX_ACL |
1988 | /* | 1986 | /* |
1989 | * Superblocks without xattr inode operations will get security.* xattr | 1987 | * Superblocks without xattr inode operations will get security.* xattr |
1990 | * support from the VFS "for free". As soon as we have any other xattrs | 1988 | * support from the VFS "for free". As soon as we have any other xattrs |
1991 | * like ACLs, we also need to implement the security.* handlers at | 1989 | * like ACLs, we also need to implement the security.* handlers at |
1992 | * filesystem level, though. | 1990 | * filesystem level, though. |
1993 | */ | 1991 | */ |
1994 | 1992 | ||
1995 | static size_t shmem_xattr_security_list(struct inode *inode, char *list, | 1993 | static size_t shmem_xattr_security_list(struct inode *inode, char *list, |
1996 | size_t list_len, const char *name, | 1994 | size_t list_len, const char *name, |
1997 | size_t name_len) | 1995 | size_t name_len) |
1998 | { | 1996 | { |
1999 | return security_inode_listsecurity(inode, list, list_len); | 1997 | return security_inode_listsecurity(inode, list, list_len); |
2000 | } | 1998 | } |
2001 | 1999 | ||
2002 | static int shmem_xattr_security_get(struct inode *inode, const char *name, | 2000 | static int shmem_xattr_security_get(struct inode *inode, const char *name, |
2003 | void *buffer, size_t size) | 2001 | void *buffer, size_t size) |
2004 | { | 2002 | { |
2005 | if (strcmp(name, "") == 0) | 2003 | if (strcmp(name, "") == 0) |
2006 | return -EINVAL; | 2004 | return -EINVAL; |
2007 | return xattr_getsecurity(inode, name, buffer, size); | 2005 | return xattr_getsecurity(inode, name, buffer, size); |
2008 | } | 2006 | } |
2009 | 2007 | ||
2010 | static int shmem_xattr_security_set(struct inode *inode, const char *name, | 2008 | static int shmem_xattr_security_set(struct inode *inode, const char *name, |
2011 | const void *value, size_t size, int flags) | 2009 | const void *value, size_t size, int flags) |
2012 | { | 2010 | { |
2013 | if (strcmp(name, "") == 0) | 2011 | if (strcmp(name, "") == 0) |
2014 | return -EINVAL; | 2012 | return -EINVAL; |
2015 | return security_inode_setsecurity(inode, name, value, size, flags); | 2013 | return security_inode_setsecurity(inode, name, value, size, flags); |
2016 | } | 2014 | } |
2017 | 2015 | ||
2018 | static struct xattr_handler shmem_xattr_security_handler = { | 2016 | static struct xattr_handler shmem_xattr_security_handler = { |
2019 | .prefix = XATTR_SECURITY_PREFIX, | 2017 | .prefix = XATTR_SECURITY_PREFIX, |
2020 | .list = shmem_xattr_security_list, | 2018 | .list = shmem_xattr_security_list, |
2021 | .get = shmem_xattr_security_get, | 2019 | .get = shmem_xattr_security_get, |
2022 | .set = shmem_xattr_security_set, | 2020 | .set = shmem_xattr_security_set, |
2023 | }; | 2021 | }; |
2024 | 2022 | ||
2025 | static struct xattr_handler *shmem_xattr_handlers[] = { | 2023 | static struct xattr_handler *shmem_xattr_handlers[] = { |
2026 | &shmem_xattr_acl_access_handler, | 2024 | &shmem_xattr_acl_access_handler, |
2027 | &shmem_xattr_acl_default_handler, | 2025 | &shmem_xattr_acl_default_handler, |
2028 | &shmem_xattr_security_handler, | 2026 | &shmem_xattr_security_handler, |
2029 | NULL | 2027 | NULL |
2030 | }; | 2028 | }; |
2031 | #endif | 2029 | #endif |
2032 | 2030 | ||
2033 | static struct dentry *shmem_get_parent(struct dentry *child) | 2031 | static struct dentry *shmem_get_parent(struct dentry *child) |
2034 | { | 2032 | { |
2035 | return ERR_PTR(-ESTALE); | 2033 | return ERR_PTR(-ESTALE); |
2036 | } | 2034 | } |
2037 | 2035 | ||
2038 | static int shmem_match(struct inode *ino, void *vfh) | 2036 | static int shmem_match(struct inode *ino, void *vfh) |
2039 | { | 2037 | { |
2040 | __u32 *fh = vfh; | 2038 | __u32 *fh = vfh; |
2041 | __u64 inum = fh[2]; | 2039 | __u64 inum = fh[2]; |
2042 | inum = (inum << 32) | fh[1]; | 2040 | inum = (inum << 32) | fh[1]; |
2043 | return ino->i_ino == inum && fh[0] == ino->i_generation; | 2041 | return ino->i_ino == inum && fh[0] == ino->i_generation; |
2044 | } | 2042 | } |
2045 | 2043 | ||
2046 | static struct dentry *shmem_fh_to_dentry(struct super_block *sb, | 2044 | static struct dentry *shmem_fh_to_dentry(struct super_block *sb, |
2047 | struct fid *fid, int fh_len, int fh_type) | 2045 | struct fid *fid, int fh_len, int fh_type) |
2048 | { | 2046 | { |
2049 | struct inode *inode; | 2047 | struct inode *inode; |
2050 | struct dentry *dentry = NULL; | 2048 | struct dentry *dentry = NULL; |
2051 | u64 inum = fid->raw[2]; | 2049 | u64 inum = fid->raw[2]; |
2052 | inum = (inum << 32) | fid->raw[1]; | 2050 | inum = (inum << 32) | fid->raw[1]; |
2053 | 2051 | ||
2054 | if (fh_len < 3) | 2052 | if (fh_len < 3) |
2055 | return NULL; | 2053 | return NULL; |
2056 | 2054 | ||
2057 | inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]), | 2055 | inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]), |
2058 | shmem_match, fid->raw); | 2056 | shmem_match, fid->raw); |
2059 | if (inode) { | 2057 | if (inode) { |
2060 | dentry = d_find_alias(inode); | 2058 | dentry = d_find_alias(inode); |
2061 | iput(inode); | 2059 | iput(inode); |
2062 | } | 2060 | } |
2063 | 2061 | ||
2064 | return dentry; | 2062 | return dentry; |
2065 | } | 2063 | } |
2066 | 2064 | ||
2067 | static int shmem_encode_fh(struct dentry *dentry, __u32 *fh, int *len, | 2065 | static int shmem_encode_fh(struct dentry *dentry, __u32 *fh, int *len, |
2068 | int connectable) | 2066 | int connectable) |
2069 | { | 2067 | { |
2070 | struct inode *inode = dentry->d_inode; | 2068 | struct inode *inode = dentry->d_inode; |
2071 | 2069 | ||
2072 | if (*len < 3) | 2070 | if (*len < 3) |
2073 | return 255; | 2071 | return 255; |
2074 | 2072 | ||
2075 | if (hlist_unhashed(&inode->i_hash)) { | 2073 | if (hlist_unhashed(&inode->i_hash)) { |
2076 | /* Unfortunately insert_inode_hash is not idempotent, | 2074 | /* Unfortunately insert_inode_hash is not idempotent, |
2077 | * so as we hash inodes here rather than at creation | 2075 | * so as we hash inodes here rather than at creation |
2078 | * time, we need a lock to ensure we only try | 2076 | * time, we need a lock to ensure we only try |
2079 | * to do it once | 2077 | * to do it once |
2080 | */ | 2078 | */ |
2081 | static DEFINE_SPINLOCK(lock); | 2079 | static DEFINE_SPINLOCK(lock); |
2082 | spin_lock(&lock); | 2080 | spin_lock(&lock); |
2083 | if (hlist_unhashed(&inode->i_hash)) | 2081 | if (hlist_unhashed(&inode->i_hash)) |
2084 | __insert_inode_hash(inode, | 2082 | __insert_inode_hash(inode, |
2085 | inode->i_ino + inode->i_generation); | 2083 | inode->i_ino + inode->i_generation); |
2086 | spin_unlock(&lock); | 2084 | spin_unlock(&lock); |
2087 | } | 2085 | } |
2088 | 2086 | ||
2089 | fh[0] = inode->i_generation; | 2087 | fh[0] = inode->i_generation; |
2090 | fh[1] = inode->i_ino; | 2088 | fh[1] = inode->i_ino; |
2091 | fh[2] = ((__u64)inode->i_ino) >> 32; | 2089 | fh[2] = ((__u64)inode->i_ino) >> 32; |
2092 | 2090 | ||
2093 | *len = 3; | 2091 | *len = 3; |
2094 | return 1; | 2092 | return 1; |
2095 | } | 2093 | } |
2096 | 2094 | ||
2097 | static const struct export_operations shmem_export_ops = { | 2095 | static const struct export_operations shmem_export_ops = { |
2098 | .get_parent = shmem_get_parent, | 2096 | .get_parent = shmem_get_parent, |
2099 | .encode_fh = shmem_encode_fh, | 2097 | .encode_fh = shmem_encode_fh, |
2100 | .fh_to_dentry = shmem_fh_to_dentry, | 2098 | .fh_to_dentry = shmem_fh_to_dentry, |
2101 | }; | 2099 | }; |
2102 | 2100 | ||
2103 | static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo, | 2101 | static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo, |
2104 | bool remount) | 2102 | bool remount) |
2105 | { | 2103 | { |
2106 | char *this_char, *value, *rest; | 2104 | char *this_char, *value, *rest; |
2107 | 2105 | ||
2108 | while (options != NULL) { | 2106 | while (options != NULL) { |
2109 | this_char = options; | 2107 | this_char = options; |
2110 | for (;;) { | 2108 | for (;;) { |
2111 | /* | 2109 | /* |
2112 | * NUL-terminate this option: unfortunately, | 2110 | * NUL-terminate this option: unfortunately, |
2113 | * mount options form a comma-separated list, | 2111 | * mount options form a comma-separated list, |
2114 | * but mpol's nodelist may also contain commas. | 2112 | * but mpol's nodelist may also contain commas. |
2115 | */ | 2113 | */ |
2116 | options = strchr(options, ','); | 2114 | options = strchr(options, ','); |
2117 | if (options == NULL) | 2115 | if (options == NULL) |
2118 | break; | 2116 | break; |
2119 | options++; | 2117 | options++; |
2120 | if (!isdigit(*options)) { | 2118 | if (!isdigit(*options)) { |
2121 | options[-1] = '\0'; | 2119 | options[-1] = '\0'; |
2122 | break; | 2120 | break; |
2123 | } | 2121 | } |
2124 | } | 2122 | } |
2125 | if (!*this_char) | 2123 | if (!*this_char) |
2126 | continue; | 2124 | continue; |
2127 | if ((value = strchr(this_char,'=')) != NULL) { | 2125 | if ((value = strchr(this_char,'=')) != NULL) { |
2128 | *value++ = 0; | 2126 | *value++ = 0; |
2129 | } else { | 2127 | } else { |
2130 | printk(KERN_ERR | 2128 | printk(KERN_ERR |
2131 | "tmpfs: No value for mount option '%s'\n", | 2129 | "tmpfs: No value for mount option '%s'\n", |
2132 | this_char); | 2130 | this_char); |
2133 | return 1; | 2131 | return 1; |
2134 | } | 2132 | } |
2135 | 2133 | ||
2136 | if (!strcmp(this_char,"size")) { | 2134 | if (!strcmp(this_char,"size")) { |
2137 | unsigned long long size; | 2135 | unsigned long long size; |
2138 | size = memparse(value,&rest); | 2136 | size = memparse(value,&rest); |
2139 | if (*rest == '%') { | 2137 | if (*rest == '%') { |
2140 | size <<= PAGE_SHIFT; | 2138 | size <<= PAGE_SHIFT; |
2141 | size *= totalram_pages; | 2139 | size *= totalram_pages; |
2142 | do_div(size, 100); | 2140 | do_div(size, 100); |
2143 | rest++; | 2141 | rest++; |
2144 | } | 2142 | } |
2145 | if (*rest) | 2143 | if (*rest) |
2146 | goto bad_val; | 2144 | goto bad_val; |
2147 | sbinfo->max_blocks = | 2145 | sbinfo->max_blocks = |
2148 | DIV_ROUND_UP(size, PAGE_CACHE_SIZE); | 2146 | DIV_ROUND_UP(size, PAGE_CACHE_SIZE); |
2149 | } else if (!strcmp(this_char,"nr_blocks")) { | 2147 | } else if (!strcmp(this_char,"nr_blocks")) { |
2150 | sbinfo->max_blocks = memparse(value, &rest); | 2148 | sbinfo->max_blocks = memparse(value, &rest); |
2151 | if (*rest) | 2149 | if (*rest) |
2152 | goto bad_val; | 2150 | goto bad_val; |
2153 | } else if (!strcmp(this_char,"nr_inodes")) { | 2151 | } else if (!strcmp(this_char,"nr_inodes")) { |
2154 | sbinfo->max_inodes = memparse(value, &rest); | 2152 | sbinfo->max_inodes = memparse(value, &rest); |
2155 | if (*rest) | 2153 | if (*rest) |
2156 | goto bad_val; | 2154 | goto bad_val; |
2157 | } else if (!strcmp(this_char,"mode")) { | 2155 | } else if (!strcmp(this_char,"mode")) { |
2158 | if (remount) | 2156 | if (remount) |
2159 | continue; | 2157 | continue; |
2160 | sbinfo->mode = simple_strtoul(value, &rest, 8) & 07777; | 2158 | sbinfo->mode = simple_strtoul(value, &rest, 8) & 07777; |
2161 | if (*rest) | 2159 | if (*rest) |
2162 | goto bad_val; | 2160 | goto bad_val; |
2163 | } else if (!strcmp(this_char,"uid")) { | 2161 | } else if (!strcmp(this_char,"uid")) { |
2164 | if (remount) | 2162 | if (remount) |
2165 | continue; | 2163 | continue; |
2166 | sbinfo->uid = simple_strtoul(value, &rest, 0); | 2164 | sbinfo->uid = simple_strtoul(value, &rest, 0); |
2167 | if (*rest) | 2165 | if (*rest) |
2168 | goto bad_val; | 2166 | goto bad_val; |
2169 | } else if (!strcmp(this_char,"gid")) { | 2167 | } else if (!strcmp(this_char,"gid")) { |
2170 | if (remount) | 2168 | if (remount) |
2171 | continue; | 2169 | continue; |
2172 | sbinfo->gid = simple_strtoul(value, &rest, 0); | 2170 | sbinfo->gid = simple_strtoul(value, &rest, 0); |
2173 | if (*rest) | 2171 | if (*rest) |
2174 | goto bad_val; | 2172 | goto bad_val; |
2175 | } else if (!strcmp(this_char,"mpol")) { | 2173 | } else if (!strcmp(this_char,"mpol")) { |
2176 | if (mpol_parse_str(value, &sbinfo->mpol, 1)) | 2174 | if (mpol_parse_str(value, &sbinfo->mpol, 1)) |
2177 | goto bad_val; | 2175 | goto bad_val; |
2178 | } else { | 2176 | } else { |
2179 | printk(KERN_ERR "tmpfs: Bad mount option %s\n", | 2177 | printk(KERN_ERR "tmpfs: Bad mount option %s\n", |
2180 | this_char); | 2178 | this_char); |
2181 | return 1; | 2179 | return 1; |
2182 | } | 2180 | } |
2183 | } | 2181 | } |
2184 | return 0; | 2182 | return 0; |
2185 | 2183 | ||
2186 | bad_val: | 2184 | bad_val: |
2187 | printk(KERN_ERR "tmpfs: Bad value '%s' for mount option '%s'\n", | 2185 | printk(KERN_ERR "tmpfs: Bad value '%s' for mount option '%s'\n", |
2188 | value, this_char); | 2186 | value, this_char); |
2189 | return 1; | 2187 | return 1; |
2190 | 2188 | ||
2191 | } | 2189 | } |
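/* Editor's note (not part of this commit): a hedged userspace sketch of
 * feeding shmem_parse_options() above the kind of option string it
 * expects. The mount point and values are purely illustrative. */
#include <sys/mount.h>

int mount_example_tmpfs(void)
{
	/* "size" accepts a %-of-RAM form or a k/m/g suffix via memparse(),
	 * and "mode" is parsed as octal, exactly as in the code above. */
	return mount("tmpfs", "/mnt/example", "tmpfs", 0,
		     "size=64m,nr_inodes=4096,mode=1777");
}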
2192 | 2190 | ||
2193 | static int shmem_remount_fs(struct super_block *sb, int *flags, char *data) | 2191 | static int shmem_remount_fs(struct super_block *sb, int *flags, char *data) |
2194 | { | 2192 | { |
2195 | struct shmem_sb_info *sbinfo = SHMEM_SB(sb); | 2193 | struct shmem_sb_info *sbinfo = SHMEM_SB(sb); |
2196 | struct shmem_sb_info config = *sbinfo; | 2194 | struct shmem_sb_info config = *sbinfo; |
2197 | unsigned long blocks; | 2195 | unsigned long blocks; |
2198 | unsigned long inodes; | 2196 | unsigned long inodes; |
2199 | int error = -EINVAL; | 2197 | int error = -EINVAL; |
2200 | 2198 | ||
2201 | if (shmem_parse_options(data, &config, true)) | 2199 | if (shmem_parse_options(data, &config, true)) |
2202 | return error; | 2200 | return error; |
2203 | 2201 | ||
2204 | spin_lock(&sbinfo->stat_lock); | 2202 | spin_lock(&sbinfo->stat_lock); |
2205 | blocks = sbinfo->max_blocks - sbinfo->free_blocks; | 2203 | blocks = sbinfo->max_blocks - sbinfo->free_blocks; |
2206 | inodes = sbinfo->max_inodes - sbinfo->free_inodes; | 2204 | inodes = sbinfo->max_inodes - sbinfo->free_inodes; |
2207 | if (config.max_blocks < blocks) | 2205 | if (config.max_blocks < blocks) |
2208 | goto out; | 2206 | goto out; |
2209 | if (config.max_inodes < inodes) | 2207 | if (config.max_inodes < inodes) |
2210 | goto out; | 2208 | goto out; |
2211 | /* | 2209 | /* |
2212 | * Those tests also disallow limited->unlimited while any are in | 2210 | * Those tests also disallow limited->unlimited while any are in |
2213 | * use, so i_blocks will always be zero when max_blocks is zero; | 2211 | * use, so i_blocks will always be zero when max_blocks is zero; |
2214 | * but we must separately disallow unlimited->limited, because | 2212 | * but we must separately disallow unlimited->limited, because |
2215 | * in that case we have no record of how much is already in use. | 2213 | * in that case we have no record of how much is already in use. |
2216 | */ | 2214 | */ |
2217 | if (config.max_blocks && !sbinfo->max_blocks) | 2215 | if (config.max_blocks && !sbinfo->max_blocks) |
2218 | goto out; | 2216 | goto out; |
2219 | if (config.max_inodes && !sbinfo->max_inodes) | 2217 | if (config.max_inodes && !sbinfo->max_inodes) |
2220 | goto out; | 2218 | goto out; |
2221 | 2219 | ||
2222 | error = 0; | 2220 | error = 0; |
2223 | sbinfo->max_blocks = config.max_blocks; | 2221 | sbinfo->max_blocks = config.max_blocks; |
2224 | sbinfo->free_blocks = config.max_blocks - blocks; | 2222 | sbinfo->free_blocks = config.max_blocks - blocks; |
2225 | sbinfo->max_inodes = config.max_inodes; | 2223 | sbinfo->max_inodes = config.max_inodes; |
2226 | sbinfo->free_inodes = config.max_inodes - inodes; | 2224 | sbinfo->free_inodes = config.max_inodes - inodes; |
2227 | 2225 | ||
2228 | mpol_put(sbinfo->mpol); | 2226 | mpol_put(sbinfo->mpol); |
2229 | sbinfo->mpol = config.mpol; /* transfers initial ref */ | 2227 | sbinfo->mpol = config.mpol; /* transfers initial ref */ |
2230 | out: | 2228 | out: |
2231 | spin_unlock(&sbinfo->stat_lock); | 2229 | spin_unlock(&sbinfo->stat_lock); |
2232 | return error; | 2230 | return error; |
2233 | } | 2231 | } |
2234 | 2232 | ||
2235 | static int shmem_show_options(struct seq_file *seq, struct vfsmount *vfs) | 2233 | static int shmem_show_options(struct seq_file *seq, struct vfsmount *vfs) |
2236 | { | 2234 | { |
2237 | struct shmem_sb_info *sbinfo = SHMEM_SB(vfs->mnt_sb); | 2235 | struct shmem_sb_info *sbinfo = SHMEM_SB(vfs->mnt_sb); |
2238 | 2236 | ||
2239 | if (sbinfo->max_blocks != shmem_default_max_blocks()) | 2237 | if (sbinfo->max_blocks != shmem_default_max_blocks()) |
2240 | seq_printf(seq, ",size=%luk", | 2238 | seq_printf(seq, ",size=%luk", |
2241 | sbinfo->max_blocks << (PAGE_CACHE_SHIFT - 10)); | 2239 | sbinfo->max_blocks << (PAGE_CACHE_SHIFT - 10)); |
2242 | if (sbinfo->max_inodes != shmem_default_max_inodes()) | 2240 | if (sbinfo->max_inodes != shmem_default_max_inodes()) |
2243 | seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes); | 2241 | seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes); |
2244 | if (sbinfo->mode != (S_IRWXUGO | S_ISVTX)) | 2242 | if (sbinfo->mode != (S_IRWXUGO | S_ISVTX)) |
2245 | seq_printf(seq, ",mode=%03o", sbinfo->mode); | 2243 | seq_printf(seq, ",mode=%03o", sbinfo->mode); |
2246 | if (sbinfo->uid != 0) | 2244 | if (sbinfo->uid != 0) |
2247 | seq_printf(seq, ",uid=%u", sbinfo->uid); | 2245 | seq_printf(seq, ",uid=%u", sbinfo->uid); |
2248 | if (sbinfo->gid != 0) | 2246 | if (sbinfo->gid != 0) |
2249 | seq_printf(seq, ",gid=%u", sbinfo->gid); | 2247 | seq_printf(seq, ",gid=%u", sbinfo->gid); |
2250 | shmem_show_mpol(seq, sbinfo->mpol); | 2248 | shmem_show_mpol(seq, sbinfo->mpol); |
2251 | return 0; | 2249 | return 0; |
2252 | } | 2250 | } |
2253 | #endif /* CONFIG_TMPFS */ | 2251 | #endif /* CONFIG_TMPFS */ |
2254 | 2252 | ||
2255 | static void shmem_put_super(struct super_block *sb) | 2253 | static void shmem_put_super(struct super_block *sb) |
2256 | { | 2254 | { |
2257 | kfree(sb->s_fs_info); | 2255 | kfree(sb->s_fs_info); |
2258 | sb->s_fs_info = NULL; | 2256 | sb->s_fs_info = NULL; |
2259 | } | 2257 | } |
2260 | 2258 | ||
2261 | static int shmem_fill_super(struct super_block *sb, | 2259 | static int shmem_fill_super(struct super_block *sb, |
2262 | void *data, int silent) | 2260 | void *data, int silent) |
2263 | { | 2261 | { |
2264 | struct inode *inode; | 2262 | struct inode *inode; |
2265 | struct dentry *root; | 2263 | struct dentry *root; |
2266 | struct shmem_sb_info *sbinfo; | 2264 | struct shmem_sb_info *sbinfo; |
2267 | int err = -ENOMEM; | 2265 | int err = -ENOMEM; |
2268 | 2266 | ||
2269 | /* Round up to L1_CACHE_BYTES to resist false sharing */ | 2267 | /* Round up to L1_CACHE_BYTES to resist false sharing */ |
2270 | sbinfo = kmalloc(max((int)sizeof(struct shmem_sb_info), | 2268 | sbinfo = kmalloc(max((int)sizeof(struct shmem_sb_info), |
2271 | L1_CACHE_BYTES), GFP_KERNEL); | 2269 | L1_CACHE_BYTES), GFP_KERNEL); |
2272 | if (!sbinfo) | 2270 | if (!sbinfo) |
2273 | return -ENOMEM; | 2271 | return -ENOMEM; |
2274 | 2272 | ||
2275 | sbinfo->max_blocks = 0; | 2273 | sbinfo->max_blocks = 0; |
2276 | sbinfo->max_inodes = 0; | 2274 | sbinfo->max_inodes = 0; |
2277 | sbinfo->mode = S_IRWXUGO | S_ISVTX; | 2275 | sbinfo->mode = S_IRWXUGO | S_ISVTX; |
2278 | sbinfo->uid = current->fsuid; | 2276 | sbinfo->uid = current->fsuid; |
2279 | sbinfo->gid = current->fsgid; | 2277 | sbinfo->gid = current->fsgid; |
2280 | sbinfo->mpol = NULL; | 2278 | sbinfo->mpol = NULL; |
2281 | sb->s_fs_info = sbinfo; | 2279 | sb->s_fs_info = sbinfo; |
2282 | 2280 | ||
2283 | #ifdef CONFIG_TMPFS | 2281 | #ifdef CONFIG_TMPFS |
2284 | /* | 2282 | /* |
2285 | * Per default we only allow half of the physical ram per | 2283 | * Per default we only allow half of the physical ram per |
2286 | * tmpfs instance, limiting inodes to one per page of lowmem; | 2284 | * tmpfs instance, limiting inodes to one per page of lowmem; |
2287 | * but the internal instance is left unlimited. | 2285 | * but the internal instance is left unlimited. |
2288 | */ | 2286 | */ |
2289 | if (!(sb->s_flags & MS_NOUSER)) { | 2287 | if (!(sb->s_flags & MS_NOUSER)) { |
2290 | sbinfo->max_blocks = shmem_default_max_blocks(); | 2288 | sbinfo->max_blocks = shmem_default_max_blocks(); |
2291 | sbinfo->max_inodes = shmem_default_max_inodes(); | 2289 | sbinfo->max_inodes = shmem_default_max_inodes(); |
2292 | if (shmem_parse_options(data, sbinfo, false)) { | 2290 | if (shmem_parse_options(data, sbinfo, false)) { |
2293 | err = -EINVAL; | 2291 | err = -EINVAL; |
2294 | goto failed; | 2292 | goto failed; |
2295 | } | 2293 | } |
2296 | } | 2294 | } |
2297 | sb->s_export_op = &shmem_export_ops; | 2295 | sb->s_export_op = &shmem_export_ops; |
2298 | #else | 2296 | #else |
2299 | sb->s_flags |= MS_NOUSER; | 2297 | sb->s_flags |= MS_NOUSER; |
2300 | #endif | 2298 | #endif |
2301 | 2299 | ||
2302 | spin_lock_init(&sbinfo->stat_lock); | 2300 | spin_lock_init(&sbinfo->stat_lock); |
2303 | sbinfo->free_blocks = sbinfo->max_blocks; | 2301 | sbinfo->free_blocks = sbinfo->max_blocks; |
2304 | sbinfo->free_inodes = sbinfo->max_inodes; | 2302 | sbinfo->free_inodes = sbinfo->max_inodes; |
2305 | 2303 | ||
2306 | sb->s_maxbytes = SHMEM_MAX_BYTES; | 2304 | sb->s_maxbytes = SHMEM_MAX_BYTES; |
2307 | sb->s_blocksize = PAGE_CACHE_SIZE; | 2305 | sb->s_blocksize = PAGE_CACHE_SIZE; |
2308 | sb->s_blocksize_bits = PAGE_CACHE_SHIFT; | 2306 | sb->s_blocksize_bits = PAGE_CACHE_SHIFT; |
2309 | sb->s_magic = TMPFS_MAGIC; | 2307 | sb->s_magic = TMPFS_MAGIC; |
2310 | sb->s_op = &shmem_ops; | 2308 | sb->s_op = &shmem_ops; |
2311 | sb->s_time_gran = 1; | 2309 | sb->s_time_gran = 1; |
2312 | #ifdef CONFIG_TMPFS_POSIX_ACL | 2310 | #ifdef CONFIG_TMPFS_POSIX_ACL |
2313 | sb->s_xattr = shmem_xattr_handlers; | 2311 | sb->s_xattr = shmem_xattr_handlers; |
2314 | sb->s_flags |= MS_POSIXACL; | 2312 | sb->s_flags |= MS_POSIXACL; |
2315 | #endif | 2313 | #endif |
2316 | 2314 | ||
2317 | inode = shmem_get_inode(sb, S_IFDIR | sbinfo->mode, 0); | 2315 | inode = shmem_get_inode(sb, S_IFDIR | sbinfo->mode, 0); |
2318 | if (!inode) | 2316 | if (!inode) |
2319 | goto failed; | 2317 | goto failed; |
2320 | inode->i_uid = sbinfo->uid; | 2318 | inode->i_uid = sbinfo->uid; |
2321 | inode->i_gid = sbinfo->gid; | 2319 | inode->i_gid = sbinfo->gid; |
2322 | root = d_alloc_root(inode); | 2320 | root = d_alloc_root(inode); |
2323 | if (!root) | 2321 | if (!root) |
2324 | goto failed_iput; | 2322 | goto failed_iput; |
2325 | sb->s_root = root; | 2323 | sb->s_root = root; |
2326 | return 0; | 2324 | return 0; |
2327 | 2325 | ||
2328 | failed_iput: | 2326 | failed_iput: |
2329 | iput(inode); | 2327 | iput(inode); |
2330 | failed: | 2328 | failed: |
2331 | shmem_put_super(sb); | 2329 | shmem_put_super(sb); |
2332 | return err; | 2330 | return err; |
2333 | } | 2331 | } |
2334 | 2332 | ||
2335 | static struct kmem_cache *shmem_inode_cachep; | 2333 | static struct kmem_cache *shmem_inode_cachep; |
2336 | 2334 | ||
2337 | static struct inode *shmem_alloc_inode(struct super_block *sb) | 2335 | static struct inode *shmem_alloc_inode(struct super_block *sb) |
2338 | { | 2336 | { |
2339 | struct shmem_inode_info *p; | 2337 | struct shmem_inode_info *p; |
2340 | p = (struct shmem_inode_info *)kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL); | 2338 | p = (struct shmem_inode_info *)kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL); |
2341 | if (!p) | 2339 | if (!p) |
2342 | return NULL; | 2340 | return NULL; |
2343 | return &p->vfs_inode; | 2341 | return &p->vfs_inode; |
2344 | } | 2342 | } |
2345 | 2343 | ||
2346 | static void shmem_destroy_inode(struct inode *inode) | 2344 | static void shmem_destroy_inode(struct inode *inode) |
2347 | { | 2345 | { |
2348 | if ((inode->i_mode & S_IFMT) == S_IFREG) { | 2346 | if ((inode->i_mode & S_IFMT) == S_IFREG) { |
2349 | /* only struct inode is valid if it's an inline symlink */ | 2347 | /* only struct inode is valid if it's an inline symlink */ |
2350 | mpol_free_shared_policy(&SHMEM_I(inode)->policy); | 2348 | mpol_free_shared_policy(&SHMEM_I(inode)->policy); |
2351 | } | 2349 | } |
2352 | shmem_acl_destroy_inode(inode); | 2350 | shmem_acl_destroy_inode(inode); |
2353 | kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode)); | 2351 | kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode)); |
2354 | } | 2352 | } |
2355 | 2353 | ||
2356 | static void init_once(void *foo) | 2354 | static void init_once(void *foo) |
2357 | { | 2355 | { |
2358 | struct shmem_inode_info *p = (struct shmem_inode_info *) foo; | 2356 | struct shmem_inode_info *p = (struct shmem_inode_info *) foo; |
2359 | 2357 | ||
2360 | inode_init_once(&p->vfs_inode); | 2358 | inode_init_once(&p->vfs_inode); |
2361 | #ifdef CONFIG_TMPFS_POSIX_ACL | 2359 | #ifdef CONFIG_TMPFS_POSIX_ACL |
2362 | p->i_acl = NULL; | 2360 | p->i_acl = NULL; |
2363 | p->i_default_acl = NULL; | 2361 | p->i_default_acl = NULL; |
2364 | #endif | 2362 | #endif |
2365 | } | 2363 | } |
2366 | 2364 | ||
2367 | static int init_inodecache(void) | 2365 | static int init_inodecache(void) |
2368 | { | 2366 | { |
2369 | shmem_inode_cachep = kmem_cache_create("shmem_inode_cache", | 2367 | shmem_inode_cachep = kmem_cache_create("shmem_inode_cache", |
2370 | sizeof(struct shmem_inode_info), | 2368 | sizeof(struct shmem_inode_info), |
2371 | 0, SLAB_PANIC, init_once); | 2369 | 0, SLAB_PANIC, init_once); |
2372 | return 0; | 2370 | return 0; |
2373 | } | 2371 | } |
2374 | 2372 | ||
2375 | static void destroy_inodecache(void) | 2373 | static void destroy_inodecache(void) |
2376 | { | 2374 | { |
2377 | kmem_cache_destroy(shmem_inode_cachep); | 2375 | kmem_cache_destroy(shmem_inode_cachep); |
2378 | } | 2376 | } |
2379 | 2377 | ||
2380 | static const struct address_space_operations shmem_aops = { | 2378 | static const struct address_space_operations shmem_aops = { |
2381 | .writepage = shmem_writepage, | 2379 | .writepage = shmem_writepage, |
2382 | .set_page_dirty = __set_page_dirty_no_writeback, | 2380 | .set_page_dirty = __set_page_dirty_no_writeback, |
2383 | #ifdef CONFIG_TMPFS | 2381 | #ifdef CONFIG_TMPFS |
2384 | .readpage = shmem_readpage, | 2382 | .readpage = shmem_readpage, |
2385 | .write_begin = shmem_write_begin, | 2383 | .write_begin = shmem_write_begin, |
2386 | .write_end = shmem_write_end, | 2384 | .write_end = shmem_write_end, |
2387 | #endif | 2385 | #endif |
2388 | .migratepage = migrate_page, | 2386 | .migratepage = migrate_page, |
2389 | }; | 2387 | }; |
2390 | 2388 | ||
2391 | static const struct file_operations shmem_file_operations = { | 2389 | static const struct file_operations shmem_file_operations = { |
2392 | .mmap = shmem_mmap, | 2390 | .mmap = shmem_mmap, |
2393 | #ifdef CONFIG_TMPFS | 2391 | #ifdef CONFIG_TMPFS |
2394 | .llseek = generic_file_llseek, | 2392 | .llseek = generic_file_llseek, |
2395 | .read = do_sync_read, | 2393 | .read = do_sync_read, |
2396 | .write = do_sync_write, | 2394 | .write = do_sync_write, |
2397 | .aio_read = shmem_file_aio_read, | 2395 | .aio_read = shmem_file_aio_read, |
2398 | .aio_write = generic_file_aio_write, | 2396 | .aio_write = generic_file_aio_write, |
2399 | .fsync = simple_sync_file, | 2397 | .fsync = simple_sync_file, |
2400 | .splice_read = generic_file_splice_read, | 2398 | .splice_read = generic_file_splice_read, |
2401 | .splice_write = generic_file_splice_write, | 2399 | .splice_write = generic_file_splice_write, |
2402 | #endif | 2400 | #endif |
2403 | }; | 2401 | }; |
2404 | 2402 | ||
2405 | static const struct inode_operations shmem_inode_operations = { | 2403 | static const struct inode_operations shmem_inode_operations = { |
2406 | .truncate = shmem_truncate, | 2404 | .truncate = shmem_truncate, |
2407 | .setattr = shmem_notify_change, | 2405 | .setattr = shmem_notify_change, |
2408 | .truncate_range = shmem_truncate_range, | 2406 | .truncate_range = shmem_truncate_range, |
2409 | #ifdef CONFIG_TMPFS_POSIX_ACL | 2407 | #ifdef CONFIG_TMPFS_POSIX_ACL |
2410 | .setxattr = generic_setxattr, | 2408 | .setxattr = generic_setxattr, |
2411 | .getxattr = generic_getxattr, | 2409 | .getxattr = generic_getxattr, |
2412 | .listxattr = generic_listxattr, | 2410 | .listxattr = generic_listxattr, |
2413 | .removexattr = generic_removexattr, | 2411 | .removexattr = generic_removexattr, |
2414 | .permission = shmem_permission, | 2412 | .permission = shmem_permission, |
2415 | #endif | 2413 | #endif |
2416 | 2414 | ||
2417 | }; | 2415 | }; |
2418 | 2416 | ||
2419 | static const struct inode_operations shmem_dir_inode_operations = { | 2417 | static const struct inode_operations shmem_dir_inode_operations = { |
2420 | #ifdef CONFIG_TMPFS | 2418 | #ifdef CONFIG_TMPFS |
2421 | .create = shmem_create, | 2419 | .create = shmem_create, |
2422 | .lookup = simple_lookup, | 2420 | .lookup = simple_lookup, |
2423 | .link = shmem_link, | 2421 | .link = shmem_link, |
2424 | .unlink = shmem_unlink, | 2422 | .unlink = shmem_unlink, |
2425 | .symlink = shmem_symlink, | 2423 | .symlink = shmem_symlink, |
2426 | .mkdir = shmem_mkdir, | 2424 | .mkdir = shmem_mkdir, |
2427 | .rmdir = shmem_rmdir, | 2425 | .rmdir = shmem_rmdir, |
2428 | .mknod = shmem_mknod, | 2426 | .mknod = shmem_mknod, |
2429 | .rename = shmem_rename, | 2427 | .rename = shmem_rename, |
2430 | #endif | 2428 | #endif |
2431 | #ifdef CONFIG_TMPFS_POSIX_ACL | 2429 | #ifdef CONFIG_TMPFS_POSIX_ACL |
2432 | .setattr = shmem_notify_change, | 2430 | .setattr = shmem_notify_change, |
2433 | .setxattr = generic_setxattr, | 2431 | .setxattr = generic_setxattr, |
2434 | .getxattr = generic_getxattr, | 2432 | .getxattr = generic_getxattr, |
2435 | .listxattr = generic_listxattr, | 2433 | .listxattr = generic_listxattr, |
2436 | .removexattr = generic_removexattr, | 2434 | .removexattr = generic_removexattr, |
2437 | .permission = shmem_permission, | 2435 | .permission = shmem_permission, |
2438 | #endif | 2436 | #endif |
2439 | }; | 2437 | }; |
2440 | 2438 | ||
2441 | static const struct inode_operations shmem_special_inode_operations = { | 2439 | static const struct inode_operations shmem_special_inode_operations = { |
2442 | #ifdef CONFIG_TMPFS_POSIX_ACL | 2440 | #ifdef CONFIG_TMPFS_POSIX_ACL |
2443 | .setattr = shmem_notify_change, | 2441 | .setattr = shmem_notify_change, |
2444 | .setxattr = generic_setxattr, | 2442 | .setxattr = generic_setxattr, |
2445 | .getxattr = generic_getxattr, | 2443 | .getxattr = generic_getxattr, |
2446 | .listxattr = generic_listxattr, | 2444 | .listxattr = generic_listxattr, |
2447 | .removexattr = generic_removexattr, | 2445 | .removexattr = generic_removexattr, |
2448 | .permission = shmem_permission, | 2446 | .permission = shmem_permission, |
2449 | #endif | 2447 | #endif |
2450 | }; | 2448 | }; |
2451 | 2449 | ||
2452 | static const struct super_operations shmem_ops = { | 2450 | static const struct super_operations shmem_ops = { |
2453 | .alloc_inode = shmem_alloc_inode, | 2451 | .alloc_inode = shmem_alloc_inode, |
2454 | .destroy_inode = shmem_destroy_inode, | 2452 | .destroy_inode = shmem_destroy_inode, |
2455 | #ifdef CONFIG_TMPFS | 2453 | #ifdef CONFIG_TMPFS |
2456 | .statfs = shmem_statfs, | 2454 | .statfs = shmem_statfs, |
2457 | .remount_fs = shmem_remount_fs, | 2455 | .remount_fs = shmem_remount_fs, |
2458 | .show_options = shmem_show_options, | 2456 | .show_options = shmem_show_options, |
2459 | #endif | 2457 | #endif |
2460 | .delete_inode = shmem_delete_inode, | 2458 | .delete_inode = shmem_delete_inode, |
2461 | .drop_inode = generic_delete_inode, | 2459 | .drop_inode = generic_delete_inode, |
2462 | .put_super = shmem_put_super, | 2460 | .put_super = shmem_put_super, |
2463 | }; | 2461 | }; |
2464 | 2462 | ||
2465 | static struct vm_operations_struct shmem_vm_ops = { | 2463 | static struct vm_operations_struct shmem_vm_ops = { |
2466 | .fault = shmem_fault, | 2464 | .fault = shmem_fault, |
2467 | #ifdef CONFIG_NUMA | 2465 | #ifdef CONFIG_NUMA |
2468 | .set_policy = shmem_set_policy, | 2466 | .set_policy = shmem_set_policy, |
2469 | .get_policy = shmem_get_policy, | 2467 | .get_policy = shmem_get_policy, |
2470 | #endif | 2468 | #endif |
2471 | }; | 2469 | }; |
2472 | 2470 | ||
2473 | 2471 | ||
2474 | static int shmem_get_sb(struct file_system_type *fs_type, | 2472 | static int shmem_get_sb(struct file_system_type *fs_type, |
2475 | int flags, const char *dev_name, void *data, struct vfsmount *mnt) | 2473 | int flags, const char *dev_name, void *data, struct vfsmount *mnt) |
2476 | { | 2474 | { |
2477 | return get_sb_nodev(fs_type, flags, data, shmem_fill_super, mnt); | 2475 | return get_sb_nodev(fs_type, flags, data, shmem_fill_super, mnt); |
2478 | } | 2476 | } |
2479 | 2477 | ||
2480 | static struct file_system_type tmpfs_fs_type = { | 2478 | static struct file_system_type tmpfs_fs_type = { |
2481 | .owner = THIS_MODULE, | 2479 | .owner = THIS_MODULE, |
2482 | .name = "tmpfs", | 2480 | .name = "tmpfs", |
2483 | .get_sb = shmem_get_sb, | 2481 | .get_sb = shmem_get_sb, |
2484 | .kill_sb = kill_litter_super, | 2482 | .kill_sb = kill_litter_super, |
2485 | }; | 2483 | }; |
2486 | static struct vfsmount *shm_mnt; | 2484 | static struct vfsmount *shm_mnt; |
2487 | 2485 | ||
2488 | static int __init init_tmpfs(void) | 2486 | static int __init init_tmpfs(void) |
2489 | { | 2487 | { |
2490 | int error; | 2488 | int error; |
2491 | 2489 | ||
2492 | error = bdi_init(&shmem_backing_dev_info); | 2490 | error = bdi_init(&shmem_backing_dev_info); |
2493 | if (error) | 2491 | if (error) |
2494 | goto out4; | 2492 | goto out4; |
2495 | 2493 | ||
2496 | error = init_inodecache(); | 2494 | error = init_inodecache(); |
2497 | if (error) | 2495 | if (error) |
2498 | goto out3; | 2496 | goto out3; |
2499 | 2497 | ||
2500 | error = register_filesystem(&tmpfs_fs_type); | 2498 | error = register_filesystem(&tmpfs_fs_type); |
2501 | if (error) { | 2499 | if (error) { |
2502 | printk(KERN_ERR "Could not register tmpfs\n"); | 2500 | printk(KERN_ERR "Could not register tmpfs\n"); |
2503 | goto out2; | 2501 | goto out2; |
2504 | } | 2502 | } |
2505 | 2503 | ||
2506 | shm_mnt = vfs_kern_mount(&tmpfs_fs_type, MS_NOUSER, | 2504 | shm_mnt = vfs_kern_mount(&tmpfs_fs_type, MS_NOUSER, |
2507 | tmpfs_fs_type.name, NULL); | 2505 | tmpfs_fs_type.name, NULL); |
2508 | if (IS_ERR(shm_mnt)) { | 2506 | if (IS_ERR(shm_mnt)) { |
2509 | error = PTR_ERR(shm_mnt); | 2507 | error = PTR_ERR(shm_mnt); |
2510 | printk(KERN_ERR "Could not kern_mount tmpfs\n"); | 2508 | printk(KERN_ERR "Could not kern_mount tmpfs\n"); |
2511 | goto out1; | 2509 | goto out1; |
2512 | } | 2510 | } |
2513 | return 0; | 2511 | return 0; |
2514 | 2512 | ||
2515 | out1: | 2513 | out1: |
2516 | unregister_filesystem(&tmpfs_fs_type); | 2514 | unregister_filesystem(&tmpfs_fs_type); |
2517 | out2: | 2515 | out2: |
2518 | destroy_inodecache(); | 2516 | destroy_inodecache(); |
2519 | out3: | 2517 | out3: |
2520 | bdi_destroy(&shmem_backing_dev_info); | 2518 | bdi_destroy(&shmem_backing_dev_info); |
2521 | out4: | 2519 | out4: |
2522 | shm_mnt = ERR_PTR(error); | 2520 | shm_mnt = ERR_PTR(error); |
2523 | return error; | 2521 | return error; |
2524 | } | 2522 | } |
2525 | module_init(init_tmpfs) | 2523 | module_init(init_tmpfs) |
2526 | 2524 | ||
2527 | /** | 2525 | /** |
2528 | * shmem_file_setup - get an unlinked file living in tmpfs | 2526 | * shmem_file_setup - get an unlinked file living in tmpfs |
2529 | * @name: name for dentry (to be seen in /proc/<pid>/maps | 2527 | * @name: name for dentry (to be seen in /proc/<pid>/maps |
2529 | * @name: name for dentry (to be seen in /proc/<pid>/maps) | 2527 | * @name: name for dentry (to be seen in /proc/<pid>/maps) |
2530 | * @size: size to be set for the file | 2528 | * @size: size to be set for the file |
2531 | * @flags: vm_flags | 2529 | * @flags: vm_flags |
2532 | */ | 2530 | */ |
2533 | struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags) | 2531 | struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags) |
2534 | { | 2532 | { |
2535 | int error; | 2533 | int error; |
2536 | struct file *file; | 2534 | struct file *file; |
2537 | struct inode *inode; | 2535 | struct inode *inode; |
2538 | struct dentry *dentry, *root; | 2536 | struct dentry *dentry, *root; |
2539 | struct qstr this; | 2537 | struct qstr this; |
2540 | 2538 | ||
2541 | if (IS_ERR(shm_mnt)) | 2539 | if (IS_ERR(shm_mnt)) |
2542 | return (void *)shm_mnt; | 2540 | return (void *)shm_mnt; |
2543 | 2541 | ||
2544 | if (size < 0 || size > SHMEM_MAX_BYTES) | 2542 | if (size < 0 || size > SHMEM_MAX_BYTES) |
2545 | return ERR_PTR(-EINVAL); | 2543 | return ERR_PTR(-EINVAL); |
2546 | 2544 | ||
2547 | if (shmem_acct_size(flags, size)) | 2545 | if (shmem_acct_size(flags, size)) |
2548 | return ERR_PTR(-ENOMEM); | 2546 | return ERR_PTR(-ENOMEM); |
2549 | 2547 | ||
2550 | error = -ENOMEM; | 2548 | error = -ENOMEM; |
2551 | this.name = name; | 2549 | this.name = name; |
2552 | this.len = strlen(name); | 2550 | this.len = strlen(name); |
2553 | this.hash = 0; /* will go */ | 2551 | this.hash = 0; /* will go */ |
2554 | root = shm_mnt->mnt_root; | 2552 | root = shm_mnt->mnt_root; |
2555 | dentry = d_alloc(root, &this); | 2553 | dentry = d_alloc(root, &this); |
2556 | if (!dentry) | 2554 | if (!dentry) |
2557 | goto put_memory; | 2555 | goto put_memory; |
2558 | 2556 | ||
2559 | error = -ENFILE; | 2557 | error = -ENFILE; |
2560 | file = get_empty_filp(); | 2558 | file = get_empty_filp(); |
2561 | if (!file) | 2559 | if (!file) |
2562 | goto put_dentry; | 2560 | goto put_dentry; |
2563 | 2561 | ||
2564 | error = -ENOSPC; | 2562 | error = -ENOSPC; |
2565 | inode = shmem_get_inode(root->d_sb, S_IFREG | S_IRWXUGO, 0); | 2563 | inode = shmem_get_inode(root->d_sb, S_IFREG | S_IRWXUGO, 0); |
2566 | if (!inode) | 2564 | if (!inode) |
2567 | goto close_file; | 2565 | goto close_file; |
2568 | 2566 | ||
2569 | SHMEM_I(inode)->flags = flags & VM_ACCOUNT; | 2567 | SHMEM_I(inode)->flags = flags & VM_ACCOUNT; |
2570 | d_instantiate(dentry, inode); | 2568 | d_instantiate(dentry, inode); |
2571 | inode->i_size = size; | 2569 | inode->i_size = size; |
2572 | inode->i_nlink = 0; /* It is unlinked */ | 2570 | inode->i_nlink = 0; /* It is unlinked */ |
2573 | init_file(file, shm_mnt, dentry, FMODE_WRITE | FMODE_READ, | 2571 | init_file(file, shm_mnt, dentry, FMODE_WRITE | FMODE_READ, |
2574 | &shmem_file_operations); | 2572 | &shmem_file_operations); |
2575 | return file; | 2573 | return file; |
2576 | 2574 | ||
2577 | close_file: | 2575 | close_file: |
2578 | put_filp(file); | 2576 | put_filp(file); |
2579 | put_dentry: | 2577 | put_dentry: |
2580 | dput(dentry); | 2578 | dput(dentry); |
2581 | put_memory: | 2579 | put_memory: |
2582 | shmem_unacct_size(flags, size); | 2580 | shmem_unacct_size(flags, size); |
2583 | return ERR_PTR(error); | 2581 | return ERR_PTR(error); |
2584 | } | 2582 | } |
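/* Editor's note (not part of this commit): a minimal, hypothetical
 * in-kernel caller of shmem_file_setup(), assuming only the signature
 * shown above. The file name and size are illustrative. */
static int shmem_file_setup_example(void)
{
	struct file *file;

	file = shmem_file_setup("example", PAGE_SIZE, 0);
	if (IS_ERR(file))
		return PTR_ERR(file);
	/* ... access the unlinked tmpfs file via file->f_mapping ... */
	fput(file);
	return 0;
}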
2585 | 2583 | ||
2586 | /** | 2584 | /** |
2587 | * shmem_zero_setup - setup a shared anonymous mapping | 2585 | * shmem_zero_setup - setup a shared anonymous mapping |
2588 | * @vma: the vma to be mmapped is prepared by do_mmap_pgoff | 2586 | * @vma: the vma to be mmapped is prepared by do_mmap_pgoff |
2589 | */ | 2587 | */ |
2590 | int shmem_zero_setup(struct vm_area_struct *vma) | 2588 | int shmem_zero_setup(struct vm_area_struct *vma) |
2591 | { | 2589 | { |
2592 | struct file *file; | 2590 | struct file *file; |
2593 | loff_t size = vma->vm_end - vma->vm_start; | 2591 | loff_t size = vma->vm_end - vma->vm_start; |
2594 | 2592 | ||
2595 | file = shmem_file_setup("dev/zero", size, vma->vm_flags); | 2593 | file = shmem_file_setup("dev/zero", size, vma->vm_flags); |
2596 | if (IS_ERR(file)) | 2594 | if (IS_ERR(file)) |
2597 | return PTR_ERR(file); | 2595 | return PTR_ERR(file); |
2598 | 2596 | ||
2599 | if (vma->vm_file) | 2597 | if (vma->vm_file) |
2600 | fput(vma->vm_file); | 2598 | fput(vma->vm_file); |
2601 | vma->vm_file = file; | 2599 | vma->vm_file = file; |
2602 | vma->vm_ops = &shmem_vm_ops; | 2600 | vma->vm_ops = &shmem_vm_ops; |
2603 | return 0; | 2601 | return 0; |
2604 | } | 2602 | } |
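For context, shmem_zero_setup() above is what gives a shared anonymous mapping its tmpfs backing file and vm_ops. A hedged sketch of the call-site pattern follows; the helper name is hypothetical and not part of this commit.

/* Hypothetical sketch: attach tmpfs backing to a shared anonymous vma
 * before it is inserted into the mm. */
static int setup_shared_anon_example(struct vm_area_struct *vma)
{
	if ((vma->vm_flags & VM_SHARED) && !vma->vm_file)
		return shmem_zero_setup(vma);
	return 0;
}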
security/inode.c
1 | /* | 1 | /* |
2 | * inode.c - securityfs | 2 | * inode.c - securityfs |
3 | * | 3 | * |
4 | * Copyright (C) 2005 Greg Kroah-Hartman <gregkh@suse.de> | 4 | * Copyright (C) 2005 Greg Kroah-Hartman <gregkh@suse.de> |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or | 6 | * This program is free software; you can redistribute it and/or |
7 | * modify it under the terms of the GNU General Public License version | 7 | * modify it under the terms of the GNU General Public License version |
8 | * 2 as published by the Free Software Foundation. | 8 | * 2 as published by the Free Software Foundation. |
9 | * | 9 | * |
10 | * Based on fs/debugfs/inode.c which had the following copyright notice: | 10 | * Based on fs/debugfs/inode.c which had the following copyright notice: |
11 | * Copyright (C) 2004 Greg Kroah-Hartman <greg@kroah.com> | 11 | * Copyright (C) 2004 Greg Kroah-Hartman <greg@kroah.com> |
12 | * Copyright (C) 2004 IBM Inc. | 12 | * Copyright (C) 2004 IBM Inc. |
13 | */ | 13 | */ |
14 | 14 | ||
15 | /* #define DEBUG */ | 15 | /* #define DEBUG */ |
16 | #include <linux/module.h> | 16 | #include <linux/module.h> |
17 | #include <linux/fs.h> | 17 | #include <linux/fs.h> |
18 | #include <linux/mount.h> | 18 | #include <linux/mount.h> |
19 | #include <linux/pagemap.h> | 19 | #include <linux/pagemap.h> |
20 | #include <linux/init.h> | 20 | #include <linux/init.h> |
21 | #include <linux/namei.h> | 21 | #include <linux/namei.h> |
22 | #include <linux/security.h> | 22 | #include <linux/security.h> |
23 | 23 | #include <linux/magic.h> | |
24 | #define SECURITYFS_MAGIC 0x73636673 | ||
25 | 24 | ||
26 | static struct vfsmount *mount; | 25 | static struct vfsmount *mount; |
27 | static int mount_count; | 26 | static int mount_count; |
28 | 27 | ||
29 | /* | 28 | /* |
30 | * TODO: | 29 | * TODO: |
31 | * I think I can get rid of these default_file_ops, but not quite sure... | 30 | * I think I can get rid of these default_file_ops, but not quite sure... |
32 | */ | 31 | */ |
33 | static ssize_t default_read_file(struct file *file, char __user *buf, | 32 | static ssize_t default_read_file(struct file *file, char __user *buf, |
34 | size_t count, loff_t *ppos) | 33 | size_t count, loff_t *ppos) |
35 | { | 34 | { |
36 | return 0; | 35 | return 0; |
37 | } | 36 | } |
38 | 37 | ||
39 | static ssize_t default_write_file(struct file *file, const char __user *buf, | 38 | static ssize_t default_write_file(struct file *file, const char __user *buf, |
40 | size_t count, loff_t *ppos) | 39 | size_t count, loff_t *ppos) |
41 | { | 40 | { |
42 | return count; | 41 | return count; |
43 | } | 42 | } |
44 | 43 | ||
45 | static int default_open(struct inode *inode, struct file *file) | 44 | static int default_open(struct inode *inode, struct file *file) |
46 | { | 45 | { |
47 | if (inode->i_private) | 46 | if (inode->i_private) |
48 | file->private_data = inode->i_private; | 47 | file->private_data = inode->i_private; |
49 | 48 | ||
50 | return 0; | 49 | return 0; |
51 | } | 50 | } |
52 | 51 | ||
53 | static const struct file_operations default_file_ops = { | 52 | static const struct file_operations default_file_ops = { |
54 | .read = default_read_file, | 53 | .read = default_read_file, |
55 | .write = default_write_file, | 54 | .write = default_write_file, |
56 | .open = default_open, | 55 | .open = default_open, |
57 | }; | 56 | }; |
58 | 57 | ||
59 | static struct inode *get_inode(struct super_block *sb, int mode, dev_t dev) | 58 | static struct inode *get_inode(struct super_block *sb, int mode, dev_t dev) |
60 | { | 59 | { |
61 | struct inode *inode = new_inode(sb); | 60 | struct inode *inode = new_inode(sb); |
62 | 61 | ||
63 | if (inode) { | 62 | if (inode) { |
64 | inode->i_mode = mode; | 63 | inode->i_mode = mode; |
65 | inode->i_uid = 0; | 64 | inode->i_uid = 0; |
66 | inode->i_gid = 0; | 65 | inode->i_gid = 0; |
67 | inode->i_blocks = 0; | 66 | inode->i_blocks = 0; |
68 | inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; | 67 | inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; |
69 | switch (mode & S_IFMT) { | 68 | switch (mode & S_IFMT) { |
70 | default: | 69 | default: |
71 | init_special_inode(inode, mode, dev); | 70 | init_special_inode(inode, mode, dev); |
72 | break; | 71 | break; |
73 | case S_IFREG: | 72 | case S_IFREG: |
74 | inode->i_fop = &default_file_ops; | 73 | inode->i_fop = &default_file_ops; |
75 | break; | 74 | break; |
76 | case S_IFDIR: | 75 | case S_IFDIR: |
77 | inode->i_op = &simple_dir_inode_operations; | 76 | inode->i_op = &simple_dir_inode_operations; |
78 | inode->i_fop = &simple_dir_operations; | 77 | inode->i_fop = &simple_dir_operations; |
79 | 78 | ||
80 | /* directory inodes start off with i_nlink == 2 (for "." entry) */ | 79 | /* directory inodes start off with i_nlink == 2 (for "." entry) */ |
81 | inc_nlink(inode); | 80 | inc_nlink(inode); |
82 | break; | 81 | break; |
83 | } | 82 | } |
84 | } | 83 | } |
85 | return inode; | 84 | return inode; |
86 | } | 85 | } |
87 | 86 | ||
88 | /* SMP-safe */ | 87 | /* SMP-safe */ |
89 | static int mknod(struct inode *dir, struct dentry *dentry, | 88 | static int mknod(struct inode *dir, struct dentry *dentry, |
90 | int mode, dev_t dev) | 89 | int mode, dev_t dev) |
91 | { | 90 | { |
92 | struct inode *inode; | 91 | struct inode *inode; |
93 | int error = -EPERM; | 92 | int error = -EPERM; |
94 | 93 | ||
95 | if (dentry->d_inode) | 94 | if (dentry->d_inode) |
96 | return -EEXIST; | 95 | return -EEXIST; |
97 | 96 | ||
98 | inode = get_inode(dir->i_sb, mode, dev); | 97 | inode = get_inode(dir->i_sb, mode, dev); |
99 | if (inode) { | 98 | if (inode) { |
100 | d_instantiate(dentry, inode); | 99 | d_instantiate(dentry, inode); |
101 | dget(dentry); | 100 | dget(dentry); |
102 | error = 0; | 101 | error = 0; |
103 | } | 102 | } |
104 | return error; | 103 | return error; |
105 | } | 104 | } |
106 | 105 | ||
107 | static int mkdir(struct inode *dir, struct dentry *dentry, int mode) | 106 | static int mkdir(struct inode *dir, struct dentry *dentry, int mode) |
108 | { | 107 | { |
109 | int res; | 108 | int res; |
110 | 109 | ||
111 | mode = (mode & (S_IRWXUGO | S_ISVTX)) | S_IFDIR; | 110 | mode = (mode & (S_IRWXUGO | S_ISVTX)) | S_IFDIR; |
112 | res = mknod(dir, dentry, mode, 0); | 111 | res = mknod(dir, dentry, mode, 0); |
113 | if (!res) | 112 | if (!res) |
114 | inc_nlink(dir); | 113 | inc_nlink(dir); |
115 | return res; | 114 | return res; |
116 | } | 115 | } |
117 | 116 | ||
118 | static int create(struct inode *dir, struct dentry *dentry, int mode) | 117 | static int create(struct inode *dir, struct dentry *dentry, int mode) |
119 | { | 118 | { |
120 | mode = (mode & S_IALLUGO) | S_IFREG; | 119 | mode = (mode & S_IALLUGO) | S_IFREG; |
121 | return mknod(dir, dentry, mode, 0); | 120 | return mknod(dir, dentry, mode, 0); |
122 | } | 121 | } |
123 | 122 | ||
124 | static inline int positive(struct dentry *dentry) | 123 | static inline int positive(struct dentry *dentry) |
125 | { | 124 | { |
126 | return dentry->d_inode && !d_unhashed(dentry); | 125 | return dentry->d_inode && !d_unhashed(dentry); |
127 | } | 126 | } |
128 | 127 | ||
129 | static int fill_super(struct super_block *sb, void *data, int silent) | 128 | static int fill_super(struct super_block *sb, void *data, int silent) |
130 | { | 129 | { |
131 | static struct tree_descr files[] = {{""}}; | 130 | static struct tree_descr files[] = {{""}}; |
132 | 131 | ||
133 | return simple_fill_super(sb, SECURITYFS_MAGIC, files); | 132 | return simple_fill_super(sb, SECURITYFS_MAGIC, files); |
134 | } | 133 | } |
135 | 134 | ||
136 | static int get_sb(struct file_system_type *fs_type, | 135 | static int get_sb(struct file_system_type *fs_type, |
137 | int flags, const char *dev_name, | 136 | int flags, const char *dev_name, |
138 | void *data, struct vfsmount *mnt) | 137 | void *data, struct vfsmount *mnt) |
139 | { | 138 | { |
140 | return get_sb_single(fs_type, flags, data, fill_super, mnt); | 139 | return get_sb_single(fs_type, flags, data, fill_super, mnt); |
141 | } | 140 | } |
142 | 141 | ||
143 | static struct file_system_type fs_type = { | 142 | static struct file_system_type fs_type = { |
144 | .owner = THIS_MODULE, | 143 | .owner = THIS_MODULE, |
145 | .name = "securityfs", | 144 | .name = "securityfs", |
146 | .get_sb = get_sb, | 145 | .get_sb = get_sb, |
147 | .kill_sb = kill_litter_super, | 146 | .kill_sb = kill_litter_super, |
148 | }; | 147 | }; |
149 | 148 | ||
150 | static int create_by_name(const char *name, mode_t mode, | 149 | static int create_by_name(const char *name, mode_t mode, |
151 | struct dentry *parent, | 150 | struct dentry *parent, |
152 | struct dentry **dentry) | 151 | struct dentry **dentry) |
153 | { | 152 | { |
154 | int error = 0; | 153 | int error = 0; |
155 | 154 | ||
156 | *dentry = NULL; | 155 | *dentry = NULL; |
157 | 156 | ||
158 | /* If the parent is not specified, we create it in the root. | 157 | /* If the parent is not specified, we create it in the root. |
159 | * We need the root dentry to do this, which is in the super | 158 | * We need the root dentry to do this, which is in the super |
160 | * block. A pointer to that is in the struct vfsmount that we | 159 | * block. A pointer to that is in the struct vfsmount that we |
161 | * have around. | 160 | * have around. |
162 | */ | 161 | */ |
163 | if (!parent ) { | 162 | if (!parent ) { |
164 | if (mount && mount->mnt_sb) { | 163 | if (mount && mount->mnt_sb) { |
165 | parent = mount->mnt_sb->s_root; | 164 | parent = mount->mnt_sb->s_root; |
166 | } | 165 | } |
167 | } | 166 | } |
168 | if (!parent) { | 167 | if (!parent) { |
169 | pr_debug("securityfs: Ah! can not find a parent!\n"); | 168 | pr_debug("securityfs: Ah! can not find a parent!\n"); |
170 | return -EFAULT; | 169 | return -EFAULT; |
171 | } | 170 | } |
172 | 171 | ||
173 | mutex_lock(&parent->d_inode->i_mutex); | 172 | mutex_lock(&parent->d_inode->i_mutex); |
174 | *dentry = lookup_one_len(name, parent, strlen(name)); | 173 | *dentry = lookup_one_len(name, parent, strlen(name)); |
175 | if (!IS_ERR(dentry)) { | 174 | if (!IS_ERR(dentry)) { |
176 | if ((mode & S_IFMT) == S_IFDIR) | 175 | if ((mode & S_IFMT) == S_IFDIR) |
177 | error = mkdir(parent->d_inode, *dentry, mode); | 176 | error = mkdir(parent->d_inode, *dentry, mode); |
178 | else | 177 | else |
179 | error = create(parent->d_inode, *dentry, mode); | 178 | error = create(parent->d_inode, *dentry, mode); |
180 | } else | 179 | } else |
181 | error = PTR_ERR(dentry); | 180 | error = PTR_ERR(dentry); |
182 | mutex_unlock(&parent->d_inode->i_mutex); | 181 | mutex_unlock(&parent->d_inode->i_mutex); |
183 | 182 | ||
184 | return error; | 183 | return error; |
185 | } | 184 | } |
186 | 185 | ||
187 | /** | 186 | /** |
188 | * securityfs_create_file - create a file in the securityfs filesystem | 187 | * securityfs_create_file - create a file in the securityfs filesystem |
189 | * | 188 | * |
190 | * @name: a pointer to a string containing the name of the file to create. | 189 | * @name: a pointer to a string containing the name of the file to create. |
191 | * @mode: the permission that the file should have | 190 | * @mode: the permission that the file should have |
192 | * @parent: a pointer to the parent dentry for this file. This should be a | 191 | * @parent: a pointer to the parent dentry for this file. This should be a |
193 | * directory dentry if set. If this parameter is %NULL, then the | 192 | * directory dentry if set. If this parameter is %NULL, then the |
194 | * file will be created in the root of the securityfs filesystem. | 193 | * file will be created in the root of the securityfs filesystem. |
195 | * @data: a pointer to something that the caller will want to get to later | 194 | * @data: a pointer to something that the caller will want to get to later |
196 | * on. The inode.i_private pointer will point to this value on | 195 | * on. The inode.i_private pointer will point to this value on |
197 | * the open() call. | 196 | * the open() call. |
198 | * @fops: a pointer to a struct file_operations that should be used for | 197 | * @fops: a pointer to a struct file_operations that should be used for |
199 | * this file. | 198 | * this file. |
200 | * | 199 | * |
201 | * This is the basic "create a file" function for securityfs. It allows for a | 200 | * This is the basic "create a file" function for securityfs. It allows for a |
202 | * wide range of flexibility in creating a file, or a directory (if you | 201 | * wide range of flexibility in creating a file, or a directory (if you |
203 | * want to create a directory, the securityfs_create_dir() function is | 202 | * want to create a directory, the securityfs_create_dir() function is |
204 | * recommended to be used instead). | 203 | * recommended to be used instead). |
205 | * | 204 | * |
206 | * This function returns a pointer to a dentry if it succeeds. This | 205 | * This function returns a pointer to a dentry if it succeeds. This |
207 | * pointer must be passed to the securityfs_remove() function when the file is | 206 | * pointer must be passed to the securityfs_remove() function when the file is |
208 | * to be removed (no automatic cleanup happens if your module is unloaded, | 207 | * to be removed (no automatic cleanup happens if your module is unloaded, |
209 | * you are responsible here). If an error occurs, %NULL is returned. | 208 | * you are responsible here). If an error occurs, %NULL is returned. |
210 | * | 209 | * |
211 | * If securityfs is not enabled in the kernel, the value %-ENODEV is | 210 | * If securityfs is not enabled in the kernel, the value %-ENODEV is |
212 | * returned. It is not wise to check for this value, but rather, check for | 211 | * returned. It is not wise to check for this value, but rather, check for |
213 | * %NULL or !%NULL instead as to eliminate the need for #ifdef in the calling | 212 | * %NULL or !%NULL instead as to eliminate the need for #ifdef in the calling |
214 | * code. | 213 | * code. |
215 | */ | 214 | */ |
216 | struct dentry *securityfs_create_file(const char *name, mode_t mode, | 215 | struct dentry *securityfs_create_file(const char *name, mode_t mode, |
217 | struct dentry *parent, void *data, | 216 | struct dentry *parent, void *data, |
218 | const struct file_operations *fops) | 217 | const struct file_operations *fops) |
219 | { | 218 | { |
220 | struct dentry *dentry = NULL; | 219 | struct dentry *dentry = NULL; |
221 | int error; | 220 | int error; |
222 | 221 | ||
223 | pr_debug("securityfs: creating file '%s'\n",name); | 222 | pr_debug("securityfs: creating file '%s'\n",name); |
224 | 223 | ||
225 | error = simple_pin_fs(&fs_type, &mount, &mount_count); | 224 | error = simple_pin_fs(&fs_type, &mount, &mount_count); |
226 | if (error) { | 225 | if (error) { |
227 | dentry = ERR_PTR(error); | 226 | dentry = ERR_PTR(error); |
228 | goto exit; | 227 | goto exit; |
229 | } | 228 | } |
230 | 229 | ||
231 | error = create_by_name(name, mode, parent, &dentry); | 230 | error = create_by_name(name, mode, parent, &dentry); |
232 | if (error) { | 231 | if (error) { |
233 | dentry = ERR_PTR(error); | 232 | dentry = ERR_PTR(error); |
234 | simple_release_fs(&mount, &mount_count); | 233 | simple_release_fs(&mount, &mount_count); |
235 | goto exit; | 234 | goto exit; |
236 | } | 235 | } |
237 | 236 | ||
238 | if (dentry->d_inode) { | 237 | if (dentry->d_inode) { |
239 | if (fops) | 238 | if (fops) |
240 | dentry->d_inode->i_fop = fops; | 239 | dentry->d_inode->i_fop = fops; |
241 | if (data) | 240 | if (data) |
242 | dentry->d_inode->i_private = data; | 241 | dentry->d_inode->i_private = data; |
243 | } | 242 | } |
244 | exit: | 243 | exit: |
245 | return dentry; | 244 | return dentry; |
246 | } | 245 | } |
247 | EXPORT_SYMBOL_GPL(securityfs_create_file); | 246 | EXPORT_SYMBOL_GPL(securityfs_create_file); |
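
For context, here is a minimal usage sketch of securityfs_create_file(). It is not part of this commit; the names example_read, example_fops, example_dentry and the file name "example" are invented for illustration. It pins one read-only file into the securityfs root and treats both an error pointer and a NULL dentry as failure, covering what the kernel-doc describes as well as what the function above actually returns. simple_read_from_buffer() does the bounds-checked copy to userspace, so the read handler stays a single statement.

	/*
	 * Hypothetical usage sketch, not part of this commit: a module
	 * exposing one read-only file directly under securityfs.
	 */
	#include <linux/module.h>
	#include <linux/init.h>
	#include <linux/fs.h>
	#include <linux/err.h>
	#include <linux/security.h>

	static struct dentry *example_dentry;

	static ssize_t example_read(struct file *file, char __user *buf,
				    size_t count, loff_t *ppos)
	{
		static const char msg[] = "example\n";

		/* Bounds-checked copy of msg into the user buffer at *ppos. */
		return simple_read_from_buffer(buf, count, ppos, msg,
					       sizeof(msg) - 1);
	}

	static const struct file_operations example_fops = {
		.read = example_read,
	};

	static int __init example_init(void)
	{
		example_dentry = securityfs_create_file("example", S_IRUGO,
							NULL, NULL,
							&example_fops);
		/* Treat an error pointer or a NULL dentry as failure. */
		if (IS_ERR(example_dentry) || !example_dentry)
			return -ENODEV;
		return 0;
	}
	module_init(example_init);
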
248 | 247 | ||
249 | /** | 248 | /** |
250 | * securityfs_create_dir - create a directory in the securityfs filesystem | 249 | * securityfs_create_dir - create a directory in the securityfs filesystem |
251 | * | 250 | * |
252 | * @name: a pointer to a string containing the name of the directory to | 251 | * @name: a pointer to a string containing the name of the directory to |
253 | * create. | 252 | * create. |
254 | * @parent: a pointer to the parent dentry for this file. This should be a | 253 | * @parent: a pointer to the parent dentry for this file. This should be a |
255 | * directory dentry if set. If this parameter is %NULL, then the | 254 | * directory dentry if set. If this parameter is %NULL, then the |
256 | * directory will be created in the root of the securityfs filesystem. | 255 | * directory will be created in the root of the securityfs filesystem. |
257 | * | 256 | * |
258 | * This function creates a directory in securityfs with the given @name. | 257 | * This function creates a directory in securityfs with the given @name. |
259 | * | 258 | * |
260 | * This function returns a pointer to a dentry if it succeeds. This | 259 | * This function returns a pointer to a dentry if it succeeds. This |
261 | * pointer must be passed to the securityfs_remove() function when the file is | 260 | * pointer must be passed to the securityfs_remove() function when the file is |
262 | * to be removed (no automatic cleanup happens if your module is unloaded, | 261 | * to be removed (no automatic cleanup happens if your module is unloaded, |
263 | * you are responsible here). If an error occurs, %NULL will be returned. | 262 | * you are responsible here). If an error occurs, %NULL will be returned. |
264 | * | 263 | * |
265 | * If securityfs is not enabled in the kernel, the value %-ENODEV is | 264 | * If securityfs is not enabled in the kernel, the value %-ENODEV is |
266 | * returned. It is not wise to check for this value, but rather, check for | 265 | * returned. It is not wise to check for this value, but rather, check for |
267 | * %NULL or !%NULL instead, so as to eliminate the need for #ifdef in the calling | 266 | * %NULL or !%NULL instead, so as to eliminate the need for #ifdef in the calling |

268 | * code. | 267 | * code. |
269 | */ | 268 | */ |
270 | struct dentry *securityfs_create_dir(const char *name, struct dentry *parent) | 269 | struct dentry *securityfs_create_dir(const char *name, struct dentry *parent) |
271 | { | 270 | { |
272 | return securityfs_create_file(name, | 271 | return securityfs_create_file(name, |
273 | S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO, | 272 | S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO, |
274 | parent, NULL, NULL); | 273 | parent, NULL, NULL); |
275 | } | 274 | } |
276 | EXPORT_SYMBOL_GPL(securityfs_create_dir); | 275 | EXPORT_SYMBOL_GPL(securityfs_create_dir); |
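
A companion sketch for securityfs_create_dir(), again hypothetical: example_dir, example_status and the names "example" and "status" are invented, and example_fops is the file_operations from the previous fragment. It creates a directory in the securityfs root, places one file inside it, and unwinds the directory if the file cannot be created.

	/* Hypothetical sketch, reusing example_fops from the previous fragment. */
	static struct dentry *example_dir;
	static struct dentry *example_status;

	static int __init example_dir_init(void)
	{
		example_dir = securityfs_create_dir("example", NULL);
		if (IS_ERR(example_dir) || !example_dir)
			return -ENODEV;

		example_status = securityfs_create_file("status", S_IRUGO,
							example_dir, NULL,
							&example_fops);
		if (IS_ERR(example_status) || !example_status) {
			/* Drop the directory so nothing is left behind. */
			securityfs_remove(example_dir);
			return -ENODEV;
		}
		return 0;
	}
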
277 | 276 | ||
278 | /** | 277 | /** |
279 | * securityfs_remove - removes a file or directory from the securityfs filesystem | 278 | * securityfs_remove - removes a file or directory from the securityfs filesystem |
280 | * | 279 | * |
281 | * @dentry: a pointer to the dentry of the file or directory to be removed. | 280 | * @dentry: a pointer to the dentry of the file or directory to be removed. |
282 | * | 281 | * |
283 | * This function removes a file or directory in securityfs that was previously | 282 | * This function removes a file or directory in securityfs that was previously |
284 | * created with a call to another securityfs function (like | 283 | * created with a call to another securityfs function (like |
285 | * securityfs_create_file() or variants thereof.) | 284 | * securityfs_create_file() or variants thereof.) |
286 | * | 285 | * |
287 | * This function is required to be called in order for the file to be | 286 | * This function is required to be called in order for the file to be |
288 | * removed. No automatic cleanup of files will happen when a module is | 287 | * removed. No automatic cleanup of files will happen when a module is |
289 | * removed; you are responsible here. | 288 | * removed; you are responsible here. |
290 | */ | 289 | */ |
291 | void securityfs_remove(struct dentry *dentry) | 290 | void securityfs_remove(struct dentry *dentry) |
292 | { | 291 | { |
293 | struct dentry *parent; | 292 | struct dentry *parent; |
294 | 293 | ||
295 | if (!dentry) | 294 | if (!dentry) |
296 | return; | 295 | return; |
297 | 296 | ||
298 | parent = dentry->d_parent; | 297 | parent = dentry->d_parent; |
299 | if (!parent || !parent->d_inode) | 298 | if (!parent || !parent->d_inode) |
300 | return; | 299 | return; |
301 | 300 | ||
302 | mutex_lock(&parent->d_inode->i_mutex); | 301 | mutex_lock(&parent->d_inode->i_mutex); |
303 | if (positive(dentry)) { | 302 | if (positive(dentry)) { |
304 | if (dentry->d_inode) { | 303 | if (dentry->d_inode) { |
305 | if (S_ISDIR(dentry->d_inode->i_mode)) | 304 | if (S_ISDIR(dentry->d_inode->i_mode)) |
306 | simple_rmdir(parent->d_inode, dentry); | 305 | simple_rmdir(parent->d_inode, dentry); |
307 | else | 306 | else |
308 | simple_unlink(parent->d_inode, dentry); | 307 | simple_unlink(parent->d_inode, dentry); |
309 | dput(dentry); | 308 | dput(dentry); |
310 | } | 309 | } |
311 | } | 310 | } |
312 | mutex_unlock(&parent->d_inode->i_mutex); | 311 | mutex_unlock(&parent->d_inode->i_mutex); |
313 | simple_release_fs(&mount, &mount_count); | 312 | simple_release_fs(&mount, &mount_count); |
314 | } | 313 | } |
315 | EXPORT_SYMBOL_GPL(securityfs_remove); | 314 | EXPORT_SYMBOL_GPL(securityfs_remove); |
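
Finally, since the comments above stress that no automatic cleanup happens, a module using the hypothetical entries from the earlier sketches would tear them down itself on unload, removing the child file before its parent directory. securityfs_remove() simply returns on a NULL dentry, as the check at the top of the function shows, so entries that were never created are harmless here.

	/* Hypothetical cleanup path for the entries created in the sketches above. */
	static void __exit example_exit(void)
	{
		securityfs_remove(example_status);	/* child file first */
		securityfs_remove(example_dir);		/* then its directory */
		securityfs_remove(example_dentry);	/* the standalone file */
	}
	module_exit(example_exit);
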
316 | 315 | ||
317 | static struct kobject *security_kobj; | 316 | static struct kobject *security_kobj; |
318 | 317 | ||
319 | static int __init securityfs_init(void) | 318 | static int __init securityfs_init(void) |
320 | { | 319 | { |
321 | int retval; | 320 | int retval; |
322 | 321 | ||
323 | security_kobj = kobject_create_and_add("security", kernel_kobj); | 322 | security_kobj = kobject_create_and_add("security", kernel_kobj); |
324 | if (!security_kobj) | 323 | if (!security_kobj) |
325 | return -EINVAL; | 324 | return -EINVAL; |
326 | 325 | ||
327 | retval = register_filesystem(&fs_type); | 326 | retval = register_filesystem(&fs_type); |
328 | if (retval) | 327 | if (retval) |
329 | kobject_put(security_kobj); | 328 | kobject_put(security_kobj); |
330 | return retval; | 329 | return retval; |
331 | } | 330 | } |
332 | 331 | ||
333 | core_initcall(securityfs_init); | 332 | core_initcall(securityfs_init); |
334 | MODULE_LICENSE("GPL"); | 333 | MODULE_LICENSE("GPL"); |
335 | 334 | ||
336 | 335 |