Commit b6520c81934848cef126d93951f7ce242e0f656d
Committed by: Al Viro
1 parent: 2b1c6bd77d
Exists in: master and 7 other branches

cleanup d_add_ci

Make sure that comments describe what's going on and not how, and always use
__d_instantiate instead of two separate branches, one with d_instantiate and
one with __d_instantiate.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>

Showing 1 changed file with 18 additions and 30 deletions
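The d_add_ci() hunk itself lies further down fs/dcache.c than the context reproduced below, so the actual diff is not visible in this excerpt. As a rough illustration of the pattern the commit message describes (a single __d_instantiate() call made while dcache_lock is already held, instead of separate d_instantiate() and __d_instantiate() branches), a minimal sketch of the negative-dentry path could look like the helper below; the name d_add_ci_negative_sketch and the exact control flow are assumptions for illustration, not a quotation of the committed code.

/*
 * Illustrative sketch only (assumed shape, not the committed hunk): the
 * negative dentry is instantiated with __d_instantiate() while dcache_lock
 * is already held, and security_d_instantiate() runs once the lock is
 * dropped, mirroring what d_instantiate() does internally.
 */
static struct dentry *d_add_ci_negative_sketch(struct dentry *found,
					       struct inode *inode)
{
	spin_lock(&dcache_lock);
	if (!S_ISDIR(inode->i_mode) || list_empty(&inode->i_dentry)) {
		__d_instantiate(found, inode);	/* lock already held */
		spin_unlock(&dcache_lock);
		security_d_instantiate(found, inode);
		return found;
	}
	spin_unlock(&dcache_lock);
	return NULL;	/* directory with an existing alias: handled elsewhere */
}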
fs/dcache.c
1 | /* | 1 | /* |
2 | * fs/dcache.c | 2 | * fs/dcache.c |
3 | * | 3 | * |
4 | * Complete reimplementation | 4 | * Complete reimplementation |
5 | * (C) 1997 Thomas Schoebel-Theuer, | 5 | * (C) 1997 Thomas Schoebel-Theuer, |
6 | * with heavy changes by Linus Torvalds | 6 | * with heavy changes by Linus Torvalds |
7 | */ | 7 | */ |
8 | 8 | ||
9 | /* | 9 | /* |
10 | * Notes on the allocation strategy: | 10 | * Notes on the allocation strategy: |
11 | * | 11 | * |
12 | * The dcache is a master of the icache - whenever a dcache entry | 12 | * The dcache is a master of the icache - whenever a dcache entry |
13 | * exists, the inode will always exist. "iput()" is done either when | 13 | * exists, the inode will always exist. "iput()" is done either when |
14 | * the dcache entry is deleted or garbage collected. | 14 | * the dcache entry is deleted or garbage collected. |
15 | */ | 15 | */ |
16 | 16 | ||
17 | #include <linux/syscalls.h> | 17 | #include <linux/syscalls.h> |
18 | #include <linux/string.h> | 18 | #include <linux/string.h> |
19 | #include <linux/mm.h> | 19 | #include <linux/mm.h> |
20 | #include <linux/fdtable.h> | 20 | #include <linux/fdtable.h> |
21 | #include <linux/fs.h> | 21 | #include <linux/fs.h> |
22 | #include <linux/fsnotify.h> | 22 | #include <linux/fsnotify.h> |
23 | #include <linux/slab.h> | 23 | #include <linux/slab.h> |
24 | #include <linux/init.h> | 24 | #include <linux/init.h> |
25 | #include <linux/hash.h> | 25 | #include <linux/hash.h> |
26 | #include <linux/cache.h> | 26 | #include <linux/cache.h> |
27 | #include <linux/module.h> | 27 | #include <linux/module.h> |
28 | #include <linux/mount.h> | 28 | #include <linux/mount.h> |
29 | #include <linux/file.h> | 29 | #include <linux/file.h> |
30 | #include <asm/uaccess.h> | 30 | #include <asm/uaccess.h> |
31 | #include <linux/security.h> | 31 | #include <linux/security.h> |
32 | #include <linux/seqlock.h> | 32 | #include <linux/seqlock.h> |
33 | #include <linux/swap.h> | 33 | #include <linux/swap.h> |
34 | #include <linux/bootmem.h> | 34 | #include <linux/bootmem.h> |
35 | #include "internal.h" | 35 | #include "internal.h" |
36 | 36 | ||
37 | int sysctl_vfs_cache_pressure __read_mostly = 100; | 37 | int sysctl_vfs_cache_pressure __read_mostly = 100; |
38 | EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure); | 38 | EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure); |
39 | 39 | ||
40 | __cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_lock); | 40 | __cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_lock); |
41 | __cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock); | 41 | __cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock); |
42 | 42 | ||
43 | EXPORT_SYMBOL(dcache_lock); | 43 | EXPORT_SYMBOL(dcache_lock); |
44 | 44 | ||
45 | static struct kmem_cache *dentry_cache __read_mostly; | 45 | static struct kmem_cache *dentry_cache __read_mostly; |
46 | 46 | ||
47 | #define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname)) | 47 | #define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname)) |
48 | 48 | ||
49 | /* | 49 | /* |
50 | * This is the single most critical data structure when it comes | 50 | * This is the single most critical data structure when it comes |
51 | * to the dcache: the hashtable for lookups. Somebody should try | 51 | * to the dcache: the hashtable for lookups. Somebody should try |
52 | * to make this good - I've just made it work. | 52 | * to make this good - I've just made it work. |
53 | * | 53 | * |
54 | * This hash-function tries to avoid losing too many bits of hash | 54 | * This hash-function tries to avoid losing too many bits of hash |
55 | * information, yet avoid using a prime hash-size or similar. | 55 | * information, yet avoid using a prime hash-size or similar. |
56 | */ | 56 | */ |
57 | #define D_HASHBITS d_hash_shift | 57 | #define D_HASHBITS d_hash_shift |
58 | #define D_HASHMASK d_hash_mask | 58 | #define D_HASHMASK d_hash_mask |
59 | 59 | ||
60 | static unsigned int d_hash_mask __read_mostly; | 60 | static unsigned int d_hash_mask __read_mostly; |
61 | static unsigned int d_hash_shift __read_mostly; | 61 | static unsigned int d_hash_shift __read_mostly; |
62 | static struct hlist_head *dentry_hashtable __read_mostly; | 62 | static struct hlist_head *dentry_hashtable __read_mostly; |
63 | 63 | ||
64 | /* Statistics gathering. */ | 64 | /* Statistics gathering. */ |
65 | struct dentry_stat_t dentry_stat = { | 65 | struct dentry_stat_t dentry_stat = { |
66 | .age_limit = 45, | 66 | .age_limit = 45, |
67 | }; | 67 | }; |
68 | 68 | ||
69 | static void __d_free(struct dentry *dentry) | 69 | static void __d_free(struct dentry *dentry) |
70 | { | 70 | { |
71 | WARN_ON(!list_empty(&dentry->d_alias)); | 71 | WARN_ON(!list_empty(&dentry->d_alias)); |
72 | if (dname_external(dentry)) | 72 | if (dname_external(dentry)) |
73 | kfree(dentry->d_name.name); | 73 | kfree(dentry->d_name.name); |
74 | kmem_cache_free(dentry_cache, dentry); | 74 | kmem_cache_free(dentry_cache, dentry); |
75 | } | 75 | } |
76 | 76 | ||
77 | static void d_callback(struct rcu_head *head) | 77 | static void d_callback(struct rcu_head *head) |
78 | { | 78 | { |
79 | struct dentry * dentry = container_of(head, struct dentry, d_u.d_rcu); | 79 | struct dentry * dentry = container_of(head, struct dentry, d_u.d_rcu); |
80 | __d_free(dentry); | 80 | __d_free(dentry); |
81 | } | 81 | } |
82 | 82 | ||
83 | /* | 83 | /* |
84 | * no dcache_lock, please. The caller must decrement dentry_stat.nr_dentry | 84 | * no dcache_lock, please. The caller must decrement dentry_stat.nr_dentry |
85 | * inside dcache_lock. | 85 | * inside dcache_lock. |
86 | */ | 86 | */ |
87 | static void d_free(struct dentry *dentry) | 87 | static void d_free(struct dentry *dentry) |
88 | { | 88 | { |
89 | if (dentry->d_op && dentry->d_op->d_release) | 89 | if (dentry->d_op && dentry->d_op->d_release) |
90 | dentry->d_op->d_release(dentry); | 90 | dentry->d_op->d_release(dentry); |
91 | /* if dentry was never inserted into hash, immediate free is OK */ | 91 | /* if dentry was never inserted into hash, immediate free is OK */ |
92 | if (hlist_unhashed(&dentry->d_hash)) | 92 | if (hlist_unhashed(&dentry->d_hash)) |
93 | __d_free(dentry); | 93 | __d_free(dentry); |
94 | else | 94 | else |
95 | call_rcu(&dentry->d_u.d_rcu, d_callback); | 95 | call_rcu(&dentry->d_u.d_rcu, d_callback); |
96 | } | 96 | } |
97 | 97 | ||
98 | /* | 98 | /* |
99 | * Release the dentry's inode, using the filesystem | 99 | * Release the dentry's inode, using the filesystem |
100 | * d_iput() operation if defined. | 100 | * d_iput() operation if defined. |
101 | */ | 101 | */ |
102 | static void dentry_iput(struct dentry * dentry) | 102 | static void dentry_iput(struct dentry * dentry) |
103 | __releases(dentry->d_lock) | 103 | __releases(dentry->d_lock) |
104 | __releases(dcache_lock) | 104 | __releases(dcache_lock) |
105 | { | 105 | { |
106 | struct inode *inode = dentry->d_inode; | 106 | struct inode *inode = dentry->d_inode; |
107 | if (inode) { | 107 | if (inode) { |
108 | dentry->d_inode = NULL; | 108 | dentry->d_inode = NULL; |
109 | list_del_init(&dentry->d_alias); | 109 | list_del_init(&dentry->d_alias); |
110 | spin_unlock(&dentry->d_lock); | 110 | spin_unlock(&dentry->d_lock); |
111 | spin_unlock(&dcache_lock); | 111 | spin_unlock(&dcache_lock); |
112 | if (!inode->i_nlink) | 112 | if (!inode->i_nlink) |
113 | fsnotify_inoderemove(inode); | 113 | fsnotify_inoderemove(inode); |
114 | if (dentry->d_op && dentry->d_op->d_iput) | 114 | if (dentry->d_op && dentry->d_op->d_iput) |
115 | dentry->d_op->d_iput(dentry, inode); | 115 | dentry->d_op->d_iput(dentry, inode); |
116 | else | 116 | else |
117 | iput(inode); | 117 | iput(inode); |
118 | } else { | 118 | } else { |
119 | spin_unlock(&dentry->d_lock); | 119 | spin_unlock(&dentry->d_lock); |
120 | spin_unlock(&dcache_lock); | 120 | spin_unlock(&dcache_lock); |
121 | } | 121 | } |
122 | } | 122 | } |
123 | 123 | ||
124 | /* | 124 | /* |
125 | * dentry_lru_(add|add_tail|del|del_init) must be called with dcache_lock held. | 125 | * dentry_lru_(add|add_tail|del|del_init) must be called with dcache_lock held. |
126 | */ | 126 | */ |
127 | static void dentry_lru_add(struct dentry *dentry) | 127 | static void dentry_lru_add(struct dentry *dentry) |
128 | { | 128 | { |
129 | list_add(&dentry->d_lru, &dentry->d_sb->s_dentry_lru); | 129 | list_add(&dentry->d_lru, &dentry->d_sb->s_dentry_lru); |
130 | dentry->d_sb->s_nr_dentry_unused++; | 130 | dentry->d_sb->s_nr_dentry_unused++; |
131 | dentry_stat.nr_unused++; | 131 | dentry_stat.nr_unused++; |
132 | } | 132 | } |
133 | 133 | ||
134 | static void dentry_lru_add_tail(struct dentry *dentry) | 134 | static void dentry_lru_add_tail(struct dentry *dentry) |
135 | { | 135 | { |
136 | list_add_tail(&dentry->d_lru, &dentry->d_sb->s_dentry_lru); | 136 | list_add_tail(&dentry->d_lru, &dentry->d_sb->s_dentry_lru); |
137 | dentry->d_sb->s_nr_dentry_unused++; | 137 | dentry->d_sb->s_nr_dentry_unused++; |
138 | dentry_stat.nr_unused++; | 138 | dentry_stat.nr_unused++; |
139 | } | 139 | } |
140 | 140 | ||
141 | static void dentry_lru_del(struct dentry *dentry) | 141 | static void dentry_lru_del(struct dentry *dentry) |
142 | { | 142 | { |
143 | if (!list_empty(&dentry->d_lru)) { | 143 | if (!list_empty(&dentry->d_lru)) { |
144 | list_del(&dentry->d_lru); | 144 | list_del(&dentry->d_lru); |
145 | dentry->d_sb->s_nr_dentry_unused--; | 145 | dentry->d_sb->s_nr_dentry_unused--; |
146 | dentry_stat.nr_unused--; | 146 | dentry_stat.nr_unused--; |
147 | } | 147 | } |
148 | } | 148 | } |
149 | 149 | ||
150 | static void dentry_lru_del_init(struct dentry *dentry) | 150 | static void dentry_lru_del_init(struct dentry *dentry) |
151 | { | 151 | { |
152 | if (likely(!list_empty(&dentry->d_lru))) { | 152 | if (likely(!list_empty(&dentry->d_lru))) { |
153 | list_del_init(&dentry->d_lru); | 153 | list_del_init(&dentry->d_lru); |
154 | dentry->d_sb->s_nr_dentry_unused--; | 154 | dentry->d_sb->s_nr_dentry_unused--; |
155 | dentry_stat.nr_unused--; | 155 | dentry_stat.nr_unused--; |
156 | } | 156 | } |
157 | } | 157 | } |
158 | 158 | ||
159 | /** | 159 | /** |
160 | * d_kill - kill dentry and return parent | 160 | * d_kill - kill dentry and return parent |
161 | * @dentry: dentry to kill | 161 | * @dentry: dentry to kill |
162 | * | 162 | * |
163 | * The dentry must already be unhashed and removed from the LRU. | 163 | * The dentry must already be unhashed and removed from the LRU. |
164 | * | 164 | * |
165 | * If this is the root of the dentry tree, return NULL. | 165 | * If this is the root of the dentry tree, return NULL. |
166 | */ | 166 | */ |
167 | static struct dentry *d_kill(struct dentry *dentry) | 167 | static struct dentry *d_kill(struct dentry *dentry) |
168 | __releases(dentry->d_lock) | 168 | __releases(dentry->d_lock) |
169 | __releases(dcache_lock) | 169 | __releases(dcache_lock) |
170 | { | 170 | { |
171 | struct dentry *parent; | 171 | struct dentry *parent; |
172 | 172 | ||
173 | list_del(&dentry->d_u.d_child); | 173 | list_del(&dentry->d_u.d_child); |
174 | dentry_stat.nr_dentry--; /* For d_free, below */ | 174 | dentry_stat.nr_dentry--; /* For d_free, below */ |
175 | /*drops the locks, at that point nobody can reach this dentry */ | 175 | /*drops the locks, at that point nobody can reach this dentry */ |
176 | dentry_iput(dentry); | 176 | dentry_iput(dentry); |
177 | if (IS_ROOT(dentry)) | 177 | if (IS_ROOT(dentry)) |
178 | parent = NULL; | 178 | parent = NULL; |
179 | else | 179 | else |
180 | parent = dentry->d_parent; | 180 | parent = dentry->d_parent; |
181 | d_free(dentry); | 181 | d_free(dentry); |
182 | return parent; | 182 | return parent; |
183 | } | 183 | } |
184 | 184 | ||
185 | /* | 185 | /* |
186 | * This is dput | 186 | * This is dput |
187 | * | 187 | * |
188 | * This is complicated by the fact that we do not want to put | 188 | * This is complicated by the fact that we do not want to put |
189 | * dentries that are no longer on any hash chain on the unused | 189 | * dentries that are no longer on any hash chain on the unused |
190 | * list: we'd much rather just get rid of them immediately. | 190 | * list: we'd much rather just get rid of them immediately. |
191 | * | 191 | * |
192 | * However, that implies that we have to traverse the dentry | 192 | * However, that implies that we have to traverse the dentry |
193 | * tree upwards to the parents which might _also_ now be | 193 | * tree upwards to the parents which might _also_ now be |
194 | * scheduled for deletion (it may have been only waiting for | 194 | * scheduled for deletion (it may have been only waiting for |
195 | * its last child to go away). | 195 | * its last child to go away). |
196 | * | 196 | * |
197 | * This tail recursion is done by hand as we don't want to depend | 197 | * This tail recursion is done by hand as we don't want to depend |
198 | * on the compiler to always get this right (gcc generally doesn't). | 198 | * on the compiler to always get this right (gcc generally doesn't). |
199 | * Real recursion would eat up our stack space. | 199 | * Real recursion would eat up our stack space. |
200 | */ | 200 | */ |
201 | 201 | ||
202 | /* | 202 | /* |
203 | * dput - release a dentry | 203 | * dput - release a dentry |
204 | * @dentry: dentry to release | 204 | * @dentry: dentry to release |
205 | * | 205 | * |
206 | * Release a dentry. This will drop the usage count and if appropriate | 206 | * Release a dentry. This will drop the usage count and if appropriate |
207 | * call the dentry unlink method as well as removing it from the queues and | 207 | * call the dentry unlink method as well as removing it from the queues and |
208 | * releasing its resources. If the parent dentries were scheduled for release | 208 | * releasing its resources. If the parent dentries were scheduled for release |
209 | * they too may now get deleted. | 209 | * they too may now get deleted. |
210 | * | 210 | * |
211 | * no dcache lock, please. | 211 | * no dcache lock, please. |
212 | */ | 212 | */ |
213 | 213 | ||
214 | void dput(struct dentry *dentry) | 214 | void dput(struct dentry *dentry) |
215 | { | 215 | { |
216 | if (!dentry) | 216 | if (!dentry) |
217 | return; | 217 | return; |
218 | 218 | ||
219 | repeat: | 219 | repeat: |
220 | if (atomic_read(&dentry->d_count) == 1) | 220 | if (atomic_read(&dentry->d_count) == 1) |
221 | might_sleep(); | 221 | might_sleep(); |
222 | if (!atomic_dec_and_lock(&dentry->d_count, &dcache_lock)) | 222 | if (!atomic_dec_and_lock(&dentry->d_count, &dcache_lock)) |
223 | return; | 223 | return; |
224 | 224 | ||
225 | spin_lock(&dentry->d_lock); | 225 | spin_lock(&dentry->d_lock); |
226 | if (atomic_read(&dentry->d_count)) { | 226 | if (atomic_read(&dentry->d_count)) { |
227 | spin_unlock(&dentry->d_lock); | 227 | spin_unlock(&dentry->d_lock); |
228 | spin_unlock(&dcache_lock); | 228 | spin_unlock(&dcache_lock); |
229 | return; | 229 | return; |
230 | } | 230 | } |
231 | 231 | ||
232 | /* | 232 | /* |
233 | * AV: ->d_delete() is _NOT_ allowed to block now. | 233 | * AV: ->d_delete() is _NOT_ allowed to block now. |
234 | */ | 234 | */ |
235 | if (dentry->d_op && dentry->d_op->d_delete) { | 235 | if (dentry->d_op && dentry->d_op->d_delete) { |
236 | if (dentry->d_op->d_delete(dentry)) | 236 | if (dentry->d_op->d_delete(dentry)) |
237 | goto unhash_it; | 237 | goto unhash_it; |
238 | } | 238 | } |
239 | /* Unreachable? Get rid of it */ | 239 | /* Unreachable? Get rid of it */ |
240 | if (d_unhashed(dentry)) | 240 | if (d_unhashed(dentry)) |
241 | goto kill_it; | 241 | goto kill_it; |
242 | if (list_empty(&dentry->d_lru)) { | 242 | if (list_empty(&dentry->d_lru)) { |
243 | dentry->d_flags |= DCACHE_REFERENCED; | 243 | dentry->d_flags |= DCACHE_REFERENCED; |
244 | dentry_lru_add(dentry); | 244 | dentry_lru_add(dentry); |
245 | } | 245 | } |
246 | spin_unlock(&dentry->d_lock); | 246 | spin_unlock(&dentry->d_lock); |
247 | spin_unlock(&dcache_lock); | 247 | spin_unlock(&dcache_lock); |
248 | return; | 248 | return; |
249 | 249 | ||
250 | unhash_it: | 250 | unhash_it: |
251 | __d_drop(dentry); | 251 | __d_drop(dentry); |
252 | kill_it: | 252 | kill_it: |
253 | /* if dentry was on the d_lru list delete it from there */ | 253 | /* if dentry was on the d_lru list delete it from there */ |
254 | dentry_lru_del(dentry); | 254 | dentry_lru_del(dentry); |
255 | dentry = d_kill(dentry); | 255 | dentry = d_kill(dentry); |
256 | if (dentry) | 256 | if (dentry) |
257 | goto repeat; | 257 | goto repeat; |
258 | } | 258 | } |
259 | 259 | ||
260 | /** | 260 | /** |
261 | * d_invalidate - invalidate a dentry | 261 | * d_invalidate - invalidate a dentry |
262 | * @dentry: dentry to invalidate | 262 | * @dentry: dentry to invalidate |
263 | * | 263 | * |
264 | * Try to invalidate the dentry if it turns out to be | 264 | * Try to invalidate the dentry if it turns out to be |
265 | * possible. If there are other dentries that can be | 265 | * possible. If there are other dentries that can be |
266 | * reached through this one we can't delete it and we | 266 | * reached through this one we can't delete it and we |
267 | * return -EBUSY. On success we return 0. | 267 | * return -EBUSY. On success we return 0. |
268 | * | 268 | * |
269 | * no dcache lock. | 269 | * no dcache lock. |
270 | */ | 270 | */ |
271 | 271 | ||
272 | int d_invalidate(struct dentry * dentry) | 272 | int d_invalidate(struct dentry * dentry) |
273 | { | 273 | { |
274 | /* | 274 | /* |
275 | * If it's already been dropped, return OK. | 275 | * If it's already been dropped, return OK. |
276 | */ | 276 | */ |
277 | spin_lock(&dcache_lock); | 277 | spin_lock(&dcache_lock); |
278 | if (d_unhashed(dentry)) { | 278 | if (d_unhashed(dentry)) { |
279 | spin_unlock(&dcache_lock); | 279 | spin_unlock(&dcache_lock); |
280 | return 0; | 280 | return 0; |
281 | } | 281 | } |
282 | /* | 282 | /* |
283 | * Check whether to do a partial shrink_dcache | 283 | * Check whether to do a partial shrink_dcache |
284 | * to get rid of unused child entries. | 284 | * to get rid of unused child entries. |
285 | */ | 285 | */ |
286 | if (!list_empty(&dentry->d_subdirs)) { | 286 | if (!list_empty(&dentry->d_subdirs)) { |
287 | spin_unlock(&dcache_lock); | 287 | spin_unlock(&dcache_lock); |
288 | shrink_dcache_parent(dentry); | 288 | shrink_dcache_parent(dentry); |
289 | spin_lock(&dcache_lock); | 289 | spin_lock(&dcache_lock); |
290 | } | 290 | } |
291 | 291 | ||
292 | /* | 292 | /* |
293 | * Somebody else still using it? | 293 | * Somebody else still using it? |
294 | * | 294 | * |
295 | * If it's a directory, we can't drop it | 295 | * If it's a directory, we can't drop it |
296 | * for fear of somebody re-populating it | 296 | * for fear of somebody re-populating it |
297 | * with children (even though dropping it | 297 | * with children (even though dropping it |
298 | * would make it unreachable from the root, | 298 | * would make it unreachable from the root, |
299 | * we might still populate it if it was a | 299 | * we might still populate it if it was a |
300 | * working directory or similar). | 300 | * working directory or similar). |
301 | */ | 301 | */ |
302 | spin_lock(&dentry->d_lock); | 302 | spin_lock(&dentry->d_lock); |
303 | if (atomic_read(&dentry->d_count) > 1) { | 303 | if (atomic_read(&dentry->d_count) > 1) { |
304 | if (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode)) { | 304 | if (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode)) { |
305 | spin_unlock(&dentry->d_lock); | 305 | spin_unlock(&dentry->d_lock); |
306 | spin_unlock(&dcache_lock); | 306 | spin_unlock(&dcache_lock); |
307 | return -EBUSY; | 307 | return -EBUSY; |
308 | } | 308 | } |
309 | } | 309 | } |
310 | 310 | ||
311 | __d_drop(dentry); | 311 | __d_drop(dentry); |
312 | spin_unlock(&dentry->d_lock); | 312 | spin_unlock(&dentry->d_lock); |
313 | spin_unlock(&dcache_lock); | 313 | spin_unlock(&dcache_lock); |
314 | return 0; | 314 | return 0; |
315 | } | 315 | } |
316 | 316 | ||
317 | /* This should be called _only_ with dcache_lock held */ | 317 | /* This should be called _only_ with dcache_lock held */ |
318 | 318 | ||
319 | static inline struct dentry * __dget_locked(struct dentry *dentry) | 319 | static inline struct dentry * __dget_locked(struct dentry *dentry) |
320 | { | 320 | { |
321 | atomic_inc(&dentry->d_count); | 321 | atomic_inc(&dentry->d_count); |
322 | dentry_lru_del_init(dentry); | 322 | dentry_lru_del_init(dentry); |
323 | return dentry; | 323 | return dentry; |
324 | } | 324 | } |
325 | 325 | ||
326 | struct dentry * dget_locked(struct dentry *dentry) | 326 | struct dentry * dget_locked(struct dentry *dentry) |
327 | { | 327 | { |
328 | return __dget_locked(dentry); | 328 | return __dget_locked(dentry); |
329 | } | 329 | } |
330 | 330 | ||
331 | /** | 331 | /** |
332 | * d_find_alias - grab a hashed alias of inode | 332 | * d_find_alias - grab a hashed alias of inode |
333 | * @inode: inode in question | 333 | * @inode: inode in question |
334 | * @want_discon: flag, used by d_splice_alias, to request | 334 | * @want_discon: flag, used by d_splice_alias, to request |
335 | * that only a DISCONNECTED alias be returned. | 335 | * that only a DISCONNECTED alias be returned. |
336 | * | 336 | * |
337 | * If inode has a hashed alias, or is a directory and has any alias, | 337 | * If inode has a hashed alias, or is a directory and has any alias, |
338 | * acquire the reference to alias and return it. Otherwise return NULL. | 338 | * acquire the reference to alias and return it. Otherwise return NULL. |
339 | * Notice that if inode is a directory there can be only one alias and | 339 | * Notice that if inode is a directory there can be only one alias and |
340 | * it can be unhashed only if it has no children, or if it is the root | 340 | * it can be unhashed only if it has no children, or if it is the root |
341 | * of a filesystem. | 341 | * of a filesystem. |
342 | * | 342 | * |
343 | * If the inode has an IS_ROOT, DCACHE_DISCONNECTED alias, then prefer | 343 | * If the inode has an IS_ROOT, DCACHE_DISCONNECTED alias, then prefer |
344 | * any other hashed alias over that one unless @want_discon is set, | 344 | * any other hashed alias over that one unless @want_discon is set, |
345 | * in which case only return an IS_ROOT, DCACHE_DISCONNECTED alias. | 345 | * in which case only return an IS_ROOT, DCACHE_DISCONNECTED alias. |
346 | */ | 346 | */ |
347 | 347 | ||
348 | static struct dentry * __d_find_alias(struct inode *inode, int want_discon) | 348 | static struct dentry * __d_find_alias(struct inode *inode, int want_discon) |
349 | { | 349 | { |
350 | struct list_head *head, *next, *tmp; | 350 | struct list_head *head, *next, *tmp; |
351 | struct dentry *alias, *discon_alias=NULL; | 351 | struct dentry *alias, *discon_alias=NULL; |
352 | 352 | ||
353 | head = &inode->i_dentry; | 353 | head = &inode->i_dentry; |
354 | next = inode->i_dentry.next; | 354 | next = inode->i_dentry.next; |
355 | while (next != head) { | 355 | while (next != head) { |
356 | tmp = next; | 356 | tmp = next; |
357 | next = tmp->next; | 357 | next = tmp->next; |
358 | prefetch(next); | 358 | prefetch(next); |
359 | alias = list_entry(tmp, struct dentry, d_alias); | 359 | alias = list_entry(tmp, struct dentry, d_alias); |
360 | if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) { | 360 | if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) { |
361 | if (IS_ROOT(alias) && | 361 | if (IS_ROOT(alias) && |
362 | (alias->d_flags & DCACHE_DISCONNECTED)) | 362 | (alias->d_flags & DCACHE_DISCONNECTED)) |
363 | discon_alias = alias; | 363 | discon_alias = alias; |
364 | else if (!want_discon) { | 364 | else if (!want_discon) { |
365 | __dget_locked(alias); | 365 | __dget_locked(alias); |
366 | return alias; | 366 | return alias; |
367 | } | 367 | } |
368 | } | 368 | } |
369 | } | 369 | } |
370 | if (discon_alias) | 370 | if (discon_alias) |
371 | __dget_locked(discon_alias); | 371 | __dget_locked(discon_alias); |
372 | return discon_alias; | 372 | return discon_alias; |
373 | } | 373 | } |
374 | 374 | ||
375 | struct dentry * d_find_alias(struct inode *inode) | 375 | struct dentry * d_find_alias(struct inode *inode) |
376 | { | 376 | { |
377 | struct dentry *de = NULL; | 377 | struct dentry *de = NULL; |
378 | 378 | ||
379 | if (!list_empty(&inode->i_dentry)) { | 379 | if (!list_empty(&inode->i_dentry)) { |
380 | spin_lock(&dcache_lock); | 380 | spin_lock(&dcache_lock); |
381 | de = __d_find_alias(inode, 0); | 381 | de = __d_find_alias(inode, 0); |
382 | spin_unlock(&dcache_lock); | 382 | spin_unlock(&dcache_lock); |
383 | } | 383 | } |
384 | return de; | 384 | return de; |
385 | } | 385 | } |
386 | 386 | ||
387 | /* | 387 | /* |
388 | * Try to kill dentries associated with this inode. | 388 | * Try to kill dentries associated with this inode. |
389 | * WARNING: you must own a reference to inode. | 389 | * WARNING: you must own a reference to inode. |
390 | */ | 390 | */ |
391 | void d_prune_aliases(struct inode *inode) | 391 | void d_prune_aliases(struct inode *inode) |
392 | { | 392 | { |
393 | struct dentry *dentry; | 393 | struct dentry *dentry; |
394 | restart: | 394 | restart: |
395 | spin_lock(&dcache_lock); | 395 | spin_lock(&dcache_lock); |
396 | list_for_each_entry(dentry, &inode->i_dentry, d_alias) { | 396 | list_for_each_entry(dentry, &inode->i_dentry, d_alias) { |
397 | spin_lock(&dentry->d_lock); | 397 | spin_lock(&dentry->d_lock); |
398 | if (!atomic_read(&dentry->d_count)) { | 398 | if (!atomic_read(&dentry->d_count)) { |
399 | __dget_locked(dentry); | 399 | __dget_locked(dentry); |
400 | __d_drop(dentry); | 400 | __d_drop(dentry); |
401 | spin_unlock(&dentry->d_lock); | 401 | spin_unlock(&dentry->d_lock); |
402 | spin_unlock(&dcache_lock); | 402 | spin_unlock(&dcache_lock); |
403 | dput(dentry); | 403 | dput(dentry); |
404 | goto restart; | 404 | goto restart; |
405 | } | 405 | } |
406 | spin_unlock(&dentry->d_lock); | 406 | spin_unlock(&dentry->d_lock); |
407 | } | 407 | } |
408 | spin_unlock(&dcache_lock); | 408 | spin_unlock(&dcache_lock); |
409 | } | 409 | } |
410 | 410 | ||
411 | /* | 411 | /* |
412 | * Throw away a dentry - free the inode, dput the parent. This requires that | 412 | * Throw away a dentry - free the inode, dput the parent. This requires that |
413 | * the LRU list has already been removed. | 413 | * the LRU list has already been removed. |
414 | * | 414 | * |
415 | * Try to prune ancestors as well. This is necessary to prevent | 415 | * Try to prune ancestors as well. This is necessary to prevent |
416 | * quadratic behavior of shrink_dcache_parent(), but is also expected | 416 | * quadratic behavior of shrink_dcache_parent(), but is also expected |
417 | * to be beneficial in reducing dentry cache fragmentation. | 417 | * to be beneficial in reducing dentry cache fragmentation. |
418 | */ | 418 | */ |
419 | static void prune_one_dentry(struct dentry * dentry) | 419 | static void prune_one_dentry(struct dentry * dentry) |
420 | __releases(dentry->d_lock) | 420 | __releases(dentry->d_lock) |
421 | __releases(dcache_lock) | 421 | __releases(dcache_lock) |
422 | __acquires(dcache_lock) | 422 | __acquires(dcache_lock) |
423 | { | 423 | { |
424 | __d_drop(dentry); | 424 | __d_drop(dentry); |
425 | dentry = d_kill(dentry); | 425 | dentry = d_kill(dentry); |
426 | 426 | ||
427 | /* | 427 | /* |
428 | * Prune ancestors. Locking is simpler than in dput(), | 428 | * Prune ancestors. Locking is simpler than in dput(), |
429 | * because dcache_lock needs to be taken anyway. | 429 | * because dcache_lock needs to be taken anyway. |
430 | */ | 430 | */ |
431 | spin_lock(&dcache_lock); | 431 | spin_lock(&dcache_lock); |
432 | while (dentry) { | 432 | while (dentry) { |
433 | if (!atomic_dec_and_lock(&dentry->d_count, &dentry->d_lock)) | 433 | if (!atomic_dec_and_lock(&dentry->d_count, &dentry->d_lock)) |
434 | return; | 434 | return; |
435 | 435 | ||
436 | if (dentry->d_op && dentry->d_op->d_delete) | 436 | if (dentry->d_op && dentry->d_op->d_delete) |
437 | dentry->d_op->d_delete(dentry); | 437 | dentry->d_op->d_delete(dentry); |
438 | dentry_lru_del_init(dentry); | 438 | dentry_lru_del_init(dentry); |
439 | __d_drop(dentry); | 439 | __d_drop(dentry); |
440 | dentry = d_kill(dentry); | 440 | dentry = d_kill(dentry); |
441 | spin_lock(&dcache_lock); | 441 | spin_lock(&dcache_lock); |
442 | } | 442 | } |
443 | } | 443 | } |
444 | 444 | ||
445 | /* | 445 | /* |
446 | * Shrink the dentry LRU on a given superblock. | 446 | * Shrink the dentry LRU on a given superblock. |
447 | * @sb : superblock to shrink dentry LRU. | 447 | * @sb : superblock to shrink dentry LRU. |
448 | * @count: If count is NULL, we prune all dentries on superblock. | 448 | * @count: If count is NULL, we prune all dentries on superblock. |
449 | * @flags: If flags is non-zero, we need to do special processing based on | 449 | * @flags: If flags is non-zero, we need to do special processing based on |
450 | * which flags are set. This means we don't need to maintain multiple | 450 | * which flags are set. This means we don't need to maintain multiple |
451 | * similar copies of this loop. | 451 | * similar copies of this loop. |
452 | */ | 452 | */ |
453 | static void __shrink_dcache_sb(struct super_block *sb, int *count, int flags) | 453 | static void __shrink_dcache_sb(struct super_block *sb, int *count, int flags) |
454 | { | 454 | { |
455 | LIST_HEAD(referenced); | 455 | LIST_HEAD(referenced); |
456 | LIST_HEAD(tmp); | 456 | LIST_HEAD(tmp); |
457 | struct dentry *dentry; | 457 | struct dentry *dentry; |
458 | int cnt = 0; | 458 | int cnt = 0; |
459 | 459 | ||
460 | BUG_ON(!sb); | 460 | BUG_ON(!sb); |
461 | BUG_ON((flags & DCACHE_REFERENCED) && count == NULL); | 461 | BUG_ON((flags & DCACHE_REFERENCED) && count == NULL); |
462 | spin_lock(&dcache_lock); | 462 | spin_lock(&dcache_lock); |
463 | if (count != NULL) | 463 | if (count != NULL) |
464 | /* called from prune_dcache() and shrink_dcache_parent() */ | 464 | /* called from prune_dcache() and shrink_dcache_parent() */ |
465 | cnt = *count; | 465 | cnt = *count; |
466 | restart: | 466 | restart: |
467 | if (count == NULL) | 467 | if (count == NULL) |
468 | list_splice_init(&sb->s_dentry_lru, &tmp); | 468 | list_splice_init(&sb->s_dentry_lru, &tmp); |
469 | else { | 469 | else { |
470 | while (!list_empty(&sb->s_dentry_lru)) { | 470 | while (!list_empty(&sb->s_dentry_lru)) { |
471 | dentry = list_entry(sb->s_dentry_lru.prev, | 471 | dentry = list_entry(sb->s_dentry_lru.prev, |
472 | struct dentry, d_lru); | 472 | struct dentry, d_lru); |
473 | BUG_ON(dentry->d_sb != sb); | 473 | BUG_ON(dentry->d_sb != sb); |
474 | 474 | ||
475 | spin_lock(&dentry->d_lock); | 475 | spin_lock(&dentry->d_lock); |
476 | /* | 476 | /* |
477 | * If we are honouring the DCACHE_REFERENCED flag and | 477 | * If we are honouring the DCACHE_REFERENCED flag and |
478 | * the dentry has this flag set, don't free it. Clear | 478 | * the dentry has this flag set, don't free it. Clear |
479 | * the flag and put it back on the LRU. | 479 | * the flag and put it back on the LRU. |
480 | */ | 480 | */ |
481 | if ((flags & DCACHE_REFERENCED) | 481 | if ((flags & DCACHE_REFERENCED) |
482 | && (dentry->d_flags & DCACHE_REFERENCED)) { | 482 | && (dentry->d_flags & DCACHE_REFERENCED)) { |
483 | dentry->d_flags &= ~DCACHE_REFERENCED; | 483 | dentry->d_flags &= ~DCACHE_REFERENCED; |
484 | list_move_tail(&dentry->d_lru, &referenced); | 484 | list_move_tail(&dentry->d_lru, &referenced); |
485 | spin_unlock(&dentry->d_lock); | 485 | spin_unlock(&dentry->d_lock); |
486 | } else { | 486 | } else { |
487 | list_move_tail(&dentry->d_lru, &tmp); | 487 | list_move_tail(&dentry->d_lru, &tmp); |
488 | spin_unlock(&dentry->d_lock); | 488 | spin_unlock(&dentry->d_lock); |
489 | cnt--; | 489 | cnt--; |
490 | if (!cnt) | 490 | if (!cnt) |
491 | break; | 491 | break; |
492 | } | 492 | } |
493 | cond_resched_lock(&dcache_lock); | 493 | cond_resched_lock(&dcache_lock); |
494 | } | 494 | } |
495 | } | 495 | } |
496 | while (!list_empty(&tmp)) { | 496 | while (!list_empty(&tmp)) { |
497 | dentry = list_entry(tmp.prev, struct dentry, d_lru); | 497 | dentry = list_entry(tmp.prev, struct dentry, d_lru); |
498 | dentry_lru_del_init(dentry); | 498 | dentry_lru_del_init(dentry); |
499 | spin_lock(&dentry->d_lock); | 499 | spin_lock(&dentry->d_lock); |
500 | /* | 500 | /* |
501 | * We found an inuse dentry which was not removed from | 501 | * We found an inuse dentry which was not removed from |
502 | * the LRU because of laziness during lookup. Do not free | 502 | * the LRU because of laziness during lookup. Do not free |
503 | * it - just keep it off the LRU list. | 503 | * it - just keep it off the LRU list. |
504 | */ | 504 | */ |
505 | if (atomic_read(&dentry->d_count)) { | 505 | if (atomic_read(&dentry->d_count)) { |
506 | spin_unlock(&dentry->d_lock); | 506 | spin_unlock(&dentry->d_lock); |
507 | continue; | 507 | continue; |
508 | } | 508 | } |
509 | prune_one_dentry(dentry); | 509 | prune_one_dentry(dentry); |
510 | /* dentry->d_lock was dropped in prune_one_dentry() */ | 510 | /* dentry->d_lock was dropped in prune_one_dentry() */ |
511 | cond_resched_lock(&dcache_lock); | 511 | cond_resched_lock(&dcache_lock); |
512 | } | 512 | } |
513 | if (count == NULL && !list_empty(&sb->s_dentry_lru)) | 513 | if (count == NULL && !list_empty(&sb->s_dentry_lru)) |
514 | goto restart; | 514 | goto restart; |
515 | if (count != NULL) | 515 | if (count != NULL) |
516 | *count = cnt; | 516 | *count = cnt; |
517 | if (!list_empty(&referenced)) | 517 | if (!list_empty(&referenced)) |
518 | list_splice(&referenced, &sb->s_dentry_lru); | 518 | list_splice(&referenced, &sb->s_dentry_lru); |
519 | spin_unlock(&dcache_lock); | 519 | spin_unlock(&dcache_lock); |
520 | } | 520 | } |
521 | 521 | ||
522 | /** | 522 | /** |
523 | * prune_dcache - shrink the dcache | 523 | * prune_dcache - shrink the dcache |
524 | * @count: number of entries to try to free | 524 | * @count: number of entries to try to free |
525 | * | 525 | * |
526 | * Shrink the dcache. This is done when we need more memory, or simply when we | 526 | * Shrink the dcache. This is done when we need more memory, or simply when we |
527 | * need to unmount something (at which point we need to unuse all dentries). | 527 | * need to unmount something (at which point we need to unuse all dentries). |
528 | * | 528 | * |
529 | * This function may fail to free any resources if all the dentries are in use. | 529 | * This function may fail to free any resources if all the dentries are in use. |
530 | */ | 530 | */ |
531 | static void prune_dcache(int count) | 531 | static void prune_dcache(int count) |
532 | { | 532 | { |
533 | struct super_block *sb; | 533 | struct super_block *sb; |
534 | int w_count; | 534 | int w_count; |
535 | int unused = dentry_stat.nr_unused; | 535 | int unused = dentry_stat.nr_unused; |
536 | int prune_ratio; | 536 | int prune_ratio; |
537 | int pruned; | 537 | int pruned; |
538 | 538 | ||
539 | if (unused == 0 || count == 0) | 539 | if (unused == 0 || count == 0) |
540 | return; | 540 | return; |
541 | spin_lock(&dcache_lock); | 541 | spin_lock(&dcache_lock); |
542 | restart: | 542 | restart: |
543 | if (count >= unused) | 543 | if (count >= unused) |
544 | prune_ratio = 1; | 544 | prune_ratio = 1; |
545 | else | 545 | else |
546 | prune_ratio = unused / count; | 546 | prune_ratio = unused / count; |
547 | spin_lock(&sb_lock); | 547 | spin_lock(&sb_lock); |
548 | list_for_each_entry(sb, &super_blocks, s_list) { | 548 | list_for_each_entry(sb, &super_blocks, s_list) { |
549 | if (sb->s_nr_dentry_unused == 0) | 549 | if (sb->s_nr_dentry_unused == 0) |
550 | continue; | 550 | continue; |
551 | sb->s_count++; | 551 | sb->s_count++; |
552 | /* Now, we reclaim unused dentrins with fairness. | 552 | /* Now, we reclaim unused dentrins with fairness. |
553 | * We reclaim them same percentage from each superblock. | 553 | * We reclaim them same percentage from each superblock. |
554 | * We calculate number of dentries to scan on this sb | 554 | * We calculate number of dentries to scan on this sb |
555 | * as follows, but the implementation is arranged to avoid | 555 | * as follows, but the implementation is arranged to avoid |
556 | * overflows: | 556 | * overflows: |
557 | * number of dentries to scan on this sb = | 557 | * number of dentries to scan on this sb = |
558 | * count * (number of dentries on this sb / | 558 | * count * (number of dentries on this sb / |
559 | * number of dentries in the machine) | 559 | * number of dentries in the machine) |
560 | */ | 560 | */ |
561 | spin_unlock(&sb_lock); | 561 | spin_unlock(&sb_lock); |
562 | if (prune_ratio != 1) | 562 | if (prune_ratio != 1) |
563 | w_count = (sb->s_nr_dentry_unused / prune_ratio) + 1; | 563 | w_count = (sb->s_nr_dentry_unused / prune_ratio) + 1; |
564 | else | 564 | else |
565 | w_count = sb->s_nr_dentry_unused; | 565 | w_count = sb->s_nr_dentry_unused; |
566 | pruned = w_count; | 566 | pruned = w_count; |
567 | /* | 567 | /* |
568 | * We need to be sure this filesystem isn't being unmounted, | 568 | * We need to be sure this filesystem isn't being unmounted, |
569 | * otherwise we could race with generic_shutdown_super(), and | 569 | * otherwise we could race with generic_shutdown_super(), and |
570 | * end up holding a reference to an inode while the filesystem | 570 | * end up holding a reference to an inode while the filesystem |
571 | * is unmounted. So we try to get s_umount, and make sure | 571 | * is unmounted. So we try to get s_umount, and make sure |
572 | * s_root isn't NULL. | 572 | * s_root isn't NULL. |
573 | */ | 573 | */ |
574 | if (down_read_trylock(&sb->s_umount)) { | 574 | if (down_read_trylock(&sb->s_umount)) { |
575 | if ((sb->s_root != NULL) && | 575 | if ((sb->s_root != NULL) && |
576 | (!list_empty(&sb->s_dentry_lru))) { | 576 | (!list_empty(&sb->s_dentry_lru))) { |
577 | spin_unlock(&dcache_lock); | 577 | spin_unlock(&dcache_lock); |
578 | __shrink_dcache_sb(sb, &w_count, | 578 | __shrink_dcache_sb(sb, &w_count, |
579 | DCACHE_REFERENCED); | 579 | DCACHE_REFERENCED); |
580 | pruned -= w_count; | 580 | pruned -= w_count; |
581 | spin_lock(&dcache_lock); | 581 | spin_lock(&dcache_lock); |
582 | } | 582 | } |
583 | up_read(&sb->s_umount); | 583 | up_read(&sb->s_umount); |
584 | } | 584 | } |
585 | spin_lock(&sb_lock); | 585 | spin_lock(&sb_lock); |
586 | count -= pruned; | 586 | count -= pruned; |
587 | /* | 587 | /* |
588 | * restart only when sb is no longer on the list and | 588 | * restart only when sb is no longer on the list and |
589 | * we have more work to do. | 589 | * we have more work to do. |
590 | */ | 590 | */ |
591 | if (__put_super_and_need_restart(sb) && count > 0) { | 591 | if (__put_super_and_need_restart(sb) && count > 0) { |
592 | spin_unlock(&sb_lock); | 592 | spin_unlock(&sb_lock); |
593 | goto restart; | 593 | goto restart; |
594 | } | 594 | } |
595 | } | 595 | } |
596 | spin_unlock(&sb_lock); | 596 | spin_unlock(&sb_lock); |
597 | spin_unlock(&dcache_lock); | 597 | spin_unlock(&dcache_lock); |
598 | } | 598 | } |
599 | 599 | ||
600 | /** | 600 | /** |
601 | * shrink_dcache_sb - shrink dcache for a superblock | 601 | * shrink_dcache_sb - shrink dcache for a superblock |
602 | * @sb: superblock | 602 | * @sb: superblock |
603 | * | 603 | * |
604 | * Shrink the dcache for the specified super block. This | 604 | * Shrink the dcache for the specified super block. This |
605 | * is used to free the dcache before unmounting a file | 605 | * is used to free the dcache before unmounting a file |
606 | * system | 606 | * system |
607 | */ | 607 | */ |
608 | void shrink_dcache_sb(struct super_block * sb) | 608 | void shrink_dcache_sb(struct super_block * sb) |
609 | { | 609 | { |
610 | __shrink_dcache_sb(sb, NULL, 0); | 610 | __shrink_dcache_sb(sb, NULL, 0); |
611 | } | 611 | } |
612 | 612 | ||
613 | /* | 613 | /* |
614 | * destroy a single subtree of dentries for unmount | 614 | * destroy a single subtree of dentries for unmount |
615 | * - see the comments on shrink_dcache_for_umount() for a description of the | 615 | * - see the comments on shrink_dcache_for_umount() for a description of the |
616 | * locking | 616 | * locking |
617 | */ | 617 | */ |
618 | static void shrink_dcache_for_umount_subtree(struct dentry *dentry) | 618 | static void shrink_dcache_for_umount_subtree(struct dentry *dentry) |
619 | { | 619 | { |
620 | struct dentry *parent; | 620 | struct dentry *parent; |
621 | unsigned detached = 0; | 621 | unsigned detached = 0; |
622 | 622 | ||
623 | BUG_ON(!IS_ROOT(dentry)); | 623 | BUG_ON(!IS_ROOT(dentry)); |
624 | 624 | ||
625 | /* detach this root from the system */ | 625 | /* detach this root from the system */ |
626 | spin_lock(&dcache_lock); | 626 | spin_lock(&dcache_lock); |
627 | dentry_lru_del_init(dentry); | 627 | dentry_lru_del_init(dentry); |
628 | __d_drop(dentry); | 628 | __d_drop(dentry); |
629 | spin_unlock(&dcache_lock); | 629 | spin_unlock(&dcache_lock); |
630 | 630 | ||
631 | for (;;) { | 631 | for (;;) { |
632 | /* descend to the first leaf in the current subtree */ | 632 | /* descend to the first leaf in the current subtree */ |
633 | while (!list_empty(&dentry->d_subdirs)) { | 633 | while (!list_empty(&dentry->d_subdirs)) { |
634 | struct dentry *loop; | 634 | struct dentry *loop; |
635 | 635 | ||
636 | /* this is a branch with children - detach all of them | 636 | /* this is a branch with children - detach all of them |
637 | * from the system in one go */ | 637 | * from the system in one go */ |
638 | spin_lock(&dcache_lock); | 638 | spin_lock(&dcache_lock); |
639 | list_for_each_entry(loop, &dentry->d_subdirs, | 639 | list_for_each_entry(loop, &dentry->d_subdirs, |
640 | d_u.d_child) { | 640 | d_u.d_child) { |
641 | dentry_lru_del_init(loop); | 641 | dentry_lru_del_init(loop); |
642 | __d_drop(loop); | 642 | __d_drop(loop); |
643 | cond_resched_lock(&dcache_lock); | 643 | cond_resched_lock(&dcache_lock); |
644 | } | 644 | } |
645 | spin_unlock(&dcache_lock); | 645 | spin_unlock(&dcache_lock); |
646 | 646 | ||
647 | /* move to the first child */ | 647 | /* move to the first child */ |
648 | dentry = list_entry(dentry->d_subdirs.next, | 648 | dentry = list_entry(dentry->d_subdirs.next, |
649 | struct dentry, d_u.d_child); | 649 | struct dentry, d_u.d_child); |
650 | } | 650 | } |
651 | 651 | ||
652 | /* consume the dentries from this leaf up through its parents | 652 | /* consume the dentries from this leaf up through its parents |
653 | * until we find one with children or run out altogether */ | 653 | * until we find one with children or run out altogether */ |
654 | do { | 654 | do { |
655 | struct inode *inode; | 655 | struct inode *inode; |
656 | 656 | ||
657 | if (atomic_read(&dentry->d_count) != 0) { | 657 | if (atomic_read(&dentry->d_count) != 0) { |
658 | printk(KERN_ERR | 658 | printk(KERN_ERR |
659 | "BUG: Dentry %p{i=%lx,n=%s}" | 659 | "BUG: Dentry %p{i=%lx,n=%s}" |
660 | " still in use (%d)" | 660 | " still in use (%d)" |
661 | " [unmount of %s %s]\n", | 661 | " [unmount of %s %s]\n", |
662 | dentry, | 662 | dentry, |
663 | dentry->d_inode ? | 663 | dentry->d_inode ? |
664 | dentry->d_inode->i_ino : 0UL, | 664 | dentry->d_inode->i_ino : 0UL, |
665 | dentry->d_name.name, | 665 | dentry->d_name.name, |
666 | atomic_read(&dentry->d_count), | 666 | atomic_read(&dentry->d_count), |
667 | dentry->d_sb->s_type->name, | 667 | dentry->d_sb->s_type->name, |
668 | dentry->d_sb->s_id); | 668 | dentry->d_sb->s_id); |
669 | BUG(); | 669 | BUG(); |
670 | } | 670 | } |
671 | 671 | ||
672 | if (IS_ROOT(dentry)) | 672 | if (IS_ROOT(dentry)) |
673 | parent = NULL; | 673 | parent = NULL; |
674 | else { | 674 | else { |
675 | parent = dentry->d_parent; | 675 | parent = dentry->d_parent; |
676 | atomic_dec(&parent->d_count); | 676 | atomic_dec(&parent->d_count); |
677 | } | 677 | } |
678 | 678 | ||
679 | list_del(&dentry->d_u.d_child); | 679 | list_del(&dentry->d_u.d_child); |
680 | detached++; | 680 | detached++; |
681 | 681 | ||
682 | inode = dentry->d_inode; | 682 | inode = dentry->d_inode; |
683 | if (inode) { | 683 | if (inode) { |
684 | dentry->d_inode = NULL; | 684 | dentry->d_inode = NULL; |
685 | list_del_init(&dentry->d_alias); | 685 | list_del_init(&dentry->d_alias); |
686 | if (dentry->d_op && dentry->d_op->d_iput) | 686 | if (dentry->d_op && dentry->d_op->d_iput) |
687 | dentry->d_op->d_iput(dentry, inode); | 687 | dentry->d_op->d_iput(dentry, inode); |
688 | else | 688 | else |
689 | iput(inode); | 689 | iput(inode); |
690 | } | 690 | } |
691 | 691 | ||
692 | d_free(dentry); | 692 | d_free(dentry); |
693 | 693 | ||
694 | /* finished when we fall off the top of the tree, | 694 | /* finished when we fall off the top of the tree, |
695 | * otherwise we ascend to the parent and move to the | 695 | * otherwise we ascend to the parent and move to the |
696 | * next sibling if there is one */ | 696 | * next sibling if there is one */ |
697 | if (!parent) | 697 | if (!parent) |
698 | goto out; | 698 | goto out; |
699 | 699 | ||
700 | dentry = parent; | 700 | dentry = parent; |
701 | 701 | ||
702 | } while (list_empty(&dentry->d_subdirs)); | 702 | } while (list_empty(&dentry->d_subdirs)); |
703 | 703 | ||
704 | dentry = list_entry(dentry->d_subdirs.next, | 704 | dentry = list_entry(dentry->d_subdirs.next, |
705 | struct dentry, d_u.d_child); | 705 | struct dentry, d_u.d_child); |
706 | } | 706 | } |
707 | out: | 707 | out: |
708 | /* several dentries were freed, need to correct nr_dentry */ | 708 | /* several dentries were freed, need to correct nr_dentry */ |
709 | spin_lock(&dcache_lock); | 709 | spin_lock(&dcache_lock); |
710 | dentry_stat.nr_dentry -= detached; | 710 | dentry_stat.nr_dentry -= detached; |
711 | spin_unlock(&dcache_lock); | 711 | spin_unlock(&dcache_lock); |
712 | } | 712 | } |
713 | 713 | ||
714 | /* | 714 | /* |
715 | * destroy the dentries attached to a superblock on unmounting | 715 | * destroy the dentries attached to a superblock on unmounting |
716 | * - we don't need to use dentry->d_lock, and only need dcache_lock when | 716 | * - we don't need to use dentry->d_lock, and only need dcache_lock when |
717 | * removing the dentry from the system lists and hashes because: | 717 | * removing the dentry from the system lists and hashes because: |
718 | * - the superblock is detached from all mountings and open files, so the | 718 | * - the superblock is detached from all mountings and open files, so the |
719 | * dentry trees will not be rearranged by the VFS | 719 | * dentry trees will not be rearranged by the VFS |
720 | * - s_umount is write-locked, so the memory pressure shrinker will ignore | 720 | * - s_umount is write-locked, so the memory pressure shrinker will ignore |
721 | * any dentries belonging to this superblock that it comes across | 721 | * any dentries belonging to this superblock that it comes across |
722 | * - the filesystem itself is no longer permitted to rearrange the dentries | 722 | * - the filesystem itself is no longer permitted to rearrange the dentries |
723 | * in this superblock | 723 | * in this superblock |
724 | */ | 724 | */ |
725 | void shrink_dcache_for_umount(struct super_block *sb) | 725 | void shrink_dcache_for_umount(struct super_block *sb) |
726 | { | 726 | { |
727 | struct dentry *dentry; | 727 | struct dentry *dentry; |
728 | 728 | ||
729 | if (down_read_trylock(&sb->s_umount)) | 729 | if (down_read_trylock(&sb->s_umount)) |
730 | BUG(); | 730 | BUG(); |
731 | 731 | ||
732 | dentry = sb->s_root; | 732 | dentry = sb->s_root; |
733 | sb->s_root = NULL; | 733 | sb->s_root = NULL; |
734 | atomic_dec(&dentry->d_count); | 734 | atomic_dec(&dentry->d_count); |
735 | shrink_dcache_for_umount_subtree(dentry); | 735 | shrink_dcache_for_umount_subtree(dentry); |
736 | 736 | ||
737 | while (!hlist_empty(&sb->s_anon)) { | 737 | while (!hlist_empty(&sb->s_anon)) { |
738 | dentry = hlist_entry(sb->s_anon.first, struct dentry, d_hash); | 738 | dentry = hlist_entry(sb->s_anon.first, struct dentry, d_hash); |
739 | shrink_dcache_for_umount_subtree(dentry); | 739 | shrink_dcache_for_umount_subtree(dentry); |
740 | } | 740 | } |
741 | } | 741 | } |
742 | 742 | ||
743 | /* | 743 | /* |
744 | * Search for at least 1 mount point in the dentry's subdirs. | 744 | * Search for at least 1 mount point in the dentry's subdirs. |
745 | * We descend to the next level whenever the d_subdirs | 745 | * We descend to the next level whenever the d_subdirs |
746 | * list is non-empty and continue searching. | 746 | * list is non-empty and continue searching. |
747 | */ | 747 | */ |
748 | 748 | ||
749 | /** | 749 | /** |
750 | * have_submounts - check for mounts over a dentry | 750 | * have_submounts - check for mounts over a dentry |
751 | * @parent: dentry to check. | 751 | * @parent: dentry to check. |
752 | * | 752 | * |
753 | * Return true if the parent or its subdirectories contain | 753 | * Return true if the parent or its subdirectories contain |
754 | * a mount point | 754 | * a mount point |
755 | */ | 755 | */ |
756 | 756 | ||
757 | int have_submounts(struct dentry *parent) | 757 | int have_submounts(struct dentry *parent) |
758 | { | 758 | { |
759 | struct dentry *this_parent = parent; | 759 | struct dentry *this_parent = parent; |
760 | struct list_head *next; | 760 | struct list_head *next; |
761 | 761 | ||
762 | spin_lock(&dcache_lock); | 762 | spin_lock(&dcache_lock); |
763 | if (d_mountpoint(parent)) | 763 | if (d_mountpoint(parent)) |
764 | goto positive; | 764 | goto positive; |
765 | repeat: | 765 | repeat: |
766 | next = this_parent->d_subdirs.next; | 766 | next = this_parent->d_subdirs.next; |
767 | resume: | 767 | resume: |
768 | while (next != &this_parent->d_subdirs) { | 768 | while (next != &this_parent->d_subdirs) { |
769 | struct list_head *tmp = next; | 769 | struct list_head *tmp = next; |
770 | struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child); | 770 | struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child); |
771 | next = tmp->next; | 771 | next = tmp->next; |
772 | /* Have we found a mount point ? */ | 772 | /* Have we found a mount point ? */ |
773 | if (d_mountpoint(dentry)) | 773 | if (d_mountpoint(dentry)) |
774 | goto positive; | 774 | goto positive; |
775 | if (!list_empty(&dentry->d_subdirs)) { | 775 | if (!list_empty(&dentry->d_subdirs)) { |
776 | this_parent = dentry; | 776 | this_parent = dentry; |
777 | goto repeat; | 777 | goto repeat; |
778 | } | 778 | } |
779 | } | 779 | } |
780 | /* | 780 | /* |
781 | * All done at this level ... ascend and resume the search. | 781 | * All done at this level ... ascend and resume the search. |
782 | */ | 782 | */ |
783 | if (this_parent != parent) { | 783 | if (this_parent != parent) { |
784 | next = this_parent->d_u.d_child.next; | 784 | next = this_parent->d_u.d_child.next; |
785 | this_parent = this_parent->d_parent; | 785 | this_parent = this_parent->d_parent; |
786 | goto resume; | 786 | goto resume; |
787 | } | 787 | } |
788 | spin_unlock(&dcache_lock); | 788 | spin_unlock(&dcache_lock); |
789 | return 0; /* No mount points found in tree */ | 789 | return 0; /* No mount points found in tree */ |
790 | positive: | 790 | positive: |
791 | spin_unlock(&dcache_lock); | 791 | spin_unlock(&dcache_lock); |
792 | return 1; | 792 | return 1; |
793 | } | 793 | } |
794 | 794 | ||
795 | /* | 795 | /* |
796 | * Search the dentry child list for the specified parent, | 796 | * Search the dentry child list for the specified parent, |
797 | * and move any unused dentries to the end of the unused | 797 | * and move any unused dentries to the end of the unused |
798 | * list for prune_dcache(). We descend to the next level | 798 | * list for prune_dcache(). We descend to the next level |
799 | * whenever the d_subdirs list is non-empty and continue | 799 | * whenever the d_subdirs list is non-empty and continue |
800 | * searching. | 800 | * searching. |
801 | * | 801 | * |
802 | * It returns zero iff there are no unused children, | 802 | * It returns zero iff there are no unused children, |
803 | * otherwise it returns the number of children moved to | 803 | * otherwise it returns the number of children moved to |
804 | * the end of the unused list. This may not be the total | 804 | * the end of the unused list. This may not be the total |
805 | * number of unused children, because select_parent can | 805 | * number of unused children, because select_parent can |
806 | * drop the lock and return early due to latency | 806 | * drop the lock and return early due to latency |
807 | * constraints. | 807 | * constraints. |
808 | */ | 808 | */ |
809 | static int select_parent(struct dentry * parent) | 809 | static int select_parent(struct dentry * parent) |
810 | { | 810 | { |
811 | struct dentry *this_parent = parent; | 811 | struct dentry *this_parent = parent; |
812 | struct list_head *next; | 812 | struct list_head *next; |
813 | int found = 0; | 813 | int found = 0; |
814 | 814 | ||
815 | spin_lock(&dcache_lock); | 815 | spin_lock(&dcache_lock); |
816 | repeat: | 816 | repeat: |
817 | next = this_parent->d_subdirs.next; | 817 | next = this_parent->d_subdirs.next; |
818 | resume: | 818 | resume: |
819 | while (next != &this_parent->d_subdirs) { | 819 | while (next != &this_parent->d_subdirs) { |
820 | struct list_head *tmp = next; | 820 | struct list_head *tmp = next; |
821 | struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child); | 821 | struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child); |
822 | next = tmp->next; | 822 | next = tmp->next; |
823 | 823 | ||
824 | dentry_lru_del_init(dentry); | 824 | dentry_lru_del_init(dentry); |
825 | /* | 825 | /* |
826 | * move only zero ref count dentries to the end | 826 | * move only zero ref count dentries to the end |
827 | * of the unused list for prune_dcache | 827 | * of the unused list for prune_dcache |
828 | */ | 828 | */ |
829 | if (!atomic_read(&dentry->d_count)) { | 829 | if (!atomic_read(&dentry->d_count)) { |
830 | dentry_lru_add_tail(dentry); | 830 | dentry_lru_add_tail(dentry); |
831 | found++; | 831 | found++; |
832 | } | 832 | } |
833 | 833 | ||
834 | /* | 834 | /* |
835 | * We can return to the caller if we have found some (this | 835 | * We can return to the caller if we have found some (this |
836 | * ensures forward progress). We'll be coming back to find | 836 | * ensures forward progress). We'll be coming back to find |
837 | * the rest. | 837 | * the rest. |
838 | */ | 838 | */ |
839 | if (found && need_resched()) | 839 | if (found && need_resched()) |
840 | goto out; | 840 | goto out; |
841 | 841 | ||
842 | /* | 842 | /* |
843 | * Descend a level if the d_subdirs list is non-empty. | 843 | * Descend a level if the d_subdirs list is non-empty. |
844 | */ | 844 | */ |
845 | if (!list_empty(&dentry->d_subdirs)) { | 845 | if (!list_empty(&dentry->d_subdirs)) { |
846 | this_parent = dentry; | 846 | this_parent = dentry; |
847 | goto repeat; | 847 | goto repeat; |
848 | } | 848 | } |
849 | } | 849 | } |
850 | /* | 850 | /* |
851 | * All done at this level ... ascend and resume the search. | 851 | * All done at this level ... ascend and resume the search. |
852 | */ | 852 | */ |
853 | if (this_parent != parent) { | 853 | if (this_parent != parent) { |
854 | next = this_parent->d_u.d_child.next; | 854 | next = this_parent->d_u.d_child.next; |
855 | this_parent = this_parent->d_parent; | 855 | this_parent = this_parent->d_parent; |
856 | goto resume; | 856 | goto resume; |
857 | } | 857 | } |
858 | out: | 858 | out: |
859 | spin_unlock(&dcache_lock); | 859 | spin_unlock(&dcache_lock); |
860 | return found; | 860 | return found; |
861 | } | 861 | } |
862 | 862 | ||
863 | /** | 863 | /** |
864 | * shrink_dcache_parent - prune dcache | 864 | * shrink_dcache_parent - prune dcache |
865 | * @parent: parent of entries to prune | 865 | * @parent: parent of entries to prune |
866 | * | 866 | * |
867 | * Prune the dcache to remove unused children of the parent dentry. | 867 | * Prune the dcache to remove unused children of the parent dentry. |
868 | */ | 868 | */ |
869 | 869 | ||
870 | void shrink_dcache_parent(struct dentry * parent) | 870 | void shrink_dcache_parent(struct dentry * parent) |
871 | { | 871 | { |
872 | struct super_block *sb = parent->d_sb; | 872 | struct super_block *sb = parent->d_sb; |
873 | int found; | 873 | int found; |
874 | 874 | ||
875 | while ((found = select_parent(parent)) != 0) | 875 | while ((found = select_parent(parent)) != 0) |
876 | __shrink_dcache_sb(sb, &found, 0); | 876 | __shrink_dcache_sb(sb, &found, 0); |
877 | } | 877 | } |
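As a usage sketch (not part of this file): a caller that is about to invalidate a whole directory can first let shrink_dcache_parent() push the unused children out of the cache and then unhash the directory itself. The helper name below is invented; shrink_dcache_parent() and d_drop() are the real APIs being illustrated.

#include <linux/dcache.h>

/* Hypothetical helper: prune the cached subtree under 'dir' before the
 * filesystem invalidates it. shrink_dcache_parent() frees the unused
 * descendants; d_drop() unhashes 'dir' so later lookups go back to the
 * filesystem. */
static void example_invalidate_dir(struct dentry *dir)
{
	shrink_dcache_parent(dir);
	d_drop(dir);
}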
878 | 878 | ||
879 | /* | 879 | /* |
880 | * Scan `nr' dentries and return the number which remain. | 880 | * Scan `nr' dentries and return the number which remain. |
881 | * | 881 | * |
882 | * We need to avoid reentering the filesystem if the caller is performing a | 882 | * We need to avoid reentering the filesystem if the caller is performing a |
883 | * GFP_NOFS allocation attempt. One example deadlock is: | 883 | * GFP_NOFS allocation attempt. One example deadlock is: |
884 | * | 884 | * |
885 | * ext2_new_block->getblk->GFP->shrink_dcache_memory->prune_dcache-> | 885 | * ext2_new_block->getblk->GFP->shrink_dcache_memory->prune_dcache-> |
886 | * prune_one_dentry->dput->dentry_iput->iput->inode->i_sb->s_op->put_inode-> | 886 | * prune_one_dentry->dput->dentry_iput->iput->inode->i_sb->s_op->put_inode-> |
887 | * ext2_discard_prealloc->ext2_free_blocks->lock_super->DEADLOCK. | 887 | * ext2_discard_prealloc->ext2_free_blocks->lock_super->DEADLOCK. |
888 | * | 888 | * |
889 | * In this case we return -1 to tell the caller that we bailed. | 889 | * In this case we return -1 to tell the caller that we bailed. |
890 | */ | 890 | */ |
891 | static int shrink_dcache_memory(int nr, gfp_t gfp_mask) | 891 | static int shrink_dcache_memory(int nr, gfp_t gfp_mask) |
892 | { | 892 | { |
893 | if (nr) { | 893 | if (nr) { |
894 | if (!(gfp_mask & __GFP_FS)) | 894 | if (!(gfp_mask & __GFP_FS)) |
895 | return -1; | 895 | return -1; |
896 | prune_dcache(nr); | 896 | prune_dcache(nr); |
897 | } | 897 | } |
898 | return (dentry_stat.nr_unused / 100) * sysctl_vfs_cache_pressure; | 898 | return (dentry_stat.nr_unused / 100) * sysctl_vfs_cache_pressure; |
899 | } | 899 | } |
900 | 900 | ||
901 | static struct shrinker dcache_shrinker = { | 901 | static struct shrinker dcache_shrinker = { |
902 | .shrink = shrink_dcache_memory, | 902 | .shrink = shrink_dcache_memory, |
903 | .seeks = DEFAULT_SEEKS, | 903 | .seeks = DEFAULT_SEEKS, |
904 | }; | 904 | }; |
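For orientation, a hedged sketch of how this shrinker is hooked up (in the real kernel the call sits in dcache_init()); after registration, memory reclaim reaches shrink_dcache_memory() through the generic shrinker interface.

#include <linux/init.h>
#include <linux/mm.h>

/* Sketch only: register the dcache shrinker so that kswapd and direct
 * reclaim can ask the dcache to give memory back. */
static int __init example_register_dcache_shrinker(void)
{
	register_shrinker(&dcache_shrinker);
	return 0;
}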
905 | 905 | ||
906 | /** | 906 | /** |
907 | * d_alloc - allocate a dcache entry | 907 | * d_alloc - allocate a dcache entry |
908 | * @parent: parent of entry to allocate | 908 | * @parent: parent of entry to allocate |
909 | * @name: qstr of the name | 909 | * @name: qstr of the name |
910 | * | 910 | * |
911 | * Allocates a dentry. It returns %NULL if there is insufficient memory | 911 | * Allocates a dentry. It returns %NULL if there is insufficient memory |
912 | * available. On success the dentry is returned. The name passed in is | 912 | * available. On success the dentry is returned. The name passed in is |
913 | * copied and the caller's copy may be reused after this call. | 913 | * copied and the caller's copy may be reused after this call. |
914 | */ | 914 | */ |
915 | 915 | ||
916 | struct dentry *d_alloc(struct dentry * parent, const struct qstr *name) | 916 | struct dentry *d_alloc(struct dentry * parent, const struct qstr *name) |
917 | { | 917 | { |
918 | struct dentry *dentry; | 918 | struct dentry *dentry; |
919 | char *dname; | 919 | char *dname; |
920 | 920 | ||
921 | dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL); | 921 | dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL); |
922 | if (!dentry) | 922 | if (!dentry) |
923 | return NULL; | 923 | return NULL; |
924 | 924 | ||
925 | if (name->len > DNAME_INLINE_LEN-1) { | 925 | if (name->len > DNAME_INLINE_LEN-1) { |
926 | dname = kmalloc(name->len + 1, GFP_KERNEL); | 926 | dname = kmalloc(name->len + 1, GFP_KERNEL); |
927 | if (!dname) { | 927 | if (!dname) { |
928 | kmem_cache_free(dentry_cache, dentry); | 928 | kmem_cache_free(dentry_cache, dentry); |
929 | return NULL; | 929 | return NULL; |
930 | } | 930 | } |
931 | } else { | 931 | } else { |
932 | dname = dentry->d_iname; | 932 | dname = dentry->d_iname; |
933 | } | 933 | } |
934 | dentry->d_name.name = dname; | 934 | dentry->d_name.name = dname; |
935 | 935 | ||
936 | dentry->d_name.len = name->len; | 936 | dentry->d_name.len = name->len; |
937 | dentry->d_name.hash = name->hash; | 937 | dentry->d_name.hash = name->hash; |
938 | memcpy(dname, name->name, name->len); | 938 | memcpy(dname, name->name, name->len); |
939 | dname[name->len] = 0; | 939 | dname[name->len] = 0; |
940 | 940 | ||
941 | atomic_set(&dentry->d_count, 1); | 941 | atomic_set(&dentry->d_count, 1); |
942 | dentry->d_flags = DCACHE_UNHASHED; | 942 | dentry->d_flags = DCACHE_UNHASHED; |
943 | spin_lock_init(&dentry->d_lock); | 943 | spin_lock_init(&dentry->d_lock); |
944 | dentry->d_inode = NULL; | 944 | dentry->d_inode = NULL; |
945 | dentry->d_parent = NULL; | 945 | dentry->d_parent = NULL; |
946 | dentry->d_sb = NULL; | 946 | dentry->d_sb = NULL; |
947 | dentry->d_op = NULL; | 947 | dentry->d_op = NULL; |
948 | dentry->d_fsdata = NULL; | 948 | dentry->d_fsdata = NULL; |
949 | dentry->d_mounted = 0; | 949 | dentry->d_mounted = 0; |
950 | INIT_HLIST_NODE(&dentry->d_hash); | 950 | INIT_HLIST_NODE(&dentry->d_hash); |
951 | INIT_LIST_HEAD(&dentry->d_lru); | 951 | INIT_LIST_HEAD(&dentry->d_lru); |
952 | INIT_LIST_HEAD(&dentry->d_subdirs); | 952 | INIT_LIST_HEAD(&dentry->d_subdirs); |
953 | INIT_LIST_HEAD(&dentry->d_alias); | 953 | INIT_LIST_HEAD(&dentry->d_alias); |
954 | 954 | ||
955 | if (parent) { | 955 | if (parent) { |
956 | dentry->d_parent = dget(parent); | 956 | dentry->d_parent = dget(parent); |
957 | dentry->d_sb = parent->d_sb; | 957 | dentry->d_sb = parent->d_sb; |
958 | } else { | 958 | } else { |
959 | INIT_LIST_HEAD(&dentry->d_u.d_child); | 959 | INIT_LIST_HEAD(&dentry->d_u.d_child); |
960 | } | 960 | } |
961 | 961 | ||
962 | spin_lock(&dcache_lock); | 962 | spin_lock(&dcache_lock); |
963 | if (parent) | 963 | if (parent) |
964 | list_add(&dentry->d_u.d_child, &parent->d_subdirs); | 964 | list_add(&dentry->d_u.d_child, &parent->d_subdirs); |
965 | dentry_stat.nr_dentry++; | 965 | dentry_stat.nr_dentry++; |
966 | spin_unlock(&dcache_lock); | 966 | spin_unlock(&dcache_lock); |
967 | 967 | ||
968 | return dentry; | 968 | return dentry; |
969 | } | 969 | } |
970 | 970 | ||
971 | struct dentry *d_alloc_name(struct dentry *parent, const char *name) | 971 | struct dentry *d_alloc_name(struct dentry *parent, const char *name) |
972 | { | 972 | { |
973 | struct qstr q; | 973 | struct qstr q; |
974 | 974 | ||
975 | q.name = name; | 975 | q.name = name; |
976 | q.len = strlen(name); | 976 | q.len = strlen(name); |
977 | q.hash = full_name_hash(q.name, q.len); | 977 | q.hash = full_name_hash(q.name, q.len); |
978 | return d_alloc(parent, &q); | 978 | return d_alloc(parent, &q); |
979 | } | 979 | } |
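A brief usage sketch, not taken from this file: pseudo-filesystems commonly pair d_alloc_name() with d_add() to create a named, instantiated child under a known parent. The function and name below are invented for illustration.

#include <linux/dcache.h>

/* Hypothetical: create a child called "ctl" under 'parent' and bind it
 * to 'inode'. d_alloc_name() builds the qstr and copies the name;
 * d_add() instantiates the dentry and hashes it. */
static struct dentry *example_add_child(struct dentry *parent,
					struct inode *inode)
{
	struct dentry *dentry = d_alloc_name(parent, "ctl");

	if (!dentry)
		return NULL;
	d_add(dentry, inode);
	return dentry;
}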
980 | 980 | ||
981 | /* the caller must hold dcache_lock */ | 981 | /* the caller must hold dcache_lock */ |
982 | static void __d_instantiate(struct dentry *dentry, struct inode *inode) | 982 | static void __d_instantiate(struct dentry *dentry, struct inode *inode) |
983 | { | 983 | { |
984 | if (inode) | 984 | if (inode) |
985 | list_add(&dentry->d_alias, &inode->i_dentry); | 985 | list_add(&dentry->d_alias, &inode->i_dentry); |
986 | dentry->d_inode = inode; | 986 | dentry->d_inode = inode; |
987 | fsnotify_d_instantiate(dentry, inode); | 987 | fsnotify_d_instantiate(dentry, inode); |
988 | } | 988 | } |
989 | 989 | ||
990 | /** | 990 | /** |
991 | * d_instantiate - fill in inode information for a dentry | 991 | * d_instantiate - fill in inode information for a dentry |
992 | * @entry: dentry to complete | 992 | * @entry: dentry to complete |
993 | * @inode: inode to attach to this dentry | 993 | * @inode: inode to attach to this dentry |
994 | * | 994 | * |
995 | * Fill in inode information in the entry. | 995 | * Fill in inode information in the entry. |
996 | * | 996 | * |
997 | * This turns negative dentries into productive full members | 997 | * This turns negative dentries into productive full members |
998 | * of society. | 998 | * of society. |
999 | * | 999 | * |
1000 | * NOTE! This assumes that the inode count has been incremented | 1000 | * NOTE! This assumes that the inode count has been incremented |
1001 | * (or otherwise set) by the caller to indicate that it is now | 1001 | * (or otherwise set) by the caller to indicate that it is now |
1002 | * in use by the dcache. | 1002 | * in use by the dcache. |
1003 | */ | 1003 | */ |
1004 | 1004 | ||
1005 | void d_instantiate(struct dentry *entry, struct inode * inode) | 1005 | void d_instantiate(struct dentry *entry, struct inode * inode) |
1006 | { | 1006 | { |
1007 | BUG_ON(!list_empty(&entry->d_alias)); | 1007 | BUG_ON(!list_empty(&entry->d_alias)); |
1008 | spin_lock(&dcache_lock); | 1008 | spin_lock(&dcache_lock); |
1009 | __d_instantiate(entry, inode); | 1009 | __d_instantiate(entry, inode); |
1010 | spin_unlock(&dcache_lock); | 1010 | spin_unlock(&dcache_lock); |
1011 | security_d_instantiate(entry, inode); | 1011 | security_d_instantiate(entry, inode); |
1012 | } | 1012 | } |
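A hedged example of the calling convention the comment describes (the inode reference is handed over to the dcache): a simplified ->create-style method might look like the sketch below, where example_new_inode() stands in for whatever the filesystem uses to allocate and fill an inode.

#include <linux/fs.h>

/* Sketch of a create path: the reference on the freshly built inode is
 * transferred to the dcache by d_instantiate(), turning the negative
 * dentry left behind by lookup into a positive one. */
static int example_create(struct inode *dir, struct dentry *dentry, int mode)
{
	struct inode *inode = example_new_inode(dir->i_sb, mode); /* hypothetical */

	if (!inode)
		return -ENOSPC;
	d_instantiate(dentry, inode);
	return 0;
}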
1013 | 1013 | ||
1014 | /** | 1014 | /** |
1015 | * d_instantiate_unique - instantiate a non-aliased dentry | 1015 | * d_instantiate_unique - instantiate a non-aliased dentry |
1016 | * @entry: dentry to instantiate | 1016 | * @entry: dentry to instantiate |
1017 | * @inode: inode to attach to this dentry | 1017 | * @inode: inode to attach to this dentry |
1018 | * | 1018 | * |
1019 | * Fill in inode information in the entry. On success, it returns NULL. | 1019 | * Fill in inode information in the entry. On success, it returns NULL. |
1020 | * If an unhashed alias of "entry" already exists, then we return the | 1020 | * If an unhashed alias of "entry" already exists, then we return the |
1021 | * aliased dentry instead and drop one reference to inode. | 1021 | * aliased dentry instead and drop one reference to inode. |
1022 | * | 1022 | * |
1023 | * Note that in order to avoid conflicts with rename() etc, the caller | 1023 | * Note that in order to avoid conflicts with rename() etc, the caller |
1024 | * had better be holding the parent directory semaphore. | 1024 | * had better be holding the parent directory semaphore. |
1025 | * | 1025 | * |
1026 | * This also assumes that the inode count has been incremented | 1026 | * This also assumes that the inode count has been incremented |
1027 | * (or otherwise set) by the caller to indicate that it is now | 1027 | * (or otherwise set) by the caller to indicate that it is now |
1028 | * in use by the dcache. | 1028 | * in use by the dcache. |
1029 | */ | 1029 | */ |
1030 | static struct dentry *__d_instantiate_unique(struct dentry *entry, | 1030 | static struct dentry *__d_instantiate_unique(struct dentry *entry, |
1031 | struct inode *inode) | 1031 | struct inode *inode) |
1032 | { | 1032 | { |
1033 | struct dentry *alias; | 1033 | struct dentry *alias; |
1034 | int len = entry->d_name.len; | 1034 | int len = entry->d_name.len; |
1035 | const char *name = entry->d_name.name; | 1035 | const char *name = entry->d_name.name; |
1036 | unsigned int hash = entry->d_name.hash; | 1036 | unsigned int hash = entry->d_name.hash; |
1037 | 1037 | ||
1038 | if (!inode) { | 1038 | if (!inode) { |
1039 | __d_instantiate(entry, NULL); | 1039 | __d_instantiate(entry, NULL); |
1040 | return NULL; | 1040 | return NULL; |
1041 | } | 1041 | } |
1042 | 1042 | ||
1043 | list_for_each_entry(alias, &inode->i_dentry, d_alias) { | 1043 | list_for_each_entry(alias, &inode->i_dentry, d_alias) { |
1044 | struct qstr *qstr = &alias->d_name; | 1044 | struct qstr *qstr = &alias->d_name; |
1045 | 1045 | ||
1046 | if (qstr->hash != hash) | 1046 | if (qstr->hash != hash) |
1047 | continue; | 1047 | continue; |
1048 | if (alias->d_parent != entry->d_parent) | 1048 | if (alias->d_parent != entry->d_parent) |
1049 | continue; | 1049 | continue; |
1050 | if (qstr->len != len) | 1050 | if (qstr->len != len) |
1051 | continue; | 1051 | continue; |
1052 | if (memcmp(qstr->name, name, len)) | 1052 | if (memcmp(qstr->name, name, len)) |
1053 | continue; | 1053 | continue; |
1054 | dget_locked(alias); | 1054 | dget_locked(alias); |
1055 | return alias; | 1055 | return alias; |
1056 | } | 1056 | } |
1057 | 1057 | ||
1058 | __d_instantiate(entry, inode); | 1058 | __d_instantiate(entry, inode); |
1059 | return NULL; | 1059 | return NULL; |
1060 | } | 1060 | } |
1061 | 1061 | ||
1062 | struct dentry *d_instantiate_unique(struct dentry *entry, struct inode *inode) | 1062 | struct dentry *d_instantiate_unique(struct dentry *entry, struct inode *inode) |
1063 | { | 1063 | { |
1064 | struct dentry *result; | 1064 | struct dentry *result; |
1065 | 1065 | ||
1066 | BUG_ON(!list_empty(&entry->d_alias)); | 1066 | BUG_ON(!list_empty(&entry->d_alias)); |
1067 | 1067 | ||
1068 | spin_lock(&dcache_lock); | 1068 | spin_lock(&dcache_lock); |
1069 | result = __d_instantiate_unique(entry, inode); | 1069 | result = __d_instantiate_unique(entry, inode); |
1070 | spin_unlock(&dcache_lock); | 1070 | spin_unlock(&dcache_lock); |
1071 | 1071 | ||
1072 | if (!result) { | 1072 | if (!result) { |
1073 | security_d_instantiate(entry, inode); | 1073 | security_d_instantiate(entry, inode); |
1074 | return NULL; | 1074 | return NULL; |
1075 | } | 1075 | } |
1076 | 1076 | ||
1077 | BUG_ON(!d_unhashed(result)); | 1077 | BUG_ON(!d_unhashed(result)); |
1078 | iput(inode); | 1078 | iput(inode); |
1079 | return result; | 1079 | return result; |
1080 | } | 1080 | } |
1081 | 1081 | ||
1082 | EXPORT_SYMBOL(d_instantiate_unique); | 1082 | EXPORT_SYMBOL(d_instantiate_unique); |
1083 | 1083 | ||
1084 | /** | 1084 | /** |
1085 | * d_alloc_root - allocate root dentry | 1085 | * d_alloc_root - allocate root dentry |
1086 | * @root_inode: inode to allocate the root for | 1086 | * @root_inode: inode to allocate the root for |
1087 | * | 1087 | * |
1088 | * Allocate a root ("/") dentry for the inode given. The dentry is | 1088 | * Allocate a root ("/") dentry for the inode given. The dentry is |
1089 | * instantiated and returned. %NULL is returned if there is insufficient | 1089 | * instantiated and returned. %NULL is returned if there is insufficient |
1090 | * memory or the inode passed is %NULL. | 1090 | * memory or the inode passed is %NULL. |
1091 | */ | 1091 | */ |
1092 | 1092 | ||
1093 | struct dentry * d_alloc_root(struct inode * root_inode) | 1093 | struct dentry * d_alloc_root(struct inode * root_inode) |
1094 | { | 1094 | { |
1095 | struct dentry *res = NULL; | 1095 | struct dentry *res = NULL; |
1096 | 1096 | ||
1097 | if (root_inode) { | 1097 | if (root_inode) { |
1098 | static const struct qstr name = { .name = "/", .len = 1 }; | 1098 | static const struct qstr name = { .name = "/", .len = 1 }; |
1099 | 1099 | ||
1100 | res = d_alloc(NULL, &name); | 1100 | res = d_alloc(NULL, &name); |
1101 | if (res) { | 1101 | if (res) { |
1102 | res->d_sb = root_inode->i_sb; | 1102 | res->d_sb = root_inode->i_sb; |
1103 | res->d_parent = res; | 1103 | res->d_parent = res; |
1104 | d_instantiate(res, root_inode); | 1104 | d_instantiate(res, root_inode); |
1105 | } | 1105 | } |
1106 | } | 1106 | } |
1107 | return res; | 1107 | return res; |
1108 | } | 1108 | } |
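A typical fill_super-style caller, sketched with a hypothetical example_get_root_inode() helper. Note that when d_alloc_root() fails the inode reference was not consumed, so the caller must iput() it.

#include <linux/fs.h>

/* Sketch: build the root dentry for a superblock. */
static int example_fill_super(struct super_block *sb)
{
	struct inode *root = example_get_root_inode(sb); /* hypothetical */

	if (!root)
		return -ENOMEM;
	sb->s_root = d_alloc_root(root);
	if (!sb->s_root) {
		iput(root); /* d_alloc_root() did not take the reference */
		return -ENOMEM;
	}
	return 0;
}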
1109 | 1109 | ||
1110 | static inline struct hlist_head *d_hash(struct dentry *parent, | 1110 | static inline struct hlist_head *d_hash(struct dentry *parent, |
1111 | unsigned long hash) | 1111 | unsigned long hash) |
1112 | { | 1112 | { |
1113 | hash += ((unsigned long) parent ^ GOLDEN_RATIO_PRIME) / L1_CACHE_BYTES; | 1113 | hash += ((unsigned long) parent ^ GOLDEN_RATIO_PRIME) / L1_CACHE_BYTES; |
1114 | hash = hash ^ ((hash ^ GOLDEN_RATIO_PRIME) >> D_HASHBITS); | 1114 | hash = hash ^ ((hash ^ GOLDEN_RATIO_PRIME) >> D_HASHBITS); |
1115 | return dentry_hashtable + (hash & D_HASHMASK); | 1115 | return dentry_hashtable + (hash & D_HASHMASK); |
1116 | } | 1116 | } |
1117 | 1117 | ||
1118 | /** | 1118 | /** |
1119 | * d_obtain_alias - find or allocate a dentry for a given inode | 1119 | * d_obtain_alias - find or allocate a dentry for a given inode |
1120 | * @inode: inode to allocate the dentry for | 1120 | * @inode: inode to allocate the dentry for |
1121 | * | 1121 | * |
1122 | * Obtain a dentry for an inode resulting from NFS filehandle conversion or | 1122 | * Obtain a dentry for an inode resulting from NFS filehandle conversion or |
1123 | * similar open by handle operations. The returned dentry may be anonymous, | 1123 | * similar open by handle operations. The returned dentry may be anonymous, |
1124 | * or may have a full name (if the inode was already in the cache). | 1124 | * or may have a full name (if the inode was already in the cache). |
1125 | * | 1125 | * |
1126 | * When called on a directory inode, we must ensure that the inode only ever | 1126 | * When called on a directory inode, we must ensure that the inode only ever |
1127 | * has one dentry. If a dentry is found, that is returned instead of | 1127 | * has one dentry. If a dentry is found, that is returned instead of |
1128 | * allocating a new one. | 1128 | * allocating a new one. |
1129 | * | 1129 | * |
1130 | * On successful return, the reference to the inode has been transferred | 1130 | * On successful return, the reference to the inode has been transferred |
1131 | * to the dentry. In case of an error the reference on the inode is released. | 1131 | * to the dentry. In case of an error the reference on the inode is released. |
1132 | * To make it easier to use in export operations a %NULL or IS_ERR inode may | 1132 | * To make it easier to use in export operations a %NULL or IS_ERR inode may |
1133 | * be passed in and the error will be propagated to the return value, | 1133 | * be passed in and the error will be propagated to the return value, |
1134 | * with a %NULL @inode replaced by ERR_PTR(-ESTALE). | 1134 | * with a %NULL @inode replaced by ERR_PTR(-ESTALE). |
1135 | */ | 1135 | */ |
1136 | struct dentry *d_obtain_alias(struct inode *inode) | 1136 | struct dentry *d_obtain_alias(struct inode *inode) |
1137 | { | 1137 | { |
1138 | static const struct qstr anonstring = { .name = "" }; | 1138 | static const struct qstr anonstring = { .name = "" }; |
1139 | struct dentry *tmp; | 1139 | struct dentry *tmp; |
1140 | struct dentry *res; | 1140 | struct dentry *res; |
1141 | 1141 | ||
1142 | if (!inode) | 1142 | if (!inode) |
1143 | return ERR_PTR(-ESTALE); | 1143 | return ERR_PTR(-ESTALE); |
1144 | if (IS_ERR(inode)) | 1144 | if (IS_ERR(inode)) |
1145 | return ERR_CAST(inode); | 1145 | return ERR_CAST(inode); |
1146 | 1146 | ||
1147 | res = d_find_alias(inode); | 1147 | res = d_find_alias(inode); |
1148 | if (res) | 1148 | if (res) |
1149 | goto out_iput; | 1149 | goto out_iput; |
1150 | 1150 | ||
1151 | tmp = d_alloc(NULL, &anonstring); | 1151 | tmp = d_alloc(NULL, &anonstring); |
1152 | if (!tmp) { | 1152 | if (!tmp) { |
1153 | res = ERR_PTR(-ENOMEM); | 1153 | res = ERR_PTR(-ENOMEM); |
1154 | goto out_iput; | 1154 | goto out_iput; |
1155 | } | 1155 | } |
1156 | tmp->d_parent = tmp; /* make sure dput doesn't croak */ | 1156 | tmp->d_parent = tmp; /* make sure dput doesn't croak */ |
1157 | 1157 | ||
1158 | spin_lock(&dcache_lock); | 1158 | spin_lock(&dcache_lock); |
1159 | res = __d_find_alias(inode, 0); | 1159 | res = __d_find_alias(inode, 0); |
1160 | if (res) { | 1160 | if (res) { |
1161 | spin_unlock(&dcache_lock); | 1161 | spin_unlock(&dcache_lock); |
1162 | dput(tmp); | 1162 | dput(tmp); |
1163 | goto out_iput; | 1163 | goto out_iput; |
1164 | } | 1164 | } |
1165 | 1165 | ||
1166 | /* attach a disconnected dentry */ | 1166 | /* attach a disconnected dentry */ |
1167 | spin_lock(&tmp->d_lock); | 1167 | spin_lock(&tmp->d_lock); |
1168 | tmp->d_sb = inode->i_sb; | 1168 | tmp->d_sb = inode->i_sb; |
1169 | tmp->d_inode = inode; | 1169 | tmp->d_inode = inode; |
1170 | tmp->d_flags |= DCACHE_DISCONNECTED; | 1170 | tmp->d_flags |= DCACHE_DISCONNECTED; |
1171 | tmp->d_flags &= ~DCACHE_UNHASHED; | 1171 | tmp->d_flags &= ~DCACHE_UNHASHED; |
1172 | list_add(&tmp->d_alias, &inode->i_dentry); | 1172 | list_add(&tmp->d_alias, &inode->i_dentry); |
1173 | hlist_add_head(&tmp->d_hash, &inode->i_sb->s_anon); | 1173 | hlist_add_head(&tmp->d_hash, &inode->i_sb->s_anon); |
1174 | spin_unlock(&tmp->d_lock); | 1174 | spin_unlock(&tmp->d_lock); |
1175 | 1175 | ||
1176 | spin_unlock(&dcache_lock); | 1176 | spin_unlock(&dcache_lock); |
1177 | return tmp; | 1177 | return tmp; |
1178 | 1178 | ||
1179 | out_iput: | 1179 | out_iput: |
1180 | iput(inode); | 1180 | iput(inode); |
1181 | return res; | 1181 | return res; |
1182 | } | 1182 | } |
1183 | EXPORT_SYMBOL(d_obtain_alias); | 1183 | EXPORT_SYMBOL(d_obtain_alias); |
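A sketch of the export-operations style caller the comment has in mind: a fh_to_dentry-type helper can feed its inode straight into d_obtain_alias() because NULL and IS_ERR inodes are handled for it. example_iget_from_handle() is a made-up name for the filesystem's own inode lookup.

#include <linux/fs.h>

/* Sketch: convert a decoded file handle into a dentry. An error (or
 * NULL) inode is simply passed through and comes back as the matching
 * ERR_PTR, so no extra error handling is needed here. */
static struct dentry *example_fh_to_dentry(struct super_block *sb,
					   u64 ino, u32 generation)
{
	struct inode *inode = example_iget_from_handle(sb, ino, generation); /* hypothetical */

	return d_obtain_alias(inode);
}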
1184 | 1184 | ||
1185 | /** | 1185 | /** |
1186 | * d_splice_alias - splice a disconnected dentry into the tree if one exists | 1186 | * d_splice_alias - splice a disconnected dentry into the tree if one exists |
1187 | * @inode: the inode which may have a disconnected dentry | 1187 | * @inode: the inode which may have a disconnected dentry |
1188 | * @dentry: a negative dentry which we want to point to the inode. | 1188 | * @dentry: a negative dentry which we want to point to the inode. |
1189 | * | 1189 | * |
1190 | * If inode is a directory and has a 'disconnected' dentry (i.e. IS_ROOT and | 1190 | * If inode is a directory and has a 'disconnected' dentry (i.e. IS_ROOT and |
1191 | * DCACHE_DISCONNECTED), then d_move that in place of the given dentry | 1191 | * DCACHE_DISCONNECTED), then d_move that in place of the given dentry |
1192 | * and return it, else simply d_add the inode to the dentry and return NULL. | 1192 | * and return it, else simply d_add the inode to the dentry and return NULL. |
1193 | * | 1193 | * |
1194 | * This is needed in the lookup routine of any filesystem that is exportable | 1194 | * This is needed in the lookup routine of any filesystem that is exportable |
1195 | * (via knfsd) so that we can build dcache paths to directories effectively. | 1195 | * (via knfsd) so that we can build dcache paths to directories effectively. |
1196 | * | 1196 | * |
1197 | * If a dentry was found and moved, then it is returned. Otherwise NULL | 1197 | * If a dentry was found and moved, then it is returned. Otherwise NULL |
1198 | * is returned. This matches the expected return value of ->lookup. | 1198 | * is returned. This matches the expected return value of ->lookup. |
1199 | * | 1199 | * |
1200 | */ | 1200 | */ |
1201 | struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry) | 1201 | struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry) |
1202 | { | 1202 | { |
1203 | struct dentry *new = NULL; | 1203 | struct dentry *new = NULL; |
1204 | 1204 | ||
1205 | if (inode && S_ISDIR(inode->i_mode)) { | 1205 | if (inode && S_ISDIR(inode->i_mode)) { |
1206 | spin_lock(&dcache_lock); | 1206 | spin_lock(&dcache_lock); |
1207 | new = __d_find_alias(inode, 1); | 1207 | new = __d_find_alias(inode, 1); |
1208 | if (new) { | 1208 | if (new) { |
1209 | BUG_ON(!(new->d_flags & DCACHE_DISCONNECTED)); | 1209 | BUG_ON(!(new->d_flags & DCACHE_DISCONNECTED)); |
1210 | spin_unlock(&dcache_lock); | 1210 | spin_unlock(&dcache_lock); |
1211 | security_d_instantiate(new, inode); | 1211 | security_d_instantiate(new, inode); |
1212 | d_rehash(dentry); | 1212 | d_rehash(dentry); |
1213 | d_move(new, dentry); | 1213 | d_move(new, dentry); |
1214 | iput(inode); | 1214 | iput(inode); |
1215 | } else { | 1215 | } else { |
1216 | /* already taking dcache_lock, so d_add() by hand */ | 1216 | /* already taking dcache_lock, so d_add() by hand */ |
1217 | __d_instantiate(dentry, inode); | 1217 | __d_instantiate(dentry, inode); |
1218 | spin_unlock(&dcache_lock); | 1218 | spin_unlock(&dcache_lock); |
1219 | security_d_instantiate(dentry, inode); | 1219 | security_d_instantiate(dentry, inode); |
1220 | d_rehash(dentry); | 1220 | d_rehash(dentry); |
1221 | } | 1221 | } |
1222 | } else | 1222 | } else |
1223 | d_add(dentry, inode); | 1223 | d_add(dentry, inode); |
1224 | return new; | 1224 | return new; |
1225 | } | 1225 | } |
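The canonical caller is a filesystem's ->lookup method; a hedged sketch follows, with example_inode_by_name() invented for illustration. A NULL inode is fine: d_splice_alias() then just creates a negative dentry via d_add().

#include <linux/fs.h>

/* Sketch of an exportable filesystem's lookup: hand the inode to
 * d_splice_alias() and return whatever it gives back, which matches
 * the return convention ->lookup expects. */
static struct dentry *example_lookup(struct inode *dir,
				     struct dentry *dentry,
				     struct nameidata *nd)
{
	struct inode *inode = example_inode_by_name(dir, &dentry->d_name); /* hypothetical */

	if (IS_ERR(inode))
		return ERR_CAST(inode);
	return d_splice_alias(inode, dentry);
}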
1226 | 1226 | ||
1227 | /** | 1227 | /** |
1228 | * d_add_ci - lookup or allocate new dentry with case-exact name | 1228 | * d_add_ci - lookup or allocate new dentry with case-exact name |
1229 | * @inode: the inode case-insensitive lookup has found | 1229 | * @inode: the inode case-insensitive lookup has found |
1230 | * @dentry: the negative dentry that was passed to the parent's lookup func | 1230 | * @dentry: the negative dentry that was passed to the parent's lookup func |
1231 | * @name: the case-exact name to be associated with the returned dentry | 1231 | * @name: the case-exact name to be associated with the returned dentry |
1232 | * | 1232 | * |
1233 | * This is to avoid filling the dcache with case-insensitive names to the | 1233 | * This is to avoid filling the dcache with case-insensitive names to the |
1234 | * same inode; only the actual correct case is stored in the dcache for | 1234 | * same inode; only the actual correct case is stored in the dcache for |
1235 | * case-insensitive filesystems. | 1235 | * case-insensitive filesystems. |
1236 | * | 1236 | * |
1237 | * For a case-insensitive lookup match and if the case-exact dentry | 1237 | * For a case-insensitive lookup match and if the case-exact dentry |
1238 | * already exists in the dcache, use it and return it. | 1238 | * already exists in the dcache, use it and return it. |
1239 | * | 1239 | * |
1240 | * If no entry exists with the exact case name, allocate new dentry with | 1240 | * If no entry exists with the exact case name, allocate new dentry with |
1241 | * the exact case, and return the spliced entry. | 1241 | * the exact case, and return the spliced entry. |
1242 | */ | 1242 | */ |
1243 | struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode, | 1243 | struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode, |
1244 | struct qstr *name) | 1244 | struct qstr *name) |
1245 | { | 1245 | { |
1246 | int error; | 1246 | int error; |
1247 | struct dentry *found; | 1247 | struct dentry *found; |
1248 | struct dentry *new; | 1248 | struct dentry *new; |
1249 | 1249 | ||
1250 | /* Does a dentry matching the name exist already? */ | 1250 | /* |
1251 | * First check if a dentry matching the name already exists, | ||
1252 | * if not, go ahead and create it now. | ||
1253 | */ | ||
1251 | found = d_hash_and_lookup(dentry->d_parent, name); | 1254 | found = d_hash_and_lookup(dentry->d_parent, name); |
1252 | /* If not, create it now and return */ | ||
1253 | if (!found) { | 1255 | if (!found) { |
1254 | new = d_alloc(dentry->d_parent, name); | 1256 | new = d_alloc(dentry->d_parent, name); |
1255 | if (!new) { | 1257 | if (!new) { |
1256 | error = -ENOMEM; | 1258 | error = -ENOMEM; |
1257 | goto err_out; | 1259 | goto err_out; |
1258 | } | 1260 | } |
1261 | |||
1259 | found = d_splice_alias(inode, new); | 1262 | found = d_splice_alias(inode, new); |
1260 | if (found) { | 1263 | if (found) { |
1261 | dput(new); | 1264 | dput(new); |
1262 | return found; | 1265 | return found; |
1263 | } | 1266 | } |
1264 | return new; | 1267 | return new; |
1265 | } | 1268 | } |
1266 | /* Matching dentry exists, check if it is negative. */ | 1269 | |
1270 | /* | ||
1271 | * If a matching dentry exists and it's not negative, use it. | ||
1272 | * | ||
1273 | * Decrement the reference count to balance the iget() done | ||
1274 | * earlier on. | ||
1275 | */ | ||
1267 | if (found->d_inode) { | 1276 | if (found->d_inode) { |
1268 | if (unlikely(found->d_inode != inode)) { | 1277 | if (unlikely(found->d_inode != inode)) { |
1269 | /* This can't happen because bad inodes are unhashed. */ | 1278 | /* This can't happen because bad inodes are unhashed. */ |
1270 | BUG_ON(!is_bad_inode(inode)); | 1279 | BUG_ON(!is_bad_inode(inode)); |
1271 | BUG_ON(!is_bad_inode(found->d_inode)); | 1280 | BUG_ON(!is_bad_inode(found->d_inode)); |
1272 | } | 1281 | } |
1273 | /* | ||
1274 | * Already have the inode and the dentry attached, decrement | ||
1275 | * the reference count to balance the iget() done | ||
1276 | * earlier on. We found the dentry using d_lookup() so it | ||
1277 | * cannot be disconnected and thus we do not need to worry | ||
1278 | * about any NFS/disconnectedness issues here. | ||
1279 | */ | ||
1280 | iput(inode); | 1282 | iput(inode); |
1281 | return found; | 1283 | return found; |
1282 | } | 1284 | } |
1285 | |||
1283 | /* | 1286 | /* |
1284 | * Negative dentry: instantiate it unless the inode is a directory and | 1287 | * Negative dentry: instantiate it unless the inode is a directory and |
1285 | * has a 'disconnected' dentry (i.e. IS_ROOT and DCACHE_DISCONNECTED), | 1288 | * already has a dentry. |
1286 | * in which case d_move() that in place of the found dentry. | ||
1287 | */ | 1289 | */ |
1288 | if (!S_ISDIR(inode->i_mode)) { | ||
1289 | /* Not a directory; everything is easy. */ | ||
1290 | d_instantiate(found, inode); | ||
1291 | return found; | ||
1292 | } | ||
1293 | spin_lock(&dcache_lock); | 1290 | spin_lock(&dcache_lock); |
1294 | if (list_empty(&inode->i_dentry)) { | 1291 | if (!S_ISDIR(inode->i_mode) || list_empty(&inode->i_dentry)) { |
1295 | /* | ||
1296 | * Directory without a 'disconnected' dentry; we need to do | ||
1297 | * d_instantiate() by hand because it takes dcache_lock which | ||
1298 | * we already hold. | ||
1299 | */ | ||
1300 | __d_instantiate(found, inode); | 1292 | __d_instantiate(found, inode); |
1301 | spin_unlock(&dcache_lock); | 1293 | spin_unlock(&dcache_lock); |
1302 | security_d_instantiate(found, inode); | 1294 | security_d_instantiate(found, inode); |
1303 | return found; | 1295 | return found; |
1304 | } | 1296 | } |
1297 | |||
1305 | /* | 1298 | /* |
1306 | * Directory with a 'disconnected' dentry; get a reference to the | 1299 | * In case a directory already has a (disconnected) entry grab a |
1307 | * 'disconnected' dentry. | 1300 | * reference to it, move it in place and use it. |
1308 | */ | 1301 | */ |
1309 | new = list_entry(inode->i_dentry.next, struct dentry, d_alias); | 1302 | new = list_entry(inode->i_dentry.next, struct dentry, d_alias); |
1310 | dget_locked(new); | 1303 | dget_locked(new); |
1311 | spin_unlock(&dcache_lock); | 1304 | spin_unlock(&dcache_lock); |
1312 | /* Do security vodoo. */ | ||
1313 | security_d_instantiate(found, inode); | 1305 | security_d_instantiate(found, inode); |
1314 | /* Move new in place of found. */ | ||
1315 | d_move(new, found); | 1306 | d_move(new, found); |
1316 | /* Balance the iget() we did above. */ | ||
1317 | iput(inode); | 1307 | iput(inode); |
1318 | /* Throw away found. */ | ||
1319 | dput(found); | 1308 | dput(found); |
1320 | /* Use new as the actual dentry. */ | ||
1321 | return new; | 1309 | return new; |
1322 | 1310 | ||
1323 | err_out: | 1311 | err_out: |
1324 | iput(inode); | 1312 | iput(inode); |
1325 | return ERR_PTR(error); | 1313 | return ERR_PTR(error); |
1326 | } | 1314 | } |
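A sketch of how a case-insensitive filesystem's lookup might use this helper: after the on-disk, case-insensitive search has produced the inode and the case-exact name, the dentry that actually goes into the dcache is the one d_add_ci() returns. Both example_* helpers are invented for illustration.

#include <linux/fs.h>

/* Sketch: ci_name receives the case-exact name found on disk as a
 * qstr; d_add_ci() then either reuses an existing case-exact dentry
 * or allocates and splices a new one. */
static struct dentry *example_ci_lookup(struct inode *dir,
					struct dentry *dentry,
					struct nameidata *nd)
{
	struct qstr ci_name;
	struct inode *inode;

	inode = example_ci_find(dir, &dentry->d_name, &ci_name); /* hypothetical */
	if (!inode) {
		d_add(dentry, NULL); /* cache the negative result */
		return NULL;
	}
	if (IS_ERR(inode))
		return ERR_CAST(inode);
	return d_add_ci(dentry, inode, &ci_name);
}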
1327 | 1315 | ||
1328 | /** | 1316 | /** |
1329 | * d_lookup - search for a dentry | 1317 | * d_lookup - search for a dentry |
1330 | * @parent: parent dentry | 1318 | * @parent: parent dentry |
1331 | * @name: qstr of name we wish to find | 1319 | * @name: qstr of name we wish to find |
1332 | * | 1320 | * |
1333 | * Searches the children of the parent dentry for the name in question. If | 1321 | * Searches the children of the parent dentry for the name in question. If |
1334 | * the dentry is found its reference count is incremented and the dentry | 1322 | * the dentry is found its reference count is incremented and the dentry |
1335 | * is returned. The caller must use dput to free the entry when it has | 1323 | * is returned. The caller must use dput to free the entry when it has |
1336 | * finished using it. %NULL is returned on failure. | 1324 | * finished using it. %NULL is returned on failure. |
1337 | * | 1325 | * |
1338 | * __d_lookup is dcache_lock free. The hash list is protected using RCU. | 1326 | * __d_lookup is dcache_lock free. The hash list is protected using RCU. |
1339 | * Memory barriers are used while updating and doing lockless traversal. | 1327 | * Memory barriers are used while updating and doing lockless traversal. |
1340 | * To avoid races with d_move while rename is happening, d_lock is used. | 1328 | * To avoid races with d_move while rename is happening, d_lock is used. |
1341 | * | 1329 | * |
1342 | * Overflows in memcmp() during d_move() are avoided by keeping the length | 1330 | * Overflows in memcmp() during d_move() are avoided by keeping the length |
1343 | * and name pointer in one structure pointed to by d_qstr. | 1331 | * and name pointer in one structure pointed to by d_qstr. |
1344 | * | 1332 | * |
1345 | * rcu_read_lock() and rcu_read_unlock() are used to disable preemption while | 1333 | * rcu_read_lock() and rcu_read_unlock() are used to disable preemption while |
1346 | * lookup is going on. | 1334 | * lookup is going on. |
1347 | * | 1335 | * |
1348 | * The dentry unused LRU is not updated even if lookup finds the required dentry | 1336 | * The dentry unused LRU is not updated even if lookup finds the required dentry |
1349 | * in there. It is updated in places such as prune_dcache, shrink_dcache_sb, | 1337 | * in there. It is updated in places such as prune_dcache, shrink_dcache_sb, |
1350 | * select_parent and __dget_locked. This laziness saves lookup from dcache_lock | 1338 | * select_parent and __dget_locked. This laziness saves lookup from dcache_lock |
1351 | * acquisition. | 1339 | * acquisition. |
1352 | * | 1340 | * |
1353 | * d_lookup() is protected against concurrent renames in some unrelated | 1341 | * d_lookup() is protected against concurrent renames in some unrelated |
1354 | * directory using the seqlock_t rename_lock. | 1342 | * directory using the seqlock_t rename_lock. |
1355 | */ | 1343 | */ |
1356 | 1344 | ||
1357 | struct dentry * d_lookup(struct dentry * parent, struct qstr * name) | 1345 | struct dentry * d_lookup(struct dentry * parent, struct qstr * name) |
1358 | { | 1346 | { |
1359 | struct dentry * dentry = NULL; | 1347 | struct dentry * dentry = NULL; |
1360 | unsigned long seq; | 1348 | unsigned long seq; |
1361 | 1349 | ||
1362 | do { | 1350 | do { |
1363 | seq = read_seqbegin(&rename_lock); | 1351 | seq = read_seqbegin(&rename_lock); |
1364 | dentry = __d_lookup(parent, name); | 1352 | dentry = __d_lookup(parent, name); |
1365 | if (dentry) | 1353 | if (dentry) |
1366 | break; | 1354 | break; |
1367 | } while (read_seqretry(&rename_lock, seq)); | 1355 | } while (read_seqretry(&rename_lock, seq)); |
1368 | return dentry; | 1356 | return dentry; |
1369 | } | 1357 | } |
1370 | 1358 | ||
1371 | struct dentry * __d_lookup(struct dentry * parent, struct qstr * name) | 1359 | struct dentry * __d_lookup(struct dentry * parent, struct qstr * name) |
1372 | { | 1360 | { |
1373 | unsigned int len = name->len; | 1361 | unsigned int len = name->len; |
1374 | unsigned int hash = name->hash; | 1362 | unsigned int hash = name->hash; |
1375 | const unsigned char *str = name->name; | 1363 | const unsigned char *str = name->name; |
1376 | struct hlist_head *head = d_hash(parent,hash); | 1364 | struct hlist_head *head = d_hash(parent,hash); |
1377 | struct dentry *found = NULL; | 1365 | struct dentry *found = NULL; |
1378 | struct hlist_node *node; | 1366 | struct hlist_node *node; |
1379 | struct dentry *dentry; | 1367 | struct dentry *dentry; |
1380 | 1368 | ||
1381 | rcu_read_lock(); | 1369 | rcu_read_lock(); |
1382 | 1370 | ||
1383 | hlist_for_each_entry_rcu(dentry, node, head, d_hash) { | 1371 | hlist_for_each_entry_rcu(dentry, node, head, d_hash) { |
1384 | struct qstr *qstr; | 1372 | struct qstr *qstr; |
1385 | 1373 | ||
1386 | if (dentry->d_name.hash != hash) | 1374 | if (dentry->d_name.hash != hash) |
1387 | continue; | 1375 | continue; |
1388 | if (dentry->d_parent != parent) | 1376 | if (dentry->d_parent != parent) |
1389 | continue; | 1377 | continue; |
1390 | 1378 | ||
1391 | spin_lock(&dentry->d_lock); | 1379 | spin_lock(&dentry->d_lock); |
1392 | 1380 | ||
1393 | /* | 1381 | /* |
1394 | * Recheck the dentry after taking the lock - d_move may have | 1382 | * Recheck the dentry after taking the lock - d_move may have |
1395 | * changed things. Don't bother checking the hash because we're | 1383 | * changed things. Don't bother checking the hash because we're |
1396 | * about to compare the whole name anyway. | 1384 | * about to compare the whole name anyway. |
1397 | */ | 1385 | */ |
1398 | if (dentry->d_parent != parent) | 1386 | if (dentry->d_parent != parent) |
1399 | goto next; | 1387 | goto next; |
1400 | 1388 | ||
1401 | /* non-existing due to RCU? */ | 1389 | /* non-existing due to RCU? */ |
1402 | if (d_unhashed(dentry)) | 1390 | if (d_unhashed(dentry)) |
1403 | goto next; | 1391 | goto next; |
1404 | 1392 | ||
1405 | /* | 1393 | /* |
1406 | * It is safe to compare names since d_move() cannot | 1394 | * It is safe to compare names since d_move() cannot |
1407 | * change the qstr (protected by d_lock). | 1395 | * change the qstr (protected by d_lock). |
1408 | */ | 1396 | */ |
1409 | qstr = &dentry->d_name; | 1397 | qstr = &dentry->d_name; |
1410 | if (parent->d_op && parent->d_op->d_compare) { | 1398 | if (parent->d_op && parent->d_op->d_compare) { |
1411 | if (parent->d_op->d_compare(parent, qstr, name)) | 1399 | if (parent->d_op->d_compare(parent, qstr, name)) |
1412 | goto next; | 1400 | goto next; |
1413 | } else { | 1401 | } else { |
1414 | if (qstr->len != len) | 1402 | if (qstr->len != len) |
1415 | goto next; | 1403 | goto next; |
1416 | if (memcmp(qstr->name, str, len)) | 1404 | if (memcmp(qstr->name, str, len)) |
1417 | goto next; | 1405 | goto next; |
1418 | } | 1406 | } |
1419 | 1407 | ||
1420 | atomic_inc(&dentry->d_count); | 1408 | atomic_inc(&dentry->d_count); |
1421 | found = dentry; | 1409 | found = dentry; |
1422 | spin_unlock(&dentry->d_lock); | 1410 | spin_unlock(&dentry->d_lock); |
1423 | break; | 1411 | break; |
1424 | next: | 1412 | next: |
1425 | spin_unlock(&dentry->d_lock); | 1413 | spin_unlock(&dentry->d_lock); |
1426 | } | 1414 | } |
1427 | rcu_read_unlock(); | 1415 | rcu_read_unlock(); |
1428 | 1416 | ||
1429 | return found; | 1417 | return found; |
1430 | } | 1418 | } |
1431 | 1419 | ||
1432 | /** | 1420 | /** |
1433 | * d_hash_and_lookup - hash the qstr then search for a dentry | 1421 | * d_hash_and_lookup - hash the qstr then search for a dentry |
1434 | * @dir: Directory to search in | 1422 | * @dir: Directory to search in |
1435 | * @name: qstr of name we wish to find | 1423 | * @name: qstr of name we wish to find |
1436 | * | 1424 | * |
1437 | * On hash failure or on lookup failure NULL is returned. | 1425 | * On hash failure or on lookup failure NULL is returned. |
1438 | */ | 1426 | */ |
1439 | struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name) | 1427 | struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name) |
1440 | { | 1428 | { |
1441 | struct dentry *dentry = NULL; | 1429 | struct dentry *dentry = NULL; |
1442 | 1430 | ||
1443 | /* | 1431 | /* |
1444 | * Check for a fs-specific hash function. Note that we must | 1432 | * Check for a fs-specific hash function. Note that we must |
1445 | * calculate the standard hash first, as the d_op->d_hash() | 1433 | * calculate the standard hash first, as the d_op->d_hash() |
1446 | * routine may choose to leave the hash value unchanged. | 1434 | * routine may choose to leave the hash value unchanged. |
1447 | */ | 1435 | */ |
1448 | name->hash = full_name_hash(name->name, name->len); | 1436 | name->hash = full_name_hash(name->name, name->len); |
1449 | if (dir->d_op && dir->d_op->d_hash) { | 1437 | if (dir->d_op && dir->d_op->d_hash) { |
1450 | if (dir->d_op->d_hash(dir, name) < 0) | 1438 | if (dir->d_op->d_hash(dir, name) < 0) |
1451 | goto out; | 1439 | goto out; |
1452 | } | 1440 | } |
1453 | dentry = d_lookup(dir, name); | 1441 | dentry = d_lookup(dir, name); |
1454 | out: | 1442 | out: |
1455 | return dentry; | 1443 | return dentry; |
1456 | } | 1444 | } |
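A quick sketch of a caller that only has a C string: build a qstr, let d_hash_and_lookup() apply any fs-specific d_op->d_hash the directory has, and remember that a returned dentry carries a reference which must be dropped with dput().

#include <linux/dcache.h>
#include <linux/string.h>

/* Sketch: report whether 'dir' has a cached child called 'name'. */
static int example_child_is_cached(struct dentry *dir, const char *name)
{
	struct qstr q = { .name = name, .len = strlen(name) };
	struct dentry *child = d_hash_and_lookup(dir, &q);

	if (!child)
		return 0;
	dput(child); /* drop the reference d_lookup() took */
	return 1;
}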
1457 | 1445 | ||
1458 | /** | 1446 | /** |
1459 | * d_validate - verify dentry provided from insecure source | 1447 | * d_validate - verify dentry provided from insecure source |
1460 | * @dentry: The dentry alleged to be valid child of @dparent | 1448 | * @dentry: The dentry alleged to be valid child of @dparent |
1461 | * @dparent: The parent dentry (known to be valid) | 1449 | * @dparent: The parent dentry (known to be valid) |
1462 | * | 1450 | * |
1463 | * An insecure source has sent us a dentry; here we verify it and dget() it. | 1451 | * An insecure source has sent us a dentry; here we verify it and dget() it. |
1464 | * This is used by ncpfs in its readdir implementation. | 1452 | * This is used by ncpfs in its readdir implementation. |
1465 | * Zero is returned if the dentry is invalid. | 1453 | * Zero is returned if the dentry is invalid. |
1466 | */ | 1454 | */ |
1467 | 1455 | ||
1468 | int d_validate(struct dentry *dentry, struct dentry *dparent) | 1456 | int d_validate(struct dentry *dentry, struct dentry *dparent) |
1469 | { | 1457 | { |
1470 | struct hlist_head *base; | 1458 | struct hlist_head *base; |
1471 | struct hlist_node *lhp; | 1459 | struct hlist_node *lhp; |
1472 | 1460 | ||
1473 | /* Check whether the ptr might be valid at all.. */ | 1461 | /* Check whether the ptr might be valid at all.. */ |
1474 | if (!kmem_ptr_validate(dentry_cache, dentry)) | 1462 | if (!kmem_ptr_validate(dentry_cache, dentry)) |
1475 | goto out; | 1463 | goto out; |
1476 | 1464 | ||
1477 | if (dentry->d_parent != dparent) | 1465 | if (dentry->d_parent != dparent) |
1478 | goto out; | 1466 | goto out; |
1479 | 1467 | ||
1480 | spin_lock(&dcache_lock); | 1468 | spin_lock(&dcache_lock); |
1481 | base = d_hash(dparent, dentry->d_name.hash); | 1469 | base = d_hash(dparent, dentry->d_name.hash); |
1482 | hlist_for_each(lhp,base) { | 1470 | hlist_for_each(lhp,base) { |
1483 | /* hlist_for_each_entry_rcu() not required for d_hash list | 1471 | /* hlist_for_each_entry_rcu() not required for d_hash list |
1484 | * as it is parsed under dcache_lock | 1472 | * as it is parsed under dcache_lock |
1485 | */ | 1473 | */ |
1486 | if (dentry == hlist_entry(lhp, struct dentry, d_hash)) { | 1474 | if (dentry == hlist_entry(lhp, struct dentry, d_hash)) { |
1487 | __dget_locked(dentry); | 1475 | __dget_locked(dentry); |
1488 | spin_unlock(&dcache_lock); | 1476 | spin_unlock(&dcache_lock); |
1489 | return 1; | 1477 | return 1; |
1490 | } | 1478 | } |
1491 | } | 1479 | } |
1492 | spin_unlock(&dcache_lock); | 1480 | spin_unlock(&dcache_lock); |
1493 | out: | 1481 | out: |
1494 | return 0; | 1482 | return 0; |
1495 | } | 1483 | } |
1496 | 1484 | ||
1497 | /* | 1485 | /* |
1498 | * When a file is deleted, we have two options: | 1486 | * When a file is deleted, we have two options: |
1499 | * - turn this dentry into a negative dentry | 1487 | * - turn this dentry into a negative dentry |
1500 | * - unhash this dentry and free it. | 1488 | * - unhash this dentry and free it. |
1501 | * | 1489 | * |
1502 | * Usually, we want to just turn this into | 1490 | * Usually, we want to just turn this into |
1503 | * a negative dentry, but if anybody else is | 1491 | * a negative dentry, but if anybody else is |
1504 | * currently using the dentry or the inode | 1492 | * currently using the dentry or the inode |
1505 | * we can't do that and we fall back on removing | 1493 | * we can't do that and we fall back on removing |
1506 | * it from the hash queues and waiting for | 1494 | * it from the hash queues and waiting for |
1507 | * it to be deleted later when it has no users | 1495 | * it to be deleted later when it has no users |
1508 | */ | 1496 | */ |
1509 | 1497 | ||
1510 | /** | 1498 | /** |
1511 | * d_delete - delete a dentry | 1499 | * d_delete - delete a dentry |
1512 | * @dentry: The dentry to delete | 1500 | * @dentry: The dentry to delete |
1513 | * | 1501 | * |
1514 | * Turn the dentry into a negative dentry if possible, otherwise | 1502 | * Turn the dentry into a negative dentry if possible, otherwise |
1515 | * remove it from the hash queues so it can be deleted later | 1503 | * remove it from the hash queues so it can be deleted later |
1516 | */ | 1504 | */ |
1517 | 1505 | ||
1518 | void d_delete(struct dentry * dentry) | 1506 | void d_delete(struct dentry * dentry) |
1519 | { | 1507 | { |
1520 | int isdir = 0; | 1508 | int isdir = 0; |
1521 | /* | 1509 | /* |
1522 | * Are we the only user? | 1510 | * Are we the only user? |
1523 | */ | 1511 | */ |
1524 | spin_lock(&dcache_lock); | 1512 | spin_lock(&dcache_lock); |
1525 | spin_lock(&dentry->d_lock); | 1513 | spin_lock(&dentry->d_lock); |
1526 | isdir = S_ISDIR(dentry->d_inode->i_mode); | 1514 | isdir = S_ISDIR(dentry->d_inode->i_mode); |
1527 | if (atomic_read(&dentry->d_count) == 1) { | 1515 | if (atomic_read(&dentry->d_count) == 1) { |
1528 | dentry_iput(dentry); | 1516 | dentry_iput(dentry); |
1529 | fsnotify_nameremove(dentry, isdir); | 1517 | fsnotify_nameremove(dentry, isdir); |
1530 | return; | 1518 | return; |
1531 | } | 1519 | } |
1532 | 1520 | ||
1533 | if (!d_unhashed(dentry)) | 1521 | if (!d_unhashed(dentry)) |
1534 | __d_drop(dentry); | 1522 | __d_drop(dentry); |
1535 | 1523 | ||
1536 | spin_unlock(&dentry->d_lock); | 1524 | spin_unlock(&dentry->d_lock); |
1537 | spin_unlock(&dcache_lock); | 1525 | spin_unlock(&dcache_lock); |
1538 | 1526 | ||
1539 | fsnotify_nameremove(dentry, isdir); | 1527 | fsnotify_nameremove(dentry, isdir); |
1540 | } | 1528 | } |
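For orientation, the usual caller is the VFS unlink path: once the filesystem's ->unlink has succeeded the name is gone, and d_delete() either turns the dentry negative or unhashes it. A condensed sketch follows; the locking and permission checks that the real vfs_unlink() does are omitted.

#include <linux/fs.h>

/* Sketch: after a successful ->unlink the dentry must not satisfy
 * further lookups for this name, so hand it to d_delete(). */
static int example_unlink(struct inode *dir, struct dentry *dentry)
{
	int error = dir->i_op->unlink(dir, dentry);

	if (!error)
		d_delete(dentry);
	return error;
}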
1541 | 1529 | ||
1542 | static void __d_rehash(struct dentry * entry, struct hlist_head *list) | 1530 | static void __d_rehash(struct dentry * entry, struct hlist_head *list) |
1543 | { | 1531 | { |
1544 | 1532 | ||
1545 | entry->d_flags &= ~DCACHE_UNHASHED; | 1533 | entry->d_flags &= ~DCACHE_UNHASHED; |
1546 | hlist_add_head_rcu(&entry->d_hash, list); | 1534 | hlist_add_head_rcu(&entry->d_hash, list); |
1547 | } | 1535 | } |
1548 | 1536 | ||
1549 | static void _d_rehash(struct dentry * entry) | 1537 | static void _d_rehash(struct dentry * entry) |
1550 | { | 1538 | { |
1551 | __d_rehash(entry, d_hash(entry->d_parent, entry->d_name.hash)); | 1539 | __d_rehash(entry, d_hash(entry->d_parent, entry->d_name.hash)); |
1552 | } | 1540 | } |
1553 | 1541 | ||
1554 | /** | 1542 | /** |
1555 | * d_rehash - add an entry back to the hash | 1543 | * d_rehash - add an entry back to the hash |
1556 | * @entry: dentry to add to the hash | 1544 | * @entry: dentry to add to the hash |
1557 | * | 1545 | * |
1558 | * Adds a dentry to the hash according to its name. | 1546 | * Adds a dentry to the hash according to its name. |
1559 | */ | 1547 | */ |
1560 | 1548 | ||
1561 | void d_rehash(struct dentry * entry) | 1549 | void d_rehash(struct dentry * entry) |
1562 | { | 1550 | { |
1563 | spin_lock(&dcache_lock); | 1551 | spin_lock(&dcache_lock); |
1564 | spin_lock(&entry->d_lock); | 1552 | spin_lock(&entry->d_lock); |
1565 | _d_rehash(entry); | 1553 | _d_rehash(entry); |
1566 | spin_unlock(&entry->d_lock); | 1554 | spin_unlock(&entry->d_lock); |
1567 | spin_unlock(&dcache_lock); | 1555 | spin_unlock(&dcache_lock); |
1568 | } | 1556 | } |
1569 | 1557 | ||
1570 | /* | 1558 | /* |
1571 | * When switching names, the actual string doesn't strictly have to | 1559 | * When switching names, the actual string doesn't strictly have to |
1572 | * be preserved in the target - because we're dropping the target | 1560 | * be preserved in the target - because we're dropping the target |
1573 | * anyway. As such, we can just do a simple memcpy() to copy over | 1561 | * anyway. As such, we can just do a simple memcpy() to copy over |
1574 | * the new name before we switch. | 1562 | * the new name before we switch. |
1575 | * | 1563 | * |
1576 | * Note that we have to be a lot more careful about getting the hash | 1564 | * Note that we have to be a lot more careful about getting the hash |
1577 | * switched - we have to switch the hash value properly even if it | 1565 | * switched - we have to switch the hash value properly even if it |
1578 | * then no longer matches the actual (corrupted) string of the target. | 1566 | * then no longer matches the actual (corrupted) string of the target. |
1579 | * The hash value has to match the hash queue that the dentry is on.. | 1567 | * The hash value has to match the hash queue that the dentry is on.. |
1580 | */ | 1568 | */ |
1581 | static void switch_names(struct dentry *dentry, struct dentry *target) | 1569 | static void switch_names(struct dentry *dentry, struct dentry *target) |
1582 | { | 1570 | { |
1583 | if (dname_external(target)) { | 1571 | if (dname_external(target)) { |
1584 | if (dname_external(dentry)) { | 1572 | if (dname_external(dentry)) { |
1585 | /* | 1573 | /* |
1586 | * Both external: swap the pointers | 1574 | * Both external: swap the pointers |
1587 | */ | 1575 | */ |
1588 | swap(target->d_name.name, dentry->d_name.name); | 1576 | swap(target->d_name.name, dentry->d_name.name); |
1589 | } else { | 1577 | } else { |
1590 | /* | 1578 | /* |
1591 | * dentry:internal, target:external. Steal target's | 1579 | * dentry:internal, target:external. Steal target's |
1592 | * storage and make target internal. | 1580 | * storage and make target internal. |
1593 | */ | 1581 | */ |
1594 | memcpy(target->d_iname, dentry->d_name.name, | 1582 | memcpy(target->d_iname, dentry->d_name.name, |
1595 | dentry->d_name.len + 1); | 1583 | dentry->d_name.len + 1); |
1596 | dentry->d_name.name = target->d_name.name; | 1584 | dentry->d_name.name = target->d_name.name; |
1597 | target->d_name.name = target->d_iname; | 1585 | target->d_name.name = target->d_iname; |
1598 | } | 1586 | } |
1599 | } else { | 1587 | } else { |
1600 | if (dname_external(dentry)) { | 1588 | if (dname_external(dentry)) { |
1601 | /* | 1589 | /* |
1602 | * dentry:external, target:internal. Give dentry's | 1590 | * dentry:external, target:internal. Give dentry's |
1603 | * storage to target and make dentry internal | 1591 | * storage to target and make dentry internal |
1604 | */ | 1592 | */ |
1605 | memcpy(dentry->d_iname, target->d_name.name, | 1593 | memcpy(dentry->d_iname, target->d_name.name, |
1606 | target->d_name.len + 1); | 1594 | target->d_name.len + 1); |
1607 | target->d_name.name = dentry->d_name.name; | 1595 | target->d_name.name = dentry->d_name.name; |
1608 | dentry->d_name.name = dentry->d_iname; | 1596 | dentry->d_name.name = dentry->d_iname; |
1609 | } else { | 1597 | } else { |
1610 | /* | 1598 | /* |
1611 | * Both are internal. Just copy target to dentry | 1599 | * Both are internal. Just copy target to dentry |
1612 | */ | 1600 | */ |
1613 | memcpy(dentry->d_iname, target->d_name.name, | 1601 | memcpy(dentry->d_iname, target->d_name.name, |
1614 | target->d_name.len + 1); | 1602 | target->d_name.len + 1); |
1615 | dentry->d_name.len = target->d_name.len; | 1603 | dentry->d_name.len = target->d_name.len; |
1616 | return; | 1604 | return; |
1617 | } | 1605 | } |
1618 | } | 1606 | } |
1619 | swap(dentry->d_name.len, target->d_name.len); | 1607 | swap(dentry->d_name.len, target->d_name.len); |
1620 | } | 1608 | } |
1621 | 1609 | ||
1622 | /* | 1610 | /* |
1623 | * We cannibalize "target" when moving dentry on top of it, | 1611 | * We cannibalize "target" when moving dentry on top of it, |
1624 | * because it's going to be thrown away anyway. We could be more | 1612 | * because it's going to be thrown away anyway. We could be more |
1625 | * polite about it, though. | 1613 | * polite about it, though. |
1626 | * | 1614 | * |
1627 | * This forceful removal will result in ugly /proc output if | 1615 | * This forceful removal will result in ugly /proc output if |
1628 | * somebody holds a file open that got deleted due to a rename. | 1616 | * somebody holds a file open that got deleted due to a rename. |
1629 | * We could be nicer about the deleted file, and let it show | 1617 | * We could be nicer about the deleted file, and let it show |
1630 | * up under the name it had before it was deleted rather than | 1618 | * up under the name it had before it was deleted rather than |
1631 | * under the original name of the file that was moved on top of it. | 1619 | * under the original name of the file that was moved on top of it. |
1632 | */ | 1620 | */ |
1633 | 1621 | ||
1634 | /* | 1622 | /* |
1635 | * d_move_locked - move a dentry | 1623 | * d_move_locked - move a dentry |
1636 | * @dentry: entry to move | 1624 | * @dentry: entry to move |
1637 | * @target: new dentry | 1625 | * @target: new dentry |
1638 | * | 1626 | * |
1639 | * Update the dcache to reflect the move of a file name. Negative | 1627 | * Update the dcache to reflect the move of a file name. Negative |
1640 | * dcache entries should not be moved in this way. | 1628 | * dcache entries should not be moved in this way. |
1641 | */ | 1629 | */ |
1642 | static void d_move_locked(struct dentry * dentry, struct dentry * target) | 1630 | static void d_move_locked(struct dentry * dentry, struct dentry * target) |
1643 | { | 1631 | { |
1644 | struct hlist_head *list; | 1632 | struct hlist_head *list; |
1645 | 1633 | ||
1646 | if (!dentry->d_inode) | 1634 | if (!dentry->d_inode) |
1647 | printk(KERN_WARNING "VFS: moving negative dcache entry\n"); | 1635 | printk(KERN_WARNING "VFS: moving negative dcache entry\n"); |
1648 | 1636 | ||
1649 | write_seqlock(&rename_lock); | 1637 | write_seqlock(&rename_lock); |
1650 | /* | 1638 | /* |
1651 | * XXXX: do we really need to take target->d_lock? | 1639 | * XXXX: do we really need to take target->d_lock? |
1652 | */ | 1640 | */ |
1653 | if (target < dentry) { | 1641 | if (target < dentry) { |
1654 | spin_lock(&target->d_lock); | 1642 | spin_lock(&target->d_lock); |
1655 | spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); | 1643 | spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); |
1656 | } else { | 1644 | } else { |
1657 | spin_lock(&dentry->d_lock); | 1645 | spin_lock(&dentry->d_lock); |
1658 | spin_lock_nested(&target->d_lock, DENTRY_D_LOCK_NESTED); | 1646 | spin_lock_nested(&target->d_lock, DENTRY_D_LOCK_NESTED); |
1659 | } | 1647 | } |
1660 | 1648 | ||
1661 | /* Move the dentry to the target hash queue, if on different bucket */ | 1649 | /* Move the dentry to the target hash queue, if on different bucket */ |
1662 | if (d_unhashed(dentry)) | 1650 | if (d_unhashed(dentry)) |
1663 | goto already_unhashed; | 1651 | goto already_unhashed; |
1664 | 1652 | ||
1665 | hlist_del_rcu(&dentry->d_hash); | 1653 | hlist_del_rcu(&dentry->d_hash); |
1666 | 1654 | ||
1667 | already_unhashed: | 1655 | already_unhashed: |
1668 | list = d_hash(target->d_parent, target->d_name.hash); | 1656 | list = d_hash(target->d_parent, target->d_name.hash); |
1669 | __d_rehash(dentry, list); | 1657 | __d_rehash(dentry, list); |
1670 | 1658 | ||
1671 | /* Unhash the target: dput() will then get rid of it */ | 1659 | /* Unhash the target: dput() will then get rid of it */ |
1672 | __d_drop(target); | 1660 | __d_drop(target); |
1673 | 1661 | ||
1674 | list_del(&dentry->d_u.d_child); | 1662 | list_del(&dentry->d_u.d_child); |
1675 | list_del(&target->d_u.d_child); | 1663 | list_del(&target->d_u.d_child); |
1676 | 1664 | ||
1677 | /* Switch the names.. */ | 1665 | /* Switch the names.. */ |
1678 | switch_names(dentry, target); | 1666 | switch_names(dentry, target); |
1679 | swap(dentry->d_name.hash, target->d_name.hash); | 1667 | swap(dentry->d_name.hash, target->d_name.hash); |
1680 | 1668 | ||
1681 | /* ... and switch the parents */ | 1669 | /* ... and switch the parents */ |
1682 | if (IS_ROOT(dentry)) { | 1670 | if (IS_ROOT(dentry)) { |
1683 | dentry->d_parent = target->d_parent; | 1671 | dentry->d_parent = target->d_parent; |
1684 | target->d_parent = target; | 1672 | target->d_parent = target; |
1685 | INIT_LIST_HEAD(&target->d_u.d_child); | 1673 | INIT_LIST_HEAD(&target->d_u.d_child); |
1686 | } else { | 1674 | } else { |
1687 | swap(dentry->d_parent, target->d_parent); | 1675 | swap(dentry->d_parent, target->d_parent); |
1688 | 1676 | ||
1689 | /* And add them back to the (new) parent lists */ | 1677 | /* And add them back to the (new) parent lists */ |
1690 | list_add(&target->d_u.d_child, &target->d_parent->d_subdirs); | 1678 | list_add(&target->d_u.d_child, &target->d_parent->d_subdirs); |
1691 | } | 1679 | } |
1692 | 1680 | ||
1693 | list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs); | 1681 | list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs); |
1694 | spin_unlock(&target->d_lock); | 1682 | spin_unlock(&target->d_lock); |
1695 | fsnotify_d_move(dentry); | 1683 | fsnotify_d_move(dentry); |
1696 | spin_unlock(&dentry->d_lock); | 1684 | spin_unlock(&dentry->d_lock); |
1697 | write_sequnlock(&rename_lock); | 1685 | write_sequnlock(&rename_lock); |
1698 | } | 1686 | } |
1699 | 1687 | ||
1700 | /** | 1688 | /** |
1701 | * d_move - move a dentry | 1689 | * d_move - move a dentry |
1702 | * @dentry: entry to move | 1690 | * @dentry: entry to move |
1703 | * @target: new dentry | 1691 | * @target: new dentry |
1704 | * | 1692 | * |
1705 | * Update the dcache to reflect the move of a file name. Negative | 1693 | * Update the dcache to reflect the move of a file name. Negative |
1706 | * dcache entries should not be moved in this way. | 1694 | * dcache entries should not be moved in this way. |
1707 | */ | 1695 | */ |
1708 | 1696 | ||
1709 | void d_move(struct dentry * dentry, struct dentry * target) | 1697 | void d_move(struct dentry * dentry, struct dentry * target) |
1710 | { | 1698 | { |
1711 | spin_lock(&dcache_lock); | 1699 | spin_lock(&dcache_lock); |
1712 | d_move_locked(dentry, target); | 1700 | d_move_locked(dentry, target); |
1713 | spin_unlock(&dcache_lock); | 1701 | spin_unlock(&dcache_lock); |
1714 | } | 1702 | } |
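Illustrative sketch (not part of this diff): a filesystem that manages its own dcache moves, for instance one that sets FS_RENAME_DOES_D_MOVE, would finish a successful rename by calling d_move() so the source dentry takes over the target's name. example_fs_rename() and example_fs_commit_rename() are hypothetical names.

static int example_fs_rename(struct inode *old_dir, struct dentry *old_dentry,
			     struct inode *new_dir, struct dentry *new_dentry)
{
	int err;

	/* Hypothetical stand-in for the on-disk rename. */
	err = example_fs_commit_rename(old_dir, old_dentry, new_dir, new_dentry);
	if (err)
		return err;

	/* The entry being moved must be positive (see the warning
	 * printed by d_move_locked() above). */
	d_move(old_dentry, new_dentry);
	return 0;
}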
1715 | 1703 | ||
1716 | /** | 1704 | /** |
1717 | * d_ancestor - search for an ancestor | 1705 | * d_ancestor - search for an ancestor |
1718 | * @p1: ancestor dentry | 1706 | * @p1: ancestor dentry |
1719 | * @p2: child dentry | 1707 | * @p2: child dentry |
1720 | * | 1708 | * |
1721 | * Returns the ancestor dentry of p2 which is a child of p1, if p1 is | 1709 | * Returns the ancestor dentry of p2 which is a child of p1, if p1 is |
1722 | * an ancestor of p2, else NULL. | 1710 | * an ancestor of p2, else NULL. |
1723 | */ | 1711 | */ |
1724 | struct dentry *d_ancestor(struct dentry *p1, struct dentry *p2) | 1712 | struct dentry *d_ancestor(struct dentry *p1, struct dentry *p2) |
1725 | { | 1713 | { |
1726 | struct dentry *p; | 1714 | struct dentry *p; |
1727 | 1715 | ||
1728 | for (p = p2; !IS_ROOT(p); p = p->d_parent) { | 1716 | for (p = p2; !IS_ROOT(p); p = p->d_parent) { |
1729 | if (p->d_parent == p1) | 1717 | if (p->d_parent == p1) |
1730 | return p; | 1718 | return p; |
1731 | } | 1719 | } |
1732 | return NULL; | 1720 | return NULL; |
1733 | } | 1721 | } |
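A rough usage sketch, not from this diff: for a hierarchy /a/b/c, d_ancestor(a, c) returns the dentry for b, while d_ancestor(c, a) returns NULL. Callers must keep d_parent stable, e.g. by holding dcache_lock as below, or by sampling rename_lock the way is_subdir() does later in this file. example_is_beneath() is a hypothetical helper.

static int example_is_beneath(struct dentry *child, struct dentry *root)
{
	int ret;

	spin_lock(&dcache_lock);	/* keep d_parent pointers stable */
	ret = (d_ancestor(root, child) != NULL);
	spin_unlock(&dcache_lock);
	return ret;
}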
1734 | 1722 | ||
1735 | /* | 1723 | /* |
1736 | * This helper attempts to cope with remotely renamed directories | 1724 | * This helper attempts to cope with remotely renamed directories |
1737 | * | 1725 | * |
1738 | * It assumes that the caller is already holding | 1726 | * It assumes that the caller is already holding |
1739 | * dentry->d_parent->d_inode->i_mutex and the dcache_lock | 1727 | * dentry->d_parent->d_inode->i_mutex and the dcache_lock |
1740 | * | 1728 | * |
1741 | * Note: If ever the locking in lock_rename() changes, then please | 1729 | * Note: If ever the locking in lock_rename() changes, then please |
1742 | * remember to update this too... | 1730 | * remember to update this too... |
1743 | */ | 1731 | */ |
1744 | static struct dentry *__d_unalias(struct dentry *dentry, struct dentry *alias) | 1732 | static struct dentry *__d_unalias(struct dentry *dentry, struct dentry *alias) |
1745 | __releases(dcache_lock) | 1733 | __releases(dcache_lock) |
1746 | { | 1734 | { |
1747 | struct mutex *m1 = NULL, *m2 = NULL; | 1735 | struct mutex *m1 = NULL, *m2 = NULL; |
1748 | struct dentry *ret; | 1736 | struct dentry *ret; |
1749 | 1737 | ||
1750 | /* If alias and dentry share a parent, then no extra locks required */ | 1738 | /* If alias and dentry share a parent, then no extra locks required */ |
1751 | if (alias->d_parent == dentry->d_parent) | 1739 | if (alias->d_parent == dentry->d_parent) |
1752 | goto out_unalias; | 1740 | goto out_unalias; |
1753 | 1741 | ||
1754 | /* Check for loops */ | 1742 | /* Check for loops */ |
1755 | ret = ERR_PTR(-ELOOP); | 1743 | ret = ERR_PTR(-ELOOP); |
1756 | if (d_ancestor(alias, dentry)) | 1744 | if (d_ancestor(alias, dentry)) |
1757 | goto out_err; | 1745 | goto out_err; |
1758 | 1746 | ||
1759 | /* See lock_rename() */ | 1747 | /* See lock_rename() */ |
1760 | ret = ERR_PTR(-EBUSY); | 1748 | ret = ERR_PTR(-EBUSY); |
1761 | if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex)) | 1749 | if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex)) |
1762 | goto out_err; | 1750 | goto out_err; |
1763 | m1 = &dentry->d_sb->s_vfs_rename_mutex; | 1751 | m1 = &dentry->d_sb->s_vfs_rename_mutex; |
1764 | if (!mutex_trylock(&alias->d_parent->d_inode->i_mutex)) | 1752 | if (!mutex_trylock(&alias->d_parent->d_inode->i_mutex)) |
1765 | goto out_err; | 1753 | goto out_err; |
1766 | m2 = &alias->d_parent->d_inode->i_mutex; | 1754 | m2 = &alias->d_parent->d_inode->i_mutex; |
1767 | out_unalias: | 1755 | out_unalias: |
1768 | d_move_locked(alias, dentry); | 1756 | d_move_locked(alias, dentry); |
1769 | ret = alias; | 1757 | ret = alias; |
1770 | out_err: | 1758 | out_err: |
1771 | spin_unlock(&dcache_lock); | 1759 | spin_unlock(&dcache_lock); |
1772 | if (m2) | 1760 | if (m2) |
1773 | mutex_unlock(m2); | 1761 | mutex_unlock(m2); |
1774 | if (m1) | 1762 | if (m1) |
1775 | mutex_unlock(m1); | 1763 | mutex_unlock(m1); |
1776 | return ret; | 1764 | return ret; |
1777 | } | 1765 | } |
1778 | 1766 | ||
1779 | /* | 1767 | /* |
1780 | * Prepare an anonymous dentry for life in the superblock's dentry tree as a | 1768 | * Prepare an anonymous dentry for life in the superblock's dentry tree as a |
1781 | * named dentry in place of the dentry to be replaced. | 1769 | * named dentry in place of the dentry to be replaced. |
1782 | */ | 1770 | */ |
1783 | static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon) | 1771 | static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon) |
1784 | { | 1772 | { |
1785 | struct dentry *dparent, *aparent; | 1773 | struct dentry *dparent, *aparent; |
1786 | 1774 | ||
1787 | switch_names(dentry, anon); | 1775 | switch_names(dentry, anon); |
1788 | swap(dentry->d_name.hash, anon->d_name.hash); | 1776 | swap(dentry->d_name.hash, anon->d_name.hash); |
1789 | 1777 | ||
1790 | dparent = dentry->d_parent; | 1778 | dparent = dentry->d_parent; |
1791 | aparent = anon->d_parent; | 1779 | aparent = anon->d_parent; |
1792 | 1780 | ||
1793 | dentry->d_parent = (aparent == anon) ? dentry : aparent; | 1781 | dentry->d_parent = (aparent == anon) ? dentry : aparent; |
1794 | list_del(&dentry->d_u.d_child); | 1782 | list_del(&dentry->d_u.d_child); |
1795 | if (!IS_ROOT(dentry)) | 1783 | if (!IS_ROOT(dentry)) |
1796 | list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs); | 1784 | list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs); |
1797 | else | 1785 | else |
1798 | INIT_LIST_HEAD(&dentry->d_u.d_child); | 1786 | INIT_LIST_HEAD(&dentry->d_u.d_child); |
1799 | 1787 | ||
1800 | anon->d_parent = (dparent == dentry) ? anon : dparent; | 1788 | anon->d_parent = (dparent == dentry) ? anon : dparent; |
1801 | list_del(&anon->d_u.d_child); | 1789 | list_del(&anon->d_u.d_child); |
1802 | if (!IS_ROOT(anon)) | 1790 | if (!IS_ROOT(anon)) |
1803 | list_add(&anon->d_u.d_child, &anon->d_parent->d_subdirs); | 1791 | list_add(&anon->d_u.d_child, &anon->d_parent->d_subdirs); |
1804 | else | 1792 | else |
1805 | INIT_LIST_HEAD(&anon->d_u.d_child); | 1793 | INIT_LIST_HEAD(&anon->d_u.d_child); |
1806 | 1794 | ||
1807 | anon->d_flags &= ~DCACHE_DISCONNECTED; | 1795 | anon->d_flags &= ~DCACHE_DISCONNECTED; |
1808 | } | 1796 | } |
1809 | 1797 | ||
1810 | /** | 1798 | /** |
1811 | * d_materialise_unique - introduce an inode into the tree | 1799 | * d_materialise_unique - introduce an inode into the tree |
1812 | * @dentry: candidate dentry | 1800 | * @dentry: candidate dentry |
1813 | * @inode: inode to bind to the dentry, to which aliases may be attached | 1801 | * @inode: inode to bind to the dentry, to which aliases may be attached |
1814 | * | 1802 | * |
1815 | * Introduces a dentry into the tree, substituting an extant disconnected | 1803 | * Introduces a dentry into the tree, substituting an extant disconnected |
1816 | * root directory alias in its place if there is one. | 1804 | * root directory alias in its place if there is one. |

1817 | */ | 1805 | */ |
1818 | struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode) | 1806 | struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode) |
1819 | { | 1807 | { |
1820 | struct dentry *actual; | 1808 | struct dentry *actual; |
1821 | 1809 | ||
1822 | BUG_ON(!d_unhashed(dentry)); | 1810 | BUG_ON(!d_unhashed(dentry)); |
1823 | 1811 | ||
1824 | spin_lock(&dcache_lock); | 1812 | spin_lock(&dcache_lock); |
1825 | 1813 | ||
1826 | if (!inode) { | 1814 | if (!inode) { |
1827 | actual = dentry; | 1815 | actual = dentry; |
1828 | __d_instantiate(dentry, NULL); | 1816 | __d_instantiate(dentry, NULL); |
1829 | goto found_lock; | 1817 | goto found_lock; |
1830 | } | 1818 | } |
1831 | 1819 | ||
1832 | if (S_ISDIR(inode->i_mode)) { | 1820 | if (S_ISDIR(inode->i_mode)) { |
1833 | struct dentry *alias; | 1821 | struct dentry *alias; |
1834 | 1822 | ||
1835 | /* Does an aliased dentry already exist? */ | 1823 | /* Does an aliased dentry already exist? */ |
1836 | alias = __d_find_alias(inode, 0); | 1824 | alias = __d_find_alias(inode, 0); |
1837 | if (alias) { | 1825 | if (alias) { |
1838 | actual = alias; | 1826 | actual = alias; |
1839 | /* Is this an anonymous mountpoint that we could splice | 1827 | /* Is this an anonymous mountpoint that we could splice |
1840 | * into our tree? */ | 1828 | * into our tree? */ |
1841 | if (IS_ROOT(alias)) { | 1829 | if (IS_ROOT(alias)) { |
1842 | spin_lock(&alias->d_lock); | 1830 | spin_lock(&alias->d_lock); |
1843 | __d_materialise_dentry(dentry, alias); | 1831 | __d_materialise_dentry(dentry, alias); |
1844 | __d_drop(alias); | 1832 | __d_drop(alias); |
1845 | goto found; | 1833 | goto found; |
1846 | } | 1834 | } |
1847 | /* Nope, but we must(!) avoid directory aliasing */ | 1835 | /* Nope, but we must(!) avoid directory aliasing */ |
1848 | actual = __d_unalias(dentry, alias); | 1836 | actual = __d_unalias(dentry, alias); |
1849 | if (IS_ERR(actual)) | 1837 | if (IS_ERR(actual)) |
1850 | dput(alias); | 1838 | dput(alias); |
1851 | goto out_nolock; | 1839 | goto out_nolock; |
1852 | } | 1840 | } |
1853 | } | 1841 | } |
1854 | 1842 | ||
1855 | /* Add a unique reference */ | 1843 | /* Add a unique reference */ |
1856 | actual = __d_instantiate_unique(dentry, inode); | 1844 | actual = __d_instantiate_unique(dentry, inode); |
1857 | if (!actual) | 1845 | if (!actual) |
1858 | actual = dentry; | 1846 | actual = dentry; |
1859 | else if (unlikely(!d_unhashed(actual))) | 1847 | else if (unlikely(!d_unhashed(actual))) |
1860 | goto shouldnt_be_hashed; | 1848 | goto shouldnt_be_hashed; |
1861 | 1849 | ||
1862 | found_lock: | 1850 | found_lock: |
1863 | spin_lock(&actual->d_lock); | 1851 | spin_lock(&actual->d_lock); |
1864 | found: | 1852 | found: |
1865 | _d_rehash(actual); | 1853 | _d_rehash(actual); |
1866 | spin_unlock(&actual->d_lock); | 1854 | spin_unlock(&actual->d_lock); |
1867 | spin_unlock(&dcache_lock); | 1855 | spin_unlock(&dcache_lock); |
1868 | out_nolock: | 1856 | out_nolock: |
1869 | if (actual == dentry) { | 1857 | if (actual == dentry) { |
1870 | security_d_instantiate(dentry, inode); | 1858 | security_d_instantiate(dentry, inode); |
1871 | return NULL; | 1859 | return NULL; |
1872 | } | 1860 | } |
1873 | 1861 | ||
1874 | iput(inode); | 1862 | iput(inode); |
1875 | return actual; | 1863 | return actual; |
1876 | 1864 | ||
1877 | shouldnt_be_hashed: | 1865 | shouldnt_be_hashed: |
1878 | spin_unlock(&dcache_lock); | 1866 | spin_unlock(&dcache_lock); |
1879 | BUG(); | 1867 | BUG(); |
1880 | } | 1868 | } |
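Illustrative only: a network or exportable filesystem's ->lookup() typically ends with d_materialise_unique() so that a disconnected directory alias (for instance one created from a file handle) is spliced in rather than duplicated. example_fs_lookup() and example_fs_get_inode() are hypothetical names for the filesystem's own lookup machinery.

static struct dentry *example_fs_lookup(struct inode *dir,
					struct dentry *dentry,
					struct nameidata *nd)
{
	struct inode *inode;

	/* May return NULL for a negative entry or ERR_PTR() on failure. */
	inode = example_fs_get_inode(dir, &dentry->d_name);
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	/*
	 * Returns NULL if "dentry" itself was bound, or the spliced-in
	 * alias (with a reference held) that the caller must use instead.
	 */
	return d_materialise_unique(dentry, inode);
}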
1881 | 1869 | ||
1882 | static int prepend(char **buffer, int *buflen, const char *str, int namelen) | 1870 | static int prepend(char **buffer, int *buflen, const char *str, int namelen) |
1883 | { | 1871 | { |
1884 | *buflen -= namelen; | 1872 | *buflen -= namelen; |
1885 | if (*buflen < 0) | 1873 | if (*buflen < 0) |
1886 | return -ENAMETOOLONG; | 1874 | return -ENAMETOOLONG; |
1887 | *buffer -= namelen; | 1875 | *buffer -= namelen; |
1888 | memcpy(*buffer, str, namelen); | 1876 | memcpy(*buffer, str, namelen); |
1889 | return 0; | 1877 | return 0; |
1890 | } | 1878 | } |
1891 | 1879 | ||
1892 | static int prepend_name(char **buffer, int *buflen, struct qstr *name) | 1880 | static int prepend_name(char **buffer, int *buflen, struct qstr *name) |
1893 | { | 1881 | { |
1894 | return prepend(buffer, buflen, name->name, name->len); | 1882 | return prepend(buffer, buflen, name->name, name->len); |
1895 | } | 1883 | } |
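As a worked example of the buffer convention used by the path builders below (both helpers are static to this file, so this is only a walkthrough): the name is written right-to-left, with *buffer and *buflen both moving backwards.

	char page[16];
	char *end = page + sizeof(page);
	int buflen = sizeof(page);

	prepend(&end, &buflen, "\0", 1);	/* end = page+15, buflen = 15 */
	prepend(&end, &buflen, "bar", 3);	/* end = page+12, buflen = 12 */
	prepend(&end, &buflen, "/", 1);		/* end = page+11, buflen = 11 */
	prepend(&end, &buflen, "foo", 3);	/* end = page+8,  buflen = 8  */
	prepend(&end, &buflen, "/", 1);		/* end = page+7,  buflen = 7  */
	/* end now points at "/foo/bar", NUL-terminated at page[15];
	 * once *buflen would go negative, -ENAMETOOLONG is returned. */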
1896 | 1884 | ||
1897 | /** | 1885 | /** |
1898 | * __d_path - return the path of a dentry | 1886 | * __d_path - return the path of a dentry |
1899 | * @path: the dentry/vfsmount to report | 1887 | * @path: the dentry/vfsmount to report |
1900 | * @root: root vfsmnt/dentry (may be modified by this function) | 1888 | * @root: root vfsmnt/dentry (may be modified by this function) |
1901 | * @buffer: buffer to return value in | 1889 | * @buffer: buffer to return value in |
1902 | * @buflen: buffer length | 1890 | * @buflen: buffer length |
1903 | * | 1891 | * |
1904 | * Convert a dentry into an ASCII path name. If the entry has been deleted | 1892 | * Convert a dentry into an ASCII path name. If the entry has been deleted |
1905 | * the string " (deleted)" is appended. Note that this is ambiguous. | 1893 | * the string " (deleted)" is appended. Note that this is ambiguous. |
1906 | * | 1894 | * |
1907 | * Returns a pointer into the buffer or an error code if the | 1895 | * Returns a pointer into the buffer or an error code if the |
1908 | * path was too long. | 1896 | * path was too long. |
1909 | * | 1897 | * |
1910 | * "buflen" should be positive. Caller holds the dcache_lock. | 1898 | * "buflen" should be positive. Caller holds the dcache_lock. |
1911 | * | 1899 | * |
1912 | * If path is not reachable from the supplied root, then the value of | 1900 | * If path is not reachable from the supplied root, then the value of |
1913 | * root is changed (without modifying refcounts). | 1901 | * root is changed (without modifying refcounts). |
1914 | */ | 1902 | */ |
1915 | char *__d_path(const struct path *path, struct path *root, | 1903 | char *__d_path(const struct path *path, struct path *root, |
1916 | char *buffer, int buflen) | 1904 | char *buffer, int buflen) |
1917 | { | 1905 | { |
1918 | struct dentry *dentry = path->dentry; | 1906 | struct dentry *dentry = path->dentry; |
1919 | struct vfsmount *vfsmnt = path->mnt; | 1907 | struct vfsmount *vfsmnt = path->mnt; |
1920 | char *end = buffer + buflen; | 1908 | char *end = buffer + buflen; |
1921 | char *retval; | 1909 | char *retval; |
1922 | 1910 | ||
1923 | spin_lock(&vfsmount_lock); | 1911 | spin_lock(&vfsmount_lock); |
1924 | prepend(&end, &buflen, "\0", 1); | 1912 | prepend(&end, &buflen, "\0", 1); |
1925 | if (!IS_ROOT(dentry) && d_unhashed(dentry) && | 1913 | if (!IS_ROOT(dentry) && d_unhashed(dentry) && |
1926 | (prepend(&end, &buflen, " (deleted)", 10) != 0)) | 1914 | (prepend(&end, &buflen, " (deleted)", 10) != 0)) |
1927 | goto Elong; | 1915 | goto Elong; |
1928 | 1916 | ||
1929 | if (buflen < 1) | 1917 | if (buflen < 1) |
1930 | goto Elong; | 1918 | goto Elong; |
1931 | /* Get '/' right */ | 1919 | /* Get '/' right */ |
1932 | retval = end-1; | 1920 | retval = end-1; |
1933 | *retval = '/'; | 1921 | *retval = '/'; |
1934 | 1922 | ||
1935 | for (;;) { | 1923 | for (;;) { |
1936 | struct dentry * parent; | 1924 | struct dentry * parent; |
1937 | 1925 | ||
1938 | if (dentry == root->dentry && vfsmnt == root->mnt) | 1926 | if (dentry == root->dentry && vfsmnt == root->mnt) |
1939 | break; | 1927 | break; |
1940 | if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) { | 1928 | if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) { |
1941 | /* Global root? */ | 1929 | /* Global root? */ |
1942 | if (vfsmnt->mnt_parent == vfsmnt) { | 1930 | if (vfsmnt->mnt_parent == vfsmnt) { |
1943 | goto global_root; | 1931 | goto global_root; |
1944 | } | 1932 | } |
1945 | dentry = vfsmnt->mnt_mountpoint; | 1933 | dentry = vfsmnt->mnt_mountpoint; |
1946 | vfsmnt = vfsmnt->mnt_parent; | 1934 | vfsmnt = vfsmnt->mnt_parent; |
1947 | continue; | 1935 | continue; |
1948 | } | 1936 | } |
1949 | parent = dentry->d_parent; | 1937 | parent = dentry->d_parent; |
1950 | prefetch(parent); | 1938 | prefetch(parent); |
1951 | if ((prepend_name(&end, &buflen, &dentry->d_name) != 0) || | 1939 | if ((prepend_name(&end, &buflen, &dentry->d_name) != 0) || |
1952 | (prepend(&end, &buflen, "/", 1) != 0)) | 1940 | (prepend(&end, &buflen, "/", 1) != 0)) |
1953 | goto Elong; | 1941 | goto Elong; |
1954 | retval = end; | 1942 | retval = end; |
1955 | dentry = parent; | 1943 | dentry = parent; |
1956 | } | 1944 | } |
1957 | 1945 | ||
1958 | out: | 1946 | out: |
1959 | spin_unlock(&vfsmount_lock); | 1947 | spin_unlock(&vfsmount_lock); |
1960 | return retval; | 1948 | return retval; |
1961 | 1949 | ||
1962 | global_root: | 1950 | global_root: |
1963 | retval += 1; /* hit the slash */ | 1951 | retval += 1; /* hit the slash */ |
1964 | if (prepend_name(&retval, &buflen, &dentry->d_name) != 0) | 1952 | if (prepend_name(&retval, &buflen, &dentry->d_name) != 0) |
1965 | goto Elong; | 1953 | goto Elong; |
1966 | root->mnt = vfsmnt; | 1954 | root->mnt = vfsmnt; |
1967 | root->dentry = dentry; | 1955 | root->dentry = dentry; |
1968 | goto out; | 1956 | goto out; |
1969 | 1957 | ||
1970 | Elong: | 1958 | Elong: |
1971 | retval = ERR_PTR(-ENAMETOOLONG); | 1959 | retval = ERR_PTR(-ENAMETOOLONG); |
1972 | goto out; | 1960 | goto out; |
1973 | } | 1961 | } |
1974 | 1962 | ||
1975 | /** | 1963 | /** |
1976 | * d_path - return the path of a dentry | 1964 | * d_path - return the path of a dentry |
1977 | * @path: path to report | 1965 | * @path: path to report |
1978 | * @buf: buffer to return value in | 1966 | * @buf: buffer to return value in |
1979 | * @buflen: buffer length | 1967 | * @buflen: buffer length |
1980 | * | 1968 | * |
1981 | * Convert a dentry into an ASCII path name. If the entry has been deleted | 1969 | * Convert a dentry into an ASCII path name. If the entry has been deleted |
1982 | * the string " (deleted)" is appended. Note that this is ambiguous. | 1970 | * the string " (deleted)" is appended. Note that this is ambiguous. |
1983 | * | 1971 | * |
1984 | * Returns a pointer into the buffer or an error code if the path was | 1972 | * Returns a pointer into the buffer or an error code if the path was |
1985 | * too long. Note: callers must use the returned pointer, not the passed-in | 1973 | * too long. Note: callers must use the returned pointer, not the passed-in |
1986 | * buffer, when using the name! The implementation often starts at an offset | 1974 | * buffer, when using the name! The implementation often starts at an offset |
1987 | * into the buffer, and may leave 0 bytes at the start. | 1975 | * into the buffer, and may leave 0 bytes at the start. |
1988 | * | 1976 | * |
1989 | * "buflen" should be positive. | 1977 | * "buflen" should be positive. |
1990 | */ | 1978 | */ |
1991 | char *d_path(const struct path *path, char *buf, int buflen) | 1979 | char *d_path(const struct path *path, char *buf, int buflen) |
1992 | { | 1980 | { |
1993 | char *res; | 1981 | char *res; |
1994 | struct path root; | 1982 | struct path root; |
1995 | struct path tmp; | 1983 | struct path tmp; |
1996 | 1984 | ||
1997 | /* | 1985 | /* |
1998 | * We have various synthetic filesystems that never get mounted. On | 1986 | * We have various synthetic filesystems that never get mounted. On |
1999 | * these filesystems dentries are never used for lookup purposes, and | 1987 | * these filesystems dentries are never used for lookup purposes, and |
2000 | * thus don't need to be hashed. They also don't need a name until a | 1988 | * thus don't need to be hashed. They also don't need a name until a |
2001 | * user wants to identify the object in /proc/pid/fd/. The little hack | 1989 | * user wants to identify the object in /proc/pid/fd/. The little hack |
2002 | * below allows us to generate a name for these objects on demand: | 1990 | * below allows us to generate a name for these objects on demand: |
2003 | */ | 1991 | */ |
2004 | if (path->dentry->d_op && path->dentry->d_op->d_dname) | 1992 | if (path->dentry->d_op && path->dentry->d_op->d_dname) |
2005 | return path->dentry->d_op->d_dname(path->dentry, buf, buflen); | 1993 | return path->dentry->d_op->d_dname(path->dentry, buf, buflen); |
2006 | 1994 | ||
2007 | read_lock(¤t->fs->lock); | 1995 | read_lock(¤t->fs->lock); |
2008 | root = current->fs->root; | 1996 | root = current->fs->root; |
2009 | path_get(&root); | 1997 | path_get(&root); |
2010 | read_unlock(¤t->fs->lock); | 1998 | read_unlock(¤t->fs->lock); |
2011 | spin_lock(&dcache_lock); | 1999 | spin_lock(&dcache_lock); |
2012 | tmp = root; | 2000 | tmp = root; |
2013 | res = __d_path(path, &tmp, buf, buflen); | 2001 | res = __d_path(path, &tmp, buf, buflen); |
2014 | spin_unlock(&dcache_lock); | 2002 | spin_unlock(&dcache_lock); |
2015 | path_put(&root); | 2003 | path_put(&root); |
2016 | return res; | 2004 | return res; |
2017 | } | 2005 | } |
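A minimal usage sketch (assuming a caller that has a struct file; not part of this diff): allocate a page, let d_path() build the name from the end of it, and always consume the returned pointer rather than the start of the buffer.

static void example_print_path(struct file *filp)
{
	char *page = (char *)__get_free_page(GFP_KERNEL);
	char *name;

	if (!page)
		return;

	name = d_path(&filp->f_path, page, PAGE_SIZE);
	if (!IS_ERR(name))
		printk(KERN_DEBUG "example: %s\n", name);

	free_page((unsigned long)page);
}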
2018 | 2006 | ||
2019 | /* | 2007 | /* |
2020 | * Helper function for dentry_operations.d_dname() members | 2008 | * Helper function for dentry_operations.d_dname() members |
2021 | */ | 2009 | */ |
2022 | char *dynamic_dname(struct dentry *dentry, char *buffer, int buflen, | 2010 | char *dynamic_dname(struct dentry *dentry, char *buffer, int buflen, |
2023 | const char *fmt, ...) | 2011 | const char *fmt, ...) |
2024 | { | 2012 | { |
2025 | va_list args; | 2013 | va_list args; |
2026 | char temp[64]; | 2014 | char temp[64]; |
2027 | int sz; | 2015 | int sz; |
2028 | 2016 | ||
2029 | va_start(args, fmt); | 2017 | va_start(args, fmt); |
2030 | sz = vsnprintf(temp, sizeof(temp), fmt, args) + 1; | 2018 | sz = vsnprintf(temp, sizeof(temp), fmt, args) + 1; |
2031 | va_end(args); | 2019 | va_end(args); |
2032 | 2020 | ||
2033 | if (sz > sizeof(temp) || sz > buflen) | 2021 | if (sz > sizeof(temp) || sz > buflen) |
2034 | return ERR_PTR(-ENAMETOOLONG); | 2022 | return ERR_PTR(-ENAMETOOLONG); |
2035 | 2023 | ||
2036 | buffer += buflen - sz; | 2024 | buffer += buflen - sz; |
2037 | return memcpy(buffer, temp, sz); | 2025 | return memcpy(buffer, temp, sz); |
2038 | } | 2026 | } |
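Illustrative ->d_dname() method for a synthetic filesystem, in the same spirit as the pipe and socket name generators; the "examplefs:[%lu]" format string is made up.

static char *example_dname(struct dentry *dentry, char *buffer, int buflen)
{
	return dynamic_dname(dentry, buffer, buflen, "examplefs:[%lu]",
			     dentry->d_inode->i_ino);
}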
2039 | 2027 | ||
2040 | /* | 2028 | /* |
2041 | * Write full pathname from the root of the filesystem into the buffer. | 2029 | * Write full pathname from the root of the filesystem into the buffer. |
2042 | */ | 2030 | */ |
2043 | char *dentry_path(struct dentry *dentry, char *buf, int buflen) | 2031 | char *dentry_path(struct dentry *dentry, char *buf, int buflen) |
2044 | { | 2032 | { |
2045 | char *end = buf + buflen; | 2033 | char *end = buf + buflen; |
2046 | char *retval; | 2034 | char *retval; |
2047 | 2035 | ||
2048 | spin_lock(&dcache_lock); | 2036 | spin_lock(&dcache_lock); |
2049 | prepend(&end, &buflen, "\0", 1); | 2037 | prepend(&end, &buflen, "\0", 1); |
2050 | if (!IS_ROOT(dentry) && d_unhashed(dentry) && | 2038 | if (!IS_ROOT(dentry) && d_unhashed(dentry) && |
2051 | (prepend(&end, &buflen, "//deleted", 9) != 0)) | 2039 | (prepend(&end, &buflen, "//deleted", 9) != 0)) |
2052 | goto Elong; | 2040 | goto Elong; |
2053 | if (buflen < 1) | 2041 | if (buflen < 1) |
2054 | goto Elong; | 2042 | goto Elong; |
2055 | /* Get '/' right */ | 2043 | /* Get '/' right */ |
2056 | retval = end-1; | 2044 | retval = end-1; |
2057 | *retval = '/'; | 2045 | *retval = '/'; |
2058 | 2046 | ||
2059 | while (!IS_ROOT(dentry)) { | 2047 | while (!IS_ROOT(dentry)) { |
2060 | struct dentry *parent = dentry->d_parent; | 2048 | struct dentry *parent = dentry->d_parent; |
2061 | 2049 | ||
2062 | prefetch(parent); | 2050 | prefetch(parent); |
2063 | if ((prepend_name(&end, &buflen, &dentry->d_name) != 0) || | 2051 | if ((prepend_name(&end, &buflen, &dentry->d_name) != 0) || |
2064 | (prepend(&end, &buflen, "/", 1) != 0)) | 2052 | (prepend(&end, &buflen, "/", 1) != 0)) |
2065 | goto Elong; | 2053 | goto Elong; |
2066 | 2054 | ||
2067 | retval = end; | 2055 | retval = end; |
2068 | dentry = parent; | 2056 | dentry = parent; |
2069 | } | 2057 | } |
2070 | spin_unlock(&dcache_lock); | 2058 | spin_unlock(&dcache_lock); |
2071 | return retval; | 2059 | return retval; |
2072 | Elong: | 2060 | Elong: |
2073 | spin_unlock(&dcache_lock); | 2061 | spin_unlock(&dcache_lock); |
2074 | return ERR_PTR(-ENAMETOOLONG); | 2062 | return ERR_PTR(-ENAMETOOLONG); |
2075 | } | 2063 | } |
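Sketch only: unlike d_path(), dentry_path() ignores vfsmounts and walks d_parent up to the filesystem's own root, so a file reached as /mnt/usb/dir/file is reported as /dir/file. The helper name below is hypothetical.

static void example_print_sb_relative_path(struct dentry *dentry)
{
	char *page = (char *)__get_free_page(GFP_KERNEL);
	char *path;

	if (!page)
		return;

	path = dentry_path(dentry, page, PAGE_SIZE);
	if (!IS_ERR(path))
		printk(KERN_DEBUG "example: %s\n", path);

	free_page((unsigned long)page);
}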
2076 | 2064 | ||
2077 | /* | 2065 | /* |
2078 | * NOTE! The user-level library version returns a | 2066 | * NOTE! The user-level library version returns a |
2079 | * character pointer. The kernel system call just | 2067 | * character pointer. The kernel system call just |
2080 | * returns the length of the buffer filled (which | 2068 | * returns the length of the buffer filled (which |
2081 | * includes the ending '\0' character), or a negative | 2069 | * includes the ending '\0' character), or a negative |
2082 | * error value. So libc would do something like | 2070 | * error value. So libc would do something like |
2083 | * | 2071 | * |
2084 | * char *getcwd(char * buf, size_t size) | 2072 | * char *getcwd(char * buf, size_t size) |
2085 | * { | 2073 | * { |
2086 | * int retval; | 2074 | * int retval; |
2087 | * | 2075 | * |
2088 | * retval = sys_getcwd(buf, size); | 2076 | * retval = sys_getcwd(buf, size); |
2089 | * if (retval >= 0) | 2077 | * if (retval >= 0) |
2090 | * return buf; | 2078 | * return buf; |
2091 | * errno = -retval; | 2079 | * errno = -retval; |
2092 | * return NULL; | 2080 | * return NULL; |
2093 | * } | 2081 | * } |
2094 | */ | 2082 | */ |
2095 | SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size) | 2083 | SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size) |
2096 | { | 2084 | { |
2097 | int error; | 2085 | int error; |
2098 | struct path pwd, root; | 2086 | struct path pwd, root; |
2099 | char *page = (char *) __get_free_page(GFP_USER); | 2087 | char *page = (char *) __get_free_page(GFP_USER); |
2100 | 2088 | ||
2101 | if (!page) | 2089 | if (!page) |
2102 | return -ENOMEM; | 2090 | return -ENOMEM; |
2103 | 2091 | ||
2104 | read_lock(¤t->fs->lock); | 2092 | read_lock(¤t->fs->lock); |
2105 | pwd = current->fs->pwd; | 2093 | pwd = current->fs->pwd; |
2106 | path_get(&pwd); | 2094 | path_get(&pwd); |
2107 | root = current->fs->root; | 2095 | root = current->fs->root; |
2108 | path_get(&root); | 2096 | path_get(&root); |
2109 | read_unlock(¤t->fs->lock); | 2097 | read_unlock(¤t->fs->lock); |
2110 | 2098 | ||
2111 | error = -ENOENT; | 2099 | error = -ENOENT; |
2112 | /* Has the current directory been unlinked? */ | 2100 | /* Has the current directory been unlinked? */ |
2113 | spin_lock(&dcache_lock); | 2101 | spin_lock(&dcache_lock); |
2114 | if (IS_ROOT(pwd.dentry) || !d_unhashed(pwd.dentry)) { | 2102 | if (IS_ROOT(pwd.dentry) || !d_unhashed(pwd.dentry)) { |
2115 | unsigned long len; | 2103 | unsigned long len; |
2116 | struct path tmp = root; | 2104 | struct path tmp = root; |
2117 | char * cwd; | 2105 | char * cwd; |
2118 | 2106 | ||
2119 | cwd = __d_path(&pwd, &tmp, page, PAGE_SIZE); | 2107 | cwd = __d_path(&pwd, &tmp, page, PAGE_SIZE); |
2120 | spin_unlock(&dcache_lock); | 2108 | spin_unlock(&dcache_lock); |
2121 | 2109 | ||
2122 | error = PTR_ERR(cwd); | 2110 | error = PTR_ERR(cwd); |
2123 | if (IS_ERR(cwd)) | 2111 | if (IS_ERR(cwd)) |
2124 | goto out; | 2112 | goto out; |
2125 | 2113 | ||
2126 | error = -ERANGE; | 2114 | error = -ERANGE; |
2127 | len = PAGE_SIZE + page - cwd; | 2115 | len = PAGE_SIZE + page - cwd; |
2128 | if (len <= size) { | 2116 | if (len <= size) { |
2129 | error = len; | 2117 | error = len; |
2130 | if (copy_to_user(buf, cwd, len)) | 2118 | if (copy_to_user(buf, cwd, len)) |
2131 | error = -EFAULT; | 2119 | error = -EFAULT; |
2132 | } | 2120 | } |
2133 | } else | 2121 | } else |
2134 | spin_unlock(&dcache_lock); | 2122 | spin_unlock(&dcache_lock); |
2135 | 2123 | ||
2136 | out: | 2124 | out: |
2137 | path_put(&pwd); | 2125 | path_put(&pwd); |
2138 | path_put(&root); | 2126 | path_put(&root); |
2139 | free_page((unsigned long) page); | 2127 | free_page((unsigned long) page); |
2140 | return error; | 2128 | return error; |
2141 | } | 2129 | } |
2142 | 2130 | ||
2143 | /* | 2131 | /* |
2144 | * Test whether new_dentry is a subdirectory of old_dentry. | 2132 | * Test whether new_dentry is a subdirectory of old_dentry. |
2145 | * | 2133 | * |
2146 | * Trivially implemented using the dcache structure | 2134 | * Trivially implemented using the dcache structure |
2147 | */ | 2135 | */ |
2148 | 2136 | ||
2149 | /** | 2137 | /** |
2150 | * is_subdir - is new dentry a subdirectory of old_dentry | 2138 | * is_subdir - is new dentry a subdirectory of old_dentry |
2151 | * @new_dentry: new dentry | 2139 | * @new_dentry: new dentry |
2152 | * @old_dentry: old dentry | 2140 | * @old_dentry: old dentry |
2153 | * | 2141 | * |
2154 | * Returns 1 if new_dentry is a subdirectory of old_dentry (at any depth). | 2142 | * Returns 1 if new_dentry is a subdirectory of old_dentry (at any depth). |
2155 | * Returns 0 otherwise. | 2143 | * Returns 0 otherwise. |
2156 | * Caller must ensure that "new_dentry" is pinned before calling is_subdir() | 2144 | * Caller must ensure that "new_dentry" is pinned before calling is_subdir() |
2157 | */ | 2145 | */ |
2158 | 2146 | ||
2159 | int is_subdir(struct dentry *new_dentry, struct dentry *old_dentry) | 2147 | int is_subdir(struct dentry *new_dentry, struct dentry *old_dentry) |
2160 | { | 2148 | { |
2161 | int result; | 2149 | int result; |
2162 | unsigned long seq; | 2150 | unsigned long seq; |
2163 | 2151 | ||
2164 | /* FIXME: This is old behavior, needed? Please check callers. */ | 2152 | /* FIXME: This is old behavior, needed? Please check callers. */ |
2165 | if (new_dentry == old_dentry) | 2153 | if (new_dentry == old_dentry) |
2166 | return 1; | 2154 | return 1; |
2167 | 2155 | ||
2168 | /* | 2156 | /* |
2169 | * Need rcu_read_lock() to protect against d_parent changing under | 2157 | * Need rcu_read_lock() to protect against d_parent changing under |
2170 | * us due to a concurrent d_move() | 2158 | * us due to a concurrent d_move() |
2171 | */ | 2159 | */ |
2172 | rcu_read_lock(); | 2160 | rcu_read_lock(); |
2173 | do { | 2161 | do { |
2174 | /* for restarting inner loop in case of seq retry */ | 2162 | /* for restarting inner loop in case of seq retry */ |
2175 | seq = read_seqbegin(&rename_lock); | 2163 | seq = read_seqbegin(&rename_lock); |
2176 | if (d_ancestor(old_dentry, new_dentry)) | 2164 | if (d_ancestor(old_dentry, new_dentry)) |
2177 | result = 1; | 2165 | result = 1; |
2178 | else | 2166 | else |
2179 | result = 0; | 2167 | result = 0; |
2180 | } while (read_seqretry(&rename_lock, seq)); | 2168 | } while (read_seqretry(&rename_lock, seq)); |
2181 | rcu_read_unlock(); | 2169 | rcu_read_unlock(); |
2182 | 2170 | ||
2183 | return result; | 2171 | return result; |
2184 | } | 2172 | } |
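Illustrative sketch: a loop check before re-parenting, refusing to move a directory underneath one of its own descendants. The function and parameter names are hypothetical.

static int example_check_reparent(struct dentry *new_parent,
				  struct dentry *victim)
{
	/* is_subdir(a, b) == 1 means "a" lives somewhere below "b". */
	if (is_subdir(new_parent, victim))
		return -ELOOP;
	return 0;
}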
2185 | 2173 | ||
2186 | void d_genocide(struct dentry *root) | 2174 | void d_genocide(struct dentry *root) |
2187 | { | 2175 | { |
2188 | struct dentry *this_parent = root; | 2176 | struct dentry *this_parent = root; |
2189 | struct list_head *next; | 2177 | struct list_head *next; |
2190 | 2178 | ||
2191 | spin_lock(&dcache_lock); | 2179 | spin_lock(&dcache_lock); |
2192 | repeat: | 2180 | repeat: |
2193 | next = this_parent->d_subdirs.next; | 2181 | next = this_parent->d_subdirs.next; |
2194 | resume: | 2182 | resume: |
2195 | while (next != &this_parent->d_subdirs) { | 2183 | while (next != &this_parent->d_subdirs) { |
2196 | struct list_head *tmp = next; | 2184 | struct list_head *tmp = next; |
2197 | struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child); | 2185 | struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child); |
2198 | next = tmp->next; | 2186 | next = tmp->next; |
2199 | if (d_unhashed(dentry)||!dentry->d_inode) | 2187 | if (d_unhashed(dentry)||!dentry->d_inode) |
2200 | continue; | 2188 | continue; |
2201 | if (!list_empty(&dentry->d_subdirs)) { | 2189 | if (!list_empty(&dentry->d_subdirs)) { |
2202 | this_parent = dentry; | 2190 | this_parent = dentry; |
2203 | goto repeat; | 2191 | goto repeat; |
2204 | } | 2192 | } |
2205 | atomic_dec(&dentry->d_count); | 2193 | atomic_dec(&dentry->d_count); |
2206 | } | 2194 | } |
2207 | if (this_parent != root) { | 2195 | if (this_parent != root) { |
2208 | next = this_parent->d_u.d_child.next; | 2196 | next = this_parent->d_u.d_child.next; |
2209 | atomic_dec(&this_parent->d_count); | 2197 | atomic_dec(&this_parent->d_count); |
2210 | this_parent = this_parent->d_parent; | 2198 | this_parent = this_parent->d_parent; |
2211 | goto resume; | 2199 | goto resume; |
2212 | } | 2200 | } |
2213 | spin_unlock(&dcache_lock); | 2201 | spin_unlock(&dcache_lock); |
2214 | } | 2202 | } |
2215 | 2203 | ||
2216 | /** | 2204 | /** |
2217 | * find_inode_number - check for dentry with name | 2205 | * find_inode_number - check for dentry with name |
2218 | * @dir: directory to check | 2206 | * @dir: directory to check |
2219 | * @name: Name to find. | 2207 | * @name: Name to find. |
2220 | * | 2208 | * |
2221 | * Check whether a dentry already exists for the given name, | 2209 | * Check whether a dentry already exists for the given name, |
2222 | * and return the inode number if it has an inode. Otherwise | 2210 | * and return the inode number if it has an inode. Otherwise |
2223 | * 0 is returned. | 2211 | * 0 is returned. |
2224 | * | 2212 | * |
2225 | * This routine is used to post-process directory listings for | 2213 | * This routine is used to post-process directory listings for |
2226 | * filesystems using synthetic inode numbers, and is necessary | 2214 | * filesystems using synthetic inode numbers, and is necessary |
2227 | * to keep getcwd() working. | 2215 | * to keep getcwd() working. |
2228 | */ | 2216 | */ |
2229 | 2217 | ||
2230 | ino_t find_inode_number(struct dentry *dir, struct qstr *name) | 2218 | ino_t find_inode_number(struct dentry *dir, struct qstr *name) |
2231 | { | 2219 | { |
2232 | struct dentry * dentry; | 2220 | struct dentry * dentry; |
2233 | ino_t ino = 0; | 2221 | ino_t ino = 0; |
2234 | 2222 | ||
2235 | dentry = d_hash_and_lookup(dir, name); | 2223 | dentry = d_hash_and_lookup(dir, name); |
2236 | if (dentry) { | 2224 | if (dentry) { |
2237 | if (dentry->d_inode) | 2225 | if (dentry->d_inode) |
2238 | ino = dentry->d_inode->i_ino; | 2226 | ino = dentry->d_inode->i_ino; |
2239 | dput(dentry); | 2227 | dput(dentry); |
2240 | } | 2228 | } |
2241 | return ino; | 2229 | return ino; |
2242 | } | 2230 | } |
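Rough sketch of the intended use in a ->readdir() of a filesystem with synthetic inode numbers: if a dentry already exists for the name, report its inode number so readdir() and getcwd() agree. The fallback value here is purely hypothetical.

static ino_t example_dir_entry_ino(struct dentry *dir, struct qstr *name,
				   loff_t pos)
{
	ino_t ino = find_inode_number(dir, name);

	if (!ino)
		ino = (ino_t)pos + 1;	/* hypothetical synthetic fallback */
	return ino;
}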
2243 | 2231 | ||
2244 | static __initdata unsigned long dhash_entries; | 2232 | static __initdata unsigned long dhash_entries; |
2245 | static int __init set_dhash_entries(char *str) | 2233 | static int __init set_dhash_entries(char *str) |
2246 | { | 2234 | { |
2247 | if (!str) | 2235 | if (!str) |
2248 | return 0; | 2236 | return 0; |
2249 | dhash_entries = simple_strtoul(str, &str, 0); | 2237 | dhash_entries = simple_strtoul(str, &str, 0); |
2250 | return 1; | 2238 | return 1; |
2251 | } | 2239 | } |
2252 | __setup("dhash_entries=", set_dhash_entries); | 2240 | __setup("dhash_entries=", set_dhash_entries); |
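For illustration, the boot parameter registered above overrides the automatically sized dentry hash table; the value shown is arbitrary, e.g. on the kernel command line:

	dhash_entries=1048576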
2253 | 2241 | ||
2254 | static void __init dcache_init_early(void) | 2242 | static void __init dcache_init_early(void) |
2255 | { | 2243 | { |
2256 | int loop; | 2244 | int loop; |
2257 | 2245 | ||
2258 | /* If hashes are distributed across NUMA nodes, defer | 2246 | /* If hashes are distributed across NUMA nodes, defer |
2259 | * hash allocation until vmalloc space is available. | 2247 | * hash allocation until vmalloc space is available. |
2260 | */ | 2248 | */ |
2261 | if (hashdist) | 2249 | if (hashdist) |
2262 | return; | 2250 | return; |
2263 | 2251 | ||
2264 | dentry_hashtable = | 2252 | dentry_hashtable = |
2265 | alloc_large_system_hash("Dentry cache", | 2253 | alloc_large_system_hash("Dentry cache", |
2266 | sizeof(struct hlist_head), | 2254 | sizeof(struct hlist_head), |
2267 | dhash_entries, | 2255 | dhash_entries, |
2268 | 13, | 2256 | 13, |
2269 | HASH_EARLY, | 2257 | HASH_EARLY, |
2270 | &d_hash_shift, | 2258 | &d_hash_shift, |
2271 | &d_hash_mask, | 2259 | &d_hash_mask, |
2272 | 0); | 2260 | 0); |
2273 | 2261 | ||
2274 | for (loop = 0; loop < (1 << d_hash_shift); loop++) | 2262 | for (loop = 0; loop < (1 << d_hash_shift); loop++) |
2275 | INIT_HLIST_HEAD(&dentry_hashtable[loop]); | 2263 | INIT_HLIST_HEAD(&dentry_hashtable[loop]); |
2276 | } | 2264 | } |
2277 | 2265 | ||
2278 | static void __init dcache_init(void) | 2266 | static void __init dcache_init(void) |
2279 | { | 2267 | { |
2280 | int loop; | 2268 | int loop; |
2281 | 2269 | ||
2282 | /* | 2270 | /* |
2283 | * A constructor could be added for stable state like the lists, | 2271 | * A constructor could be added for stable state like the lists, |
2284 | * but it is probably not worth it because of the cache nature | 2272 | * but it is probably not worth it because of the cache nature |
2285 | * of the dcache. | 2273 | * of the dcache. |
2286 | */ | 2274 | */ |
2287 | dentry_cache = KMEM_CACHE(dentry, | 2275 | dentry_cache = KMEM_CACHE(dentry, |
2288 | SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD); | 2276 | SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD); |
2289 | 2277 | ||
2290 | register_shrinker(&dcache_shrinker); | 2278 | register_shrinker(&dcache_shrinker); |
2291 | 2279 | ||
2292 | /* Hash may have been set up in dcache_init_early */ | 2280 | /* Hash may have been set up in dcache_init_early */ |
2293 | if (!hashdist) | 2281 | if (!hashdist) |
2294 | return; | 2282 | return; |
2295 | 2283 | ||
2296 | dentry_hashtable = | 2284 | dentry_hashtable = |
2297 | alloc_large_system_hash("Dentry cache", | 2285 | alloc_large_system_hash("Dentry cache", |
2298 | sizeof(struct hlist_head), | 2286 | sizeof(struct hlist_head), |
2299 | dhash_entries, | 2287 | dhash_entries, |
2300 | 13, | 2288 | 13, |
2301 | 0, | 2289 | 0, |
2302 | &d_hash_shift, | 2290 | &d_hash_shift, |
2303 | &d_hash_mask, | 2291 | &d_hash_mask, |
2304 | 0); | 2292 | 0); |
2305 | 2293 | ||
2306 | for (loop = 0; loop < (1 << d_hash_shift); loop++) | 2294 | for (loop = 0; loop < (1 << d_hash_shift); loop++) |
2307 | INIT_HLIST_HEAD(&dentry_hashtable[loop]); | 2295 | INIT_HLIST_HEAD(&dentry_hashtable[loop]); |
2308 | } | 2296 | } |
2309 | 2297 | ||
2310 | /* SLAB cache for __getname() consumers */ | 2298 | /* SLAB cache for __getname() consumers */ |
2311 | struct kmem_cache *names_cachep __read_mostly; | 2299 | struct kmem_cache *names_cachep __read_mostly; |
2312 | 2300 | ||
2313 | EXPORT_SYMBOL(d_genocide); | 2301 | EXPORT_SYMBOL(d_genocide); |
2314 | 2302 | ||
2315 | void __init vfs_caches_init_early(void) | 2303 | void __init vfs_caches_init_early(void) |
2316 | { | 2304 | { |
2317 | dcache_init_early(); | 2305 | dcache_init_early(); |
2318 | inode_init_early(); | 2306 | inode_init_early(); |
2319 | } | 2307 | } |
2320 | 2308 | ||
2321 | void __init vfs_caches_init(unsigned long mempages) | 2309 | void __init vfs_caches_init(unsigned long mempages) |
2322 | { | 2310 | { |
2323 | unsigned long reserve; | 2311 | unsigned long reserve; |
2324 | 2312 | ||
2325 | /* Base hash sizes on available memory, with a reserve equal to | 2313 | /* Base hash sizes on available memory, with a reserve equal to |
2326 | 150% of current kernel size */ | 2314 | 150% of current kernel size */ |
2327 | 2315 | ||
2328 | reserve = min((mempages - nr_free_pages()) * 3/2, mempages - 1); | 2316 | reserve = min((mempages - nr_free_pages()) * 3/2, mempages - 1); |
2329 | mempages -= reserve; | 2317 | mempages -= reserve; |
2330 | 2318 | ||
2331 | names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0, | 2319 | names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0, |
2332 | SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); | 2320 | SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); |
2333 | 2321 | ||
2334 | dcache_init(); | 2322 | dcache_init(); |
2335 | inode_init(); | 2323 | inode_init(); |
2336 | files_init(mempages); | 2324 | files_init(mempages); |
2337 | mnt_init(); | 2325 | mnt_init(); |
2338 | bdev_cache_init(); | 2326 | bdev_cache_init(); |
2339 | chrdev_init(); | 2327 | chrdev_init(); |
2340 | } | 2328 | } |
2341 | 2329 | ||
2342 | EXPORT_SYMBOL(d_alloc); | 2330 | EXPORT_SYMBOL(d_alloc); |
2343 | EXPORT_SYMBOL(d_alloc_root); | 2331 | EXPORT_SYMBOL(d_alloc_root); |
2344 | EXPORT_SYMBOL(d_delete); | 2332 | EXPORT_SYMBOL(d_delete); |
2345 | EXPORT_SYMBOL(d_find_alias); | 2333 | EXPORT_SYMBOL(d_find_alias); |
2346 | EXPORT_SYMBOL(d_instantiate); | 2334 | EXPORT_SYMBOL(d_instantiate); |
2347 | EXPORT_SYMBOL(d_invalidate); | 2335 | EXPORT_SYMBOL(d_invalidate); |
2348 | EXPORT_SYMBOL(d_lookup); | 2336 | EXPORT_SYMBOL(d_lookup); |
2349 | EXPORT_SYMBOL(d_move); | 2337 | EXPORT_SYMBOL(d_move); |
2350 | EXPORT_SYMBOL_GPL(d_materialise_unique); | 2338 | EXPORT_SYMBOL_GPL(d_materialise_unique); |
2351 | EXPORT_SYMBOL(d_path); | 2339 | EXPORT_SYMBOL(d_path); |
2352 | EXPORT_SYMBOL(d_prune_aliases); | 2340 | EXPORT_SYMBOL(d_prune_aliases); |