Commit 3c5119c05d624f95f4967d16b38c9624b816bdb9
1 parent: c28f7e56e9
Exists in master and in 40 other branches
dnotify: reimplement dnotify using fsnotify
Reimplement dnotify using fsnotify.

Signed-off-by: Eric Paris <eparis@redhat.com>
Acked-by: Al Viro <viro@zeniv.linux.org.uk>
Cc: Christoph Hellwig <hch@lst.de>
Showing 7 changed files with 398 additions and 183 deletions
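For context on the interface being reimplemented: dnotify is driven from userspace through the F_NOTIFY fcntl and delivers events as signals, and that ABI is unchanged by this commit. A minimal watcher, sketched from the documented fcntl interface (error handling omitted, the signal choice is arbitrary):

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <signal.h>
    #include <stdio.h>
    #include <unistd.h>

    static volatile sig_atomic_t hit_fd = -1;

    static void on_dnotify(int sig, siginfo_t *si, void *ctx)
    {
            hit_fd = si->si_fd;             /* fd of the watched directory */
    }

    int main(void)
    {
            struct sigaction sa = { .sa_sigaction = on_dnotify, .sa_flags = SA_SIGINFO };
            int fd = open(".", O_RDONLY);

            sigaction(SIGRTMIN + 1, &sa, NULL);
            fcntl(fd, F_SETSIG, SIGRTMIN + 1);
            fcntl(fd, F_NOTIFY, DN_CREATE | DN_DELETE | DN_RENAME | DN_MULTISHOT);

            for (;;) {
                    pause();
                    if (hit_fd != -1) {
                            printf("activity in directory watched via fd %d\n", (int)hit_fd);
                            hit_fd = -1;
                    }
            }
    }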
MAINTAINERS
... | ... | @@ -1802,10 +1802,10 @@ |
1802 | 1802 | F: drivers/char/digi* |
1803 | 1803 | |
1804 | 1804 | DIRECTORY NOTIFICATION (DNOTIFY) |
1805 | -P: Stephen Rothwell | |
1806 | -M: sfr@canb.auug.org.au | |
1805 | +P: Eric Paris | |
1806 | +M: eparis@parisplace.org | |
1807 | 1807 | L: linux-kernel@vger.kernel.org |
1808 | -S: Supported | |
1808 | +S: Maintained | |
1809 | 1809 | F: Documentation/filesystems/dnotify.txt |
1810 | 1810 | F: fs/notify/dnotify/ |
1811 | 1811 | F: include/linux/dnotify.h |
fs/notify/dnotify/Kconfig
fs/notify/dnotify/dnotify.c
... | ... | @@ -3,6 +3,9 @@ |
3 | 3 | * |
4 | 4 | * Copyright (C) 2000,2001,2002 Stephen Rothwell |
5 | 5 | * |
6 | + * Copyright (C) 2009 Eric Paris <Red Hat Inc> | |
7 | + * dnotify was largely rewritten to use the new fsnotify infrastructure | |
8 | + * | |
6 | 9 | * This program is free software; you can redistribute it and/or modify it |
7 | 10 | * under the terms of the GNU General Public License as published by the |
8 | 11 | * Free Software Foundation; either version 2, or (at your option) any |
... | ... | @@ -21,24 +24,178 @@ |
21 | 24 | #include <linux/spinlock.h> |
22 | 25 | #include <linux/slab.h> |
23 | 26 | #include <linux/fdtable.h> |
27 | +#include <linux/fsnotify_backend.h> | |
24 | 28 | |
25 | 29 | int dir_notify_enable __read_mostly = 1; |
26 | 30 | |
27 | -static struct kmem_cache *dn_cache __read_mostly; | |
31 | +static struct kmem_cache *dnotify_struct_cache __read_mostly; | |
32 | +static struct kmem_cache *dnotify_mark_entry_cache __read_mostly; | |
33 | +static struct fsnotify_group *dnotify_group __read_mostly; | |
34 | +static DEFINE_MUTEX(dnotify_mark_mutex); | |
28 | 35 | |
29 | -static void redo_inode_mask(struct inode *inode) | |
36 | +/* | |
37 | + * dnotify will attach one of these to each inode (i_fsnotify_mark_entries) which | |
38 | + * is being watched by dnotify. If multiple userspace applications are watching | |
39 | + * the same directory with dnotify, their information is chained in dn | |
40 | + */ | |
41 | +struct dnotify_mark_entry { | |
42 | + struct fsnotify_mark_entry fsn_entry; | |
43 | + struct dnotify_struct *dn; | |
44 | +}; | |
45 | + | |
46 | +/* | |
47 | + * When a process starts or stops watching an inode the set of events which | |
48 | + * dnotify cares about for that inode may change. This function runs the | |
49 | + * list of everything receiving dnotify events about this directory and calculates | |
50 | + * the set of all those events. After it updates what dnotify is interested in | |
51 | + * it calls the fsnotify function so it can update the set of all events relevant | |
52 | + * to this inode. | |
53 | + */ | |
54 | +static void dnotify_recalc_inode_mask(struct fsnotify_mark_entry *entry) | |
30 | 55 | { |
31 | - unsigned long new_mask; | |
56 | + __u32 new_mask, old_mask; | |
32 | 57 | struct dnotify_struct *dn; |
58 | + struct dnotify_mark_entry *dnentry = container_of(entry, | |
59 | + struct dnotify_mark_entry, | |
60 | + fsn_entry); | |
33 | 61 | |
62 | + assert_spin_locked(&entry->lock); | |
63 | + | |
64 | + old_mask = entry->mask; | |
34 | 65 | new_mask = 0; |
35 | - for (dn = inode->i_dnotify; dn != NULL; dn = dn->dn_next) | |
36 | - new_mask |= dn->dn_mask & ~DN_MULTISHOT; | |
37 | - inode->i_dnotify_mask = new_mask; | |
66 | + for (dn = dnentry->dn; dn != NULL; dn = dn->dn_next) | |
67 | + new_mask |= (dn->dn_mask & ~FS_DN_MULTISHOT); | |
68 | + entry->mask = new_mask; | |
69 | + | |
70 | + if (old_mask == new_mask) | |
71 | + return; | |
72 | + | |
73 | + if (entry->inode) | |
74 | + fsnotify_recalc_inode_mask(entry->inode); | |
38 | 75 | } |
39 | 76 | |
77 | +/* | |
78 | + * Main fsnotify call where events are delivered to dnotify. | |
79 | + * Find the dnotify mark on the relevant inode, run the list of dnotify structs | |
80 | + * on that mark and determine which of them has expressed interest in receiving | |
81 | + * events of this type. When found send the correct signal to the correct process and | |
82 | + * destroy the dnotify struct if it was not registered to receive multiple | |
83 | + * events. | |
84 | + */ | |
85 | +static int dnotify_handle_event(struct fsnotify_group *group, | |
86 | + struct fsnotify_event *event) | |
87 | +{ | |
88 | + struct fsnotify_mark_entry *entry = NULL; | |
89 | + struct dnotify_mark_entry *dnentry; | |
90 | + struct inode *to_tell; | |
91 | + struct dnotify_struct *dn; | |
92 | + struct dnotify_struct **prev; | |
93 | + struct fown_struct *fown; | |
94 | + | |
95 | + to_tell = event->to_tell; | |
96 | + | |
97 | + spin_lock(&to_tell->i_lock); | |
98 | + entry = fsnotify_find_mark_entry(group, to_tell); | |
99 | + spin_unlock(&to_tell->i_lock); | |
100 | + | |
101 | + /* unlikely since we already passed dnotify_should_send_event() */ | |
102 | + if (unlikely(!entry)) | |
103 | + return 0; | |
104 | + dnentry = container_of(entry, struct dnotify_mark_entry, fsn_entry); | |
105 | + | |
106 | + spin_lock(&entry->lock); | |
107 | + prev = &dnentry->dn; | |
108 | + while ((dn = *prev) != NULL) { | |
109 | + if ((dn->dn_mask & event->mask) == 0) { | |
110 | + prev = &dn->dn_next; | |
111 | + continue; | |
112 | + } | |
113 | + fown = &dn->dn_filp->f_owner; | |
114 | + send_sigio(fown, dn->dn_fd, POLL_MSG); | |
115 | + if (dn->dn_mask & FS_DN_MULTISHOT) | |
116 | + prev = &dn->dn_next; | |
117 | + else { | |
118 | + *prev = dn->dn_next; | |
119 | + kmem_cache_free(dnotify_struct_cache, dn); | |
120 | + dnotify_recalc_inode_mask(entry); | |
121 | + } | |
122 | + } | |
123 | + | |
124 | + spin_unlock(&entry->lock); | |
125 | + fsnotify_put_mark(entry); | |
126 | + | |
127 | + return 0; | |
128 | +} | |
129 | + | |
130 | +/* | |
131 | + * Given an inode and mask determine if dnotify would be interested in sending | |
132 | + * userspace notification for that pair. | |
133 | + */ | |
134 | +static bool dnotify_should_send_event(struct fsnotify_group *group, | |
135 | + struct inode *inode, __u32 mask) | |
136 | +{ | |
137 | + struct fsnotify_mark_entry *entry; | |
138 | + bool send; | |
139 | + | |
140 | + /* !dir_notify_enable should never get here, don't waste time checking | |
141 | + if (!dir_notify_enable) | |
142 | + return 0; */ | |
143 | + | |
144 | + /* not a dir, dnotify doesn't care */ | |
145 | + if (!S_ISDIR(inode->i_mode)) | |
146 | + return false; | |
147 | + | |
148 | + spin_lock(&inode->i_lock); | |
149 | + entry = fsnotify_find_mark_entry(group, inode); | |
150 | + spin_unlock(&inode->i_lock); | |
151 | + | |
152 | + /* no mark means no dnotify watch */ | |
153 | + if (!entry) | |
154 | + return false; | |
155 | + | |
156 | + spin_lock(&entry->lock); | |
157 | + send = (mask & entry->mask) ? true : false; | |
158 | + spin_unlock(&entry->lock); | |
159 | + fsnotify_put_mark(entry); /* matches fsnotify_find_mark_entry */ | |
160 | + | |
161 | + return send; | |
162 | +} | |
163 | + | |
164 | +static void dnotify_freeing_mark(struct fsnotify_mark_entry *entry, | |
165 | + struct fsnotify_group *group) | |
166 | +{ | |
167 | + /* dnotify doesn't care that an inode is on the way out */ | |
168 | +} | |
169 | + | |
170 | +static void dnotify_free_mark(struct fsnotify_mark_entry *entry) | |
171 | +{ | |
172 | + struct dnotify_mark_entry *dnentry = container_of(entry, | |
173 | + struct dnotify_mark_entry, | |
174 | + fsn_entry); | |
175 | + | |
176 | + BUG_ON(dnentry->dn); | |
177 | + | |
178 | + kmem_cache_free(dnotify_mark_entry_cache, dnentry); | |
179 | +} | |
180 | + | |
181 | +static struct fsnotify_ops dnotify_fsnotify_ops = { | |
182 | + .handle_event = dnotify_handle_event, | |
183 | + .should_send_event = dnotify_should_send_event, | |
184 | + .free_group_priv = NULL, | |
185 | + .freeing_mark = dnotify_freeing_mark, | |
186 | +}; | |
187 | + | |
188 | +/* | |
189 | + * Called every time a file is closed. Looks first for a dnotify mark on the | |
190 | + * inode. If one is found search all of the ->dn entries attached to that | |
191 | + * mark for one relevant to this process closing the file and remove that | |
192 | + * dnotify_struct. If that was the last dnotify_struct also remove the | |
193 | + * fsnotify_mark_entry. | |
194 | + */ | |
40 | 195 | void dnotify_flush(struct file *filp, fl_owner_t id) |
41 | 196 | { |
197 | + struct fsnotify_mark_entry *entry; | |
198 | + struct dnotify_mark_entry *dnentry; | |
42 | 199 | struct dnotify_struct *dn; |
43 | 200 | struct dnotify_struct **prev; |
44 | 201 | struct inode *inode; |
... | ... | @@ -46,145 +203,243 @@ |
46 | 203 | inode = filp->f_path.dentry->d_inode; |
47 | 204 | if (!S_ISDIR(inode->i_mode)) |
48 | 205 | return; |
206 | + | |
49 | 207 | spin_lock(&inode->i_lock); |
50 | - prev = &inode->i_dnotify; | |
208 | + entry = fsnotify_find_mark_entry(dnotify_group, inode); | |
209 | + spin_unlock(&inode->i_lock); | |
210 | + if (!entry) | |
211 | + return; | |
212 | + dnentry = container_of(entry, struct dnotify_mark_entry, fsn_entry); | |
213 | + | |
214 | + mutex_lock(&dnotify_mark_mutex); | |
215 | + | |
216 | + spin_lock(&entry->lock); | |
217 | + prev = &dnentry->dn; | |
51 | 218 | while ((dn = *prev) != NULL) { |
52 | 219 | if ((dn->dn_owner == id) && (dn->dn_filp == filp)) { |
53 | 220 | *prev = dn->dn_next; |
54 | - redo_inode_mask(inode); | |
55 | - kmem_cache_free(dn_cache, dn); | |
221 | + kmem_cache_free(dnotify_struct_cache, dn); | |
222 | + dnotify_recalc_inode_mask(entry); | |
56 | 223 | break; |
57 | 224 | } |
58 | 225 | prev = &dn->dn_next; |
59 | 226 | } |
60 | - spin_unlock(&inode->i_lock); | |
227 | + | |
228 | + spin_unlock(&entry->lock); | |
229 | + | |
230 | + /* nothing else could have found us thanks to the dnotify_mark_mutex */ | |
231 | + if (dnentry->dn == NULL) | |
232 | + fsnotify_destroy_mark_by_entry(entry); | |
233 | + | |
234 | + fsnotify_recalc_group_mask(dnotify_group); | |
235 | + | |
236 | + mutex_unlock(&dnotify_mark_mutex); | |
237 | + | |
238 | + fsnotify_put_mark(entry); | |
61 | 239 | } |
62 | 240 | |
241 | +/* this conversion is done only at watch creation */ | |
242 | +static __u32 convert_arg(unsigned long arg) | |
243 | +{ | |
244 | + __u32 new_mask = FS_EVENT_ON_CHILD; | |
245 | + | |
246 | + if (arg & DN_MULTISHOT) | |
247 | + new_mask |= FS_DN_MULTISHOT; | |
248 | + if (arg & DN_DELETE) | |
249 | + new_mask |= (FS_DELETE | FS_MOVED_FROM); | |
250 | + if (arg & DN_MODIFY) | |
251 | + new_mask |= FS_MODIFY; | |
252 | + if (arg & DN_ACCESS) | |
253 | + new_mask |= FS_ACCESS; | |
254 | + if (arg & DN_ATTRIB) | |
255 | + new_mask |= FS_ATTRIB; | |
256 | + if (arg & DN_RENAME) | |
257 | + new_mask |= FS_DN_RENAME; | |
258 | + if (arg & DN_CREATE) | |
259 | + new_mask |= (FS_CREATE | FS_MOVED_TO); | |
260 | + | |
261 | + return new_mask; | |
262 | +} | |
263 | + | |
264 | +/* | |
265 | + * If multiple processes watch the same inode with dnotify there is only one | |
266 | + * dnotify mark in inode->i_fsnotify_mark_entries but we chain a dnotify_struct | |
267 | + * onto that mark. This function either attaches the new dnotify_struct onto | |
268 | + * that list, or it |= the mask onto an existing dnotify_struct. | |
269 | + */ | |
270 | +static int attach_dn(struct dnotify_struct *dn, struct dnotify_mark_entry *dnentry, | |
271 | + fl_owner_t id, int fd, struct file *filp, __u32 mask) | |
272 | +{ | |
273 | + struct dnotify_struct *odn; | |
274 | + | |
275 | + odn = dnentry->dn; | |
276 | + while (odn != NULL) { | |
277 | + /* adding more events to existing dnotify_struct? */ | |
278 | + if ((odn->dn_owner == id) && (odn->dn_filp == filp)) { | |
279 | + odn->dn_fd = fd; | |
280 | + odn->dn_mask |= mask; | |
281 | + return -EEXIST; | |
282 | + } | |
283 | + odn = odn->dn_next; | |
284 | + } | |
285 | + | |
286 | + dn->dn_mask = mask; | |
287 | + dn->dn_fd = fd; | |
288 | + dn->dn_filp = filp; | |
289 | + dn->dn_owner = id; | |
290 | + dn->dn_next = dnentry->dn; | |
291 | + dnentry->dn = dn; | |
292 | + | |
293 | + return 0; | |
294 | +} | |
295 | + | |
296 | +/* | |
297 | + * When a process calls fcntl to attach a dnotify watch to a directory it ends | |
298 | + * up here. Allocate both a mark for fsnotify to add and a dnotify_struct to be | |
299 | + * attached to the fsnotify_mark. | |
300 | + */ | |
63 | 301 | int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg) |
64 | 302 | { |
303 | + struct dnotify_mark_entry *new_dnentry, *dnentry; | |
304 | + struct fsnotify_mark_entry *new_entry, *entry; | |
65 | 305 | struct dnotify_struct *dn; |
66 | - struct dnotify_struct *odn; | |
67 | - struct dnotify_struct **prev; | |
68 | 306 | struct inode *inode; |
69 | 307 | fl_owner_t id = current->files; |
70 | 308 | struct file *f; |
71 | - int error = 0; | |
309 | + int destroy = 0, error = 0; | |
310 | + __u32 mask; | |
72 | 311 | |
312 | + /* we use these to tell if we need to kfree */ | |
313 | + new_entry = NULL; | |
314 | + dn = NULL; | |
315 | + | |
316 | + if (!dir_notify_enable) { | |
317 | + error = -EINVAL; | |
318 | + goto out_err; | |
319 | + } | |
320 | + | |
321 | + /* a 0 mask means we are explicitly removing the watch */ | |
73 | 322 | if ((arg & ~DN_MULTISHOT) == 0) { |
74 | 323 | dnotify_flush(filp, id); |
75 | - return 0; | |
324 | + error = 0; | |
325 | + goto out_err; | |
76 | 326 | } |
77 | - if (!dir_notify_enable) | |
78 | - return -EINVAL; | |
327 | + | |
328 | + /* dnotify only works on directories */ | |
79 | 329 | inode = filp->f_path.dentry->d_inode; |
80 | - if (!S_ISDIR(inode->i_mode)) | |
81 | - return -ENOTDIR; | |
82 | - dn = kmem_cache_alloc(dn_cache, GFP_KERNEL); | |
83 | - if (dn == NULL) | |
84 | - return -ENOMEM; | |
85 | - spin_lock(&inode->i_lock); | |
86 | - prev = &inode->i_dnotify; | |
87 | - while ((odn = *prev) != NULL) { | |
88 | - if ((odn->dn_owner == id) && (odn->dn_filp == filp)) { | |
89 | - odn->dn_fd = fd; | |
90 | - odn->dn_mask |= arg; | |
91 | - inode->i_dnotify_mask |= arg & ~DN_MULTISHOT; | |
92 | - goto out_free; | |
93 | - } | |
94 | - prev = &odn->dn_next; | |
330 | + if (!S_ISDIR(inode->i_mode)) { | |
331 | + error = -ENOTDIR; | |
332 | + goto out_err; | |
95 | 333 | } |
96 | 334 | |
97 | - rcu_read_lock(); | |
98 | - f = fcheck(fd); | |
99 | - rcu_read_unlock(); | |
100 | - /* we'd lost the race with close(), sod off silently */ | |
101 | - /* note that inode->i_lock prevents reordering problems | |
102 | - * between accesses to descriptor table and ->i_dnotify */ | |
103 | - if (f != filp) | |
104 | - goto out_free; | |
335 | + /* expect most fcntl to add new rather than augment old */ | |
336 | + dn = kmem_cache_alloc(dnotify_struct_cache, GFP_KERNEL); | |
337 | + if (!dn) { | |
338 | + error = -ENOMEM; | |
339 | + goto out_err; | |
340 | + } | |
105 | 341 | |
106 | - error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0); | |
107 | - if (error) | |
108 | - goto out_free; | |
342 | + /* new fsnotify mark, we expect most fcntl calls to add a new mark */ | |
343 | + new_dnentry = kmem_cache_alloc(dnotify_mark_entry_cache, GFP_KERNEL); | |
344 | + if (!new_dnentry) { | |
345 | + error = -ENOMEM; | |
346 | + goto out_err; | |
347 | + } | |
109 | 348 | |
110 | - dn->dn_mask = arg; | |
111 | - dn->dn_fd = fd; | |
112 | - dn->dn_filp = filp; | |
113 | - dn->dn_owner = id; | |
114 | - inode->i_dnotify_mask |= arg & ~DN_MULTISHOT; | |
115 | - dn->dn_next = inode->i_dnotify; | |
116 | - inode->i_dnotify = dn; | |
117 | - spin_unlock(&inode->i_lock); | |
118 | - return 0; | |
349 | + /* convert the userspace DN_* "arg" to the internal FS_* defines in fsnotify */ | |
350 | + mask = convert_arg(arg); | |
119 | 351 | |
120 | -out_free: | |
121 | - spin_unlock(&inode->i_lock); | |
122 | - kmem_cache_free(dn_cache, dn); | |
123 | - return error; | |
124 | -} | |
352 | + /* set up the new_entry and new_dnentry */ | |
353 | + new_entry = &new_dnentry->fsn_entry; | |
354 | + fsnotify_init_mark(new_entry, dnotify_free_mark); | |
355 | + new_entry->mask = mask; | |
356 | + new_dnentry->dn = NULL; | |
125 | 357 | |
126 | -void __inode_dir_notify(struct inode *inode, unsigned long event) | |
127 | -{ | |
128 | - struct dnotify_struct * dn; | |
129 | - struct dnotify_struct **prev; | |
130 | - struct fown_struct * fown; | |
131 | - int changed = 0; | |
358 | + /* this is needed to prevent the fcntl/close race described below */ | |
359 | + mutex_lock(&dnotify_mark_mutex); | |
132 | 360 | |
361 | + /* add the new_entry or find an old one. */ | |
133 | 362 | spin_lock(&inode->i_lock); |
134 | - prev = &inode->i_dnotify; | |
135 | - while ((dn = *prev) != NULL) { | |
136 | - if ((dn->dn_mask & event) == 0) { | |
137 | - prev = &dn->dn_next; | |
138 | - continue; | |
139 | - } | |
140 | - fown = &dn->dn_filp->f_owner; | |
141 | - send_sigio(fown, dn->dn_fd, POLL_MSG); | |
142 | - if (dn->dn_mask & DN_MULTISHOT) | |
143 | - prev = &dn->dn_next; | |
144 | - else { | |
145 | - *prev = dn->dn_next; | |
146 | - changed = 1; | |
147 | - kmem_cache_free(dn_cache, dn); | |
148 | - } | |
149 | - } | |
150 | - if (changed) | |
151 | - redo_inode_mask(inode); | |
363 | + entry = fsnotify_find_mark_entry(dnotify_group, inode); | |
152 | 364 | spin_unlock(&inode->i_lock); |
153 | -} | |
365 | + if (entry) { | |
366 | + dnentry = container_of(entry, struct dnotify_mark_entry, fsn_entry); | |
367 | + spin_lock(&entry->lock); | |
368 | + } else { | |
369 | + fsnotify_add_mark(new_entry, dnotify_group, inode); | |
370 | + spin_lock(&new_entry->lock); | |
371 | + entry = new_entry; | |
372 | + dnentry = new_dnentry; | |
373 | + /* we used new_entry, so don't free it */ | |
374 | + new_entry = NULL; | |
375 | + } | |
154 | 376 | |
155 | -EXPORT_SYMBOL(__inode_dir_notify); | |
377 | + rcu_read_lock(); | |
378 | + f = fcheck(fd); | |
379 | + rcu_read_unlock(); | |
156 | 380 | |
157 | -/* | |
158 | - * This is hopelessly wrong, but unfixable without API changes. At | |
159 | - * least it doesn't oops the kernel... | |
160 | - * | |
161 | - * To safely access ->d_parent we need to keep d_move away from it. Use the | |
162 | - * dentry's d_lock for this. | |
163 | - */ | |
164 | -void dnotify_parent(struct dentry *dentry, unsigned long event) | |
165 | -{ | |
166 | - struct dentry *parent; | |
381 | + /* if (f != filp) means that we lost a race and another task/thread | |
382 | + * actually closed the fd we are still playing with before we grabbed | |
383 | + * the dnotify_mark_mutex and entry->lock. Since closing the fd is the | |
384 | + * only time we clean up the mark entries we need to get our mark off | |
385 | + * the list. */ | |
386 | + if (f != filp) { | |
387 | + /* if we added ourselves, shoot ourselves, it's possible that | |
388 | + * the flush actually did shoot this entry. That's fine too | |
390 | + * since multiple calls to destroy_mark are perfectly safe; if | |
390 | + * we found a dnentry already attached to the inode, just sod | |
391 | + * off silently as the flush at close time dealt with it. | |
392 | + */ | |
393 | + if (dnentry == new_dnentry) | |
394 | + destroy = 1; | |
395 | + goto out; | |
396 | + } | |
167 | 397 | |
168 | - if (!dir_notify_enable) | |
169 | - return; | |
170 | - | |
171 | - spin_lock(&dentry->d_lock); | |
172 | - parent = dentry->d_parent; | |
173 | - if (parent->d_inode->i_dnotify_mask & event) { | |
174 | - dget(parent); | |
175 | - spin_unlock(&dentry->d_lock); | |
176 | - __inode_dir_notify(parent->d_inode, event); | |
177 | - dput(parent); | |
178 | - } else { | |
179 | - spin_unlock(&dentry->d_lock); | |
398 | + error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0); | |
399 | + if (error) { | |
400 | + /* if we added, we must shoot */ | |
401 | + if (dnentry == new_dnentry) | |
402 | + destroy = 1; | |
403 | + goto out; | |
180 | 404 | } |
405 | + | |
406 | + error = attach_dn(dn, dnentry, id, fd, filp, mask); | |
407 | + /* !error means that we attached the dn to the dnentry, so don't free it */ | |
408 | + if (!error) | |
409 | + dn = NULL; | |
410 | + /* -EEXIST means that we didn't add this new dn and used an old one. | |
411 | + * that isn't an error (and the unused dn should be freed) */ | |
412 | + else if (error == -EEXIST) | |
413 | + error = 0; | |
414 | + | |
415 | + dnotify_recalc_inode_mask(entry); | |
416 | +out: | |
417 | + spin_unlock(&entry->lock); | |
418 | + | |
419 | + if (destroy) | |
420 | + fsnotify_destroy_mark_by_entry(entry); | |
421 | + | |
422 | + fsnotify_recalc_group_mask(dnotify_group); | |
423 | + | |
424 | + mutex_unlock(&dnotify_mark_mutex); | |
425 | + fsnotify_put_mark(entry); | |
426 | +out_err: | |
427 | + if (new_entry) | |
428 | + fsnotify_put_mark(new_entry); | |
429 | + if (dn) | |
430 | + kmem_cache_free(dnotify_struct_cache, dn); | |
431 | + return error; | |
181 | 432 | } |
182 | -EXPORT_SYMBOL_GPL(dnotify_parent); | |
183 | 433 | |
184 | 434 | static int __init dnotify_init(void) |
185 | 435 | { |
186 | - dn_cache = kmem_cache_create("dnotify_cache", | |
187 | - sizeof(struct dnotify_struct), 0, SLAB_PANIC, NULL); | |
436 | + dnotify_struct_cache = KMEM_CACHE(dnotify_struct, SLAB_PANIC); | |
437 | + dnotify_mark_entry_cache = KMEM_CACHE(dnotify_mark_entry, SLAB_PANIC); | |
438 | + | |
439 | + dnotify_group = fsnotify_obtain_group(DNOTIFY_GROUP_NUM, | |
440 | + 0, &dnotify_fsnotify_ops); | |
441 | + if (IS_ERR(dnotify_group)) | |
442 | + panic("unable to allocate fsnotify group for dnotify\n"); | |
188 | 443 | return 0; |
189 | 444 | } |
190 | 445 |
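One pattern worth calling out in the new dnotify.c above: the backend-specific dnotify_mark_entry embeds the generic fsnotify_mark_entry and recovers its wrapper with container_of(), as dnotify_handle_event(), dnotify_flush() and dnotify_free_mark() all do. A standalone illustration of that embedding, using simplified stand-in structs rather than the kernel types:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct generic_mark {                   /* stands in for fsnotify_mark_entry */
            unsigned int mask;
    };

    struct backend_mark {                   /* stands in for dnotify_mark_entry */
            struct generic_mark fsn_entry;  /* generic part, handed to callbacks */
            const char *private_state;      /* stands in for the ->dn watcher chain */
    };

    /* A callback only ever sees the embedded generic mark... */
    static void handle(struct generic_mark *entry)
    {
            /* ...and climbs back to its wrapper, just as
             * container_of(entry, struct dnotify_mark_entry, fsn_entry) does above. */
            struct backend_mark *b = container_of(entry, struct backend_mark, fsn_entry);

            printf("mask=0x%x private=%s\n", b->fsn_entry.mask, b->private_state);
    }

    int main(void)
    {
            struct backend_mark m = {
                    .fsn_entry     = { .mask = 0x2 },
                    .private_state = "per-watcher dn list",
            };

            handle(&m.fsn_entry);
            return 0;
    }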
include/linux/dnotify.h
... | ... | @@ -10,7 +10,7 @@ |
10 | 10 | |
11 | 11 | struct dnotify_struct { |
12 | 12 | struct dnotify_struct * dn_next; |
13 | - unsigned long dn_mask; | |
13 | + __u32 dn_mask; | |
14 | 14 | int dn_fd; |
15 | 15 | struct file * dn_filp; |
16 | 16 | fl_owner_t dn_owner; |
... | ... | @@ -21,23 +21,18 @@ |
21 | 21 | |
22 | 22 | #ifdef CONFIG_DNOTIFY |
23 | 23 | |
24 | -extern void __inode_dir_notify(struct inode *, unsigned long); | |
24 | +#define DNOTIFY_ALL_EVENTS (FS_DELETE | FS_DELETE_CHILD |\ | |
25 | + FS_MODIFY | FS_MODIFY_CHILD |\ | |
26 | + FS_ACCESS | FS_ACCESS_CHILD |\ | |
27 | + FS_ATTRIB | FS_ATTRIB_CHILD |\ | |
28 | + FS_CREATE | FS_DN_RENAME |\ | |
29 | + FS_MOVED_FROM | FS_MOVED_TO) | |
30 | + | |
25 | 31 | extern void dnotify_flush(struct file *, fl_owner_t); |
26 | 32 | extern int fcntl_dirnotify(int, struct file *, unsigned long); |
27 | -extern void dnotify_parent(struct dentry *, unsigned long); | |
28 | 33 | |
29 | -static inline void inode_dir_notify(struct inode *inode, unsigned long event) | |
30 | -{ | |
31 | - if (inode->i_dnotify_mask & (event)) | |
32 | - __inode_dir_notify(inode, event); | |
33 | -} | |
34 | - | |
35 | 34 | #else |
36 | 35 | |
37 | -static inline void __inode_dir_notify(struct inode *inode, unsigned long event) | |
38 | -{ | |
39 | -} | |
40 | - | |
41 | 36 | static inline void dnotify_flush(struct file *filp, fl_owner_t id) |
42 | 37 | { |
43 | 38 | } |
... | ... | @@ -45,14 +40,6 @@ |
45 | 40 | static inline int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg) |
46 | 41 | { |
47 | 42 | return -EINVAL; |
48 | -} | |
49 | - | |
50 | -static inline void dnotify_parent(struct dentry *dentry, unsigned long event) | |
51 | -{ | |
52 | -} | |
53 | - | |
54 | -static inline void inode_dir_notify(struct inode *inode, unsigned long event) | |
55 | -{ | |
56 | 43 | } |
57 | 44 | |
58 | 45 | #endif /* CONFIG_DNOTIFY */ |
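The __u32 dn_mask above is per watcher; the mask dnotify actually publishes to fsnotify is the OR of every watcher chained on the directory's mark, with the FS_DN_MULTISHOT control bit stripped (see dnotify_recalc_inode_mask() in the dnotify.c diff). A standalone sketch of that reduction, with stand-in types and flag values:

    #include <stdio.h>

    #define SKETCH_MULTISHOT 0x80000000u    /* stand-in for FS_DN_MULTISHOT, a control bit */

    struct watcher {                        /* stands in for dnotify_struct */
            unsigned int dn_mask;
            struct watcher *dn_next;
    };

    static unsigned int recalc_mask(const struct watcher *dn)
    {
            unsigned int mask = 0;

            for (; dn; dn = dn->dn_next)
                    mask |= dn->dn_mask & ~SKETCH_MULTISHOT;
            return mask;
    }

    int main(void)
    {
            struct watcher b = { .dn_mask = 0x2, .dn_next = NULL };                  /* "modify" */
            struct watcher a = { .dn_mask = 0x8 | SKETCH_MULTISHOT, .dn_next = &b }; /* "delete", multishot */

            /* prints 0xa: the union of both watchers, control bit excluded */
            printf("mark mask = 0x%x\n", recalc_mask(&a));
            return 0;
    }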
include/linux/fs.h
... | ... | @@ -760,11 +760,6 @@ |
760 | 760 | struct hlist_head i_fsnotify_mark_entries; /* fsnotify mark entries */ |
761 | 761 | #endif |
762 | 762 | |
763 | -#ifdef CONFIG_DNOTIFY | |
764 | - unsigned long i_dnotify_mask; /* Directory notify events */ | |
765 | - struct dnotify_struct *i_dnotify; /* for directory notifications */ | |
766 | -#endif | |
767 | - | |
768 | 763 | #ifdef CONFIG_INOTIFY |
769 | 764 | struct list_head inotify_watches; /* watches on this inode */ |
770 | 765 | struct mutex inotify_mutex; /* protects the watches list */ |
include/linux/fsnotify.h
... | ... | @@ -74,13 +74,7 @@ |
74 | 74 | __u32 new_dir_mask = 0; |
75 | 75 | |
76 | 76 | if (old_dir == new_dir) { |
77 | - inode_dir_notify(old_dir, DN_RENAME); | |
78 | 77 | old_dir_mask = FS_DN_RENAME; |
79 | - } else { | |
80 | - inode_dir_notify(old_dir, DN_DELETE); | |
81 | - old_dir_mask = FS_DELETE; | |
82 | - inode_dir_notify(new_dir, DN_CREATE); | |
83 | - new_dir_mask = FS_CREATE; | |
84 | 78 | } |
85 | 79 | |
86 | 80 | if (isdir) { |
... | ... | @@ -132,7 +126,6 @@ |
132 | 126 | |
133 | 127 | if (isdir) |
134 | 128 | mask |= FS_IN_ISDIR; |
135 | - dnotify_parent(dentry, DN_DELETE); | |
136 | 129 | |
137 | 130 | fsnotify_parent(dentry, mask); |
138 | 131 | } |
... | ... | @@ -154,7 +147,6 @@ |
154 | 147 | */ |
155 | 148 | static inline void fsnotify_create(struct inode *inode, struct dentry *dentry) |
156 | 149 | { |
157 | - inode_dir_notify(inode, DN_CREATE); | |
158 | 150 | inotify_inode_queue_event(inode, IN_CREATE, 0, dentry->d_name.name, |
159 | 151 | dentry->d_inode); |
160 | 152 | audit_inode_child(dentry->d_name.name, dentry, inode); |
... | ... | @@ -169,7 +161,6 @@ |
169 | 161 | */ |
170 | 162 | static inline void fsnotify_link(struct inode *dir, struct inode *inode, struct dentry *new_dentry) |
171 | 163 | { |
172 | - inode_dir_notify(dir, DN_CREATE); | |
173 | 164 | inotify_inode_queue_event(dir, IN_CREATE, 0, new_dentry->d_name.name, |
174 | 165 | inode); |
175 | 166 | fsnotify_link_count(inode); |
... | ... | @@ -186,7 +177,6 @@ |
186 | 177 | __u32 mask = (FS_CREATE | FS_IN_ISDIR); |
187 | 178 | struct inode *d_inode = dentry->d_inode; |
188 | 179 | |
189 | - inode_dir_notify(inode, DN_CREATE); | |
190 | 180 | inotify_inode_queue_event(inode, mask, 0, dentry->d_name.name, d_inode); |
191 | 181 | audit_inode_child(dentry->d_name.name, dentry, inode); |
192 | 182 | |
... | ... | @@ -204,7 +194,6 @@ |
204 | 194 | if (S_ISDIR(inode->i_mode)) |
205 | 195 | mask |= FS_IN_ISDIR; |
206 | 196 | |
207 | - dnotify_parent(dentry, DN_ACCESS); | |
208 | 197 | inotify_inode_queue_event(inode, mask, 0, NULL, NULL); |
209 | 198 | |
210 | 199 | fsnotify_parent(dentry, mask); |
... | ... | @@ -222,7 +211,6 @@ |
222 | 211 | if (S_ISDIR(inode->i_mode)) |
223 | 212 | mask |= FS_IN_ISDIR; |
224 | 213 | |
225 | - dnotify_parent(dentry, DN_MODIFY); | |
226 | 214 | inotify_inode_queue_event(inode, mask, 0, NULL, NULL); |
227 | 215 | |
228 | 216 | fsnotify_parent(dentry, mask); |
... | ... | @@ -289,47 +277,33 @@ |
289 | 277 | static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid) |
290 | 278 | { |
291 | 279 | struct inode *inode = dentry->d_inode; |
292 | - int dn_mask = 0; | |
293 | - __u32 in_mask = 0; | |
280 | + __u32 mask = 0; | |
294 | 281 | |
295 | - if (ia_valid & ATTR_UID) { | |
296 | - in_mask |= FS_ATTRIB; | |
297 | - dn_mask |= DN_ATTRIB; | |
298 | - } | |
299 | - if (ia_valid & ATTR_GID) { | |
300 | - in_mask |= FS_ATTRIB; | |
301 | - dn_mask |= DN_ATTRIB; | |
302 | - } | |
303 | - if (ia_valid & ATTR_SIZE) { | |
304 | - in_mask |= FS_MODIFY; | |
305 | - dn_mask |= DN_MODIFY; | |
306 | - } | |
282 | + if (ia_valid & ATTR_UID) | |
283 | + mask |= FS_ATTRIB; | |
284 | + if (ia_valid & ATTR_GID) | |
285 | + mask |= FS_ATTRIB; | |
286 | + if (ia_valid & ATTR_SIZE) | |
287 | + mask |= FS_MODIFY; | |
288 | + | |
307 | 289 | /* both times implies a utime(s) call */ |
308 | 290 | if ((ia_valid & (ATTR_ATIME | ATTR_MTIME)) == (ATTR_ATIME | ATTR_MTIME)) |
309 | - { | |
310 | - in_mask |= FS_ATTRIB; | |
311 | - dn_mask |= DN_ATTRIB; | |
312 | - } else if (ia_valid & ATTR_ATIME) { | |
313 | - in_mask |= FS_ACCESS; | |
314 | - dn_mask |= DN_ACCESS; | |
315 | - } else if (ia_valid & ATTR_MTIME) { | |
316 | - in_mask |= FS_MODIFY; | |
317 | - dn_mask |= DN_MODIFY; | |
318 | - } | |
319 | - if (ia_valid & ATTR_MODE) { | |
320 | - in_mask |= FS_ATTRIB; | |
321 | - dn_mask |= DN_ATTRIB; | |
322 | - } | |
291 | + mask |= FS_ATTRIB; | |
292 | + else if (ia_valid & ATTR_ATIME) | |
293 | + mask |= FS_ACCESS; | |
294 | + else if (ia_valid & ATTR_MTIME) | |
295 | + mask |= FS_MODIFY; | |
323 | 296 | |
324 | - if (dn_mask) | |
325 | - dnotify_parent(dentry, dn_mask); | |
326 | - if (in_mask) { | |
297 | + if (ia_valid & ATTR_MODE) | |
298 | + mask |= FS_ATTRIB; | |
299 | + | |
300 | + if (mask) { | |
327 | 301 | if (S_ISDIR(inode->i_mode)) |
328 | - in_mask |= FS_IN_ISDIR; | |
329 | - inotify_inode_queue_event(inode, in_mask, 0, NULL, NULL); | |
302 | + mask |= FS_IN_ISDIR; | |
303 | + inotify_inode_queue_event(inode, mask, 0, NULL, NULL); | |
330 | 304 | |
331 | - fsnotify_parent(dentry, in_mask); | |
332 | - fsnotify(inode, in_mask, inode, FSNOTIFY_EVENT_INODE); | |
305 | + fsnotify_parent(dentry, mask); | |
306 | + fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE); | |
333 | 307 | } |
334 | 308 | } |
335 | 309 |
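The new fsnotify_change() above collapses the ia_valid bits of an attribute change into one event mask. The standalone sketch below repeats that reduction with locally defined stand-in values for the ATTR_*/FS_* constants (the real ones live in linux/fs.h and linux/fsnotify_backend.h) and prints the mask for a few typical combinations:

    #include <stdio.h>

    #define ATTR_MODE  0x001
    #define ATTR_UID   0x002
    #define ATTR_GID   0x004
    #define ATTR_SIZE  0x008
    #define ATTR_ATIME 0x010
    #define ATTR_MTIME 0x020

    #define FS_ACCESS  0x001
    #define FS_MODIFY  0x002
    #define FS_ATTRIB  0x004

    static unsigned int change_mask(unsigned int ia_valid)
    {
            unsigned int mask = 0;

            /* UID, GID and MODE checks from the diff folded into one test */
            if (ia_valid & (ATTR_UID | ATTR_GID | ATTR_MODE))
                    mask |= FS_ATTRIB;
            if (ia_valid & ATTR_SIZE)
                    mask |= FS_MODIFY;

            /* both times implies a utime(s) call -> attribute change */
            if ((ia_valid & (ATTR_ATIME | ATTR_MTIME)) == (ATTR_ATIME | ATTR_MTIME))
                    mask |= FS_ATTRIB;
            else if (ia_valid & ATTR_ATIME)
                    mask |= FS_ACCESS;
            else if (ia_valid & ATTR_MTIME)
                    mask |= FS_MODIFY;

            return mask;
    }

    int main(void)
    {
            printf("mode change     (ATTR_MODE)             -> 0x%x\n", change_mask(ATTR_MODE));
            printf("size change     (ATTR_SIZE)             -> 0x%x\n", change_mask(ATTR_SIZE));
            printf("both timestamps (ATTR_ATIME|ATTR_MTIME) -> 0x%x\n", change_mask(ATTR_ATIME | ATTR_MTIME));
            return 0;
    }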
include/linux/fsnotify_backend.h