Commit 9710a78e55fe29fa2d2f1a9cbd1d399797585fd9

Authored by H Hartley Sweeten
Committed by Linus Torvalds
Parent: 03ff3efb64

fs/notify/notification.c: make subsys_initcall function static

Signed-off-by: H Hartley Sweeten <hsweeten@visionengravers.com>
Cc: Eric Dumazet <eric.dumazet@gmail.com>
Cc: Mike Frysinger <vapier@gentoo.org>
Cc: Arun Sharma <asharma@fb.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
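
The function being made static is referenced only through the subsys_initcall() line in the same file, so it needs no external linkage; marking such local functions static is also what typically quiets sparse's "symbol was not declared. Should it be static?" warning. A minimal sketch of the registration pattern involved, with a hypothetical function name (not from this commit):

#include <linux/init.h>

/* subsys_initcall() records the function pointer in an init section and the
 * boot code invokes it through that table, so the symbol is never referenced
 * by name from another translation unit and can safely be static. */
static __init int example_subsys_init(void)
{
        /* one-time setup, run at the "subsys" stage of boot */
        return 0;       /* nonzero is reported as an initcall failure */
}
subsys_initcall(example_subsys_init);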

Showing 1 changed file with 1 addition and 2 deletions

fs/notify/notification.c
/*
 * Copyright (C) 2008 Red Hat, Inc., Eric Paris <eparis@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * Basic idea behind the notification queue: An fsnotify group (like inotify)
 * sends the userspace notification about events asyncronously some time after
 * the event happened.  When inotify gets an event it will need to add that
 * event to the group notify queue.  Since a single event might need to be on
 * multiple group's notification queues we can't add the event directly to each
 * queue and instead add a small "event_holder" to each queue.  This event_holder
 * has a pointer back to the original event.  Since the majority of events are
 * going to end up on one, and only one, notification queue we embed one
 * event_holder into each event.  This means we have a single allocation instead
 * of always needing two.  If the embedded event_holder is already in use by
 * another group a new event_holder (from fsnotify_event_holder_cachep) will be
 * allocated and used.
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/mutex.h>
#include <linux/namei.h>
#include <linux/path.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/atomic.h>

#include <linux/fsnotify_backend.h>
#include "fsnotify.h"

static struct kmem_cache *fsnotify_event_cachep;
static struct kmem_cache *fsnotify_event_holder_cachep;
/*
 * This is a magic event we send when the q is too full.  Since it doesn't
 * hold real event information we just keep one system wide and use it any time
 * it is needed.  It's refcnt is set 1 at kernel init time and will never
 * get set to 0 so it will never get 'freed'
 */
static struct fsnotify_event *q_overflow_event;
static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);

/**
 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
 * Called from fsnotify_move, which is inlined into filesystem modules.
 */
u32 fsnotify_get_cookie(void)
{
        return atomic_inc_return(&fsnotify_sync_cookie);
}
EXPORT_SYMBOL_GPL(fsnotify_get_cookie);

/* return true if the notify queue is empty, false otherwise */
bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group)
{
        BUG_ON(!mutex_is_locked(&group->notification_mutex));
        return list_empty(&group->notification_list) ? true : false;
}

void fsnotify_get_event(struct fsnotify_event *event)
{
        atomic_inc(&event->refcnt);
}

void fsnotify_put_event(struct fsnotify_event *event)
{
        if (!event)
                return;

        if (atomic_dec_and_test(&event->refcnt)) {
                pr_debug("%s: event=%p\n", __func__, event);

                if (event->data_type == FSNOTIFY_EVENT_PATH)
                        path_put(&event->path);

                BUG_ON(!list_empty(&event->private_data_list));

                kfree(event->file_name);
                put_pid(event->tgid);
                kmem_cache_free(fsnotify_event_cachep, event);
        }
}

struct fsnotify_event_holder *fsnotify_alloc_event_holder(void)
{
        return kmem_cache_alloc(fsnotify_event_holder_cachep, GFP_KERNEL);
}

void fsnotify_destroy_event_holder(struct fsnotify_event_holder *holder)
{
        if (holder)
                kmem_cache_free(fsnotify_event_holder_cachep, holder);
}

/*
 * Find the private data that the group previously attached to this event when
 * the group added the event to the notification queue (fsnotify_add_notify_event)
 */
struct fsnotify_event_private_data *fsnotify_remove_priv_from_event(struct fsnotify_group *group, struct fsnotify_event *event)
{
        struct fsnotify_event_private_data *lpriv;
        struct fsnotify_event_private_data *priv = NULL;

        assert_spin_locked(&event->lock);

        list_for_each_entry(lpriv, &event->private_data_list, event_list) {
                if (lpriv->group == group) {
                        priv = lpriv;
                        list_del(&priv->event_list);
                        break;
                }
        }
        return priv;
}

/*
 * Add an event to the group notification queue.  The group can later pull this
 * event off the queue to deal with.  If the event is successfully added to the
 * group's notification queue, a reference is taken on event.
 */
struct fsnotify_event *fsnotify_add_notify_event(struct fsnotify_group *group, struct fsnotify_event *event,
                                                 struct fsnotify_event_private_data *priv,
                                                 struct fsnotify_event *(*merge)(struct list_head *,
                                                                                 struct fsnotify_event *))
{
        struct fsnotify_event *return_event = NULL;
        struct fsnotify_event_holder *holder = NULL;
        struct list_head *list = &group->notification_list;

        pr_debug("%s: group=%p event=%p priv=%p\n", __func__, group, event, priv);

        /*
         * There is one fsnotify_event_holder embedded inside each fsnotify_event.
         * Check if we expect to be able to use that holder.  If not alloc a new
         * holder.
         * For the overflow event it's possible that something will use the in
         * event holder before we get the lock so we may need to jump back and
         * alloc a new holder, this can't happen for most events...
         */
        if (!list_empty(&event->holder.event_list)) {
alloc_holder:
                holder = fsnotify_alloc_event_holder();
                if (!holder)
                        return ERR_PTR(-ENOMEM);
        }

        mutex_lock(&group->notification_mutex);

        if (group->q_len >= group->max_events) {
                event = q_overflow_event;

                /*
                 * we need to return the overflow event
                 * which means we need a ref
                 */
                fsnotify_get_event(event);
                return_event = event;

                /* sorry, no private data on the overflow event */
                priv = NULL;
        }

        if (!list_empty(list) && merge) {
                struct fsnotify_event *tmp;

                tmp = merge(list, event);
                if (tmp) {
                        mutex_unlock(&group->notification_mutex);

                        if (return_event)
                                fsnotify_put_event(return_event);
                        if (holder != &event->holder)
                                fsnotify_destroy_event_holder(holder);
                        return tmp;
                }
        }

        spin_lock(&event->lock);

        if (list_empty(&event->holder.event_list)) {
                if (unlikely(holder))
                        fsnotify_destroy_event_holder(holder);
                holder = &event->holder;
        } else if (unlikely(!holder)) {
                /* between the time we checked above and got the lock the in
                 * event holder was used, go back and get a new one */
                spin_unlock(&event->lock);
                mutex_unlock(&group->notification_mutex);

                if (return_event) {
                        fsnotify_put_event(return_event);
                        return_event = NULL;
                }

                goto alloc_holder;
        }

        group->q_len++;
        holder->event = event;

        fsnotify_get_event(event);
        list_add_tail(&holder->event_list, list);
        if (priv)
                list_add_tail(&priv->event_list, &event->private_data_list);
        spin_unlock(&event->lock);
        mutex_unlock(&group->notification_mutex);

        wake_up(&group->notification_waitq);
        return return_event;
}

/*
 * Remove and return the first event from the notification list.  There is a
 * reference held on this event since it was on the list.  It is the responsibility
 * of the caller to drop this reference.
 */
struct fsnotify_event *fsnotify_remove_notify_event(struct fsnotify_group *group)
{
        struct fsnotify_event *event;
        struct fsnotify_event_holder *holder;

        BUG_ON(!mutex_is_locked(&group->notification_mutex));

        pr_debug("%s: group=%p\n", __func__, group);

        holder = list_first_entry(&group->notification_list, struct fsnotify_event_holder, event_list);

        event = holder->event;

        spin_lock(&event->lock);
        holder->event = NULL;
        list_del_init(&holder->event_list);
        spin_unlock(&event->lock);

        /* event == holder means we are referenced through the in event holder */
        if (holder != &event->holder)
                fsnotify_destroy_event_holder(holder);

        group->q_len--;

        return event;
}

/*
 * This will not remove the event, that must be done with fsnotify_remove_notify_event()
 */
struct fsnotify_event *fsnotify_peek_notify_event(struct fsnotify_group *group)
{
        struct fsnotify_event *event;
        struct fsnotify_event_holder *holder;

        BUG_ON(!mutex_is_locked(&group->notification_mutex));

        holder = list_first_entry(&group->notification_list, struct fsnotify_event_holder, event_list);
        event = holder->event;

        return event;
}

/*
 * Called when a group is being torn down to clean up any outstanding
 * event notifications.
 */
void fsnotify_flush_notify(struct fsnotify_group *group)
{
        struct fsnotify_event *event;
        struct fsnotify_event_private_data *priv;

        mutex_lock(&group->notification_mutex);
        while (!fsnotify_notify_queue_is_empty(group)) {
                event = fsnotify_remove_notify_event(group);
                /* if they don't implement free_event_priv they better not have attached any */
                if (group->ops->free_event_priv) {
                        spin_lock(&event->lock);
                        priv = fsnotify_remove_priv_from_event(group, event);
                        spin_unlock(&event->lock);
                        if (priv)
                                group->ops->free_event_priv(priv);
                }
                fsnotify_put_event(event); /* matches fsnotify_add_notify_event */
        }
        mutex_unlock(&group->notification_mutex);
}

static void initialize_event(struct fsnotify_event *event)
{
        INIT_LIST_HEAD(&event->holder.event_list);
        atomic_set(&event->refcnt, 1);

        spin_lock_init(&event->lock);

        INIT_LIST_HEAD(&event->private_data_list);
}

/*
 * Caller damn well better be holding whatever mutex is protecting the
 * old_holder->event_list and the new_event must be a clean event which
 * cannot be found anywhere else in the kernel.
 */
int fsnotify_replace_event(struct fsnotify_event_holder *old_holder,
                           struct fsnotify_event *new_event)
{
        struct fsnotify_event *old_event = old_holder->event;
        struct fsnotify_event_holder *new_holder = &new_event->holder;

        enum event_spinlock_class {
                SPINLOCK_OLD,
                SPINLOCK_NEW,
        };

        pr_debug("%s: old_event=%p new_event=%p\n", __func__, old_event, new_event);

        /*
         * if the new_event's embedded holder is in use someone
         * screwed up and didn't give us a clean new event.
         */
        BUG_ON(!list_empty(&new_holder->event_list));

        spin_lock_nested(&old_event->lock, SPINLOCK_OLD);
        spin_lock_nested(&new_event->lock, SPINLOCK_NEW);

        new_holder->event = new_event;
        list_replace_init(&old_holder->event_list, &new_holder->event_list);

        spin_unlock(&new_event->lock);
        spin_unlock(&old_event->lock);

        /* event == holder means we are referenced through the in event holder */
        if (old_holder != &old_event->holder)
                fsnotify_destroy_event_holder(old_holder);

        fsnotify_get_event(new_event); /* on the list take reference */
        fsnotify_put_event(old_event); /* off the list, drop reference */

        return 0;
}

struct fsnotify_event *fsnotify_clone_event(struct fsnotify_event *old_event)
{
        struct fsnotify_event *event;

        event = kmem_cache_alloc(fsnotify_event_cachep, GFP_KERNEL);
        if (!event)
                return NULL;

        pr_debug("%s: old_event=%p new_event=%p\n", __func__, old_event, event);

        memcpy(event, old_event, sizeof(*event));
        initialize_event(event);

        if (event->name_len) {
                event->file_name = kstrdup(old_event->file_name, GFP_KERNEL);
                if (!event->file_name) {
                        kmem_cache_free(fsnotify_event_cachep, event);
                        return NULL;
                }
        }
        event->tgid = get_pid(old_event->tgid);
        if (event->data_type == FSNOTIFY_EVENT_PATH)
                path_get(&event->path);

        return event;
}

/*
 * fsnotify_create_event - Allocate a new event which will be sent to each
 * group's handle_event function if the group was interested in this
 * particular event.
 *
 * @to_tell the inode which is supposed to receive the event (sometimes a
 *      parent of the inode to which the event happened.
 * @mask what actually happened.
 * @data pointer to the object which was actually affected
 * @data_type flag indication if the data is a file, path, inode, nothing...
 * @name the filename, if available
 */
struct fsnotify_event *fsnotify_create_event(struct inode *to_tell, __u32 mask, void *data,
                                             int data_type, const unsigned char *name,
                                             u32 cookie, gfp_t gfp)
{
        struct fsnotify_event *event;

        event = kmem_cache_zalloc(fsnotify_event_cachep, gfp);
        if (!event)
                return NULL;

        pr_debug("%s: event=%p to_tell=%p mask=%x data=%p data_type=%d\n",
                 __func__, event, to_tell, mask, data, data_type);

        initialize_event(event);

        if (name) {
                event->file_name = kstrdup(name, gfp);
                if (!event->file_name) {
                        kmem_cache_free(fsnotify_event_cachep, event);
                        return NULL;
                }
                event->name_len = strlen(event->file_name);
        }

        event->tgid = get_pid(task_tgid(current));
        event->sync_cookie = cookie;
        event->to_tell = to_tell;
        event->data_type = data_type;

        switch (data_type) {
        case FSNOTIFY_EVENT_PATH: {
                struct path *path = data;
                event->path.dentry = path->dentry;
                event->path.mnt = path->mnt;
                path_get(&event->path);
                break;
        }
        case FSNOTIFY_EVENT_INODE:
                event->inode = data;
                break;
        case FSNOTIFY_EVENT_NONE:
                event->inode = NULL;
                event->path.dentry = NULL;
                event->path.mnt = NULL;
                break;
        default:
                BUG();
        }

        event->mask = mask;

        return event;
}

-__init int fsnotify_notification_init(void)
+static __init int fsnotify_notification_init(void)
{
        fsnotify_event_cachep = KMEM_CACHE(fsnotify_event, SLAB_PANIC);
        fsnotify_event_holder_cachep = KMEM_CACHE(fsnotify_event_holder, SLAB_PANIC);

        q_overflow_event = fsnotify_create_event(NULL, FS_Q_OVERFLOW, NULL,
                                                 FSNOTIFY_EVENT_NONE, NULL, 0,
                                                 GFP_KERNEL);
        if (!q_overflow_event)
                panic("unable to allocate fsnotify q_overflow_event\n");

        return 0;
}
subsys_initcall(fsnotify_notification_init);
-
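
For context on the structures this file manages: the comment at the top of the file describes embedding one event_holder in every event so the common single-queue case needs no extra allocation, with additional holders taken from fsnotify_event_holder_cachep only when the same event must sit on several groups' queues at once. A stand-alone sketch of that layout, using simplified stand-in types rather than the kernel's real definitions:

/* stand-in for the kernel's struct list_head from <linux/list.h> */
struct list_head { struct list_head *next, *prev; };

struct event;

/* A holder links one event into one group's notification queue. */
struct event_holder {
        struct list_head event_list;   /* node on a single group's list */
        struct event *event;           /* back-pointer to the carried event */
};

struct event {
        struct event_holder holder;    /* embedded: covers the one-queue case for free */
        int refcnt;                    /* each queue holding the event takes a reference */
        /* ... payload: mask, path, file name, tgid, ... */
};

Whether the embedded holder is free is tested with list_empty(&event->holder.event_list), which is why initialize_event() above starts that list head out empty.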