Blame view
kernel/audit_tree.c
25.6 KB
b24413180 License cleanup: ... |
1 |
// SPDX-License-Identifier: GPL-2.0 |
74c3cbe33 [PATCH] audit: wa... |
2 |
#include "audit.h" |
28a3a7eb3 audit: reimplemen... |
3 |
#include <linux/fsnotify_backend.h> |
74c3cbe33 [PATCH] audit: wa... |
4 5 |
#include <linux/namei.h> #include <linux/mount.h> |
916d75761 Fix rule eviction... |
6 |
#include <linux/kthread.h> |
9d2378f8c audit: convert au... |
7 |
#include <linux/refcount.h> |
5a0e3ad6a include cleanup: ... |
8 |
#include <linux/slab.h> |
74c3cbe33 [PATCH] audit: wa... |
9 10 11 12 13 |
struct audit_tree;
struct audit_chunk;

/*
 * One audit_tree per watched directory tree; refcounted via
 * get_tree()/put_tree() (one ref per rule list membership, one per
 * chunk node pointing at the tree - see header comment below).
 */
struct audit_tree {
	refcount_t count;
	int goner;			/* set once the tree is being torn down */
	struct audit_chunk *root;	/* chunk the tree is rooted at, if any */
	struct list_head chunks;	/* node.list of all chunks tagged for us */
	struct list_head rules;		/* rule.rlist of rules referring to us */
	struct list_head list;		/* on tree_list / prune_list / cursor */
	struct list_head same_root;	/* on root chunk's ->trees */
	struct rcu_head head;		/* for kfree_rcu() in put_tree() */
	char pathname[];		/* watched path, copied in alloc_tree() */
};

/* One chunk per tagged inode; referenced from the hash and from its mark. */
struct audit_chunk {
	struct list_head hash;		/* chunk_hash_heads bucket, RCU */
	unsigned long key;		/* lookup key, see inode_to_key() */
	struct fsnotify_mark *mark;	/* mark we hang off; NULL while dying */
	struct list_head trees;		/* trees rooted here (tree->same_root) */
	int count;			/* number of slots in owners[] */
	atomic_long_t refs;		/* dropped via audit_put_chunk() */
	struct rcu_head head;
	struct node {
		struct list_head list;	/* on owner->chunks */
		struct audit_tree *owner;
		unsigned index;		/* index; upper bit indicates 'will prune' */
	} owners[];
};

/* fsnotify mark wrapper pairing the mark with its current chunk */
struct audit_tree_mark {
	struct fsnotify_mark mark;
	struct audit_chunk *chunk;
};
74c3cbe33 [PATCH] audit: wa... |
43 44 |
/* All trees with rules attached; protected by audit_filter_mutex. */
static LIST_HEAD(tree_list);
/* Trees queued for destruction, consumed by prune_tree_thread(). */
static LIST_HEAD(prune_list);
static struct task_struct *prune_thread;
74c3cbe33 [PATCH] audit: wa... |
46 47 |
/* |
83d23bc8a audit: Replace ch... |
48 49 50 51 52 53 54 55 56 |
* One struct chunk is attached to each inode of interest through * audit_tree_mark (fsnotify mark). We replace struct chunk on tagging / * untagging, the mark is stable as long as there is chunk attached. The * association between mark and chunk is protected by hash_lock and * audit_tree_group->mark_mutex. Thus as long as we hold * audit_tree_group->mark_mutex and check that the mark is alive by * FSNOTIFY_MARK_FLAG_ATTACHED flag check, we are sure the mark points to * the current chunk. * |
74c3cbe33 [PATCH] audit: wa... |
57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 |
* Rules have pointer to struct audit_tree. * Rules have struct list_head rlist forming a list of rules over * the same tree. * References to struct chunk are collected at audit_inode{,_child}() * time and used in AUDIT_TREE rule matching. * These references are dropped at the same time we are calling * audit_free_names(), etc. * * Cyclic lists galore: * tree.chunks anchors chunk.owners[].list hash_lock * tree.rules anchors rule.rlist audit_filter_mutex * chunk.trees anchors tree.same_root hash_lock * chunk.hash is a hash with middle bits of watch.inode as * a hash function. RCU, hash_lock * * tree is refcounted; one reference for "some rules on rules_list refer to * it", one for each chunk with pointer to it. * |
83d23bc8a audit: Replace ch... |
75 76 77 78 79 80 |
* chunk is refcounted by embedded .refs. Mark associated with the chunk holds * one chunk reference. This reference is dropped either when a mark is going * to be freed (corresponding inode goes away) or when chunk attached to the * mark gets replaced. This reference must be dropped using * audit_mark_put_chunk() to make sure the reference is dropped only after RCU * grace period as it protects RCU readers of the hash table. |
74c3cbe33 [PATCH] audit: wa... |
81 82 83 84 85 86 |
* * node.index allows to get from node.list to containing chunk. * MSB of that sucker is stolen to mark taggings that we might have to * revert - several operations have very unpleasant cleanup logics and * that makes a difference. Some. */ |
28a3a7eb3 audit: reimplemen... |
87 |
static struct fsnotify_group *audit_tree_group;
/* Slab cache for struct audit_tree_mark; see alloc_mark(). */
static struct kmem_cache *audit_tree_mark_cachep __read_mostly;
74c3cbe33 [PATCH] audit: wa... |
89 90 91 92 93 94 95 |
/*
 * Allocate an audit_tree for pathname @s with a single reference.
 * Returns NULL on allocation failure.
 */
static struct audit_tree *alloc_tree(const char *s)
{
	struct audit_tree *tree;

	tree = kmalloc(sizeof(struct audit_tree) + strlen(s) + 1, GFP_KERNEL);
	if (tree) {
		refcount_set(&tree->count, 1);
		tree->goner = 0;
		INIT_LIST_HEAD(&tree->chunks);
		INIT_LIST_HEAD(&tree->rules);
		INIT_LIST_HEAD(&tree->list);
		INIT_LIST_HEAD(&tree->same_root);
		tree->root = NULL;
		strcpy(tree->pathname, s);
	}
	return tree;
}

static inline void get_tree(struct audit_tree *tree)
{
	refcount_inc(&tree->count);
}

static inline void put_tree(struct audit_tree *tree)
{
	if (refcount_dec_and_test(&tree->count))
		/* freed only after an RCU grace period */
		kfree_rcu(tree, head);
}

/* to avoid bringing the entire thing in audit.h */
const char *audit_tree_path(struct audit_tree *tree)
{
	return tree->pathname;
}
8f7b0ba1c Fix inotify watch... |
123 |
/* Drop the tree references held by @chunk's nodes, then free it. */
static void free_chunk(struct audit_chunk *chunk)
{
	int i;

	for (i = 0; i < chunk->count; i++) {
		if (chunk->owners[i].owner)
			put_tree(chunk->owners[i].owner);
	}
	kfree(chunk);
}

void audit_put_chunk(struct audit_chunk *chunk)
{
	if (atomic_long_dec_and_test(&chunk->refs))
		free_chunk(chunk);
}

/* RCU callback: drop the reference deferred by audit_mark_put_chunk() */
static void __put_chunk(struct rcu_head *rcu)
{
	struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head);
	audit_put_chunk(chunk);
}

/*
 * Drop reference to the chunk that was held by the mark. This is the reference
 * that gets dropped after we've removed the chunk from the hash table and we
 * use it to make sure chunk cannot be freed before RCU grace period expires.
 */
static void audit_mark_put_chunk(struct audit_chunk *chunk)
{
	call_rcu(&chunk->head, __put_chunk);
}
f905c2fc3 audit: Use 'mark'... |
152 |
/* Map a generic fsnotify mark to the audit_tree_mark embedding it. */
static inline struct audit_tree_mark *audit_mark(struct fsnotify_mark *mark)
{
	return container_of(mark, struct audit_tree_mark, mark);
}

/*
 * Chunk currently attached to @mark; stable under hash_lock or
 * audit_tree_group->mark_mutex (see file header comment).
 */
static struct audit_chunk *mark_chunk(struct fsnotify_mark *mark)
{
	return audit_mark(mark)->chunk;
}

/* Release the slab memory backing an audit tree mark. */
static void audit_tree_destroy_watch(struct fsnotify_mark *mark)
{
	kmem_cache_free(audit_tree_mark_cachep, audit_mark(mark));
}

/*
 * Allocate a mark in audit_tree_group. Returns the embedded fsnotify_mark,
 * or NULL on allocation failure.
 */
static struct fsnotify_mark *alloc_mark(void)
{
	struct audit_tree_mark *amark;

	amark = kmem_cache_zalloc(audit_tree_mark_cachep, GFP_KERNEL);
	if (!amark)
		return NULL;
	fsnotify_init_mark(&amark->mark, audit_tree_group);
	amark->mark.mask = FS_IN_IGNORED;
	return &amark->mark;
}

/*
 * Allocate a chunk with room for @count owner nodes. The chunk starts
 * with a single reference and is not hashed yet.
 */
static struct audit_chunk *alloc_chunk(int count)
{
	struct audit_chunk *chunk;
	int i;

	chunk = kzalloc(struct_size(chunk, owners, count), GFP_KERNEL);
	if (!chunk)
		return NULL;

	INIT_LIST_HEAD(&chunk->hash);
	INIT_LIST_HEAD(&chunk->trees);
	chunk->count = count;
	atomic_long_set(&chunk->refs, 1);
	for (i = 0; i < count; i++) {
		INIT_LIST_HEAD(&chunk->owners[i].list);
		chunk->owners[i].index = i;
	}
	return chunk;
}
74c3cbe33 [PATCH] audit: wa... |
196 197 198 |
enum {HASH_SIZE = 128};
static struct list_head chunk_hash_heads[HASH_SIZE];
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(hash_lock);

/* Function to return search key in our hash from inode. */
static unsigned long inode_to_key(const struct inode *inode)
{
	/* Use address pointed to by connector->obj as the key */
	return (unsigned long)&inode->i_fsnotify_marks;
}

/*
 * Bucket for @key. Keys are object addresses, so the low
 * cache-line-alignment bits carry no information - divide them away
 * before taking the modulus.
 */
static inline struct list_head *chunk_hash(unsigned long key)
{
	unsigned long n = key / L1_CACHE_BYTES;
	return chunk_hash_heads + n % HASH_SIZE;
}
f905c2fc3 audit: Use 'mark'... |
210 |
/* hash_lock & mark->group->mark_mutex is held by caller */
static void insert_hash(struct audit_chunk *chunk)
{
	struct list_head *list;

	/*
	 * Make sure chunk is fully initialized before making it visible in the
	 * hash. Pairs with a data dependency barrier in READ_ONCE() in
	 * audit_tree_lookup().
	 */
	smp_wmb();
	WARN_ON_ONCE(!chunk->key);	/* key must be set before hashing */
	list = chunk_hash(chunk->key);
	list_add_rcu(&chunk->hash, list);
}

/* called under rcu_read_lock */
struct audit_chunk *audit_tree_lookup(const struct inode *inode)
{
	unsigned long key = inode_to_key(inode);
	struct list_head *list = chunk_hash(key);
	struct audit_chunk *p;

	list_for_each_entry_rcu(p, list, hash) {
		/*
		 * We use a data dependency barrier in READ_ONCE() to make sure
		 * the chunk we see is fully initialized.
		 */
		if (READ_ONCE(p->key) == key) {
			/* caller receives a reference; see audit_put_chunk() */
			atomic_long_inc(&p->refs);
			return p;
		}
	}
	return NULL;
}
6f1b5d7af audit: audit_tree... |
244 |
bool audit_tree_match(struct audit_chunk *chunk, struct audit_tree *tree) |
74c3cbe33 [PATCH] audit: wa... |
245 246 247 248 |
{ int n; for (n = 0; n < chunk->count; n++) if (chunk->owners[n].owner == tree) |
6f1b5d7af audit: audit_tree... |
249 250 |
return true; return false; |
74c3cbe33 [PATCH] audit: wa... |
251 252 253 |
} /* tagging and untagging inodes with trees */ |
8f7b0ba1c Fix inotify watch... |
254 255 256 257 258 259 |
/*
 * Get from a node back to its containing chunk. The node's position in
 * chunk->owners[] is stored in the low bits of node->index; bit 31 is
 * the 'will prune' flag and must be masked off first.
 */
static struct audit_chunk *find_chunk(struct node *p)
{
	int index = p->index & ~(1U<<31);
	p -= index;
	return container_of(p, struct audit_chunk, owners[0]);
}
f905c2fc3 audit: Use 'mark'... |
260 |
/*
 * Point @mark at @chunk (and @chunk back at @mark), detaching whatever
 * chunk was previously attached. @chunk may be NULL to just detach.
 * Caller holds hash_lock.
 */
static void replace_mark_chunk(struct fsnotify_mark *mark,
			       struct audit_chunk *chunk)
{
	struct audit_chunk *old;

	assert_spin_locked(&hash_lock);
	old = mark_chunk(mark);
	audit_mark(mark)->chunk = chunk;
	if (chunk)
		chunk->mark = mark;
	if (old)
		old->mark = NULL;
}
c22fcde77 audit: Drop all u... |
273 |
static void replace_chunk(struct audit_chunk *new, struct audit_chunk *old) |
d31b326d3 audit: Factor out... |
274 275 276 277 278 279 280 281 282 |
{ struct audit_tree *owner; int i, j; new->key = old->key; list_splice_init(&old->trees, &new->trees); list_for_each_entry(owner, &new->trees, same_root) owner->root = new; for (i = j = 0; j < old->count; i++, j++) { |
c22fcde77 audit: Drop all u... |
283 |
if (!old->owners[j].owner) { |
d31b326d3 audit: Factor out... |
284 285 286 287 288 289 290 291 292 293 294 |
i--; continue; } owner = old->owners[j].owner; new->owners[i].owner = owner; new->owners[i].index = old->owners[j].index - j + i; if (!owner) /* result of earlier fallback */ continue; get_tree(owner); list_replace_init(&old->owners[j].list, &new->owners[i].list); } |
83d23bc8a audit: Replace ch... |
295 |
replace_mark_chunk(old->mark, new); |
d31b326d3 audit: Factor out... |
296 297 298 299 300 301 302 303 |
/* * Make sure chunk is fully initialized before making it visible in the * hash. Pairs with a data dependency barrier in READ_ONCE() in * audit_tree_lookup(). */ smp_wmb(); list_replace_rcu(&old->hash, &new->hash); } |
49a4ee7d9 audit: Guarantee ... |
304 305 306 307 308 309 310 311 312 313 314 315 |
/*
 * Detach node @p from its owning tree and drop the tree reference the
 * node held. If the tree was rooted at @chunk, clear the root link too.
 * Caller holds hash_lock.
 */
static void remove_chunk_node(struct audit_chunk *chunk, struct node *p)
{
	struct audit_tree *owner = p->owner;

	if (owner->root == chunk) {
		list_del_init(&owner->same_root);
		owner->root = NULL;
	}
	list_del_init(&p->list);
	p->owner = NULL;
	put_tree(owner);
}
c22fcde77 audit: Drop all u... |
316 317 318 319 320 321 322 323 324 325 |
static int chunk_count_trees(struct audit_chunk *chunk) { int i; int ret = 0; for (i = 0; i < chunk->count; i++) if (chunk->owners[i].owner) ret++; return ret; } |
f905c2fc3 audit: Use 'mark'... |
326 |
/*
 * Shrink or remove @chunk after some of its nodes lost their owner (see
 * remove_chunk_node()). If live nodes remain, the chunk is replaced in
 * the hash by a smaller copy; otherwise it is unhashed and its mark torn
 * down. @mark is the mark the caller saw attached to @chunk; the caller
 * holds a reference on it.
 */
static void untag_chunk(struct audit_chunk *chunk, struct fsnotify_mark *mark)
{
	struct audit_chunk *new;
	int size;

	mutex_lock(&audit_tree_group->mark_mutex);
	/*
	 * mark_mutex stabilizes chunk attached to the mark so we can check
	 * whether it didn't change while we've dropped hash_lock.
	 */
	if (!(mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED) ||
	    mark_chunk(mark) != chunk)
		goto out_mutex;

	size = chunk_count_trees(chunk);
	if (!size) {
		/* no owners left - remove the chunk and free its mark */
		spin_lock(&hash_lock);
		list_del_init(&chunk->trees);
		list_del_rcu(&chunk->hash);
		replace_mark_chunk(mark, NULL);
		spin_unlock(&hash_lock);
		fsnotify_detach_mark(mark);
		mutex_unlock(&audit_tree_group->mark_mutex);
		audit_mark_put_chunk(chunk);
		fsnotify_free_mark(mark);
		return;
	}

	new = alloc_chunk(size);
	if (!new)
		goto out_mutex;

	spin_lock(&hash_lock);
	/*
	 * This has to go last when updating chunk as once replace_chunk() is
	 * called, new RCU readers can see the new chunk.
	 */
	replace_chunk(new, chunk);
	spin_unlock(&hash_lock);
	mutex_unlock(&audit_tree_group->mark_mutex);
	audit_mark_put_chunk(chunk);
	return;

out_mutex:
	mutex_unlock(&audit_tree_group->mark_mutex);
}
a5789b07b audit: Fix possib... |
371 |
/* Call with group->mark_mutex held, releases it */
static int create_chunk(struct inode *inode, struct audit_tree *tree)
{
	struct fsnotify_mark *mark;
	struct audit_chunk *chunk = alloc_chunk(1);

	if (!chunk) {
		mutex_unlock(&audit_tree_group->mark_mutex);
		return -ENOMEM;
	}

	mark = alloc_mark();
	if (!mark) {
		mutex_unlock(&audit_tree_group->mark_mutex);
		kfree(chunk);
		return -ENOMEM;
	}

	if (fsnotify_add_inode_mark_locked(mark, inode, 0)) {
		mutex_unlock(&audit_tree_group->mark_mutex);
		fsnotify_put_mark(mark);
		kfree(chunk);
		return -ENOSPC;
	}

	spin_lock(&hash_lock);
	if (tree->goner) {
		/* tree died while we were allocating - undo everything */
		spin_unlock(&hash_lock);
		fsnotify_detach_mark(mark);
		mutex_unlock(&audit_tree_group->mark_mutex);
		fsnotify_free_mark(mark);
		fsnotify_put_mark(mark);
		kfree(chunk);
		return 0;
	}
	replace_mark_chunk(mark, chunk);
	chunk->owners[0].index = (1U << 31);
	chunk->owners[0].owner = tree;
	get_tree(tree);
	list_add(&chunk->owners[0].list, &tree->chunks);
	if (!tree->root) {
		tree->root = chunk;
		list_add(&tree->same_root, &chunk->trees);
	}
	chunk->key = inode_to_key(inode);
	/*
	 * Inserting into the hash table has to go last as once we do that RCU
	 * readers can see the chunk.
	 */
	insert_hash(chunk);
	spin_unlock(&hash_lock);
	mutex_unlock(&audit_tree_group->mark_mutex);
	/*
	 * Drop our initial reference. When mark we point to is getting freed,
	 * we get notification through ->freeing_mark callback and cleanup
	 * chunk pointing to this mark.
	 */
	fsnotify_put_mark(mark);
	return 0;
}

/* the first tagged inode becomes root of tree */
static int tag_chunk(struct inode *inode, struct audit_tree *tree)
{
	struct fsnotify_mark *mark;
	struct audit_chunk *chunk, *old;
	struct node *p;
	int n;

	mutex_lock(&audit_tree_group->mark_mutex);
	mark = fsnotify_find_mark(&inode->i_fsnotify_marks, audit_tree_group);
	if (!mark)
		return create_chunk(inode, tree);

	/*
	 * Found mark is guaranteed to be attached and mark_mutex protects mark
	 * from getting detached and thus it makes sure there is chunk attached
	 * to the mark.
	 */
	/* are we already there? */
	spin_lock(&hash_lock);
	old = mark_chunk(mark);
	for (n = 0; n < old->count; n++) {
		if (old->owners[n].owner == tree) {
			spin_unlock(&hash_lock);
			mutex_unlock(&audit_tree_group->mark_mutex);
			fsnotify_put_mark(mark);
			return 0;
		}
	}
	spin_unlock(&hash_lock);

	/* grow the chunk by one node for this tree */
	chunk = alloc_chunk(old->count + 1);
	if (!chunk) {
		mutex_unlock(&audit_tree_group->mark_mutex);
		fsnotify_put_mark(mark);
		return -ENOMEM;
	}

	spin_lock(&hash_lock);
	if (tree->goner) {
		/* tree died while we were allocating - nothing to tag */
		spin_unlock(&hash_lock);
		mutex_unlock(&audit_tree_group->mark_mutex);
		fsnotify_put_mark(mark);
		kfree(chunk);
		return 0;
	}
	p = &chunk->owners[chunk->count - 1];
	p->index = (chunk->count - 1) | (1U<<31);
	p->owner = tree;
	get_tree(tree);
	list_add(&p->list, &tree->chunks);
	if (!tree->root) {
		tree->root = chunk;
		list_add(&tree->same_root, &chunk->trees);
	}
	/*
	 * This has to go last when updating chunk as once replace_chunk() is
	 * called, new RCU readers can see the new chunk.
	 */
	replace_chunk(chunk, old);
	spin_unlock(&hash_lock);
	mutex_unlock(&audit_tree_group->mark_mutex);
	fsnotify_put_mark(mark); /* pair to fsnotify_find_mark */
	audit_mark_put_chunk(old);
	return 0;
}
9e36a5d49 audit: hand taken... |
494 495 |
/*
 * Emit an AUDIT_CONFIG_CHANGE record noting that @rule is being removed
 * along with the tree it watched.
 */
static void audit_tree_log_remove_rule(struct audit_context *context,
				       struct audit_krule *rule)
{
	struct audit_buffer *ab;

	if (!audit_enabled)
		return;
	ab = audit_log_start(context, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
	if (unlikely(!ab))
		return;
	audit_log_format(ab, "op=remove_rule dir=");
	audit_log_untrustedstring(ab, rule->tree->pathname);
	audit_log_key(ab, rule->filterkey);
	audit_log_format(ab, " list=%d res=1", rule->listnr);
	audit_log_end(ab);
}
9e36a5d49 audit: hand taken... |
509 |
/*
 * Detach and free every rule referring to @tree, logging each removal.
 * Rules are torn down via RCU (audit_free_rule_rcu).
 */
static void kill_rules(struct audit_context *context, struct audit_tree *tree)
{
	struct audit_krule *rule, *next;
	struct audit_entry *entry;

	list_for_each_entry_safe(rule, next, &tree->rules, rlist) {
		entry = container_of(rule, struct audit_entry, rule);

		list_del_init(&rule->rlist);
		if (rule->tree) {
			/* not a half-baked one */
			audit_tree_log_remove_rule(context, rule);
			if (entry->rule.exe)
				audit_remove_mark(entry->rule.exe);
			rule->tree = NULL;
			list_del_rcu(&entry->list);
			list_del(&entry->rule.list);
			call_rcu(&entry->rcu, audit_free_rule_rcu);
		}
	}
}

/*
 * Remove tree from chunks. If 'tagged' is set, remove tree only from tagged
 * chunks. The function expects tagged chunks are all at the beginning of the
 * chunks list.
 */
static void prune_tree_chunks(struct audit_tree *victim, bool tagged)
{
	spin_lock(&hash_lock);
	while (!list_empty(&victim->chunks)) {
		struct node *p;
		struct audit_chunk *chunk;
		struct fsnotify_mark *mark;

		p = list_first_entry(&victim->chunks, struct node, list);
		/* have we run out of marked? */
		if (tagged && !(p->index & (1U<<31)))
			break;
		chunk = find_chunk(p);
		mark = chunk->mark;
		remove_chunk_node(chunk, p);
		/* Racing with audit_tree_freeing_mark()? */
		if (!mark)
			continue;
		/* pin the mark across the hash_lock drop */
		fsnotify_get_mark(mark);
		spin_unlock(&hash_lock);

		untag_chunk(chunk, mark);
		fsnotify_put_mark(mark);

		spin_lock(&hash_lock);
	}
	spin_unlock(&hash_lock);
	put_tree(victim);
}

/*
 * finish killing struct audit_tree
 */
static void prune_one(struct audit_tree *victim)
{
	prune_tree_chunks(victim, false);
}
74c3cbe33 [PATCH] audit: wa... |
572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 |
/* trim the uncommitted chunks from tree */
static void trim_marked(struct audit_tree *tree)
{
	struct list_head *p, *q;

	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		return;
	}
	/*
	 * reorder: move tagged (bit-31) nodes to the front of ->chunks,
	 * as prune_tree_chunks(.., true) expects them there
	 */
	for (p = tree->chunks.next; p != &tree->chunks; p = q) {
		struct node *node = list_entry(p, struct node, list);
		q = p->next;
		if (node->index & (1U<<31)) {
			list_del_init(p);
			list_add(p, &tree->chunks);
		}
	}
	spin_unlock(&hash_lock);

	prune_tree_chunks(tree, true);

	spin_lock(&hash_lock);
	if (!tree->root && !tree->goner) {
		/* no chunks left - kill the rules and the tree itself */
		tree->goner = 1;
		spin_unlock(&hash_lock);
		mutex_lock(&audit_filter_mutex);
		kill_rules(audit_context(), tree);
		list_del_init(&tree->list);
		mutex_unlock(&audit_filter_mutex);
		prune_one(tree);
	} else {
		spin_unlock(&hash_lock);
	}
}
916d75761 Fix rule eviction... |
608 |
static void audit_schedule_prune(void);

/* called with audit_filter_mutex */
int audit_remove_tree_rule(struct audit_krule *rule)
{
	struct audit_tree *tree;
	tree = rule->tree;
	if (tree) {
		spin_lock(&hash_lock);
		list_del_init(&rule->rlist);
		if (list_empty(&tree->rules) && !tree->goner) {
			/* last rule gone - queue the tree for pruning */
			tree->root = NULL;
			list_del_init(&tree->same_root);
			tree->goner = 1;
			list_move(&tree->list, &prune_list);
			rule->tree = NULL;
			spin_unlock(&hash_lock);
			audit_schedule_prune();
			return 1;
		}
		rule->tree = NULL;
		spin_unlock(&hash_lock);
		return 1;
	}
	return 0;
}
1f707137b new helper: itera... |
633 634 |
/* iterate_mounts() callback: does @mnt's root match the key in @arg? */
static int compare_root(struct vfsmount *mnt, void *arg)
{
	return inode_to_key(d_backing_inode(mnt->mnt_root)) ==
	       (unsigned long)arg;
}
74c3cbe33 [PATCH] audit: wa... |
638 639 640 641 642 643 644 645 |
/*
 * Walk every tree on tree_list, re-resolve its path, and untag chunks
 * that are no longer reachable under the tree's collected mounts.
 */
void audit_trim_trees(void)
{
	struct list_head cursor;

	mutex_lock(&audit_filter_mutex);
	/* @cursor marks our position on tree_list across mutex drops */
	list_add(&cursor, &tree_list);
	while (cursor.next != &tree_list) {
		struct audit_tree *tree;
		struct path path;
		struct vfsmount *root_mnt;
		struct node *node;
		int err;

		tree = container_of(cursor.next, struct audit_tree, list);
		get_tree(tree);
		list_del(&cursor);
		list_add(&cursor, &tree->list);
		mutex_unlock(&audit_filter_mutex);

		err = kern_path(tree->pathname, 0, &path);
		if (err)
			goto skip_it;

		root_mnt = collect_mounts(&path);
		path_put(&path);
		if (IS_ERR(root_mnt))
			goto skip_it;

		/* mark every node, then clear marks on those still mounted */
		spin_lock(&hash_lock);
		list_for_each_entry(node, &tree->chunks, list) {
			struct audit_chunk *chunk = find_chunk(node);
			/* this could be NULL if the watch is dying elsewhere... */
			node->index |= 1U<<31;
			if (iterate_mounts(compare_root,
					   (void *)(chunk->key),
					   root_mnt))
				node->index &= ~(1U<<31);
		}
		spin_unlock(&hash_lock);
		trim_marked(tree);
		drop_collected_mounts(root_mnt);
skip_it:
		put_tree(tree);
		mutex_lock(&audit_filter_mutex);
	}
	list_del(&cursor);
	mutex_unlock(&audit_filter_mutex);
}
74c3cbe33 [PATCH] audit: wa... |
683 684 685 686 687 |
/*
 * Validate and allocate a tree for a new rule. The path must be
 * absolute, the list AUDIT_FILTER_EXIT with an Audit_equal comparison,
 * and the rule must not already carry an inode filter, watch, or tree.
 */
int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op)
{

	if (pathname[0] != '/' ||
	    rule->listnr != AUDIT_FILTER_EXIT ||
	    op != Audit_equal ||
	    rule->inode_f || rule->watch || rule->tree)
		return -EINVAL;
	rule->tree = alloc_tree(pathname);
	if (!rule->tree)
		return -ENOMEM;
	return 0;
}

void audit_put_tree(struct audit_tree *tree)
{
	put_tree(tree);
}
1f707137b new helper: itera... |
701 702 |
/* iterate_mounts() callback: tag @mnt's root inode for the tree in @arg */
static int tag_mount(struct vfsmount *mnt, void *arg)
{
	return tag_chunk(d_backing_inode(mnt->mnt_root), arg);
}
f1aaf2622 audit: move the t... |
705 706 707 708 709 710 711 |
/*
 * That gets run when evict_chunk() ends up needing to kill audit_tree.
 * Runs from a separate thread.
 */
static int prune_tree_thread(void *unused)
{
	for (;;) {
		/* sleep until audit_schedule_prune() wakes us */
		if (list_empty(&prune_list)) {
			set_current_state(TASK_INTERRUPTIBLE);
			schedule();
		}

		audit_ctl_lock();
		mutex_lock(&audit_filter_mutex);

		while (!list_empty(&prune_list)) {
			struct audit_tree *victim;

			victim = list_entry(prune_list.next,
					struct audit_tree, list);
			list_del_init(&victim->list);

			/* prune_one() takes hash_lock; drop the mutex first */
			mutex_unlock(&audit_filter_mutex);

			prune_one(victim);

			mutex_lock(&audit_filter_mutex);
		}

		mutex_unlock(&audit_filter_mutex);
		audit_ctl_unlock();
	}
	return 0;
}

/* Start prune_thread on first use; subsequent calls are no-ops. */
static int audit_launch_prune(void)
{
	if (prune_thread)
		return 0;
	prune_thread = kthread_run(prune_tree_thread, NULL,
				"audit_prune_tree");
	if (IS_ERR(prune_thread)) {
		pr_err("cannot start thread audit_prune_tree");
		prune_thread = NULL;
		return -ENOMEM;
	}
	return 0;
}
74c3cbe33 [PATCH] audit: wa... |
753 754 755 756 |
/* called with audit_filter_mutex */
int audit_add_tree_rule(struct audit_krule *rule)
{
	struct audit_tree *seed = rule->tree, *tree;
	struct path path;
	struct vfsmount *mnt;
	int err;

	rule->tree = NULL;
	/* reuse an existing tree watching the same path, if any */
	list_for_each_entry(tree, &tree_list, list) {
		if (!strcmp(seed->pathname, tree->pathname)) {
			put_tree(seed);
			rule->tree = tree;
			list_add(&rule->rlist, &tree->rules);
			return 0;
		}
	}
	tree = seed;
	list_add(&tree->list, &tree_list);
	list_add(&rule->rlist, &tree->rules);
	/* do not set rule->tree yet */
	mutex_unlock(&audit_filter_mutex);

	if (unlikely(!prune_thread)) {
		err = audit_launch_prune();
		if (err)
			goto Err;
	}

	err = kern_path(tree->pathname, 0, &path);
	if (err)
		goto Err;
	mnt = collect_mounts(&path);
	path_put(&path);
	if (IS_ERR(mnt)) {
		err = PTR_ERR(mnt);
		goto Err;
	}

	get_tree(tree);
	err = iterate_mounts(tag_mount, tree, mnt);
	drop_collected_mounts(mnt);

	if (!err) {
		struct node *node;
		/* all chunks tagged cleanly - commit them */
		spin_lock(&hash_lock);
		list_for_each_entry(node, &tree->chunks, list)
			node->index &= ~(1U<<31);
		spin_unlock(&hash_lock);
	} else {
		trim_marked(tree);
		goto Err;
	}

	mutex_lock(&audit_filter_mutex);
	if (list_empty(&rule->rlist)) {
		/* the rule was killed while we dropped the mutex */
		put_tree(tree);
		return -ENOENT;
	}
	rule->tree = tree;
	put_tree(tree);

	return 0;
Err:
	mutex_lock(&audit_filter_mutex);
	list_del_init(&tree->list);
	list_del_init(&tree->rules);
	put_tree(tree);
	return err;
}

int audit_tag_tree(char *old, char *new)
{
	struct list_head cursor, barrier;
	int failed = 0;
2096f759a New helper: path_... |
825 |
struct path path1, path2; |
74c3cbe33 [PATCH] audit: wa... |
826 |
struct vfsmount *tagged; |
74c3cbe33 [PATCH] audit: wa... |
827 |
int err; |
2096f759a New helper: path_... |
828 |
err = kern_path(new, 0, &path2); |
74c3cbe33 [PATCH] audit: wa... |
829 830 |
if (err) return err; |
2096f759a New helper: path_... |
831 832 |
tagged = collect_mounts(&path2); path_put(&path2); |
be34d1a3b VFS: Make clone_m... |
833 834 |
if (IS_ERR(tagged)) return PTR_ERR(tagged); |
74c3cbe33 [PATCH] audit: wa... |
835 |
|
2096f759a New helper: path_... |
836 |
err = kern_path(old, 0, &path1); |
74c3cbe33 [PATCH] audit: wa... |
837 838 839 840 |
if (err) { drop_collected_mounts(tagged); return err; } |
74c3cbe33 [PATCH] audit: wa... |
841 |
|
74c3cbe33 [PATCH] audit: wa... |
842 843 844 845 846 847 |
mutex_lock(&audit_filter_mutex); list_add(&barrier, &tree_list); list_add(&cursor, &barrier); while (cursor.next != &tree_list) { struct audit_tree *tree; |
2096f759a New helper: path_... |
848 |
int good_one = 0; |
74c3cbe33 [PATCH] audit: wa... |
849 850 851 852 853 854 |
tree = container_of(cursor.next, struct audit_tree, list); get_tree(tree); list_del(&cursor); list_add(&cursor, &tree->list); mutex_unlock(&audit_filter_mutex); |
2096f759a New helper: path_... |
855 856 857 858 |
err = kern_path(tree->pathname, 0, &path2); if (!err) { good_one = path_is_under(&path1, &path2); path_put(&path2); |
74c3cbe33 [PATCH] audit: wa... |
859 |
} |
2096f759a New helper: path_... |
860 |
if (!good_one) { |
74c3cbe33 [PATCH] audit: wa... |
861 862 863 864 |
put_tree(tree); mutex_lock(&audit_filter_mutex); continue; } |
74c3cbe33 [PATCH] audit: wa... |
865 |
|
1f707137b new helper: itera... |
866 |
failed = iterate_mounts(tag_mount, tree, tagged); |
74c3cbe33 [PATCH] audit: wa... |
867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 |
if (failed) { put_tree(tree); mutex_lock(&audit_filter_mutex); break; } mutex_lock(&audit_filter_mutex); spin_lock(&hash_lock); if (!tree->goner) { list_del(&tree->list); list_add(&tree->list, &tree_list); } spin_unlock(&hash_lock); put_tree(tree); } while (barrier.prev != &tree_list) { struct audit_tree *tree; tree = container_of(barrier.prev, struct audit_tree, list); get_tree(tree); list_del(&tree->list); list_add(&tree->list, &barrier); mutex_unlock(&audit_filter_mutex); if (!failed) { struct node *node; spin_lock(&hash_lock); list_for_each_entry(node, &tree->chunks, list) node->index &= ~(1U<<31); spin_unlock(&hash_lock); } else { trim_marked(tree); } put_tree(tree); mutex_lock(&audit_filter_mutex); } list_del(&barrier); list_del(&cursor); |
74c3cbe33 [PATCH] audit: wa... |
907 |
mutex_unlock(&audit_filter_mutex); |
2096f759a New helper: path_... |
908 |
path_put(&path1); |
74c3cbe33 [PATCH] audit: wa... |
909 910 911 |
drop_collected_mounts(tagged); return failed; } |
916d75761 Fix rule eviction... |
912 913 914 |
static void audit_schedule_prune(void) { |
f1aaf2622 audit: move the t... |
915 |
wake_up_process(prune_thread); |
916d75761 Fix rule eviction... |
916 917 918 919 920 921 |
} /* * ... and that one is done if evict_chunk() decides to delay until the end * of syscall. Runs synchronously. */ |
9e36a5d49 audit: hand taken... |
922 |
void audit_kill_trees(struct audit_context *context) |
916d75761 Fix rule eviction... |
923 |
{ |
9e36a5d49 audit: hand taken... |
924 |
struct list_head *list = &context->killed_trees; |
ce423631c audit: track the ... |
925 |
audit_ctl_lock(); |
916d75761 Fix rule eviction... |
926 927 928 929 930 931 |
mutex_lock(&audit_filter_mutex); while (!list_empty(list)) { struct audit_tree *victim; victim = list_entry(list->next, struct audit_tree, list); |
9e36a5d49 audit: hand taken... |
932 |
kill_rules(context, victim); |
916d75761 Fix rule eviction... |
933 934 935 936 937 938 939 940 941 942 |
list_del_init(&victim->list); mutex_unlock(&audit_filter_mutex); prune_one(victim); mutex_lock(&audit_filter_mutex); } mutex_unlock(&audit_filter_mutex); |
ce423631c audit: track the ... |
943 |
audit_ctl_unlock(); |
74c3cbe33 [PATCH] audit: wa... |
944 945 946 947 948 |
} /* * Here comes the stuff asynchronous to auditctl operations */ |
74c3cbe33 [PATCH] audit: wa... |
949 950 951 |
static void evict_chunk(struct audit_chunk *chunk) { struct audit_tree *owner; |
916d75761 Fix rule eviction... |
952 953 |
struct list_head *postponed = audit_killed_trees(); int need_prune = 0; |
74c3cbe33 [PATCH] audit: wa... |
954 |
int n; |
74c3cbe33 [PATCH] audit: wa... |
955 956 957 958 959 960 961 962 963 |
mutex_lock(&audit_filter_mutex); spin_lock(&hash_lock); while (!list_empty(&chunk->trees)) { owner = list_entry(chunk->trees.next, struct audit_tree, same_root); owner->goner = 1; owner->root = NULL; list_del_init(&owner->same_root); spin_unlock(&hash_lock); |
916d75761 Fix rule eviction... |
964 |
if (!postponed) { |
9e36a5d49 audit: hand taken... |
965 |
kill_rules(audit_context(), owner); |
916d75761 Fix rule eviction... |
966 967 968 969 970 |
list_move(&owner->list, &prune_list); need_prune = 1; } else { list_move(&owner->list, postponed); } |
74c3cbe33 [PATCH] audit: wa... |
971 972 973 974 975 976 |
spin_lock(&hash_lock); } list_del_rcu(&chunk->hash); for (n = 0; n < chunk->count; n++) list_del_init(&chunk->owners[n].list); spin_unlock(&hash_lock); |
f1aaf2622 audit: move the t... |
977 |
mutex_unlock(&audit_filter_mutex); |
916d75761 Fix rule eviction... |
978 979 |
if (need_prune) audit_schedule_prune(); |
74c3cbe33 [PATCH] audit: wa... |
980 |
} |
b9a1b9772 fsnotify: create ... |
981 982 |
static int audit_tree_handle_event(struct fsnotify_mark *mark, u32 mask, struct inode *inode, struct inode *dir, |
c9be99c86 fsnotify: general... |
983 |
const struct qstr *file_name, u32 cookie) |
74c3cbe33 [PATCH] audit: wa... |
984 |
{ |
83c4c4b0a fsnotify: remove ... |
985 |
return 0; |
28a3a7eb3 audit: reimplemen... |
986 |
} |
74c3cbe33 [PATCH] audit: wa... |
987 |
|
f905c2fc3 audit: Use 'mark'... |
988 989 |
static void audit_tree_freeing_mark(struct fsnotify_mark *mark, struct fsnotify_group *group) |
28a3a7eb3 audit: reimplemen... |
990 |
{ |
83d23bc8a audit: Replace ch... |
991 |
struct audit_chunk *chunk; |
28a3a7eb3 audit: reimplemen... |
992 |
|
f905c2fc3 audit: Use 'mark'... |
993 |
mutex_lock(&mark->group->mark_mutex); |
83d23bc8a audit: Replace ch... |
994 |
spin_lock(&hash_lock); |
f905c2fc3 audit: Use 'mark'... |
995 996 |
chunk = mark_chunk(mark); replace_mark_chunk(mark, NULL); |
83d23bc8a audit: Replace ch... |
997 |
spin_unlock(&hash_lock); |
f905c2fc3 audit: Use 'mark'... |
998 |
mutex_unlock(&mark->group->mark_mutex); |
83d23bc8a audit: Replace ch... |
999 1000 1001 1002 |
if (chunk) { evict_chunk(chunk); audit_mark_put_chunk(chunk); } |
b3e8692b4 audit: clean up r... |
1003 1004 1005 1006 1007 |
/* * We are guaranteed to have at least one reference to the mark from * either the inode or the caller of fsnotify_destroy_mark(). */ |
f905c2fc3 audit: Use 'mark'... |
1008 |
BUG_ON(refcount_read(&mark->refcnt) < 1); |
74c3cbe33 [PATCH] audit: wa... |
1009 |
} |
28a3a7eb3 audit: reimplemen... |
1010 |
static const struct fsnotify_ops audit_tree_ops = { |
b9a1b9772 fsnotify: create ... |
1011 |
.handle_inode_event = audit_tree_handle_event, |
28a3a7eb3 audit: reimplemen... |
1012 |
.freeing_mark = audit_tree_freeing_mark, |
054c636e5 fsnotify: Move ->... |
1013 |
.free_mark = audit_tree_destroy_watch, |
74c3cbe33 [PATCH] audit: wa... |
1014 1015 1016 1017 1018 |
}; static int __init audit_tree_init(void) { int i; |
5f5161300 audit: Allocate f... |
1019 |
audit_tree_mark_cachep = KMEM_CACHE(audit_tree_mark, SLAB_PANIC); |
0d2e2a1d0 fsnotify: drop ma... |
1020 |
audit_tree_group = fsnotify_alloc_group(&audit_tree_ops); |
28a3a7eb3 audit: reimplemen... |
1021 1022 |
if (IS_ERR(audit_tree_group)) audit_panic("cannot initialize fsnotify group for rectree watches"); |
74c3cbe33 [PATCH] audit: wa... |
1023 1024 1025 1026 1027 1028 1029 |
for (i = 0; i < HASH_SIZE; i++) INIT_LIST_HEAD(&chunk_hash_heads[i]); return 0; } __initcall(audit_tree_init); |