// SPDX-License-Identifier: GPL-2.0
#include "audit.h"
#include <linux/fsnotify_backend.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/kthread.h>
#include <linux/refcount.h>
#include <linux/slab.h>

struct audit_tree {
	refcount_t count;
	int goner;
	struct audit_chunk *root;
	struct list_head chunks;
	struct list_head rules;
	struct list_head list;
	struct list_head same_root;
	struct rcu_head head;
	char pathname[];
};

struct audit_chunk {
	struct list_head hash;
	struct fsnotify_mark mark;
	struct list_head trees;		/* with root here */
	int dead;
	int count;
	atomic_long_t refs;
	struct rcu_head head;
	struct node {
		struct list_head list;
		struct audit_tree *owner;
		unsigned index;		/* index; upper bit indicates 'will prune' */
	} owners[];
};

static LIST_HEAD(tree_list);
static LIST_HEAD(prune_list);
static struct task_struct *prune_thread;

/*
 * One struct chunk is attached to each inode of interest.
 * We replace struct chunk on tagging/untagging.
 * Rules have pointer to struct audit_tree.
 * Rules have struct list_head rlist forming a list of rules over
 * the same tree.
 * References to struct chunk are collected at audit_inode{,_child}()
 * time and used in AUDIT_TREE rule matching.
 * These references are dropped at the same time we are calling
 * audit_free_names(), etc.
 *
 * Cyclic lists galore:
 * tree.chunks anchors chunk.owners[].list		hash_lock
 * tree.rules anchors rule.rlist			audit_filter_mutex
 * chunk.trees anchors tree.same_root			hash_lock
 * chunk.hash is a hash with middle bits of watch.inode as
 * a hash function.					RCU, hash_lock
 *
 * tree is refcounted; one reference for "some rules on rules_list refer to
 * it", one for each chunk with pointer to it.
 *
 * chunk is refcounted by embedded fsnotify_mark + .refs (non-zero refcount
 * of watch contributes 1 to .refs).
 *
 * node.index allows to get from node.list to containing chunk.
 * MSB of that sucker is stolen to mark taggings that we might have to
 * revert - several operations have very unpleasant cleanup logics and
 * that makes a difference.  Some.
 */
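
/*
 * A minimal usage sketch (illustrative, not part of the original file):
 * walking every chunk of a tree goes tree.chunks -> node.list -> containing
 * chunk, under hash_lock, exactly as audit_trim_trees() does below:
 *
 *	struct node *p;
 *
 *	spin_lock(&hash_lock);
 *	list_for_each_entry(p, &tree->chunks, list) {
 *		struct audit_chunk *chunk = find_chunk(p);
 *		// ... inspect chunk ...
 *	}
 *	spin_unlock(&hash_lock);
 */
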
static struct fsnotify_group *audit_tree_group;

static struct audit_tree *alloc_tree(const char *s)
{
	struct audit_tree *tree;

	tree = kmalloc(sizeof(struct audit_tree) + strlen(s) + 1, GFP_KERNEL);
	if (tree) {
		refcount_set(&tree->count, 1);
		tree->goner = 0;
		INIT_LIST_HEAD(&tree->chunks);
		INIT_LIST_HEAD(&tree->rules);
		INIT_LIST_HEAD(&tree->list);
		INIT_LIST_HEAD(&tree->same_root);
		tree->root = NULL;
		strcpy(tree->pathname, s);
	}
	return tree;
}

static inline void get_tree(struct audit_tree *tree)
{
	refcount_inc(&tree->count);
}

static inline void put_tree(struct audit_tree *tree)
{
	if (refcount_dec_and_test(&tree->count))
		kfree_rcu(tree, head);
}

/* to avoid bringing the entire thing in audit.h */
const char *audit_tree_path(struct audit_tree *tree)
{
	return tree->pathname;
}

static void free_chunk(struct audit_chunk *chunk)
{
	int i;

	for (i = 0; i < chunk->count; i++) {
		if (chunk->owners[i].owner)
			put_tree(chunk->owners[i].owner);
	}
	kfree(chunk);
}

void audit_put_chunk(struct audit_chunk *chunk)
{
	if (atomic_long_dec_and_test(&chunk->refs))
		free_chunk(chunk);
}

static void __put_chunk(struct rcu_head *rcu)
{
	struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head);
	audit_put_chunk(chunk);
}

static void audit_tree_destroy_watch(struct fsnotify_mark *entry)
{
	struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);
	call_rcu(&chunk->head, __put_chunk);
}

static struct audit_chunk *alloc_chunk(int count)
{
	struct audit_chunk *chunk;
	size_t size;
	int i;

	size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node);
	chunk = kzalloc(size, GFP_KERNEL);
	if (!chunk)
		return NULL;

	INIT_LIST_HEAD(&chunk->hash);
	INIT_LIST_HEAD(&chunk->trees);
	chunk->count = count;
	atomic_long_set(&chunk->refs, 1);
	for (i = 0; i < count; i++) {
		INIT_LIST_HEAD(&chunk->owners[i].list);
		chunk->owners[i].index = i;
	}
	fsnotify_init_mark(&chunk->mark, audit_tree_group);
	chunk->mark.mask = FS_IN_IGNORED;
	return chunk;
}
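
/*
 * Sizing note (an illustrative aside, not in the original file): owners[]
 * is a flexible array, so the allocation size scales with count; e.g. for
 * count == 2:
 *
 *	size = offsetof(struct audit_chunk, owners) + 2 * sizeof(struct node);
 *
 * which lays the chunk header and both owner slots out in one kzalloc()'d
 * block.
 */
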
enum {HASH_SIZE = 128};
static struct list_head chunk_hash_heads[HASH_SIZE];
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(hash_lock);

/* Function to return search key in our hash from inode. */
static unsigned long inode_to_key(const struct inode *inode)
{
	/* Use address pointed to by connector->obj as the key */
	return (unsigned long)&inode->i_fsnotify_marks;
}

/*
 * Function to return search key in our hash from chunk. Key 0 is special and
 * should never be present in the hash.
 */
static unsigned long chunk_to_key(struct audit_chunk *chunk)
{
	/*
	 * We have a reference to the mark so it should be attached to a
	 * connector.
	 */
	if (WARN_ON_ONCE(!chunk->mark.connector))
		return 0;
	return (unsigned long)chunk->mark.connector->obj;
}

static inline struct list_head *chunk_hash(unsigned long key)
{
	unsigned long n = key / L1_CACHE_BYTES;
	return chunk_hash_heads + n % HASH_SIZE;
}
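
/*
 * Worked example (illustrative, not part of the original file): with
 * L1_CACHE_BYTES == 64 and HASH_SIZE == 128, key 0x12345 hashes as
 *
 *	n = 0x12345 / 64 = 0x48d,  bucket = 0x48d % 128 = 13
 *
 * i.e. the low (within-cache-line) bits are discarded and the middle bits
 * of the key select the bucket, as the header comment at the top of this
 * file describes.
 */
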
/* hash_lock & entry->lock is held by caller */
static void insert_hash(struct audit_chunk *chunk)
{
	unsigned long key = chunk_to_key(chunk);
	struct list_head *list;

	if (!(chunk->mark.flags & FSNOTIFY_MARK_FLAG_ATTACHED))
		return;
	list = chunk_hash(key);
	list_add_rcu(&chunk->hash, list);
}

/* called under rcu_read_lock */
struct audit_chunk *audit_tree_lookup(const struct inode *inode)
{
	unsigned long key = inode_to_key(inode);
	struct list_head *list = chunk_hash(key);
	struct audit_chunk *p;

	list_for_each_entry_rcu(p, list, hash) {
		if (chunk_to_key(p) == key) {
			atomic_long_inc(&p->refs);
			return p;
		}
	}
	return NULL;
}
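
/*
 * Caller sketch (illustrative, not part of the original file): the lookup
 * must run under rcu_read_lock(), and the reference taken on success is
 * the caller's to drop:
 *
 *	rcu_read_lock();
 *	chunk = audit_tree_lookup(inode);
 *	rcu_read_unlock();
 *	if (chunk) {
 *		// ... match against rules ...
 *		audit_put_chunk(chunk);
 *	}
 */
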
bool audit_tree_match(struct audit_chunk *chunk, struct audit_tree *tree)
{
	int n;

	for (n = 0; n < chunk->count; n++)
		if (chunk->owners[n].owner == tree)
			return true;
	return false;
}

/* tagging and untagging inodes with trees */

static struct audit_chunk *find_chunk(struct node *p)
{
	int index = p->index & ~(1U<<31);
	p -= index;
	return container_of(p, struct audit_chunk, owners[0]);
}
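
/*
 * Illustrative note (not part of the original file): owners[] is contiguous,
 * so stepping the node pointer back by its masked index lands on owners[0],
 * and container_of() then recovers the enclosing chunk:
 *
 *	p = &chunk->owners[2];		// masked index == 2
 *	p -= 2;				// now == &chunk->owners[0]
 *	// container_of(p, struct audit_chunk, owners[0]) == chunk
 */
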
static void untag_chunk(struct node *p)
{
	struct audit_chunk *chunk = find_chunk(p);
	struct fsnotify_mark *entry = &chunk->mark;
	struct audit_chunk *new = NULL;
	struct audit_tree *owner;
	int size = chunk->count - 1;
	int i, j;

	fsnotify_get_mark(entry);

	spin_unlock(&hash_lock);

	if (size)
		new = alloc_chunk(size);

	mutex_lock(&entry->group->mark_mutex);
	spin_lock(&entry->lock);
	/*
	 * mark_mutex protects mark from getting detached and thus also from
	 * mark->connector->obj getting NULL.
	 */
	if (chunk->dead || !(entry->flags & FSNOTIFY_MARK_FLAG_ATTACHED)) {
		spin_unlock(&entry->lock);
		mutex_unlock(&entry->group->mark_mutex);
		if (new)
			fsnotify_put_mark(&new->mark);
		goto out;
	}

	owner = p->owner;

	if (!size) {
		chunk->dead = 1;
		spin_lock(&hash_lock);
		list_del_init(&chunk->trees);
		if (owner->root == chunk)
			owner->root = NULL;
		list_del_init(&p->list);
		list_del_rcu(&chunk->hash);
		spin_unlock(&hash_lock);
		spin_unlock(&entry->lock);
		mutex_unlock(&entry->group->mark_mutex);
		fsnotify_destroy_mark(entry, audit_tree_group);
		goto out;
	}

	if (!new)
		goto Fallback;

	if (fsnotify_add_mark_locked(&new->mark, entry->connector->obj,
				     FSNOTIFY_OBJ_TYPE_INODE, 1)) {
		fsnotify_put_mark(&new->mark);
		goto Fallback;
	}

	chunk->dead = 1;
	spin_lock(&hash_lock);
	list_replace_init(&chunk->trees, &new->trees);
	if (owner->root == chunk) {
		list_del_init(&owner->same_root);
		owner->root = NULL;
	}

	for (i = j = 0; j <= size; i++, j++) {
		struct audit_tree *s;
		if (&chunk->owners[j] == p) {
			list_del_init(&p->list);
			i--;
			continue;
		}
		s = chunk->owners[j].owner;
		new->owners[i].owner = s;
		new->owners[i].index = chunk->owners[j].index - j + i;
		if (!s) /* result of earlier fallback */
			continue;
		get_tree(s);
		list_replace_init(&chunk->owners[j].list, &new->owners[i].list);
	}

	list_replace_rcu(&chunk->hash, &new->hash);
	list_for_each_entry(owner, &new->trees, same_root)
		owner->root = new;
	spin_unlock(&hash_lock);
	spin_unlock(&entry->lock);
	mutex_unlock(&entry->group->mark_mutex);
	fsnotify_destroy_mark(entry, audit_tree_group);
	fsnotify_put_mark(&new->mark);	/* drop initial reference */
	goto out;

Fallback:
	// do the best we can
	spin_lock(&hash_lock);
	if (owner->root == chunk) {
		list_del_init(&owner->same_root);
		owner->root = NULL;
	}
	list_del_init(&p->list);
	p->owner = NULL;
	put_tree(owner);
	spin_unlock(&hash_lock);
	spin_unlock(&entry->lock);
	mutex_unlock(&entry->group->mark_mutex);
out:
	fsnotify_put_mark(entry);
	spin_lock(&hash_lock);
}
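
/*
 * Calling-convention sketch (illustrative, not part of the original file):
 * untag_chunk() is entered with hash_lock held, drops it while it blocks,
 * and re-takes it before returning, so a caller may loop the way
 * prune_one() does below:
 *
 *	spin_lock(&hash_lock);
 *	while (!list_empty(&victim->chunks)) {
 *		struct node *p = list_entry(victim->chunks.next,
 *					    struct node, list);
 *		untag_chunk(p);
 *	}
 *	spin_unlock(&hash_lock);
 */
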
static int create_chunk(struct inode *inode, struct audit_tree *tree)
{
	struct fsnotify_mark *entry;
	struct audit_chunk *chunk = alloc_chunk(1);

	if (!chunk)
		return -ENOMEM;
	entry = &chunk->mark;
	if (fsnotify_add_inode_mark(entry, inode, 0)) {
		fsnotify_put_mark(entry);
		return -ENOSPC;
	}

	spin_lock(&entry->lock);
	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		chunk->dead = 1;
		spin_unlock(&entry->lock);
		fsnotify_destroy_mark(entry, audit_tree_group);
		fsnotify_put_mark(entry);
		return 0;
	}
	chunk->owners[0].index = (1U << 31);
	chunk->owners[0].owner = tree;
	get_tree(tree);
	list_add(&chunk->owners[0].list, &tree->chunks);
	if (!tree->root) {
		tree->root = chunk;
		list_add(&tree->same_root, &chunk->trees);
	}
	insert_hash(chunk);
	spin_unlock(&hash_lock);
	spin_unlock(&entry->lock);
	fsnotify_put_mark(entry);	/* drop initial reference */
	return 0;
}

/* the first tagged inode becomes root of tree */
static int tag_chunk(struct inode *inode, struct audit_tree *tree)
{
	struct fsnotify_mark *old_entry, *chunk_entry;
	struct audit_tree *owner;
	struct audit_chunk *chunk, *old;
	struct node *p;
	int n;

	old_entry = fsnotify_find_mark(&inode->i_fsnotify_marks,
				       audit_tree_group);
	if (!old_entry)
		return create_chunk(inode, tree);

	old = container_of(old_entry, struct audit_chunk, mark);

	/* are we already there? */
	spin_lock(&hash_lock);
	for (n = 0; n < old->count; n++) {
		if (old->owners[n].owner == tree) {
			spin_unlock(&hash_lock);
			fsnotify_put_mark(old_entry);
			return 0;
		}
	}
	spin_unlock(&hash_lock);

	chunk = alloc_chunk(old->count + 1);
	if (!chunk) {
		fsnotify_put_mark(old_entry);
		return -ENOMEM;
	}

	chunk_entry = &chunk->mark;

	mutex_lock(&old_entry->group->mark_mutex);
	spin_lock(&old_entry->lock);
	/*
	 * mark_mutex protects mark from getting detached and thus also from
	 * mark->connector->obj getting NULL.
	 */
	if (!(old_entry->flags & FSNOTIFY_MARK_FLAG_ATTACHED)) {
		/* old_entry is being shot, lets just lie */
		spin_unlock(&old_entry->lock);
		mutex_unlock(&old_entry->group->mark_mutex);
		fsnotify_put_mark(old_entry);
		fsnotify_put_mark(&chunk->mark);
		return -ENOENT;
	}

	if (fsnotify_add_mark_locked(chunk_entry, old_entry->connector->obj,
				     FSNOTIFY_OBJ_TYPE_INODE, 1)) {
		spin_unlock(&old_entry->lock);
		mutex_unlock(&old_entry->group->mark_mutex);
		fsnotify_put_mark(chunk_entry);
		fsnotify_put_mark(old_entry);
		return -ENOSPC;
	}

	/* even though we hold old_entry->lock, this is safe since chunk_entry->lock could NEVER have been grabbed before */
	spin_lock(&chunk_entry->lock);
	spin_lock(&hash_lock);

	/* we now hold old_entry->lock, chunk_entry->lock, and hash_lock */
	if (tree->goner) {
		spin_unlock(&hash_lock);
		chunk->dead = 1;
		spin_unlock(&chunk_entry->lock);
		spin_unlock(&old_entry->lock);
		mutex_unlock(&old_entry->group->mark_mutex);

		fsnotify_destroy_mark(chunk_entry, audit_tree_group);

		fsnotify_put_mark(chunk_entry);
		fsnotify_put_mark(old_entry);
		return 0;
	}
	list_replace_init(&old->trees, &chunk->trees);
	for (n = 0, p = chunk->owners; n < old->count; n++, p++) {
		struct audit_tree *s = old->owners[n].owner;
		p->owner = s;
		p->index = old->owners[n].index;
		if (!s) /* result of fallback in untag */
			continue;
		get_tree(s);
		list_replace_init(&old->owners[n].list, &p->list);
	}
	p->index = (chunk->count - 1) | (1U<<31);
	p->owner = tree;
	get_tree(tree);
	list_add(&p->list, &tree->chunks);
	list_replace_rcu(&old->hash, &chunk->hash);
	list_for_each_entry(owner, &chunk->trees, same_root)
		owner->root = chunk;
	old->dead = 1;
	if (!tree->root) {
		tree->root = chunk;
		list_add(&tree->same_root, &chunk->trees);
	}
	spin_unlock(&hash_lock);
	spin_unlock(&chunk_entry->lock);
	spin_unlock(&old_entry->lock);
	mutex_unlock(&old_entry->group->mark_mutex);
	fsnotify_destroy_mark(old_entry, audit_tree_group);
	fsnotify_put_mark(chunk_entry);	/* drop initial reference */
	fsnotify_put_mark(old_entry); /* pair to fsnotify_find mark_entry */
	return 0;
}
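
/*
 * Lock-ordering sketch (illustrative, not part of the original file), as
 * taken by tag_chunk() above when replacing a chunk:
 *
 *	mutex_lock(&old_entry->group->mark_mutex);
 *	spin_lock(&old_entry->lock);
 *	spin_lock(&chunk_entry->lock);
 *	spin_lock(&hash_lock);
 *
 * and released in the reverse order once the new chunk is hashed in.
 */
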
static void audit_tree_log_remove_rule(struct audit_krule *rule)
{
	struct audit_buffer *ab;

	if (!audit_enabled)
		return;
	ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
	if (unlikely(!ab))
		return;
	audit_log_format(ab, "op=remove_rule");
	audit_log_format(ab, " dir=");
	audit_log_untrustedstring(ab, rule->tree->pathname);
	audit_log_key(ab, rule->filterkey);
	audit_log_format(ab, " list=%d res=1", rule->listnr);
	audit_log_end(ab);
}

static void kill_rules(struct audit_tree *tree)
{
	struct audit_krule *rule, *next;
	struct audit_entry *entry;

	list_for_each_entry_safe(rule, next, &tree->rules, rlist) {
		entry = container_of(rule, struct audit_entry, rule);

		list_del_init(&rule->rlist);
		if (rule->tree) {
			/* not a half-baked one */
			audit_tree_log_remove_rule(rule);
			if (entry->rule.exe)
				audit_remove_mark(entry->rule.exe);
			rule->tree = NULL;
			list_del_rcu(&entry->list);
			list_del(&entry->rule.list);
			call_rcu(&entry->rcu, audit_free_rule_rcu);
		}
	}
}

/*
 * finish killing struct audit_tree
 */
static void prune_one(struct audit_tree *victim)
{
	spin_lock(&hash_lock);
	while (!list_empty(&victim->chunks)) {
		struct node *p;

		p = list_entry(victim->chunks.next, struct node, list);

		untag_chunk(p);
	}
	spin_unlock(&hash_lock);
	put_tree(victim);
}

/* trim the uncommitted chunks from tree */
static void trim_marked(struct audit_tree *tree)
{
	struct list_head *p, *q;

	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		return;
	}
	/* reorder: put the marked-for-pruning chunks first */
	for (p = tree->chunks.next; p != &tree->chunks; p = q) {
		struct node *node = list_entry(p, struct node, list);
		q = p->next;
		if (node->index & (1U<<31)) {
			list_del_init(p);
			list_add(p, &tree->chunks);
		}
	}

	while (!list_empty(&tree->chunks)) {
		struct node *node;

		node = list_entry(tree->chunks.next, struct node, list);

		/* have we run out of marked? */
		if (!(node->index & (1U<<31)))
			break;

		untag_chunk(node);
	}
	if (!tree->root && !tree->goner) {
		tree->goner = 1;
		spin_unlock(&hash_lock);
		mutex_lock(&audit_filter_mutex);
		kill_rules(tree);
		list_del_init(&tree->list);
		mutex_unlock(&audit_filter_mutex);
		prune_one(tree);
	} else {
		spin_unlock(&hash_lock);
	}
}

static void audit_schedule_prune(void);

/* called with audit_filter_mutex */
int audit_remove_tree_rule(struct audit_krule *rule)
{
	struct audit_tree *tree;

	tree = rule->tree;
	if (tree) {
		spin_lock(&hash_lock);
		list_del_init(&rule->rlist);
		if (list_empty(&tree->rules) && !tree->goner) {
			tree->root = NULL;
			list_del_init(&tree->same_root);
			tree->goner = 1;
			list_move(&tree->list, &prune_list);
			rule->tree = NULL;
			spin_unlock(&hash_lock);
			audit_schedule_prune();
			return 1;
		}
		rule->tree = NULL;
		spin_unlock(&hash_lock);
		return 1;
	}
	return 0;
}

static int compare_root(struct vfsmount *mnt, void *arg)
{
	return inode_to_key(d_backing_inode(mnt->mnt_root)) ==
	       (unsigned long)arg;
}

void audit_trim_trees(void)
{
	struct list_head cursor;

	mutex_lock(&audit_filter_mutex);
	list_add(&cursor, &tree_list);
	while (cursor.next != &tree_list) {
		struct audit_tree *tree;
		struct path path;
		struct vfsmount *root_mnt;
		struct node *node;
		int err;

		tree = container_of(cursor.next, struct audit_tree, list);
		get_tree(tree);
		list_del(&cursor);
		list_add(&cursor, &tree->list);
		mutex_unlock(&audit_filter_mutex);

		err = kern_path(tree->pathname, 0, &path);
		if (err)
			goto skip_it;

		root_mnt = collect_mounts(&path);
		path_put(&path);
		if (IS_ERR(root_mnt))
			goto skip_it;

		spin_lock(&hash_lock);
		list_for_each_entry(node, &tree->chunks, list) {
			struct audit_chunk *chunk = find_chunk(node);
			/* this could be NULL if the watch is dying else where... */
			node->index |= 1U<<31;
			if (iterate_mounts(compare_root,
					   (void *)chunk_to_key(chunk),
					   root_mnt))
				node->index &= ~(1U<<31);
		}
		spin_unlock(&hash_lock);
		trim_marked(tree);
		drop_collected_mounts(root_mnt);
skip_it:
		put_tree(tree);
		mutex_lock(&audit_filter_mutex);
	}
	list_del(&cursor);
	mutex_unlock(&audit_filter_mutex);
}

int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op)
{

	if (pathname[0] != '/' ||
	    rule->listnr != AUDIT_FILTER_EXIT ||
	    op != Audit_equal ||
	    rule->inode_f || rule->watch || rule->tree)
		return -EINVAL;
	rule->tree = alloc_tree(pathname);
	if (!rule->tree)
		return -ENOMEM;
	return 0;
}

void audit_put_tree(struct audit_tree *tree)
{
	put_tree(tree);
}

static int tag_mount(struct vfsmount *mnt, void *arg)
{
	return tag_chunk(d_backing_inode(mnt->mnt_root), arg);
}

/*
 * That gets run when evict_chunk() ends up needing to kill audit_tree.
 * Runs from a separate thread.
 */
static int prune_tree_thread(void *unused)
{
	for (;;) {
		if (list_empty(&prune_list)) {
			set_current_state(TASK_INTERRUPTIBLE);
			schedule();
		}

		audit_ctl_lock();
		mutex_lock(&audit_filter_mutex);

		while (!list_empty(&prune_list)) {
			struct audit_tree *victim;

			victim = list_entry(prune_list.next,
					struct audit_tree, list);
			list_del_init(&victim->list);

			mutex_unlock(&audit_filter_mutex);

			prune_one(victim);

			mutex_lock(&audit_filter_mutex);
		}

		mutex_unlock(&audit_filter_mutex);
		audit_ctl_unlock();
	}
	return 0;
}

static int audit_launch_prune(void)
{
	if (prune_thread)
		return 0;
	prune_thread = kthread_run(prune_tree_thread, NULL,
				"audit_prune_tree");
	if (IS_ERR(prune_thread)) {
		pr_err("cannot start thread audit_prune_tree");
		prune_thread = NULL;
		return -ENOMEM;
	}
	return 0;
}

/* called with audit_filter_mutex */
int audit_add_tree_rule(struct audit_krule *rule)
{
	struct audit_tree *seed = rule->tree, *tree;
	struct path path;
	struct vfsmount *mnt;
	int err;

	rule->tree = NULL;
	list_for_each_entry(tree, &tree_list, list) {
		if (!strcmp(seed->pathname, tree->pathname)) {
			put_tree(seed);
			rule->tree = tree;
			list_add(&rule->rlist, &tree->rules);
			return 0;
		}
	}
	tree = seed;
	list_add(&tree->list, &tree_list);
	list_add(&rule->rlist, &tree->rules);
	/* do not set rule->tree yet */
	mutex_unlock(&audit_filter_mutex);

	if (unlikely(!prune_thread)) {
		err = audit_launch_prune();
		if (err)
			goto Err;
	}

	err = kern_path(tree->pathname, 0, &path);
	if (err)
		goto Err;
	mnt = collect_mounts(&path);
	path_put(&path);
	if (IS_ERR(mnt)) {
		err = PTR_ERR(mnt);
		goto Err;
	}

	get_tree(tree);
	err = iterate_mounts(tag_mount, tree, mnt);
	drop_collected_mounts(mnt);

	if (!err) {
		struct node *node;
		spin_lock(&hash_lock);
		list_for_each_entry(node, &tree->chunks, list)
			node->index &= ~(1U<<31);
		spin_unlock(&hash_lock);
	} else {
		trim_marked(tree);
		goto Err;
	}

	mutex_lock(&audit_filter_mutex);
	if (list_empty(&rule->rlist)) {
		put_tree(tree);
		return -ENOENT;
	}
	rule->tree = tree;
	put_tree(tree);

	return 0;
Err:
	mutex_lock(&audit_filter_mutex);
	list_del_init(&tree->list);
	list_del_init(&tree->rules);
	put_tree(tree);
	return err;
}

int audit_tag_tree(char *old, char *new)
{
	struct list_head cursor, barrier;
	int failed = 0;
	struct path path1, path2;
	struct vfsmount *tagged;
	int err;

	err = kern_path(new, 0, &path2);
	if (err)
		return err;
	tagged = collect_mounts(&path2);
	path_put(&path2);
	if (IS_ERR(tagged))
		return PTR_ERR(tagged);

	err = kern_path(old, 0, &path1);
	if (err) {
		drop_collected_mounts(tagged);
		return err;
	}

	mutex_lock(&audit_filter_mutex);
	list_add(&barrier, &tree_list);
	list_add(&cursor, &barrier);

	while (cursor.next != &tree_list) {
		struct audit_tree *tree;
		int good_one = 0;

		tree = container_of(cursor.next, struct audit_tree, list);
		get_tree(tree);
		list_del(&cursor);
		list_add(&cursor, &tree->list);
		mutex_unlock(&audit_filter_mutex);

		err = kern_path(tree->pathname, 0, &path2);
		if (!err) {
			good_one = path_is_under(&path1, &path2);
			path_put(&path2);
		}

		if (!good_one) {
			put_tree(tree);
			mutex_lock(&audit_filter_mutex);
			continue;
		}

		failed = iterate_mounts(tag_mount, tree, tagged);
		if (failed) {
			put_tree(tree);
			mutex_lock(&audit_filter_mutex);
			break;
		}

		mutex_lock(&audit_filter_mutex);
		spin_lock(&hash_lock);
		if (!tree->goner) {
			list_del(&tree->list);
			list_add(&tree->list, &tree_list);
		}
		spin_unlock(&hash_lock);
		put_tree(tree);
	}

	while (barrier.prev != &tree_list) {
		struct audit_tree *tree;

		tree = container_of(barrier.prev, struct audit_tree, list);
		get_tree(tree);
		list_del(&tree->list);
		list_add(&tree->list, &barrier);
		mutex_unlock(&audit_filter_mutex);

		if (!failed) {
			struct node *node;
			spin_lock(&hash_lock);
			list_for_each_entry(node, &tree->chunks, list)
				node->index &= ~(1U<<31);
			spin_unlock(&hash_lock);
		} else {
			trim_marked(tree);
		}

		put_tree(tree);
		mutex_lock(&audit_filter_mutex);
	}
	list_del(&barrier);
	list_del(&cursor);
	mutex_unlock(&audit_filter_mutex);
	path_put(&path1);
	drop_collected_mounts(tagged);
	return failed;
}

static void audit_schedule_prune(void)
{
	wake_up_process(prune_thread);
}

/*
 * ... and that one is done if evict_chunk() decides to delay until the end
 * of syscall. Runs synchronously.
 */
void audit_kill_trees(struct list_head *list)
{
	audit_ctl_lock();
	mutex_lock(&audit_filter_mutex);

	while (!list_empty(list)) {
		struct audit_tree *victim;

		victim = list_entry(list->next, struct audit_tree, list);
		kill_rules(victim);
		list_del_init(&victim->list);

		mutex_unlock(&audit_filter_mutex);

		prune_one(victim);

		mutex_lock(&audit_filter_mutex);
	}

	mutex_unlock(&audit_filter_mutex);
	audit_ctl_unlock();
}

/*
 * Here comes the stuff asynchronous to auditctl operations
 */

static void evict_chunk(struct audit_chunk *chunk)
{
	struct audit_tree *owner;
	struct list_head *postponed = audit_killed_trees();
	int need_prune = 0;
	int n;

	if (chunk->dead)
		return;

	chunk->dead = 1;
	mutex_lock(&audit_filter_mutex);
	spin_lock(&hash_lock);
	while (!list_empty(&chunk->trees)) {
		owner = list_entry(chunk->trees.next,
				   struct audit_tree, same_root);
		owner->goner = 1;
		owner->root = NULL;
		list_del_init(&owner->same_root);
		spin_unlock(&hash_lock);
		if (!postponed) {
			kill_rules(owner);
			list_move(&owner->list, &prune_list);
			need_prune = 1;
		} else {
			list_move(&owner->list, postponed);
		}
		spin_lock(&hash_lock);
	}
	list_del_rcu(&chunk->hash);
	for (n = 0; n < chunk->count; n++)
		list_del_init(&chunk->owners[n].list);
	spin_unlock(&hash_lock);
	mutex_unlock(&audit_filter_mutex);
	if (need_prune)
		audit_schedule_prune();
}

static int audit_tree_handle_event(struct fsnotify_group *group,
				   struct inode *to_tell,
				   u32 mask, const void *data, int data_type,
				   const unsigned char *file_name, u32 cookie,
				   struct fsnotify_iter_info *iter_info)
{
	return 0;
}

static void audit_tree_freeing_mark(struct fsnotify_mark *entry, struct fsnotify_group *group)
{
	struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);

	evict_chunk(chunk);

	/*
	 * We are guaranteed to have at least one reference to the mark from
	 * either the inode or the caller of fsnotify_destroy_mark().
	 */
	BUG_ON(refcount_read(&entry->refcnt) < 1);
}

static const struct fsnotify_ops audit_tree_ops = {
	.handle_event = audit_tree_handle_event,
	.freeing_mark = audit_tree_freeing_mark,
	.free_mark = audit_tree_destroy_watch,
};

static int __init audit_tree_init(void)
{
	int i;

	audit_tree_group = fsnotify_alloc_group(&audit_tree_ops);
	if (IS_ERR(audit_tree_group))
		audit_panic("cannot initialize fsnotify group for rectree watches");

	for (i = 0; i < HASH_SIZE; i++)
		INIT_LIST_HEAD(&chunk_hash_heads[i]);

	return 0;
}
__initcall(audit_tree_init);