#include "audit.h"
#include <linux/inotify.h>
#include <linux/namei.h>
#include <linux/mount.h>
struct audit_chunk;

struct audit_tree {
	atomic_t count;
	int goner;
	struct audit_chunk *root;
	struct list_head chunks;
	struct list_head rules;
	struct list_head list;
	struct list_head same_root;
	struct rcu_head head;
	char pathname[];
};

struct audit_chunk {
	struct list_head hash;
	struct inotify_watch watch;
	struct list_head trees;		/* with root here */
	int dead;
	int count;
	atomic_long_t refs;
	struct rcu_head head;
	struct node {
		struct list_head list;
		struct audit_tree *owner;
		unsigned index;		/* index; upper bit indicates 'will prune' */
	} owners[];
};

static LIST_HEAD(tree_list);
static LIST_HEAD(prune_list);
/*
 * One struct chunk is attached to each inode of interest.
 * We replace struct chunk on tagging/untagging.
 * Rules have pointer to struct audit_tree.
 * Rules have struct list_head rlist forming a list of rules over
 * the same tree.
 * References to struct chunk are collected at audit_inode{,_child}()
 * time and used in AUDIT_TREE rule matching.
 * These references are dropped at the same time we are calling
 * audit_free_names(), etc.
 *
 * Cyclic lists galore:
 * tree.chunks anchors chunk.owners[].list			hash_lock
 * tree.rules anchors rule.rlist				audit_filter_mutex
 * chunk.trees anchors tree.same_root				hash_lock
 * chunk.hash is a hash with middle bits of watch.inode as
 * a hash function.						RCU, hash_lock
 *
 * tree is refcounted; one reference for "some rules on rules_list refer to
 * it", one for each chunk with pointer to it.
 *
 * chunk is refcounted by embedded inotify_watch + .refs (non-zero refcount
 * of watch contributes 1 to .refs).
 *
 * node.index allows us to get from node.list to the containing chunk.
 * The MSB of that field is stolen to mark taggings that we might have to
 * revert - several operations have very unpleasant cleanup logic and
 * that makes a difference.  Some.
 */
static struct inotify_handle *rtree_ih;
static struct audit_tree *alloc_tree(const char *s)
{
	struct audit_tree *tree;

	tree = kmalloc(sizeof(struct audit_tree) + strlen(s) + 1, GFP_KERNEL);
	if (tree) {
		atomic_set(&tree->count, 1);
		tree->goner = 0;
		INIT_LIST_HEAD(&tree->chunks);
		INIT_LIST_HEAD(&tree->rules);
		INIT_LIST_HEAD(&tree->list);
		INIT_LIST_HEAD(&tree->same_root);
		tree->root = NULL;
		strcpy(tree->pathname, s);
	}
	return tree;
}
static inline void get_tree(struct audit_tree *tree)
{
	atomic_inc(&tree->count);
}

static void __put_tree(struct rcu_head *rcu)
{
	struct audit_tree *tree = container_of(rcu, struct audit_tree, head);
	kfree(tree);
}

static inline void put_tree(struct audit_tree *tree)
{
	if (atomic_dec_and_test(&tree->count))
		call_rcu(&tree->head, __put_tree);
}

/* to avoid bringing the entire thing in audit.h */
const char *audit_tree_path(struct audit_tree *tree)
{
	return tree->pathname;
}
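/*
 * Allocate a chunk with room for @count owner slots.  Only the list
 * heads, indices and refcount are set up here; arming the inotify watch
 * and hashing the chunk are left to the callers.
 */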
static struct audit_chunk *alloc_chunk(int count)
{
	struct audit_chunk *chunk;
	size_t size;
	int i;

	size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node);
	chunk = kzalloc(size, GFP_KERNEL);
	if (!chunk)
		return NULL;

	INIT_LIST_HEAD(&chunk->hash);
	INIT_LIST_HEAD(&chunk->trees);
	chunk->count = count;
	atomic_long_set(&chunk->refs, 1);
	for (i = 0; i < count; i++) {
		INIT_LIST_HEAD(&chunk->owners[i].list);
		chunk->owners[i].index = i;
	}
	inotify_init_watch(&chunk->watch);
	return chunk;
}
static void free_chunk(struct audit_chunk *chunk)
{
	int i;

	for (i = 0; i < chunk->count; i++) {
		if (chunk->owners[i].owner)
			put_tree(chunk->owners[i].owner);
	}
	kfree(chunk);
}

void audit_put_chunk(struct audit_chunk *chunk)
{
	if (atomic_long_dec_and_test(&chunk->refs))
		free_chunk(chunk);
}

static void __put_chunk(struct rcu_head *rcu)
{
	struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head);
	audit_put_chunk(chunk);
}
enum {HASH_SIZE = 128};
static struct list_head chunk_hash_heads[HASH_SIZE];
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(hash_lock);
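/*
 * Hash on the address of the watched inode; dividing by L1_CACHE_BYTES
 * throws away the low bits (constant for slab-allocated inodes), so the
 * middle bits of the pointer pick the bucket.
 */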
static inline struct list_head *chunk_hash(const struct inode *inode)
{
	unsigned long n = (unsigned long)inode / L1_CACHE_BYTES;
	return chunk_hash_heads + n % HASH_SIZE;
}
/* hash_lock is held by caller */
static void insert_hash(struct audit_chunk *chunk)
{
	struct list_head *list = chunk_hash(chunk->watch.inode);
	list_add_rcu(&chunk->hash, list);
}
/* called under rcu_read_lock */
struct audit_chunk *audit_tree_lookup(const struct inode *inode)
{
	struct list_head *list = chunk_hash(inode);
	struct audit_chunk *p;

	list_for_each_entry_rcu(p, list, hash) {
		if (p->watch.inode == inode) {
			atomic_long_inc(&p->refs);
			return p;
		}
	}
	return NULL;
}
int audit_tree_match(struct audit_chunk *chunk, struct audit_tree *tree)
{
	int n;
	for (n = 0; n < chunk->count; n++)
		if (chunk->owners[n].owner == tree)
			return 1;
	return 0;
}
/* tagging and untagging inodes with trees */
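/*
 * @p points at owners[index] of some chunk (index recovered from the low
 * bits of p->index); step back to owners[0] and use container_of() to
 * find the chunk itself.
 */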
static struct audit_chunk *find_chunk(struct node *p)
{
	int index = p->index & ~(1U<<31);
	p -= index;
	return container_of(p, struct audit_chunk, owners[0]);
}
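/*
 * Drop @p's tree from the chunk it sits in.  Called with hash_lock held;
 * the lock is dropped and retaken here, because replacing the chunk with
 * a smaller copy needs an allocation and the inode's inotify_mutex.
 */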
static void untag_chunk(struct node *p)
{
	struct audit_chunk *chunk = find_chunk(p);
	struct audit_chunk *new;
	struct audit_tree *owner;
	int size = chunk->count - 1;
	int i, j;

	if (!pin_inotify_watch(&chunk->watch)) {
		/*
		 * Filesystem is shutting down; all watches are getting
		 * evicted, just take it off the node list for this
		 * tree and let the eviction logic take care of the
		 * rest.
		 */
		owner = p->owner;
		if (owner->root == chunk) {
			list_del_init(&owner->same_root);
			owner->root = NULL;
		}
		list_del_init(&p->list);
		p->owner = NULL;
		put_tree(owner);
		return;
	}

	spin_unlock(&hash_lock);

	/*
	 * pin_inotify_watch() succeeded, so the watch won't go away
	 * from under us.
	 */
	mutex_lock(&chunk->watch.inode->inotify_mutex);
	if (chunk->dead) {
		mutex_unlock(&chunk->watch.inode->inotify_mutex);
		goto out;
	}

	owner = p->owner;

	if (!size) {
		chunk->dead = 1;
		spin_lock(&hash_lock);
		list_del_init(&chunk->trees);
		if (owner->root == chunk)
			owner->root = NULL;
		list_del_init(&p->list);
		list_del_rcu(&chunk->hash);
		spin_unlock(&hash_lock);
		inotify_evict_watch(&chunk->watch);
		mutex_unlock(&chunk->watch.inode->inotify_mutex);
		put_inotify_watch(&chunk->watch);
		goto out;
	}

	new = alloc_chunk(size);
	if (!new)
		goto Fallback;
	if (inotify_clone_watch(&chunk->watch, &new->watch) < 0) {
		free_chunk(new);
		goto Fallback;
	}

	chunk->dead = 1;
	spin_lock(&hash_lock);
	list_replace_init(&chunk->trees, &new->trees);
	if (owner->root == chunk) {
		list_del_init(&owner->same_root);
		owner->root = NULL;
	}

	for (i = j = 0; i < size; i++, j++) {
		struct audit_tree *s;
		if (&chunk->owners[j] == p) {
			list_del_init(&p->list);
			i--;
			continue;
		}
		s = chunk->owners[j].owner;
		new->owners[i].owner = s;
		new->owners[i].index = chunk->owners[j].index - j + i;
		if (!s) /* result of earlier fallback */
			continue;
		get_tree(s);
		list_replace_init(&chunk->owners[j].list, &new->owners[i].list);
	}

	list_replace_rcu(&chunk->hash, &new->hash);
	list_for_each_entry(owner, &new->trees, same_root)
		owner->root = new;
	spin_unlock(&hash_lock);
	inotify_evict_watch(&chunk->watch);
	mutex_unlock(&chunk->watch.inode->inotify_mutex);
	put_inotify_watch(&chunk->watch);
	goto out;

Fallback:
	/* could not allocate or clone a replacement - do the best we can */
	spin_lock(&hash_lock);
	if (owner->root == chunk) {
		list_del_init(&owner->same_root);
		owner->root = NULL;
	}
	list_del_init(&p->list);
	p->owner = NULL;
	put_tree(owner);
	spin_unlock(&hash_lock);
	mutex_unlock(&chunk->watch.inode->inotify_mutex);
out:
	unpin_inotify_watch(&chunk->watch);
	spin_lock(&hash_lock);
}
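/*
 * First tag for @inode: allocate a single-owner chunk, add the inotify
 * watch and hook the chunk into the tree and the hash under hash_lock.
 */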
static int create_chunk(struct inode *inode, struct audit_tree *tree)
{
	struct audit_chunk *chunk = alloc_chunk(1);
	if (!chunk)
		return -ENOMEM;

	if (inotify_add_watch(rtree_ih, &chunk->watch, inode, IN_IGNORED | IN_DELETE_SELF) < 0) {
		free_chunk(chunk);
		return -ENOSPC;
	}

	mutex_lock(&inode->inotify_mutex);
	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		chunk->dead = 1;
		inotify_evict_watch(&chunk->watch);
		mutex_unlock(&inode->inotify_mutex);
		put_inotify_watch(&chunk->watch);
		return 0;
	}
	chunk->owners[0].index = (1U << 31);
	chunk->owners[0].owner = tree;
	get_tree(tree);
	list_add(&chunk->owners[0].list, &tree->chunks);
	if (!tree->root) {
		tree->root = chunk;
		list_add(&tree->same_root, &chunk->trees);
	}
	insert_hash(chunk);
	spin_unlock(&hash_lock);
	mutex_unlock(&inode->inotify_mutex);
	return 0;
}
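/*
 * Add @tree as one more owner of the chunk watching @inode, replacing
 * the existing chunk with a copy that has one extra owner slot.
 */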
/* the first tagged inode becomes root of tree */
static int tag_chunk(struct inode *inode, struct audit_tree *tree)
{
	struct inotify_watch *watch;
	struct audit_tree *owner;
	struct audit_chunk *chunk, *old;
	struct node *p;
	int n;

	if (inotify_find_watch(rtree_ih, inode, &watch) < 0)
		return create_chunk(inode, tree);

	old = container_of(watch, struct audit_chunk, watch);

	/* are we already there? */
	spin_lock(&hash_lock);
	for (n = 0; n < old->count; n++) {
		if (old->owners[n].owner == tree) {
			spin_unlock(&hash_lock);
			put_inotify_watch(watch);
			return 0;
		}
	}
	spin_unlock(&hash_lock);

	chunk = alloc_chunk(old->count + 1);
	if (!chunk)
		return -ENOMEM;

	mutex_lock(&inode->inotify_mutex);
	if (inotify_clone_watch(&old->watch, &chunk->watch) < 0) {
		mutex_unlock(&inode->inotify_mutex);
		put_inotify_watch(&old->watch);
		free_chunk(chunk);
		return -ENOSPC;
	}
	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		chunk->dead = 1;
		inotify_evict_watch(&chunk->watch);
		mutex_unlock(&inode->inotify_mutex);
		put_inotify_watch(&old->watch);
		put_inotify_watch(&chunk->watch);
		return 0;
	}
	list_replace_init(&old->trees, &chunk->trees);
	for (n = 0, p = chunk->owners; n < old->count; n++, p++) {
		struct audit_tree *s = old->owners[n].owner;
		p->owner = s;
		p->index = old->owners[n].index;
		if (!s) /* result of fallback in untag */
			continue;
		get_tree(s);
		list_replace_init(&old->owners[n].list, &p->list);
	}
	p->index = (chunk->count - 1) | (1U<<31);
	p->owner = tree;
	get_tree(tree);
	list_add(&p->list, &tree->chunks);
	list_replace_rcu(&old->hash, &chunk->hash);
	list_for_each_entry(owner, &chunk->trees, same_root)
		owner->root = chunk;
	old->dead = 1;
	if (!tree->root) {
		tree->root = chunk;
		list_add(&tree->same_root, &chunk->trees);
	}
	spin_unlock(&hash_lock);
	inotify_evict_watch(&old->watch);
	mutex_unlock(&inode->inotify_mutex);
	put_inotify_watch(&old->watch);
	return 0;
}
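/*
 * Detach every rule that refers to @tree and free it, logging a
 * CONFIG_CHANGE record for each fully instantiated rule.
 */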
static void kill_rules(struct audit_tree *tree)
{
	struct audit_krule *rule, *next;
	struct audit_entry *entry;
	struct audit_buffer *ab;

	list_for_each_entry_safe(rule, next, &tree->rules, rlist) {
		entry = container_of(rule, struct audit_entry, rule);

		list_del_init(&rule->rlist);
		if (rule->tree) {
			/* not a half-baked one */
			ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
			audit_log_format(ab, "op=");
			audit_log_string(ab, "remove rule");
			audit_log_format(ab, " dir=");
			audit_log_untrustedstring(ab, rule->tree->pathname);
			audit_log_key(ab, rule->filterkey);
			audit_log_format(ab, " list=%d res=1", rule->listnr);
			audit_log_end(ab);
			rule->tree = NULL;
			list_del_rcu(&entry->list);
			list_del(&entry->rule.list);
			call_rcu(&entry->rcu, audit_free_rule_rcu);
		}
	}
}
/*
 * finish killing struct audit_tree
 */
static void prune_one(struct audit_tree *victim)
{
	spin_lock(&hash_lock);
	while (!list_empty(&victim->chunks)) {
		struct node *p;

		p = list_entry(victim->chunks.next, struct node, list);

		untag_chunk(p);
	}
	spin_unlock(&hash_lock);
	put_tree(victim);
}
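/*
 * A set MSB in node->index marks a tagging that was never confirmed;
 * the helper below walks the tree's chunk list and unhooks exactly
 * those marked nodes.
 */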
/* trim the uncommitted chunks from tree */

static void trim_marked(struct audit_tree *tree)
{
	struct list_head *p, *q;
	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		return;
	}
	/* reorder: move the marked ones to the front of the list */
	for (p = tree->chunks.next; p != &tree->chunks; p = q) {
		struct node *node = list_entry(p, struct node, list);
		q = p->next;
		if (node->index & (1U<<31)) {
			list_del_init(p);
			list_add(p, &tree->chunks);
		}
	}

	while (!list_empty(&tree->chunks)) {
		struct node *node;

		node = list_entry(tree->chunks.next, struct node, list);

		/* have we run out of marked? */
		if (!(node->index & (1U<<31)))
			break;

		untag_chunk(node);
	}
	if (!tree->root && !tree->goner) {
		tree->goner = 1;
		spin_unlock(&hash_lock);
		mutex_lock(&audit_filter_mutex);
		kill_rules(tree);
		list_del_init(&tree->list);
		mutex_unlock(&audit_filter_mutex);
		prune_one(tree);
	} else {
		spin_unlock(&hash_lock);
	}
}
/* called with audit_filter_mutex */
int audit_remove_tree_rule(struct audit_krule *rule)
{
	struct audit_tree *tree;
	tree = rule->tree;
	if (tree) {
		spin_lock(&hash_lock);
		list_del_init(&rule->rlist);
		if (list_empty(&tree->rules) && !tree->goner) {
			tree->root = NULL;
			list_del_init(&tree->same_root);
			tree->goner = 1;
			list_move(&tree->list, &prune_list);
			rule->tree = NULL;
			spin_unlock(&hash_lock);
			audit_schedule_prune();
			return 1;
		}
		rule->tree = NULL;
		spin_unlock(&hash_lock);
		return 1;
	}
	return 0;
}
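/*
 * Walk tree_list with a dummy cursor entry, so that audit_filter_mutex
 * can be dropped while each tree's mounts are collected and its chunks
 * are re-checked against the current mount set.
 */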
void audit_trim_trees(void)
{
	struct list_head cursor;

	mutex_lock(&audit_filter_mutex);
	list_add(&cursor, &tree_list);
	while (cursor.next != &tree_list) {
		struct audit_tree *tree;
		struct path path;
		struct vfsmount *root_mnt;
		struct node *node;
		struct list_head list;
		int err;

		tree = container_of(cursor.next, struct audit_tree, list);
		get_tree(tree);
		list_del(&cursor);
		list_add(&cursor, &tree->list);
		mutex_unlock(&audit_filter_mutex);

		err = kern_path(tree->pathname, 0, &path);
		if (err)
			goto skip_it;

		root_mnt = collect_mounts(&path);
		path_put(&path);
		if (!root_mnt)
			goto skip_it;

		list_add_tail(&list, &root_mnt->mnt_list);
		spin_lock(&hash_lock);
		list_for_each_entry(node, &tree->chunks, list) {
			struct audit_chunk *chunk = find_chunk(node);
			struct inode *inode = chunk->watch.inode;
			struct vfsmount *mnt;
			node->index |= 1U<<31;
			list_for_each_entry(mnt, &list, mnt_list) {
				if (mnt->mnt_root->d_inode == inode) {
					node->index &= ~(1U<<31);
					break;
				}
			}
		}
		spin_unlock(&hash_lock);
		trim_marked(tree);
		put_tree(tree);
		list_del_init(&list);
		drop_collected_mounts(root_mnt);
skip_it:
		mutex_lock(&audit_filter_mutex);
	}
	list_del(&cursor);
	mutex_unlock(&audit_filter_mutex);
}
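/*
 * Does <mnt, dentry> lie underneath @path?  Climb mnt's parents until we
 * reach path->mnt (or run out of parents), then check whether the
 * resulting dentry is a subdirectory of path->dentry.
 */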
static int is_under(struct vfsmount *mnt, struct dentry *dentry,
		    struct path *path)
{
	if (mnt != path->mnt) {
		for (;;) {
			if (mnt->mnt_parent == mnt)
				return 0;
			if (mnt->mnt_parent == path->mnt)
				break;
			mnt = mnt->mnt_parent;
		}
		dentry = mnt->mnt_mountpoint;
	}
	return is_subdir(dentry, path->dentry);
}
int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op)
{
	if (pathname[0] != '/' ||
	    rule->listnr != AUDIT_FILTER_EXIT ||
	    op != Audit_equal ||
	    rule->inode_f || rule->watch || rule->tree)
		return -EINVAL;
	rule->tree = alloc_tree(pathname);
	if (!rule->tree)
		return -ENOMEM;
	return 0;
}
void audit_put_tree(struct audit_tree *tree)
{
	put_tree(tree);
}
/* called with audit_filter_mutex */
int audit_add_tree_rule(struct audit_krule *rule)
{
	struct audit_tree *seed = rule->tree, *tree;
	struct path path;
	struct vfsmount *mnt, *p;
	struct list_head list;
	int err;

	list_for_each_entry(tree, &tree_list, list) {
		if (!strcmp(seed->pathname, tree->pathname)) {
			put_tree(seed);
			rule->tree = tree;
			list_add(&rule->rlist, &tree->rules);
			return 0;
		}
	}
	tree = seed;
	list_add(&tree->list, &tree_list);
	list_add(&rule->rlist, &tree->rules);
	/* do not set rule->tree yet */
	mutex_unlock(&audit_filter_mutex);

	err = kern_path(tree->pathname, 0, &path);
	if (err)
		goto Err;
	mnt = collect_mounts(&path);
	path_put(&path);
	if (!mnt) {
		err = -ENOMEM;
		goto Err;
	}
	list_add_tail(&list, &mnt->mnt_list);

	get_tree(tree);
	list_for_each_entry(p, &list, mnt_list) {
		err = tag_chunk(p->mnt_root->d_inode, tree);
		if (err)
			break;
	}

	list_del(&list);
	drop_collected_mounts(mnt);

	if (!err) {
		struct node *node;
		spin_lock(&hash_lock);
		list_for_each_entry(node, &tree->chunks, list)
			node->index &= ~(1U<<31);
		spin_unlock(&hash_lock);
	} else {
		trim_marked(tree);
		goto Err;
	}

	mutex_lock(&audit_filter_mutex);
	if (list_empty(&rule->rlist)) {
		put_tree(tree);
		return -ENOENT;
	}
	rule->tree = tree;
	put_tree(tree);

	return 0;
Err:
	mutex_lock(&audit_filter_mutex);
	list_del_init(&tree->list);
	list_del_init(&tree->rules);
	put_tree(tree);
	return err;
}
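/*
 * Make the mounts collected at @new count as instances of @old for tree
 * rules: every tree whose directory contains @old gets the @new mounts
 * tagged as well.
 */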
int audit_tag_tree(char *old, char *new)
{
	struct list_head cursor, barrier;
	int failed = 0;
	struct path path;
	struct vfsmount *tagged;
	struct list_head list;
	struct vfsmount *mnt;
	struct dentry *dentry;
	int err;

	err = kern_path(new, 0, &path);
	if (err)
		return err;
	tagged = collect_mounts(&path);
	path_put(&path);
	if (!tagged)
		return -ENOMEM;

	err = kern_path(old, 0, &path);
	if (err) {
		drop_collected_mounts(tagged);
		return err;
	}
	mnt = mntget(path.mnt);
	dentry = dget(path.dentry);
	path_put(&path);

	list_add_tail(&list, &tagged->mnt_list);

	mutex_lock(&audit_filter_mutex);
	list_add(&barrier, &tree_list);
	list_add(&cursor, &barrier);

	while (cursor.next != &tree_list) {
		struct audit_tree *tree;
		struct vfsmount *p;

		tree = container_of(cursor.next, struct audit_tree, list);
		get_tree(tree);
		list_del(&cursor);
		list_add(&cursor, &tree->list);
		mutex_unlock(&audit_filter_mutex);

		err = kern_path(tree->pathname, 0, &path);
		if (err) {
			put_tree(tree);
			mutex_lock(&audit_filter_mutex);
			continue;
		}

		spin_lock(&vfsmount_lock);
		if (!is_under(mnt, dentry, &path)) {
			spin_unlock(&vfsmount_lock);
			path_put(&path);
			put_tree(tree);
			mutex_lock(&audit_filter_mutex);
			continue;
		}
		spin_unlock(&vfsmount_lock);
		path_put(&path);

		list_for_each_entry(p, &list, mnt_list) {
			failed = tag_chunk(p->mnt_root->d_inode, tree);
			if (failed)
				break;
		}

		if (failed) {
			put_tree(tree);
			mutex_lock(&audit_filter_mutex);
			break;
		}

		mutex_lock(&audit_filter_mutex);
		spin_lock(&hash_lock);
		if (!tree->goner) {
			list_del(&tree->list);
			list_add(&tree->list, &tree_list);
		}
		spin_unlock(&hash_lock);
		put_tree(tree);
	}

	while (barrier.prev != &tree_list) {
		struct audit_tree *tree;

		tree = container_of(barrier.prev, struct audit_tree, list);
		get_tree(tree);
		list_del(&tree->list);
		list_add(&tree->list, &barrier);
		mutex_unlock(&audit_filter_mutex);

		if (!failed) {
			struct node *node;
			spin_lock(&hash_lock);
			list_for_each_entry(node, &tree->chunks, list)
				node->index &= ~(1U<<31);
			spin_unlock(&hash_lock);
		} else {
			trim_marked(tree);
		}

		put_tree(tree);
		mutex_lock(&audit_filter_mutex);
	}
	list_del(&barrier);
	list_del(&cursor);
	list_del(&list);
	mutex_unlock(&audit_filter_mutex);
	mntput(mnt);
	dput(dentry);
	drop_collected_mounts(tagged);
	return failed;
}
/*
 * That gets run when evict_chunk() ends up needing to kill audit_tree.
 * Runs from a separate thread, with audit_cmd_mutex held.
 */
void audit_prune_trees(void)
{
	mutex_lock(&audit_filter_mutex);

	while (!list_empty(&prune_list)) {
		struct audit_tree *victim;

		victim = list_entry(prune_list.next, struct audit_tree, list);
		list_del_init(&victim->list);

		mutex_unlock(&audit_filter_mutex);

		prune_one(victim);

		mutex_lock(&audit_filter_mutex);
	}

	mutex_unlock(&audit_filter_mutex);
}
/*
 * Here comes the stuff asynchronous to auditctl operations
 */

/* inode->inotify_mutex is locked */
static void evict_chunk(struct audit_chunk *chunk)
{
	struct audit_tree *owner;
	int n;

	if (chunk->dead)
		return;

	chunk->dead = 1;
	mutex_lock(&audit_filter_mutex);
	spin_lock(&hash_lock);
	while (!list_empty(&chunk->trees)) {
		owner = list_entry(chunk->trees.next,
				   struct audit_tree, same_root);
		owner->goner = 1;
		owner->root = NULL;
		list_del_init(&owner->same_root);
		spin_unlock(&hash_lock);
		kill_rules(owner);
		list_move(&owner->list, &prune_list);
		audit_schedule_prune();
		spin_lock(&hash_lock);
	}
	list_del_rcu(&chunk->hash);
	for (n = 0; n < chunk->count; n++)
		list_del_init(&chunk->owners[n].list);
	spin_unlock(&hash_lock);
	mutex_unlock(&audit_filter_mutex);
}
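/*
 * Inotify callbacks.  IN_IGNORED is delivered when the watch goes away
 * (the watched inode was deleted or the chunk was evicted); tear the
 * chunk down and drop the watch reference.
 */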
static void handle_event(struct inotify_watch *watch, u32 wd, u32 mask,
			 u32 cookie, const char *dname, struct inode *inode)
{
	struct audit_chunk *chunk = container_of(watch, struct audit_chunk, watch);

	if (mask & IN_IGNORED) {
		evict_chunk(chunk);
		put_inotify_watch(watch);
	}
}

static void destroy_watch(struct inotify_watch *watch)
{
	struct audit_chunk *chunk = container_of(watch, struct audit_chunk, watch);
	call_rcu(&chunk->head, __put_chunk);
}

static const struct inotify_operations rtree_inotify_ops = {
	.handle_event	= handle_event,
	.destroy_watch	= destroy_watch,
};

static int __init audit_tree_init(void)
{
	int i;

	rtree_ih = inotify_init(&rtree_inotify_ops);
	if (IS_ERR(rtree_ih))
		audit_panic("cannot initialize inotify handle for rectree watches");

	for (i = 0; i < HASH_SIZE; i++)
		INIT_LIST_HEAD(&chunk_hash_heads[i]);

	return 0;
}
__initcall(audit_tree_init);