#include "audit.h"
#include <linux/inotify.h>
#include <linux/namei.h>
#include <linux/mount.h>
struct audit_tree {
        atomic_t count;
        int goner;
        struct audit_chunk *root;
        struct list_head chunks;
        struct list_head rules;
        struct list_head list;
        struct list_head same_root;
        struct rcu_head head;
        char pathname[];
};

struct audit_chunk {
        struct list_head hash;
        struct inotify_watch watch;
        struct list_head trees;         /* with root here */
        int dead;
        int count;
        struct rcu_head head;
        struct node {
                struct list_head list;
                struct audit_tree *owner;
                unsigned index;         /* index; upper bit indicates 'will prune' */
        } owners[];
};
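/*
 * Layout note: each watched inode carries one audit_chunk, and the
 * flexible owners[] array holds one node per audit_tree that has
 * tagged that inode.
 */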
static LIST_HEAD(tree_list);
static LIST_HEAD(prune_list);
/*
 * One struct chunk is attached to each inode of interest.
 * We replace struct chunk on tagging/untagging.
 * Rules have pointer to struct audit_tree.
 * Rules have struct list_head rlist forming a list of rules over
 * the same tree.
 * References to struct chunk are collected at audit_inode{,_child}()
 * time and used in AUDIT_TREE rule matching.
 * These references are dropped at the same time we are calling
 * audit_free_names(), etc.
 *
 * Cyclic lists galore:
 * tree.chunks anchors chunk.owners[].list                 hash_lock
 * tree.rules anchors rule.rlist                           audit_filter_mutex
 * chunk.trees anchors tree.same_root                      hash_lock
 * chunk.hash is a hash with middle bits of watch.inode as
 * a hash function.                                        RCU, hash_lock
 *
 * tree is refcounted; one reference for "some rules on rules_list refer to
 * it", one for each chunk with pointer to it.
 *
 * chunk is refcounted by embedded inotify_watch.
 *
 * node.index allows to get from node.list to containing chunk.
 * MSB of that sucker is stolen to mark taggings that we might have to
 * revert - several operations have very unpleasant cleanup logic and
 * that makes a difference.  Some.
 */
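/*
 * Lock ordering, as the functions below use them: hash_lock is the
 * innermost lock, taken under audit_filter_mutex and/or the inode's
 * inotify_mutex and never held across anything that can block;
 * where both mutexes are needed (evict_chunk), audit_filter_mutex
 * nests inside inotify_mutex.
 */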
static struct inotify_handle *rtree_ih;
static struct audit_tree *alloc_tree(const char *s)
{
        struct audit_tree *tree;

        tree = kmalloc(sizeof(struct audit_tree) + strlen(s) + 1, GFP_KERNEL);
        if (tree) {
                atomic_set(&tree->count, 1);
                tree->goner = 0;
                INIT_LIST_HEAD(&tree->chunks);
                INIT_LIST_HEAD(&tree->rules);
                INIT_LIST_HEAD(&tree->list);
                INIT_LIST_HEAD(&tree->same_root);
                tree->root = NULL;
                strcpy(tree->pathname, s);
        }
        return tree;
}
static inline void get_tree(struct audit_tree *tree)
{
        atomic_inc(&tree->count);
}
static void __put_tree(struct rcu_head *rcu)
{
        struct audit_tree *tree = container_of(rcu, struct audit_tree, head);
        kfree(tree);
}
static inline void put_tree(struct audit_tree *tree)
{
        if (atomic_dec_and_test(&tree->count))
                call_rcu(&tree->head, __put_tree);
}
/* to avoid bringing the entire thing in audit.h */
const char *audit_tree_path(struct audit_tree *tree)
{
        return tree->pathname;
}
static struct audit_chunk *alloc_chunk(int count)
{
        struct audit_chunk *chunk;
        size_t size;
        int i;

        size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node);
        chunk = kzalloc(size, GFP_KERNEL);
        if (!chunk)
                return NULL;

        INIT_LIST_HEAD(&chunk->hash);
        INIT_LIST_HEAD(&chunk->trees);
        chunk->count = count;
        for (i = 0; i < count; i++) {
                INIT_LIST_HEAD(&chunk->owners[i].list);
                chunk->owners[i].index = i;
        }
        inotify_init_watch(&chunk->watch);
        return chunk;
}
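/*
 * The offsetof() + count * sizeof(struct node) sizing is the usual
 * flexible-array allocation; each node starts on its own empty list
 * with its slot number in index, and callers adjust the marks later.
 */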
static void __free_chunk(struct rcu_head *rcu)
{
        struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head);
        int i;

        for (i = 0; i < chunk->count; i++) {
                if (chunk->owners[i].owner)
                        put_tree(chunk->owners[i].owner);
        }
        kfree(chunk);
}
static inline void free_chunk(struct audit_chunk *chunk)
{
        call_rcu(&chunk->head, __free_chunk);
}
void audit_put_chunk(struct audit_chunk *chunk)
{
        put_inotify_watch(&chunk->watch);
}
enum {HASH_SIZE = 128};
static struct list_head chunk_hash_heads[HASH_SIZE];
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(hash_lock);
static inline struct list_head *chunk_hash(const struct inode *inode)
{
        unsigned long n = (unsigned long)inode / L1_CACHE_BYTES;
        return chunk_hash_heads + n % HASH_SIZE;
}
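/*
 * Dividing the inode address by L1_CACHE_BYTES drops the low bits,
 * which are presumably always zero for slab-allocated inodes, so the
 * middle bits of the pointer end up doing the hashing, as promised in
 * the comment block near the top of this file.
 */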
/* hash_lock is held by caller */
static void insert_hash(struct audit_chunk *chunk)
{
        struct list_head *list = chunk_hash(chunk->watch.inode);
        list_add_rcu(&chunk->hash, list);
}
/* called under rcu_read_lock */
struct audit_chunk *audit_tree_lookup(const struct inode *inode)
{
        struct list_head *list = chunk_hash(inode);
        struct list_head *pos;

        list_for_each_rcu(pos, list) {
                struct audit_chunk *p = container_of(pos, struct audit_chunk, hash);
                if (p->watch.inode == inode) {
                        get_inotify_watch(&p->watch);
                        return p;
                }
        }
        return NULL;
}
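/*
 * Lookups traverse the hash under rcu_read_lock() alone; updaters
 * publish replacement chunks with list_replace_rcu() while holding
 * hash_lock, so a lookup may safely race with tagging or untagging.
 */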
int audit_tree_match(struct audit_chunk *chunk, struct audit_tree *tree)
{
        int n;
        for (n = 0; n < chunk->count; n++)
                if (chunk->owners[n].owner == tree)
                        return 1;
        return 0;
}
/* tagging and untagging inodes with trees */
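/*
 * In both directions the pattern is the same: allocate a replacement
 * chunk, clone the inotify watch onto it, copy the surviving owners,
 * swap it into the hash with list_replace_rcu(), then evict the old
 * watch and let the refcount reap the old chunk.
 */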
static void untag_chunk(struct audit_chunk *chunk, struct node *p)
{
        struct audit_chunk *new;
        struct audit_tree *owner;
        int size = chunk->count - 1;
        int i, j;

        mutex_lock(&chunk->watch.inode->inotify_mutex);
        if (chunk->dead) {
                mutex_unlock(&chunk->watch.inode->inotify_mutex);
                return;
        }

        owner = p->owner;

        if (!size) {
                chunk->dead = 1;
                spin_lock(&hash_lock);
                list_del_init(&chunk->trees);
                if (owner->root == chunk)
                        owner->root = NULL;
                list_del_init(&p->list);
                list_del_rcu(&chunk->hash);
                spin_unlock(&hash_lock);
                inotify_evict_watch(&chunk->watch);
                mutex_unlock(&chunk->watch.inode->inotify_mutex);
                put_inotify_watch(&chunk->watch);
                return;
        }

        new = alloc_chunk(size);
        if (!new)
                goto Fallback;
        if (inotify_clone_watch(&chunk->watch, &new->watch) < 0) {
                free_chunk(new);
                goto Fallback;
        }

        chunk->dead = 1;
        spin_lock(&hash_lock);
        list_replace_init(&chunk->trees, &new->trees);
        if (owner->root == chunk) {
                list_del_init(&owner->same_root);
                owner->root = NULL;
        }

        for (i = j = 0; i < size; i++, j++) {
                struct audit_tree *s;
                if (&chunk->owners[j] == p) {
                        list_del_init(&p->list);
                        i--;
                        continue;
                }
                s = chunk->owners[j].owner;
                new->owners[i].owner = s;
                new->owners[i].index = chunk->owners[j].index - j + i;
                if (!s) /* result of earlier fallback */
                        continue;
                get_tree(s);
                list_replace_init(&chunk->owners[j].list, &new->owners[i].list);
        }

        list_replace_rcu(&chunk->hash, &new->hash);
        list_for_each_entry(owner, &new->trees, same_root)
                owner->root = new;
        spin_unlock(&hash_lock);
        inotify_evict_watch(&chunk->watch);
        mutex_unlock(&chunk->watch.inode->inotify_mutex);
        put_inotify_watch(&chunk->watch);
        return;

Fallback:
        /* do the best we can */
        spin_lock(&hash_lock);
        if (owner->root == chunk) {
                list_del_init(&owner->same_root);
                owner->root = NULL;
        }
        list_del_init(&p->list);
        p->owner = NULL;
        put_tree(owner);
        spin_unlock(&hash_lock);
        mutex_unlock(&chunk->watch.inode->inotify_mutex);
}
static int create_chunk(struct inode *inode, struct audit_tree *tree)
{
        struct audit_chunk *chunk = alloc_chunk(1);

        if (!chunk)
                return -ENOMEM;

        if (inotify_add_watch(rtree_ih, &chunk->watch, inode, IN_IGNORED | IN_DELETE_SELF) < 0) {
                free_chunk(chunk);
                return -ENOSPC;
        }

        mutex_lock(&inode->inotify_mutex);
        spin_lock(&hash_lock);
        if (tree->goner) {
                spin_unlock(&hash_lock);
                chunk->dead = 1;
                inotify_evict_watch(&chunk->watch);
                mutex_unlock(&inode->inotify_mutex);
                put_inotify_watch(&chunk->watch);
                return 0;
        }
        chunk->owners[0].index = (1U << 31);
        chunk->owners[0].owner = tree;
        get_tree(tree);
        list_add(&chunk->owners[0].list, &tree->chunks);
        if (!tree->root) {
                tree->root = chunk;
                list_add(&tree->same_root, &chunk->trees);
        }
        insert_hash(chunk);
        spin_unlock(&hash_lock);
        mutex_unlock(&inode->inotify_mutex);
        return 0;
}
/* the first tagged inode becomes root of tree */
static int tag_chunk(struct inode *inode, struct audit_tree *tree)
{
        struct inotify_watch *watch;
        struct audit_tree *owner;
        struct audit_chunk *chunk, *old;
        struct node *p;
        int n;

        if (inotify_find_watch(rtree_ih, inode, &watch) < 0)
                return create_chunk(inode, tree);

        old = container_of(watch, struct audit_chunk, watch);

        /* are we already there? */
        spin_lock(&hash_lock);
        for (n = 0; n < old->count; n++) {
                if (old->owners[n].owner == tree) {
                        spin_unlock(&hash_lock);
                        put_inotify_watch(watch);
                        return 0;
                }
        }
        spin_unlock(&hash_lock);

        chunk = alloc_chunk(old->count + 1);
        if (!chunk)
                return -ENOMEM;

        mutex_lock(&inode->inotify_mutex);
        if (inotify_clone_watch(&old->watch, &chunk->watch) < 0) {
                mutex_unlock(&inode->inotify_mutex);
                free_chunk(chunk);
                return -ENOSPC;
        }
        spin_lock(&hash_lock);
        if (tree->goner) {
                spin_unlock(&hash_lock);
                chunk->dead = 1;
                inotify_evict_watch(&chunk->watch);
                mutex_unlock(&inode->inotify_mutex);
                put_inotify_watch(&chunk->watch);
                return 0;
        }
        list_replace_init(&old->trees, &chunk->trees);
        for (n = 0, p = chunk->owners; n < old->count; n++, p++) {
                struct audit_tree *s = old->owners[n].owner;
                p->owner = s;
                p->index = old->owners[n].index;
                if (!s) /* result of fallback in untag */
                        continue;
                get_tree(s);
                list_replace_init(&old->owners[n].list, &p->list);
        }
        p->index = (chunk->count - 1) | (1U<<31);
        p->owner = tree;
        get_tree(tree);
        list_add(&p->list, &tree->chunks);
        list_replace_rcu(&old->hash, &chunk->hash);
        list_for_each_entry(owner, &chunk->trees, same_root)
                owner->root = chunk;
        old->dead = 1;
        if (!tree->root) {
                tree->root = chunk;
                list_add(&tree->same_root, &chunk->trees);
        }
        spin_unlock(&hash_lock);
        inotify_evict_watch(&old->watch);
        mutex_unlock(&inode->inotify_mutex);
        put_inotify_watch(&old->watch);
        return 0;
}
static struct audit_chunk *find_chunk(struct node *p)
{
        int index = p->index & ~(1U<<31);
        p -= index;
        return container_of(p, struct audit_chunk, owners[0]);
}
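/*
 * Worked example: the node sitting in owners[2] has 2 in the low bits
 * of index (the MSB may additionally be set as the 'will prune' mark).
 * Masking the MSB off and stepping the pointer back two slots lands on
 * owners[0], from which container_of() recovers the enclosing chunk.
 */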
static void kill_rules(struct audit_tree *tree)
{
        struct audit_krule *rule, *next;
        struct audit_entry *entry;
        struct audit_buffer *ab;

        list_for_each_entry_safe(rule, next, &tree->rules, rlist) {
                entry = container_of(rule, struct audit_entry, rule);

                list_del_init(&rule->rlist);
                if (rule->tree) {
                        /* not a half-baked one */
                        ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
                        audit_log_format(ab, "op=remove rule dir=");
                        audit_log_untrustedstring(ab, rule->tree->pathname);
                        if (rule->filterkey) {
                                audit_log_format(ab, " key=");
                                audit_log_untrustedstring(ab, rule->filterkey);
                        } else
                                audit_log_format(ab, " key=(null)");
                        audit_log_format(ab, " list=%d res=1", rule->listnr);
                        audit_log_end(ab);
                        rule->tree = NULL;
                        list_del_rcu(&entry->list);
                        call_rcu(&entry->rcu, audit_free_rule_rcu);
                }
        }
}
/*
 * finish killing struct audit_tree
 */
static void prune_one(struct audit_tree *victim)
{
        spin_lock(&hash_lock);
        while (!list_empty(&victim->chunks)) {
                struct node *p;
                struct audit_chunk *chunk;

                p = list_entry(victim->chunks.next, struct node, list);
                chunk = find_chunk(p);
                get_inotify_watch(&chunk->watch);
                spin_unlock(&hash_lock);

                untag_chunk(chunk, p);

                put_inotify_watch(&chunk->watch);
                spin_lock(&hash_lock);
        }
        spin_unlock(&hash_lock);
        put_tree(victim);
}
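/*
 * Note the idiom above, repeated in trim_marked() below: pin the chunk
 * with get_inotify_watch(), drop hash_lock so that untag_chunk() can
 * take the inode's mutex, then retake the lock and restart from the
 * head of the list.
 */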
/* trim the uncommitted chunks from tree */

static void trim_marked(struct audit_tree *tree)
{
        struct list_head *p, *q;
        spin_lock(&hash_lock);
        if (tree->goner) {
                spin_unlock(&hash_lock);
                return;
        }
        /* reorder: move marked chunks to the front of the list */
        for (p = tree->chunks.next; p != &tree->chunks; p = q) {
                struct node *node = list_entry(p, struct node, list);
                q = p->next;
                if (node->index & (1U<<31)) {
                        list_del_init(p);
                        list_add(p, &tree->chunks);
                }
        }

        while (!list_empty(&tree->chunks)) {
                struct node *node;
                struct audit_chunk *chunk;

                node = list_entry(tree->chunks.next, struct node, list);

                /* have we run out of marked? */
                if (!(node->index & (1U<<31)))
                        break;

                chunk = find_chunk(node);
                get_inotify_watch(&chunk->watch);
                spin_unlock(&hash_lock);

                untag_chunk(chunk, node);

                put_inotify_watch(&chunk->watch);
                spin_lock(&hash_lock);
        }
        if (!tree->root && !tree->goner) {
                tree->goner = 1;
                spin_unlock(&hash_lock);
                mutex_lock(&audit_filter_mutex);
                kill_rules(tree);
                list_del_init(&tree->list);
                mutex_unlock(&audit_filter_mutex);
                prune_one(tree);
        } else {
                spin_unlock(&hash_lock);
        }
}
/* called with audit_filter_mutex */
int audit_remove_tree_rule(struct audit_krule *rule)
{
        struct audit_tree *tree;
        tree = rule->tree;
        if (tree) {
                spin_lock(&hash_lock);
                list_del_init(&rule->rlist);
                if (list_empty(&tree->rules) && !tree->goner) {
                        tree->root = NULL;
                        list_del_init(&tree->same_root);
                        tree->goner = 1;
                        list_move(&tree->list, &prune_list);
                        rule->tree = NULL;
                        spin_unlock(&hash_lock);
                        audit_schedule_prune();
                        return 1;
                }
                rule->tree = NULL;
                spin_unlock(&hash_lock);
                return 1;
        }
        return 0;
}
void audit_trim_trees(void)
{
        struct list_head cursor;

        mutex_lock(&audit_filter_mutex);
        list_add(&cursor, &tree_list);
        while (cursor.next != &tree_list) {
                struct audit_tree *tree;
                struct nameidata nd;
                struct vfsmount *root_mnt;
                struct node *node;
                struct list_head list;
                int err;

                tree = container_of(cursor.next, struct audit_tree, list);
                get_tree(tree);
                list_del(&cursor);
                list_add(&cursor, &tree->list);
                mutex_unlock(&audit_filter_mutex);

                err = path_lookup(tree->pathname, 0, &nd);
                if (err)
                        goto skip_it;

                root_mnt = collect_mounts(nd.mnt, nd.dentry);
                path_release(&nd);
                if (!root_mnt)
                        goto skip_it;

                list_add_tail(&list, &root_mnt->mnt_list);
                spin_lock(&hash_lock);
                list_for_each_entry(node, &tree->chunks, list) {
                        struct audit_chunk *chunk = find_chunk(node);
                        struct inode *inode = chunk->watch.inode;
                        struct vfsmount *mnt;
                        node->index |= 1U<<31;
                        list_for_each_entry(mnt, &list, mnt_list) {
                                if (mnt->mnt_root->d_inode == inode) {
                                        node->index &= ~(1U<<31);
                                        break;
                                }
                        }
                }
                spin_unlock(&hash_lock);
                trim_marked(tree);
                put_tree(tree);
                list_del_init(&list);
                drop_collected_mounts(root_mnt);
skip_it:
                mutex_lock(&audit_filter_mutex);
        }
        list_del(&cursor);
        mutex_unlock(&audit_filter_mutex);
}
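/*
 * The on-stack cursor element makes it safe to drop audit_filter_mutex
 * mid-walk: it is re-linked after the tree being processed, so the
 * walk resumes correctly even if other trees come or go meanwhile.
 */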
static int is_under(struct vfsmount *mnt, struct dentry *dentry,
                    struct nameidata *nd)
{
        if (mnt != nd->mnt) {
                for (;;) {
                        if (mnt->mnt_parent == mnt)
                                return 0;
                        if (mnt->mnt_parent == nd->mnt)
                                break;
                        mnt = mnt->mnt_parent;
                }
                dentry = mnt->mnt_mountpoint;
        }
        return is_subdir(dentry, nd->dentry);
}
int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op)
{

        if (pathname[0] != '/' ||
            rule->listnr != AUDIT_FILTER_EXIT ||
            op & ~AUDIT_EQUAL ||
            rule->inode_f || rule->watch || rule->tree)
                return -EINVAL;

        rule->tree = alloc_tree(pathname);
        if (!rule->tree)
                return -ENOMEM;
        return 0;
}
void audit_put_tree(struct audit_tree *tree)
{
        put_tree(tree);
}
/* called with audit_filter_mutex */
int audit_add_tree_rule(struct audit_krule *rule)
{
        struct audit_tree *seed = rule->tree, *tree;
        struct nameidata nd;
        struct vfsmount *mnt, *p;
        struct list_head list;
        int err;

        list_for_each_entry(tree, &tree_list, list) {
                if (!strcmp(seed->pathname, tree->pathname)) {
                        put_tree(seed);
                        rule->tree = tree;
                        list_add(&rule->rlist, &tree->rules);
                        return 0;
                }
        }

        tree = seed;
        list_add(&tree->list, &tree_list);
        list_add(&rule->rlist, &tree->rules);
        /* do not set rule->tree yet */
        mutex_unlock(&audit_filter_mutex);

        err = path_lookup(tree->pathname, 0, &nd);
        if (err)
                goto Err;
        mnt = collect_mounts(nd.mnt, nd.dentry);
        path_release(&nd);
        if (!mnt) {
                err = -ENOMEM;
                goto Err;
        }
        list_add_tail(&list, &mnt->mnt_list);

        get_tree(tree);
        list_for_each_entry(p, &list, mnt_list) {
                err = tag_chunk(p->mnt_root->d_inode, tree);
                if (err)
                        break;
        }

        list_del(&list);
        drop_collected_mounts(mnt);

        if (!err) {
                struct node *node;
                spin_lock(&hash_lock);
                list_for_each_entry(node, &tree->chunks, list)
                        node->index &= ~(1U<<31);
                spin_unlock(&hash_lock);
        } else {
                trim_marked(tree);
                goto Err;
        }

        mutex_lock(&audit_filter_mutex);
        if (list_empty(&rule->rlist)) {
                put_tree(tree);
                return -ENOENT;
        }
        rule->tree = tree;
        put_tree(tree);

        return 0;
Err:
        mutex_lock(&audit_filter_mutex);
        list_del_init(&tree->list);
        list_del_init(&tree->rules);
        put_tree(tree);
        return err;
}
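/*
 * audit_filter_mutex is dropped across the path lookup and tagging
 * above, so the rule may have been removed in the meantime; the
 * list_empty(&rule->rlist) recheck after retaking the mutex catches
 * exactly that case.
 */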
int audit_tag_tree(char *old, char *new)
{
        struct list_head cursor, barrier;
        int failed = 0;
        struct nameidata nd;
        struct vfsmount *tagged;
        struct list_head list;
        struct vfsmount *mnt;
        struct dentry *dentry;
        int err;

        err = path_lookup(new, 0, &nd);
        if (err)
                return err;
        tagged = collect_mounts(nd.mnt, nd.dentry);
        path_release(&nd);
        if (!tagged)
                return -ENOMEM;

        err = path_lookup(old, 0, &nd);
        if (err) {
                drop_collected_mounts(tagged);
                return err;
        }
        mnt = mntget(nd.mnt);
        dentry = dget(nd.dentry);
        path_release(&nd);

        if (dentry == tagged->mnt_root && dentry == mnt->mnt_root)
                follow_up(&mnt, &dentry);

        list_add_tail(&list, &tagged->mnt_list);

        mutex_lock(&audit_filter_mutex);
        list_add(&barrier, &tree_list);
        list_add(&cursor, &barrier);

        while (cursor.next != &tree_list) {
                struct audit_tree *tree;
                struct vfsmount *p;

                tree = container_of(cursor.next, struct audit_tree, list);
                get_tree(tree);
                list_del(&cursor);
                list_add(&cursor, &tree->list);
                mutex_unlock(&audit_filter_mutex);

                err = path_lookup(tree->pathname, 0, &nd);
                if (err) {
                        put_tree(tree);
                        mutex_lock(&audit_filter_mutex);
                        continue;
                }

                spin_lock(&vfsmount_lock);
                if (!is_under(mnt, dentry, &nd)) {
                        spin_unlock(&vfsmount_lock);
                        path_release(&nd);
                        put_tree(tree);
                        mutex_lock(&audit_filter_mutex);
                        continue;
                }
                spin_unlock(&vfsmount_lock);
                path_release(&nd);

                list_for_each_entry(p, &list, mnt_list) {
                        failed = tag_chunk(p->mnt_root->d_inode, tree);
                        if (failed)
                                break;
                }

                if (failed) {
                        put_tree(tree);
                        mutex_lock(&audit_filter_mutex);
                        break;
                }

                mutex_lock(&audit_filter_mutex);
                spin_lock(&hash_lock);
                if (!tree->goner) {
                        list_del(&tree->list);
                        list_add(&tree->list, &tree_list);
                }
                spin_unlock(&hash_lock);
                put_tree(tree);
        }

        while (barrier.prev != &tree_list) {
                struct audit_tree *tree;

                tree = container_of(barrier.prev, struct audit_tree, list);
                get_tree(tree);
                list_del(&tree->list);
                list_add(&tree->list, &barrier);
                mutex_unlock(&audit_filter_mutex);

                if (!failed) {
                        struct node *node;
                        spin_lock(&hash_lock);
                        list_for_each_entry(node, &tree->chunks, list)
                                node->index &= ~(1U<<31);
                        spin_unlock(&hash_lock);
                } else {
                        trim_marked(tree);
                }

                put_tree(tree);
                mutex_lock(&audit_filter_mutex);
        }

        list_del(&barrier);
        list_del(&cursor);
        list_del(&list);
        mutex_unlock(&audit_filter_mutex);
        dput(dentry);
        mntput(mnt);
        drop_collected_mounts(tagged);
        return failed;
}
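/*
 * The barrier element splits tree_list into a processed and an
 * unprocessed part: the first loop moves surviving trees in front of
 * the barrier, and the second walks back from it to clear the marks
 * (or to trim, on failure) exactly once per tree.
 */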
/*
 * That gets run when evict_chunk() ends up needing to kill audit_tree.
 * Runs from a separate thread, with audit_cmd_mutex held.
 */
void audit_prune_trees(void)
{
        mutex_lock(&audit_filter_mutex);

        while (!list_empty(&prune_list)) {
                struct audit_tree *victim;

                victim = list_entry(prune_list.next, struct audit_tree, list);
                list_del_init(&victim->list);

                mutex_unlock(&audit_filter_mutex);

                prune_one(victim);

                mutex_lock(&audit_filter_mutex);
        }

        mutex_unlock(&audit_filter_mutex);
}
/*
 * Here comes the stuff asynchronous to auditctl operations
 */

/* inode->inotify_mutex is locked */
static void evict_chunk(struct audit_chunk *chunk)
{
        struct audit_tree *owner;
        int n;

        if (chunk->dead)
                return;

        chunk->dead = 1;
        mutex_lock(&audit_filter_mutex);
        spin_lock(&hash_lock);
        while (!list_empty(&chunk->trees)) {
                owner = list_entry(chunk->trees.next,
                                   struct audit_tree, same_root);
                owner->goner = 1;
                owner->root = NULL;
                list_del_init(&owner->same_root);
                spin_unlock(&hash_lock);
                kill_rules(owner);
                list_move(&owner->list, &prune_list);
                audit_schedule_prune();
                spin_lock(&hash_lock);
        }
        list_del_rcu(&chunk->hash);
        for (n = 0; n < chunk->count; n++)
                list_del_init(&chunk->owners[n].list);
        spin_unlock(&hash_lock);
        mutex_unlock(&audit_filter_mutex);
}
static void handle_event(struct inotify_watch *watch, u32 wd, u32 mask,
                         u32 cookie, const char *dname, struct inode *inode)
{
        struct audit_chunk *chunk = container_of(watch, struct audit_chunk, watch);

        if (mask & IN_IGNORED) {
                evict_chunk(chunk);
                put_inotify_watch(watch);
        }
}
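/*
 * IN_IGNORED is delivered once when a watch is removed, including via
 * the inotify_evict_watch() calls above, so this is where a dying
 * chunk gets unhooked and its watch reference finally dropped.
 */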
static void destroy_watch(struct inotify_watch *watch)
{
        struct audit_chunk *chunk = container_of(watch, struct audit_chunk, watch);
        free_chunk(chunk);
}
static const struct inotify_operations rtree_inotify_ops = {
        .handle_event   = handle_event,
        .destroy_watch  = destroy_watch,
};
static int __init audit_tree_init(void)
{
        int i;

        rtree_ih = inotify_init(&rtree_inotify_ops);
        if (IS_ERR(rtree_ih))
                audit_panic("cannot initialize inotify handle for rectree watches");

        for (i = 0; i < HASH_SIZE; i++)
                INIT_LIST_HEAD(&chunk_hash_heads[i]);

        return 0;
}
__initcall(audit_tree_init);