fs/notify/mark.c
/*
 *  Copyright (C) 2008 Red Hat, Inc., Eric Paris <eparis@redhat.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */
/*
 * fsnotify inode mark locking/lifetime/and refcnting
 *
 * REFCNT:
 * The mark->refcnt tells how many "things" in the kernel currently are
 * referencing this object.  The object typically will live inside the kernel
 * with a refcnt of 2, one for each list it is on (i_list, g_list).  Any task
 * which can find this object holding the appropriate locks can take a reference
 * and the object itself is guaranteed to survive until the reference is dropped.
 *
 * LOCKING:
 * There are 3 spinlocks involved with fsnotify inode marks and they MUST
 * be taken in order as follows:
 *
 * mark->lock
 * group->mark_lock
 * inode->i_lock
 *
 * mark->lock protects 2 things, mark->group and mark->inode.  You must hold
 * that lock to dereference either of these things (they could be NULL even with
 * the lock)
 *
 * group->mark_lock protects the marks_list anchored inside a given group
 * and each mark is hooked via the g_list.  It also loosely protects the
 * free_g_list, which when used is anchored by a private list on the stack of the
 * task which held the group->mark_lock.
 *
 * inode->i_lock protects the i_fsnotify_marks list anchored inside a
 * given inode and each mark is hooked via the i_list.  (and loosely the
 * free_i_list)
 *
 * LIFETIME:
 * Inode marks survive between when they are added to an inode and when their
 * refcnt==0.
 *
 * The inode mark can be cleared for a number of different reasons including:
 * - The inode is unlinked for the last time.  (fsnotify_inode_remove)
 * - The inode is being evicted from cache.  (fsnotify_inode_delete)
 * - The fs the inode is on is unmounted.  (fsnotify_inode_delete/fsnotify_unmount_inodes)
 * - Something explicitly requests that it be removed.  (fsnotify_destroy_mark)
 * - The fsnotify_group associated with the mark is going away and all such marks
 *   need to be cleaned up.  (fsnotify_clear_marks_by_group)
 *
 * Worst case we are given an inode and need to clean up all the marks on that
 * inode.  We take i_lock and walk the i_fsnotify_marks safely.  For each
 * mark on the list we take a reference (so the mark can't disappear under us).
 * We remove that mark from the inode's list of marks and we add this mark to a
 * private list anchored on the stack using i_free_list; at this point we no
 * longer fear anything finding the mark using the inode's list of marks.
 *
 * We can safely and locklessly run the private list on the stack of everything
 * we just detached from the original inode.  For each mark on the private list
 * we grab the mark->lock and can thus dereference mark->group and mark->inode.  If
 * we see the group and inode are not NULL we take those locks.  Now holding all
 * 3 locks we can completely remove the mark from other tasks finding it in the
 * future.  Remember, 10 things might already be referencing this mark, but they
 * better be holding a ref.  We drop the reference we took before we unhooked it
 * from the inode.  When the ref hits 0 we can free the mark.
 *
 * Freeing by group works very similarly, except we use free_g_list.
 *
 * This has the very interesting property of being able to run concurrently with
 * any (or all) other directions.
 */
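/*
 * Illustrative sketch only (example_take_all_locks is hypothetical, not a
 * helper defined in this file): a function needing all three locks would
 * take them top-down in the documented order and release them bottom-up.
 * Taking them in any other order risks deadlock against the teardown paths
 * below.
 *
 *	static void example_take_all_locks(struct fsnotify_mark *mark,
 *					   struct fsnotify_group *group,
 *					   struct inode *inode)
 *	{
 *		spin_lock(&mark->lock);
 *		spin_lock(&group->mark_lock);
 *		spin_lock(&inode->i_lock);
 *
 *		... tear the mark out of the lists here ...
 *
 *		spin_unlock(&inode->i_lock);
 *		spin_unlock(&group->mark_lock);
 *		spin_unlock(&mark->lock);
 *	}
 */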
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/srcu.h>
#include <linux/writeback.h> /* for inode_lock */

#include <asm/atomic.h>

#include <linux/fsnotify_backend.h>
#include "fsnotify.h"
struct srcu_struct fsnotify_mark_srcu;
static DEFINE_SPINLOCK(destroy_lock);
static LIST_HEAD(destroy_list);
static DECLARE_WAIT_QUEUE_HEAD(destroy_waitq);
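/*
 * Marks are not freed synchronously: fsnotify_destroy_mark() queues them on
 * destroy_list and wakes destroy_waitq, and the fsnotify_mark kthread (see
 * fsnotify_mark_destroy() below) drops the final reference once an SRCU
 * grace period on fsnotify_mark_srcu has elapsed.
 */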
void fsnotify_get_mark(struct fsnotify_mark *mark)
{
	atomic_inc(&mark->refcnt);
}
void fsnotify_put_mark(struct fsnotify_mark *mark)
{
	if (atomic_dec_and_test(&mark->refcnt))
		mark->free_mark(mark);
}
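/*
 * Typical usage (an illustrative sketch, not code from this file): a task
 * that finds a mark on a list while holding the proper lock takes a
 * reference before dropping that lock, and pairs it with a put when done:
 *
 *	fsnotify_get_mark(mark);
 *	spin_unlock(&inode->i_lock);
 *	...use the mark; it cannot be freed while we hold the ref...
 *	fsnotify_put_mark(mark);
 */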
/*
 * Any time a mark is getting freed we end up here.
 * The caller had better be holding a reference to this mark so we don't actually
 * do the final put under the mark->lock.
 */
void fsnotify_destroy_mark(struct fsnotify_mark *mark)
{
	struct fsnotify_group *group;
	struct inode *inode = NULL;

	spin_lock(&mark->lock);

	group = mark->group;

	/* something else already called this function on this mark */
	if (!(mark->flags & FSNOTIFY_MARK_FLAG_ALIVE)) {
		spin_unlock(&mark->lock);
		return;
	}

	mark->flags &= ~FSNOTIFY_MARK_FLAG_ALIVE;

	/* 1 from caller and 1 for being on i_list/g_list */
	BUG_ON(atomic_read(&mark->refcnt) < 2);

	spin_lock(&group->mark_lock);

	if (mark->flags & FSNOTIFY_MARK_FLAG_INODE) {
		inode = mark->i.inode;
		fsnotify_destroy_inode_mark(mark);
	} else if (mark->flags & FSNOTIFY_MARK_FLAG_VFSMOUNT)
		fsnotify_destroy_vfsmount_mark(mark);
	else
		BUG();

	list_del_init(&mark->g_list);

	spin_unlock(&group->mark_lock);
	spin_unlock(&mark->lock);

	spin_lock(&destroy_lock);
	list_add(&mark->destroy_list, &destroy_list);
	spin_unlock(&destroy_lock);
	wake_up(&destroy_waitq);
	/*
	 * Some groups like to know that marks are being freed.  This is a
	 * callback to the group function to let it know that this mark
	 * is being freed.
	 */
	if (group->ops->freeing_mark)
		group->ops->freeing_mark(mark, group);
	/*
	 * __fsnotify_update_child_dentry_flags(inode);
	 *
	 * I really want to call that, but we can't, we have no idea if the inode
	 * still exists the second we drop the mark->lock.
	 *
	 * The next time an event arrives at this inode from one of its children
	 * __fsnotify_parent will see that the inode doesn't care about its
	 * children and will update all of these flags then.  So really this
	 * is just a lazy update (and could be a perf win...)
	 */

	if (inode && (mark->flags & FSNOTIFY_MARK_FLAG_OBJECT_PINNED))
		iput(inode);
	/*
	 * It's possible that this group tried to destroy itself, but this
	 * mark was simultaneously being freed by an inode.  If that's the
	 * case, we finish freeing the group here.
	 */
	if (unlikely(atomic_dec_and_test(&group->num_marks)))
		fsnotify_final_destroy_group(group);
}
void fsnotify_set_mark_mask_locked(struct fsnotify_mark *mark, __u32 mask)
{
	assert_spin_locked(&mark->lock);

	mark->mask = mask;

	if (mark->flags & FSNOTIFY_MARK_FLAG_INODE)
		fsnotify_set_inode_mark_mask_locked(mark, mask);
}
void fsnotify_set_mark_ignored_mask_locked(struct fsnotify_mark *mark, __u32 mask)
{
	assert_spin_locked(&mark->lock);

	mark->ignored_mask = mask;
}
/*
 * Attach an initialized mark to a given group and fs object.
 * These marks may be used for the fsnotify backend to determine which
 * event types should be delivered to which group.
 */
int fsnotify_add_mark(struct fsnotify_mark *mark,
		      struct fsnotify_group *group, struct inode *inode,
		      struct vfsmount *mnt, int allow_dups)
{
	int ret = 0;

	BUG_ON(inode && mnt);
	BUG_ON(!inode && !mnt);

	/*
	 * LOCKING ORDER!!!!
	 * mark->lock
	 * group->mark_lock
	 * inode->i_lock
	 */
	spin_lock(&mark->lock);
	spin_lock(&group->mark_lock);

	mark->flags |= FSNOTIFY_MARK_FLAG_ALIVE;

	mark->group = group;
	list_add(&mark->g_list, &group->marks_list);
	atomic_inc(&group->num_marks);
	fsnotify_get_mark(mark); /* for i_list and g_list */

	if (inode) {
		ret = fsnotify_add_inode_mark(mark, group, inode, allow_dups);
		if (ret)
			goto err;
	} else if (mnt) {
		ret = fsnotify_add_vfsmount_mark(mark, group, mnt, allow_dups);
		if (ret)
			goto err;
	} else {
		BUG();
	}

	spin_unlock(&group->mark_lock);

	/* this will pin the object if appropriate */
	fsnotify_set_mark_mask_locked(mark, mark->mask);

	spin_unlock(&mark->lock);

	if (inode)
		__fsnotify_update_child_dentry_flags(inode);

	return ret;
err:
	mark->flags &= ~FSNOTIFY_MARK_FLAG_ALIVE;
	list_del_init(&mark->g_list);
	mark->group = NULL;
	atomic_dec(&group->num_marks);

	spin_unlock(&group->mark_lock);
	spin_unlock(&mark->lock);

	spin_lock(&destroy_lock);
	list_add(&mark->destroy_list, &destroy_list);
	spin_unlock(&destroy_lock);
	wake_up(&destroy_waitq);

	return ret;
}
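/*
 * Illustrative caller (a sketch under assumed names, not code from this
 * file): a backend typically initializes a mark with its own free callback,
 * sets the events it cares about, then attaches the mark to an inode.  The
 * my_free_mark callback and the error handling shown are hypothetical.
 *
 *	fsnotify_init_mark(mark, my_free_mark);
 *	mark->mask = FS_MODIFY;
 *	ret = fsnotify_add_mark(mark, group, inode, NULL, 0);
 *	if (ret)
 *		fsnotify_put_mark(mark);
 */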
/*
 * Clear any marks in a group in which mark->flags & flags is true.
 */
void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group,
					 unsigned int flags)
{
	struct fsnotify_mark *lmark, *mark;
	LIST_HEAD(free_list);

	spin_lock(&group->mark_lock);
	list_for_each_entry_safe(mark, lmark, &group->marks_list, g_list) {
		if (mark->flags & flags) {
			list_add(&mark->free_g_list, &free_list);
			list_del_init(&mark->g_list);
			fsnotify_get_mark(mark);
		}
	}
	spin_unlock(&group->mark_lock);

	list_for_each_entry_safe(mark, lmark, &free_list, free_g_list) {
		fsnotify_destroy_mark(mark);
		fsnotify_put_mark(mark);
	}
}
/*
 * Given a group, destroy all of the marks associated with that group.
 */
void fsnotify_clear_marks_by_group(struct fsnotify_group *group)
{
	fsnotify_clear_marks_by_group_flags(group, (unsigned int)-1);
}
void fsnotify_duplicate_mark(struct fsnotify_mark *new, struct fsnotify_mark *old)
{
	assert_spin_locked(&old->lock);
	new->i.inode = old->i.inode;
	new->m.mnt = old->m.mnt;
	new->group = old->group;
	new->mask = old->mask;
	new->free_mark = old->free_mark;
}
/*
 * Nothing fancy, just initialize lists and locks and counters.
 */
void fsnotify_init_mark(struct fsnotify_mark *mark,
			void (*free_mark)(struct fsnotify_mark *mark))
{
	memset(mark, 0, sizeof(*mark));
	spin_lock_init(&mark->lock);
	atomic_set(&mark->refcnt, 1);
	mark->free_mark = free_mark;
}
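/*
 * Worker for the fsnotify_mark kthread: repeatedly steal everything queued
 * on destroy_list, wait for an SRCU grace period so no reader can still be
 * walking a list that contained these marks, then drop the list reference
 * on each one.
 */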
static int fsnotify_mark_destroy(void *ignored)
{
	struct fsnotify_mark *mark, *next;
	LIST_HEAD(private_destroy_list);

	for (;;) {
		spin_lock(&destroy_lock);
		/* exchange the list head */
		list_replace_init(&destroy_list, &private_destroy_list);
		spin_unlock(&destroy_lock);

		synchronize_srcu(&fsnotify_mark_srcu);

		list_for_each_entry_safe(mark, next, &private_destroy_list, destroy_list) {
			list_del_init(&mark->destroy_list);
			fsnotify_put_mark(mark);
		}

		wait_event_interruptible(destroy_waitq, !list_empty(&destroy_list));
	}

	return 0;
}
static int __init fsnotify_mark_init(void)
{
	struct task_struct *thread;

	thread = kthread_run(fsnotify_mark_destroy, NULL,
			     "fsnotify_mark");
	if (IS_ERR(thread))
		panic("unable to start fsnotify mark destruction thread.");

	return 0;
}
device_initcall(fsnotify_mark_init);