/*
 * Copyright (C) 2008 Red Hat, Inc., Eric Paris <eparis@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */
/*
 * fsnotify inode mark locking/lifetime and refcounting
 *
 * REFCNT:
 * The group->refcnt and mark->refcnt tell how many "things" in the kernel
 * currently are referencing the objects. Both kinds of objects typically will
 * live inside the kernel with a refcnt of 2, one for its creation and one for
 * the reference a group and a mark hold to each other.
 * If you are holding the appropriate locks, you can take a reference and the
 * object itself is guaranteed to survive until the reference is dropped.
 *
 * LOCKING:
 * There are 3 locks involved with fsnotify inode marks and they MUST be taken
 * in order as follows:
 *
 * group->mark_mutex
 * mark->lock
 * mark->connector->lock
 *
 * group->mark_mutex protects the marks_list anchored inside a given group and
 * each mark is hooked via the g_list. It also protects the group's private
 * data (i.e. group limits).
 *
 * mark->lock protects the mark's attributes like its masks and flags.
 * Furthermore it protects the access to a reference of the group that the mark
 * is assigned to as well as the access to a reference of the inode/vfsmount
 * that is being watched by the mark.
 *
 * mark->connector->lock protects the list of marks anchored inside an
 * inode / vfsmount and each mark is hooked via the obj_list.
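 *
 * A typical call chain that respects this ordering is fsnotify_destroy_mark():
 * it takes group->mark_mutex, calls fsnotify_detach_mark() which takes
 * mark->lock, and the final fsnotify_put_mark() of the list reference takes
 * mark->connector->lock (via refcount_dec_and_lock()).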
 *
 * A list of notification marks relating to inode / mnt is contained in
 * fsnotify_mark_connector. That structure is alive as long as there are any
 * marks in the list and is also protected by fsnotify_mark_srcu. A mark gets
 * detached from fsnotify_mark_connector when the last reference to the mark is
 * dropped. Thus having a mark reference is enough to protect mark->connector
 * pointer and to make sure fsnotify_mark_connector cannot disappear. Also
 * because we remove the mark from g_list before dropping the mark reference
 * associated with that, any mark found through g_list is guaranteed to have
 * mark->connector set until we drop group->mark_mutex.
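 *
 * In this file the lockless side of that scheme is implemented by
 * fsnotify_grab_connector(), which dereferences the connector under
 * fsnotify_mark_srcu, and by fsnotify_get_mark_safe(), which pins a mark
 * found by such a lockless traversal.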
 *
 * LIFETIME:
 * Inode marks survive between when they are added to an inode and when their
 * refcnt==0. Marks are also protected by fsnotify_mark_srcu.
 *
 * The inode mark can be cleared for a number of different reasons including:
 * - The inode is unlinked for the last time. (fsnotify_inode_remove)
 * - The inode is being evicted from cache. (fsnotify_inode_delete)
 * - The fs the inode is on is unmounted. (fsnotify_inode_delete/fsnotify_unmount_inodes)
 * - Something explicitly requests that it be removed. (fsnotify_destroy_mark)
 * - The fsnotify_group associated with the mark is going away and all such marks
 *   need to be cleaned up. (fsnotify_clear_marks_by_group)
 *
 * This has the very interesting property of being able to run concurrently with
 * any (or all) other directions.
 */
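
/*
 * Illustrative sketch (not part of this file's logic): the expected life
 * cycle of a mark as seen by a backend, assuming the mark watches an inode
 * through its i_fsnotify_marks connector pointer.  Error handling and the
 * backend-specific embedding of the mark are omitted.
 *
 *	fsnotify_init_mark(mark, group);
 *	mark->mask = FS_MODIFY;
 *	ret = fsnotify_add_mark(mark, &inode->i_fsnotify_marks,
 *				FSNOTIFY_OBJ_TYPE_INODE, 0);
 *	...
 *	fsnotify_destroy_mark(mark, group);	(detach and free the mark)
 *	fsnotify_put_mark(mark);		(drop the initial reference)
 */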
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/srcu.h>

#include <linux/atomic.h>

#include <linux/fsnotify_backend.h>
#include "fsnotify.h"

#define FSNOTIFY_REAPER_DELAY	(1)	/* 1 jiffy */

struct srcu_struct fsnotify_mark_srcu;
struct kmem_cache *fsnotify_mark_connector_cachep;

static DEFINE_SPINLOCK(destroy_lock);
static LIST_HEAD(destroy_list);
static struct fsnotify_mark_connector *connector_destroy_list;

static void fsnotify_mark_destroy_workfn(struct work_struct *work);
static DECLARE_DELAYED_WORK(reaper_work, fsnotify_mark_destroy_workfn);

static void fsnotify_connector_destroy_workfn(struct work_struct *work);
static DECLARE_WORK(connector_reaper_work, fsnotify_connector_destroy_workfn);

void fsnotify_get_mark(struct fsnotify_mark *mark)
{
	WARN_ON_ONCE(!refcount_read(&mark->refcnt));
	refcount_inc(&mark->refcnt);
}
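
/* Return a pointer to the event mask stored in the connector's object. */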
static __u32 *fsnotify_conn_mask_p(struct fsnotify_mark_connector *conn)
{
	if (conn->type == FSNOTIFY_OBJ_TYPE_INODE)
		return &fsnotify_conn_inode(conn)->i_fsnotify_mask;
	else if (conn->type == FSNOTIFY_OBJ_TYPE_VFSMOUNT)
		return &fsnotify_conn_mount(conn)->mnt_fsnotify_mask;
	return NULL;
}

__u32 fsnotify_conn_mask(struct fsnotify_mark_connector *conn)
{
	if (WARN_ON(!fsnotify_valid_obj_type(conn->type)))
		return 0;

	return *fsnotify_conn_mask_p(conn);
}

static void __fsnotify_recalc_mask(struct fsnotify_mark_connector *conn)
{
	u32 new_mask = 0;
	struct fsnotify_mark *mark;

	assert_spin_locked(&conn->lock);
	/* We can get detached connector here when inode is getting unlinked. */
	if (!fsnotify_valid_obj_type(conn->type))
		return;
	hlist_for_each_entry(mark, &conn->list, obj_list) {
		if (mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED)
			new_mask |= mark->mask;
	}
	*fsnotify_conn_mask_p(conn) = new_mask;
}

/*
 * Calculate mask of events for a list of marks. The caller must make sure
 * connector and connector->obj cannot disappear under us. Callers achieve
 * this by holding a mark->lock or mark->group->mark_mutex for a mark on this
 * list.
 */
void fsnotify_recalc_mask(struct fsnotify_mark_connector *conn)
{
	if (!conn)
		return;

	spin_lock(&conn->lock);
	__fsnotify_recalc_mask(conn);
	spin_unlock(&conn->lock);
	if (conn->type == FSNOTIFY_OBJ_TYPE_INODE)
		__fsnotify_update_child_dentry_flags(
					fsnotify_conn_inode(conn));
}

/* Free all connectors queued for freeing once SRCU period ends */
static void fsnotify_connector_destroy_workfn(struct work_struct *work)
{
	struct fsnotify_mark_connector *conn, *free;

	spin_lock(&destroy_lock);
	conn = connector_destroy_list;
	connector_destroy_list = NULL;
	spin_unlock(&destroy_lock);

	synchronize_srcu(&fsnotify_mark_srcu);
	while (conn) {
		free = conn;
		conn = conn->destroy_next;
		kmem_cache_free(fsnotify_mark_connector_cachep, free);
	}
}
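
/*
 * Detach the connector from the inode or vfsmount it watches: clear the
 * object's event mask, clear its pointer to the connector and mark the
 * connector as detached.  Returns the inode the connector was pinning (if
 * any) so that the caller can drop that reference once conn->lock is
 * released.
 */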
static struct inode *fsnotify_detach_connector_from_object(
					struct fsnotify_mark_connector *conn)
{
	struct inode *inode = NULL;

	if (conn->type == FSNOTIFY_OBJ_TYPE_DETACHED)
		return NULL;

	if (conn->type == FSNOTIFY_OBJ_TYPE_INODE) {
		inode = fsnotify_conn_inode(conn);
		inode->i_fsnotify_mask = 0;
	} else if (conn->type == FSNOTIFY_OBJ_TYPE_VFSMOUNT) {
		fsnotify_conn_mount(conn)->mnt_fsnotify_mask = 0;
	}

	rcu_assign_pointer(*(conn->obj), NULL);
	conn->obj = NULL;
	conn->type = FSNOTIFY_OBJ_TYPE_DETACHED;

	return inode;
}

static void fsnotify_final_mark_destroy(struct fsnotify_mark *mark)
{
	struct fsnotify_group *group = mark->group;

	if (WARN_ON_ONCE(!group))
		return;
	group->ops->free_mark(mark);
	fsnotify_put_group(group);
}
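
/*
 * Drop a reference to a mark.  When the last reference is dropped the mark is
 * removed from its connector's list and queued on destroy_list; it is only
 * freed by the reaper workqueue after an SRCU grace period has passed.
 */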
void fsnotify_put_mark(struct fsnotify_mark *mark)
{
	struct fsnotify_mark_connector *conn;
	struct inode *inode = NULL;
	bool free_conn = false;

	/* Catch marks that were actually never attached to object */
	if (!mark->connector) {
		if (refcount_dec_and_test(&mark->refcnt))
			fsnotify_final_mark_destroy(mark);
		return;
	}

	/*
	 * We have to be careful so that traversals of obj_list under lock can
	 * safely grab mark reference.
	 */
	if (!refcount_dec_and_lock(&mark->refcnt, &mark->connector->lock))
		return;

	conn = mark->connector;
	hlist_del_init_rcu(&mark->obj_list);
	if (hlist_empty(&conn->list)) {
		inode = fsnotify_detach_connector_from_object(conn);
		free_conn = true;
	} else {
		__fsnotify_recalc_mask(conn);
	}
	mark->connector = NULL;
	spin_unlock(&conn->lock);

	iput(inode);

	if (free_conn) {
		spin_lock(&destroy_lock);
		conn->destroy_next = connector_destroy_list;
		connector_destroy_list = conn;
		spin_unlock(&destroy_lock);
		queue_work(system_unbound_wq, &connector_reaper_work);
	}
	/*
	 * Note that we didn't update flags telling whether inode cares about
	 * what's happening with children. We update these flags from
	 * __fsnotify_parent() lazily when next event happens on one of our
	 * children.
	 */
	spin_lock(&destroy_lock);
	list_add(&mark->g_list, &destroy_list);
	spin_unlock(&destroy_lock);
	queue_delayed_work(system_unbound_wq, &reaper_work,
			   FSNOTIFY_REAPER_DELAY);
}

/*
 * Get mark reference when we found the mark via lockless traversal of object
 * list. Mark can be already removed from the list by now and on its way to be
 * destroyed once SRCU period ends.
 *
 * Also pin the group so it doesn't disappear under us.
 */
static bool fsnotify_get_mark_safe(struct fsnotify_mark *mark)
{
	if (!mark)
		return true;

	if (refcount_inc_not_zero(&mark->refcnt)) {
		spin_lock(&mark->lock);
		if (mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED) {
			/* mark is attached, group is still alive then */
			atomic_inc(&mark->group->user_waits);
			spin_unlock(&mark->lock);
			return true;
		}
		spin_unlock(&mark->lock);
		fsnotify_put_mark(mark);
	}
	return false;
}

/*
 * Puts marks and wakes up group destruction if necessary.
 *
 * Pairs with fsnotify_get_mark_safe()
 */
static void fsnotify_put_mark_wake(struct fsnotify_mark *mark)
{
	if (mark) {
		struct fsnotify_group *group = mark->group;

		fsnotify_put_mark(mark);
		/*
		 * We abuse notification_waitq on group shutdown for waiting for
		 * all marks pinned when waiting for userspace.
		 */
		if (atomic_dec_and_test(&group->user_waits) && group->shutdown)
			wake_up(&group->notification_waitq);
	}
}

bool fsnotify_prepare_user_wait(struct fsnotify_iter_info *iter_info)
{
	int type;

	fsnotify_foreach_obj_type(type) {
		/* This can fail if mark is being removed */
		if (!fsnotify_get_mark_safe(iter_info->marks[type]))
			goto fail;
	}

	/*
	 * Now that both marks are pinned by refcount in the inode / vfsmount
	 * lists, we can drop SRCU lock, and safely resume the list iteration
	 * once userspace returns.
	 */
	srcu_read_unlock(&fsnotify_mark_srcu, iter_info->srcu_idx);

	return true;

fail:
	for (type--; type >= 0; type--)
		fsnotify_put_mark_wake(iter_info->marks[type]);
	return false;
}

void fsnotify_finish_user_wait(struct fsnotify_iter_info *iter_info)
{
	int type;

	iter_info->srcu_idx = srcu_read_lock(&fsnotify_mark_srcu);
	fsnotify_foreach_obj_type(type)
		fsnotify_put_mark_wake(iter_info->marks[type]);
}

/*
 * Mark mark as detached, remove it from group list. Mark still stays in object
 * list until its last reference is dropped. Note that we rely on mark being
 * removed from group list before corresponding reference to it is dropped. In
 * particular we rely on mark->connector being valid while we hold
 * group->mark_mutex if we found the mark through g_list.
 *
 * Must be called with group->mark_mutex held. The caller must either hold
 * reference to the mark or be protected by fsnotify_mark_srcu.
 */
void fsnotify_detach_mark(struct fsnotify_mark *mark)
{
	struct fsnotify_group *group = mark->group;

	WARN_ON_ONCE(!mutex_is_locked(&group->mark_mutex));
	WARN_ON_ONCE(!srcu_read_lock_held(&fsnotify_mark_srcu) &&
		     refcount_read(&mark->refcnt) < 1 +
			!!(mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED));

	spin_lock(&mark->lock);
	/* something else already called this function on this mark */
	if (!(mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED)) {
		spin_unlock(&mark->lock);
		return;
	}
	mark->flags &= ~FSNOTIFY_MARK_FLAG_ATTACHED;
	list_del_init(&mark->g_list);
	spin_unlock(&mark->lock);

	atomic_dec(&group->num_marks);

	/* Drop mark reference acquired in fsnotify_add_mark_locked() */
	fsnotify_put_mark(mark);
}

/*
 * Free fsnotify mark. The mark is actually only marked as being freed. The
 * freeing is actually happening only once last reference to the mark is
 * dropped from a workqueue which first waits for srcu period end.
 *
 * Caller must have a reference to the mark or be protected by
 * fsnotify_mark_srcu.
 */
void fsnotify_free_mark(struct fsnotify_mark *mark)
{
	struct fsnotify_group *group = mark->group;

	spin_lock(&mark->lock);
	/* something else already called this function on this mark */
	if (!(mark->flags & FSNOTIFY_MARK_FLAG_ALIVE)) {
		spin_unlock(&mark->lock);
		return;
	}
	mark->flags &= ~FSNOTIFY_MARK_FLAG_ALIVE;
	spin_unlock(&mark->lock);

	/*
	 * Some groups like to know that marks are being freed. This is a
	 * callback to the group function to let it know that this mark
	 * is being freed.
	 */
	if (group->ops->freeing_mark)
		group->ops->freeing_mark(mark, group);
}

void fsnotify_destroy_mark(struct fsnotify_mark *mark,
			   struct fsnotify_group *group)
{
	mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
	fsnotify_detach_mark(mark);
	mutex_unlock(&group->mark_mutex);
	fsnotify_free_mark(mark);
}

/*
 * Sorting function for lists of fsnotify marks.
 *
 * Fanotify supports different notification classes (reflected as priority of
 * notification group). Events shall be passed to notification groups in
 * decreasing priority order. To achieve this marks in notification lists for
 * inodes and vfsmounts are sorted so that priorities of corresponding groups
 * are descending.
 *
 * Furthermore correct handling of the ignore mask requires processing inode
 * and vfsmount marks of each group together. Using the group address as
 * further sort criterion provides a unique sorting order and thus we can
 * merge inode and vfsmount lists of marks in linear time and find groups
 * present in both lists.
 *
 * A return value of 1 signifies that b has priority over a.
 * A return value of 0 signifies that the two marks have to be handled together.
 * A return value of -1 signifies that a has priority over b.
 */
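
/*
 * For example, when group a has a higher ->priority than group b,
 * fsnotify_compare_groups(a, b) returns -1, so a's marks are placed before
 * b's marks in an object list and a therefore receives events first.
 */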
int fsnotify_compare_groups(struct fsnotify_group *a, struct fsnotify_group *b)
{
	if (a == b)
		return 0;
	if (!a)
		return 1;
	if (!b)
		return -1;
	if (a->priority < b->priority)
		return 1;
	if (a->priority > b->priority)
		return -1;
	if (a < b)
		return 1;
	return -1;
}

static int fsnotify_attach_connector_to_object(fsnotify_connp_t *connp,
					       unsigned int type)
{
	struct inode *inode = NULL;
	struct fsnotify_mark_connector *conn;

	conn = kmem_cache_alloc(fsnotify_mark_connector_cachep, GFP_KERNEL);
	if (!conn)
		return -ENOMEM;
	spin_lock_init(&conn->lock);
	INIT_HLIST_HEAD(&conn->list);
	conn->type = type;
	conn->obj = connp;
	if (conn->type == FSNOTIFY_OBJ_TYPE_INODE)
		inode = igrab(fsnotify_conn_inode(conn));
	/*
	 * cmpxchg() provides the barrier so that readers of *connp can see
	 * only initialized structure
	 */
	if (cmpxchg(connp, NULL, conn)) {
		/* Someone else created list structure for us */
		if (inode)
			iput(inode);
		kmem_cache_free(fsnotify_mark_connector_cachep, conn);
	}

	return 0;
}

/*
 * Get mark connector, make sure it is alive and return with its lock held.
 * This is for users that get connector pointer from inode or mount. Users that
 * hold reference to a mark on the list may directly lock connector->lock as
 * they are sure list cannot go away under them.
 */
static struct fsnotify_mark_connector *fsnotify_grab_connector(
						fsnotify_connp_t *connp)
{
	struct fsnotify_mark_connector *conn;
	int idx;

	idx = srcu_read_lock(&fsnotify_mark_srcu);
	conn = srcu_dereference(*connp, &fsnotify_mark_srcu);
	if (!conn)
		goto out;
	spin_lock(&conn->lock);
	if (conn->type == FSNOTIFY_OBJ_TYPE_DETACHED) {
		spin_unlock(&conn->lock);
		srcu_read_unlock(&fsnotify_mark_srcu, idx);
		return NULL;
	}
out:
	srcu_read_unlock(&fsnotify_mark_srcu, idx);
	return conn;
}

/*
 * Add mark into proper place in given list of marks. These marks may be used
 * for the fsnotify backend to determine which event types should be delivered
 * to which group and for which inodes. These marks are ordered according to
 * priority, highest number first, and then by the group's location in memory.
 */
static int fsnotify_add_mark_list(struct fsnotify_mark *mark,
				  fsnotify_connp_t *connp, unsigned int type,
				  int allow_dups)
{
	struct fsnotify_mark *lmark, *last = NULL;
	struct fsnotify_mark_connector *conn;
	int cmp;
	int err = 0;

	if (WARN_ON(!fsnotify_valid_obj_type(type)))
		return -EINVAL;
restart:
	spin_lock(&mark->lock);
	conn = fsnotify_grab_connector(connp);
	if (!conn) {
		spin_unlock(&mark->lock);
		err = fsnotify_attach_connector_to_object(connp, type);
		if (err)
			return err;
		goto restart;
	}

	/* is mark the first mark? */
	if (hlist_empty(&conn->list)) {
		hlist_add_head_rcu(&mark->obj_list, &conn->list);
		goto added;
	}

	/* should mark be in the middle of the current list? */
	hlist_for_each_entry(lmark, &conn->list, obj_list) {
		last = lmark;

		if ((lmark->group == mark->group) &&
		    (lmark->flags & FSNOTIFY_MARK_FLAG_ATTACHED) &&
		    !allow_dups) {
			err = -EEXIST;
			goto out_err;
		}

		cmp = fsnotify_compare_groups(lmark->group, mark->group);
		if (cmp >= 0) {
			hlist_add_before_rcu(&mark->obj_list, &lmark->obj_list);
			goto added;
		}
	}

	BUG_ON(last == NULL);
	/* mark should be the last entry.  last is the current last entry */
	hlist_add_behind_rcu(&mark->obj_list, &last->obj_list);
added:
	mark->connector = conn;
out_err:
	spin_unlock(&conn->lock);
	spin_unlock(&mark->lock);
	return err;
}

/*
 * Attach an initialized mark to a given group and fs object.
 * These marks may be used for the fsnotify backend to determine which
 * event types should be delivered to which group.
 */
int fsnotify_add_mark_locked(struct fsnotify_mark *mark,
			     fsnotify_connp_t *connp, unsigned int type,
			     int allow_dups)
{
	struct fsnotify_group *group = mark->group;
	int ret = 0;

	BUG_ON(!mutex_is_locked(&group->mark_mutex));

	/*
	 * LOCKING ORDER!!!!
	 * group->mark_mutex
	 * mark->lock
	 * mark->connector->lock
	 */
	spin_lock(&mark->lock);
	mark->flags |= FSNOTIFY_MARK_FLAG_ALIVE | FSNOTIFY_MARK_FLAG_ATTACHED;

	list_add(&mark->g_list, &group->marks_list);
	atomic_inc(&group->num_marks);
	fsnotify_get_mark(mark); /* for g_list */
	spin_unlock(&mark->lock);

	ret = fsnotify_add_mark_list(mark, connp, type, allow_dups);
	if (ret)
		goto err;

	if (mark->mask)
		fsnotify_recalc_mask(mark->connector);

	return ret;
err:
	spin_lock(&mark->lock);
	mark->flags &= ~(FSNOTIFY_MARK_FLAG_ALIVE |
			 FSNOTIFY_MARK_FLAG_ATTACHED);
	list_del_init(&mark->g_list);
	spin_unlock(&mark->lock);
	atomic_dec(&group->num_marks);

	fsnotify_put_mark(mark);
	return ret;
}

int fsnotify_add_mark(struct fsnotify_mark *mark, fsnotify_connp_t *connp,
		      unsigned int type, int allow_dups)
{
	int ret;
	struct fsnotify_group *group = mark->group;

	mutex_lock(&group->mark_mutex);
	ret = fsnotify_add_mark_locked(mark, connp, type, allow_dups);
	mutex_unlock(&group->mark_mutex);
	return ret;
}

/*
 * Given a list of marks, find the mark associated with given group. If found
 * take a reference to that mark and return it, else return NULL.
 */
struct fsnotify_mark *fsnotify_find_mark(fsnotify_connp_t *connp,
					 struct fsnotify_group *group)
{
	struct fsnotify_mark_connector *conn;
	struct fsnotify_mark *mark;

	conn = fsnotify_grab_connector(connp);
	if (!conn)
		return NULL;

	hlist_for_each_entry(mark, &conn->list, obj_list) {
		if (mark->group == group &&
		    (mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED)) {
			fsnotify_get_mark(mark);
			spin_unlock(&conn->lock);
			return mark;
		}
	}
	spin_unlock(&conn->lock);
	return NULL;
}

/* Clear any marks in a group with given type mask */
void fsnotify_clear_marks_by_group(struct fsnotify_group *group,
				   unsigned int type_mask)
{
	struct fsnotify_mark *lmark, *mark;
	LIST_HEAD(to_free);
	struct list_head *head = &to_free;

	/* Skip selection step if we want to clear all marks. */
	if (type_mask == FSNOTIFY_OBJ_ALL_TYPES_MASK) {
		head = &group->marks_list;
		goto clear;
	}
	/*
	 * We have to be really careful here. Anytime we drop mark_mutex, e.g.
	 * fsnotify_clear_marks_by_inode() can come and free marks. Even in our
	 * to_free list so we have to use mark_mutex even when accessing that
	 * list. And freeing mark requires us to drop mark_mutex. So we can
	 * reliably free only the first mark in the list. That's why we first
	 * move marks to free to to_free list in one go and then free marks in
	 * to_free list one by one.
	 */
	mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
	list_for_each_entry_safe(mark, lmark, &group->marks_list, g_list) {
		if ((1U << mark->connector->type) & type_mask)
			list_move(&mark->g_list, &to_free);
	}
	mutex_unlock(&group->mark_mutex);

clear:
	while (1) {
		mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
		if (list_empty(head)) {
			mutex_unlock(&group->mark_mutex);
			break;
		}
		mark = list_first_entry(head, struct fsnotify_mark, g_list);
		fsnotify_get_mark(mark);
		fsnotify_detach_mark(mark);
		mutex_unlock(&group->mark_mutex);
		fsnotify_free_mark(mark);
		fsnotify_put_mark(mark);
	}
}

/* Destroy all marks attached to an object via connector */
void fsnotify_destroy_marks(fsnotify_connp_t *connp)
{
	struct fsnotify_mark_connector *conn;
	struct fsnotify_mark *mark, *old_mark = NULL;
	struct inode *inode;

	conn = fsnotify_grab_connector(connp);
	if (!conn)
		return;
	/*
	 * We have to be careful since we can race with e.g.
	 * fsnotify_clear_marks_by_group() and once we drop the conn->lock, the
	 * list can get modified. However we are holding mark reference and
	 * thus our mark cannot be removed from obj_list so we can continue
	 * iteration after regaining conn->lock.
	 */
	hlist_for_each_entry(mark, &conn->list, obj_list) {
		fsnotify_get_mark(mark);
		spin_unlock(&conn->lock);
		if (old_mark)
			fsnotify_put_mark(old_mark);
		old_mark = mark;
		fsnotify_destroy_mark(mark, mark->group);
		spin_lock(&conn->lock);
	}
	/*
	 * Detach list from object now so that we don't pin inode until all
	 * mark references get dropped. It would lead to strange results such
	 * as delaying inode deletion or blocking unmount.
	 */
	inode = fsnotify_detach_connector_from_object(conn);
	spin_unlock(&conn->lock);
	if (old_mark)
		fsnotify_put_mark(old_mark);
	iput(inode);
}

/*
 * Nothing fancy, just initialize lists and locks and counters.
 */
void fsnotify_init_mark(struct fsnotify_mark *mark,
			struct fsnotify_group *group)
{
	memset(mark, 0, sizeof(*mark));
	spin_lock_init(&mark->lock);
	refcount_set(&mark->refcnt, 1);
	fsnotify_get_group(group);
	mark->group = group;
}

/*
 * Destroy all marks in destroy_list, waits for SRCU period to finish before
 * actually freeing marks.
 */
static void fsnotify_mark_destroy_workfn(struct work_struct *work)
{
	struct fsnotify_mark *mark, *next;
	struct list_head private_destroy_list;

	spin_lock(&destroy_lock);
	/* exchange the list head */
	list_replace_init(&destroy_list, &private_destroy_list);
	spin_unlock(&destroy_lock);

	synchronize_srcu(&fsnotify_mark_srcu);

	list_for_each_entry_safe(mark, next, &private_destroy_list, g_list) {
		list_del_init(&mark->g_list);
		fsnotify_final_mark_destroy(mark);
	}
}

/* Wait for all marks queued for destruction to be actually destroyed */
void fsnotify_wait_marks_destroyed(void)
{
	flush_delayed_work(&reaper_work);
}