/*
 * fs/inotify.c - inode-based file event notifications
 *
 * Authors:
 *	John McCutchan	<ttb@tentacle.dhs.org>
 *	Robert Love	<rml@novell.com>
 *
 * Kernel API added by: Amy Griffis <amy.griffis@hp.com>
 *
 * Copyright (C) 2005 John McCutchan
 * Copyright 2006 Hewlett-Packard Development Company, L.P.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/writeback.h>
#include <linux/inotify.h>
#include <linux/fsnotify_backend.h>
static atomic_t inotify_cookie;
/*
 * Lock ordering:
 *
 * dentry->d_lock (used to keep d_move() away from dentry->d_parent)
 * iprune_mutex (synchronize shrink_icache_memory())
 * inode_lock (protects the super_block->s_inodes list)
 * inode->inotify_mutex (protects inode->inotify_watches and watches->i_list)
 * inotify_handle->mutex (protects inotify_handle and watches->h_list)
 *
 * The inode->inotify_mutex and inotify_handle->mutex are held during execution
 * of a caller's event handler.  Thus, the caller must not hold any locks
 * taken in their event handler while calling any of the published inotify
 * APIs.
 */
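/*
 * Illustrative sketch (not part of the original file): every path below that
 * needs both mutexes takes them in the order given above, e.g.:
 *
 *	mutex_lock(&inode->inotify_mutex);
 *	mutex_lock(&ih->mutex);
 *	...
 *	mutex_unlock(&ih->mutex);
 *	mutex_unlock(&inode->inotify_mutex);
 *
 * Reversing this order anywhere would deadlock against inotify_add_watch()
 * and the removal paths.
 */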
/*
 * Lifetimes of the three main data structures--inotify_handle, inode, and
 * inotify_watch--are managed by reference count.
 *
 * inotify_handle: Lifetime is from inotify_init() to inotify_destroy().
 * Additional references can bump the count via get_inotify_handle() and drop
 * the count via put_inotify_handle().
 *
 * inotify_watch: for inotify's purposes, lifetime is from inotify_add_watch()
 * to remove_watch_no_event().  Additional references can bump the count via
 * get_inotify_watch() and drop the count via put_inotify_watch().  The caller
 * is responsible for the final put after receiving IN_IGNORED, or when using
 * IN_ONESHOT after receiving the first event.  Inotify does the final put if
 * inotify_destroy() is called.
 *
 * inode: Pinned so long as the inode is associated with a watch, from
 * inotify_add_watch() to the final put_inotify_watch().
 */
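/*
 * Illustrative sketch (not part of the original file): the embedding pattern
 * these lifetime rules assume.  A consumer wraps the watch in its own
 * structure and frees the container from its destroy_watch() op, which runs
 * once the final put drops the count to zero.  The "example_*" names are
 * hypothetical.
 *
 *	struct example_watch {
 *		struct inotify_watch wdata;	(refcounted by inotify)
 *		void *private;			(consumer state)
 *	};
 *
 *	static void example_destroy_watch(struct inotify_watch *watch)
 *	{
 *		struct example_watch *w;
 *		w = container_of(watch, struct example_watch, wdata);
 *		kfree(w);
 *	}
 *
 * On IN_IGNORED the consumer does the final put_inotify_watch() itself,
 * which ends up in example_destroy_watch() above.
 */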
/*
 * struct inotify_handle - represents an inotify instance
 *
 * This structure is protected by the mutex 'mutex'.
 */
struct inotify_handle {
	struct idr		idr;		/* idr mapping wd -> watch */
	struct mutex		mutex;		/* protects this bad boy */
	struct list_head	watches;	/* list of watches */
	atomic_t		count;		/* reference count */
	u32			last_wd;	/* the last wd allocated */
	const struct inotify_operations *in_ops; /* inotify caller operations */
};
static inline void get_inotify_handle(struct inotify_handle *ih)
{
	atomic_inc(&ih->count);
}

static inline void put_inotify_handle(struct inotify_handle *ih)
{
	if (atomic_dec_and_test(&ih->count)) {
		idr_destroy(&ih->idr);
		kfree(ih);
	}
}
/**
 * get_inotify_watch - grab a reference to an inotify_watch
 * @watch: watch to grab
 */
void get_inotify_watch(struct inotify_watch *watch)
{
	atomic_inc(&watch->count);
}
EXPORT_SYMBOL_GPL(get_inotify_watch);
int pin_inotify_watch(struct inotify_watch *watch)
{
	struct super_block *sb = watch->inode->i_sb;
	if (atomic_inc_not_zero(&sb->s_active)) {
		atomic_inc(&watch->count);
		return 1;
	}
	return 0;
}
/**
 * put_inotify_watch - decrements the ref count on a given watch.  cleans up
 * watch references if the count reaches zero.  inotify_watch is freed by
 * inotify callers via the destroy_watch() op.
 * @watch: watch to release
 */
void put_inotify_watch(struct inotify_watch *watch)
{
	if (atomic_dec_and_test(&watch->count)) {
		struct inotify_handle *ih = watch->ih;

		iput(watch->inode);
		ih->in_ops->destroy_watch(watch);
		put_inotify_handle(ih);
	}
}
EXPORT_SYMBOL_GPL(put_inotify_watch);
void unpin_inotify_watch(struct inotify_watch *watch)
{
	struct super_block *sb = watch->inode->i_sb;
	put_inotify_watch(watch);
	deactivate_super(sb);
}
/*
 * inotify_handle_get_wd - returns the next WD for use by the given handle
 *
 * Callers must hold ih->mutex.  This function can sleep.
 */
static int inotify_handle_get_wd(struct inotify_handle *ih,
				 struct inotify_watch *watch)
{
	int ret;

	do {
		if (unlikely(!idr_pre_get(&ih->idr, GFP_NOFS)))
			return -ENOSPC;
		ret = idr_get_new_above(&ih->idr, watch, ih->last_wd+1, &watch->wd);
	} while (ret == -EAGAIN);

	if (!ret)
		ih->last_wd = watch->wd;

	return ret;
}
/*
 * inotify_inode_watched - returns nonzero if there are watches on this inode
 * and zero otherwise.  We call this lockless, we do not care if we race.
 */
static inline int inotify_inode_watched(struct inode *inode)
{
	return !list_empty(&inode->inotify_watches);
}
/*
 * Get child dentry flag into synch with parent inode.
 * Flag should always be clear for negative dentrys.
 */
static void set_dentry_child_flags(struct inode *inode, int watched)
{
	struct dentry *alias;

	spin_lock(&dcache_lock);
	list_for_each_entry(alias, &inode->i_dentry, d_alias) {
		struct dentry *child;

		list_for_each_entry(child, &alias->d_subdirs, d_u.d_child) {
			if (!child->d_inode)
				continue;

			spin_lock(&child->d_lock);
			if (watched)
				child->d_flags |= DCACHE_INOTIFY_PARENT_WATCHED;
			else
				child->d_flags &= ~DCACHE_INOTIFY_PARENT_WATCHED;
			spin_unlock(&child->d_lock);
		}
	}
	spin_unlock(&dcache_lock);
}
/*
 * inode_find_handle - find the watch associated with the given inode and
 * handle
 *
 * Callers must hold inode->inotify_mutex.
 */
static struct inotify_watch *inode_find_handle(struct inode *inode,
					       struct inotify_handle *ih)
{
	struct inotify_watch *watch;

	list_for_each_entry(watch, &inode->inotify_watches, i_list) {
		if (watch->ih == ih)
			return watch;
	}

	return NULL;
}
/*
 * remove_watch_no_event - remove watch without the IN_IGNORED event.
 *
 * Callers must hold both inode->inotify_mutex and ih->mutex.
 */
static void remove_watch_no_event(struct inotify_watch *watch,
				  struct inotify_handle *ih)
{
	list_del(&watch->i_list);
	list_del(&watch->h_list);

	if (!inotify_inode_watched(watch->inode))
		set_dentry_child_flags(watch->inode, 0);

	idr_remove(&ih->idr, watch->wd);
}
/**
 * inotify_remove_watch_locked - Remove a watch from both the handle and the
 * inode.  Sends the IN_IGNORED event signifying that the inode is no longer
 * watched.  May be invoked from a caller's event handler.
 * @ih: inotify handle associated with watch
 * @watch: watch to remove
 *
 * Callers must hold both inode->inotify_mutex and ih->mutex.
 */
void inotify_remove_watch_locked(struct inotify_handle *ih,
				 struct inotify_watch *watch)
{
	remove_watch_no_event(watch, ih);
	ih->in_ops->handle_event(watch, watch->wd, IN_IGNORED, 0, NULL, NULL);
}
EXPORT_SYMBOL_GPL(inotify_remove_watch_locked);
/* Kernel API for producing events */
/*
 * inotify_d_instantiate - instantiate dcache entry for inode
 */
void inotify_d_instantiate(struct dentry *entry, struct inode *inode)
{
	struct dentry *parent;

	if (!inode)
		return;

	spin_lock(&entry->d_lock);
	parent = entry->d_parent;
	if (parent->d_inode && inotify_inode_watched(parent->d_inode))
		entry->d_flags |= DCACHE_INOTIFY_PARENT_WATCHED;
	spin_unlock(&entry->d_lock);
}
/*
 * inotify_d_move - dcache entry has been moved
 */
void inotify_d_move(struct dentry *entry)
{
	struct dentry *parent;

	parent = entry->d_parent;
	if (inotify_inode_watched(parent->d_inode))
		entry->d_flags |= DCACHE_INOTIFY_PARENT_WATCHED;
	else
		entry->d_flags &= ~DCACHE_INOTIFY_PARENT_WATCHED;
}
/**
 * inotify_inode_queue_event - queue an event to all watches on this inode
 * @inode: inode event is originating from
 * @mask: event mask describing this event
 * @cookie: cookie for synchronization, or zero
 * @name: filename, if any
 * @n_inode: inode associated with name
 */
void inotify_inode_queue_event(struct inode *inode, u32 mask, u32 cookie,
			       const char *name, struct inode *n_inode)
{
	struct inotify_watch *watch, *next;

	if (!inotify_inode_watched(inode))
		return;

	mutex_lock(&inode->inotify_mutex);
	list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) {
		u32 watch_mask = watch->mask;
		if (watch_mask & mask) {
			struct inotify_handle *ih = watch->ih;
			mutex_lock(&ih->mutex);
			if (watch_mask & IN_ONESHOT)
				remove_watch_no_event(watch, ih);
			ih->in_ops->handle_event(watch, watch->wd, mask, cookie,
						 name, n_inode);
			mutex_unlock(&ih->mutex);
		}
	}
	mutex_unlock(&inode->inotify_mutex);
}
EXPORT_SYMBOL_GPL(inotify_inode_queue_event);
/**
 * inotify_dentry_parent_queue_event - queue an event to a dentry's parent
 * @dentry: the dentry in question, we queue against this dentry's parent
 * @mask: event mask describing this event
 * @cookie: cookie for synchronization, or zero
 * @name: filename, if any
 */
void inotify_dentry_parent_queue_event(struct dentry *dentry, u32 mask,
				       u32 cookie, const char *name)
{
	struct dentry *parent;
	struct inode *inode;

	if (!(dentry->d_flags & DCACHE_INOTIFY_PARENT_WATCHED))
		return;

	spin_lock(&dentry->d_lock);
	parent = dentry->d_parent;
	inode = parent->d_inode;

	if (inotify_inode_watched(inode)) {
		dget(parent);
		spin_unlock(&dentry->d_lock);
		inotify_inode_queue_event(inode, mask, cookie, name,
					  dentry->d_inode);
		dput(parent);
	} else
		spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL_GPL(inotify_dentry_parent_queue_event);
/**
 * inotify_get_cookie - return a unique cookie for use in synchronizing events.
 */
u32 inotify_get_cookie(void)
{
	return atomic_inc_return(&inotify_cookie);
}
EXPORT_SYMBOL_GPL(inotify_get_cookie);
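/*
 * Illustrative sketch (not part of the original file): a producer pairs the
 * two halves of a rename with a single cookie so consumers can correlate
 * them.  The dir/name variables here are hypothetical:
 *
 *	u32 cookie = inotify_get_cookie();
 *
 *	inotify_inode_queue_event(old_dir, IN_MOVED_FROM, cookie, old_name,
 *				  moved_inode);
 *	inotify_inode_queue_event(new_dir, IN_MOVED_TO, cookie, new_name,
 *				  moved_inode);
 */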
/**
 * inotify_unmount_inodes - an sb is unmounting.  handle any watched inodes.
 * @list: list of inodes being unmounted (sb->s_inodes)
 *
 * Called with inode_lock held, protecting the unmounting super block's list
 * of inodes, and with iprune_mutex held, keeping shrink_icache_memory() at bay.
 * We temporarily drop inode_lock, however, and CAN block.
 */
void inotify_unmount_inodes(struct list_head *list)
{
	struct inode *inode, *next_i, *need_iput = NULL;

	list_for_each_entry_safe(inode, next_i, list, i_sb_list) {
		struct inotify_watch *watch, *next_w;
		struct inode *need_iput_tmp;
		struct list_head *watches;

		/*
		 * We cannot __iget() an inode in state I_CLEAR, I_FREEING,
		 * I_WILL_FREE, or I_NEW which is fine because by that point
		 * the inode cannot have any associated watches.
		 */
		if (inode->i_state & (I_CLEAR|I_FREEING|I_WILL_FREE|I_NEW))
			continue;

		/*
		 * If i_count is zero, the inode cannot have any watches and
		 * doing an __iget/iput with MS_ACTIVE clear would actually
		 * evict all inodes with zero i_count from icache which is
		 * unnecessarily violent and may in fact be illegal to do.
		 */
		if (!atomic_read(&inode->i_count))
			continue;

		need_iput_tmp = need_iput;
		need_iput = NULL;
		/* In case inotify_remove_watch_locked() drops a reference. */
		if (inode != need_iput_tmp)
			__iget(inode);
		else
			need_iput_tmp = NULL;
		/* In case the dropping of a reference would nuke next_i. */
		if ((&next_i->i_sb_list != list) &&
				atomic_read(&next_i->i_count) &&
				!(next_i->i_state & (I_CLEAR | I_FREEING |
					I_WILL_FREE))) {
			__iget(next_i);
			need_iput = next_i;
		}

		/*
		 * We can safely drop inode_lock here because we hold
		 * references on both inode and next_i.  Also no new inodes
		 * will be added since the umount has begun.  Finally,
		 * iprune_mutex keeps shrink_icache_memory() away.
		 */
		spin_unlock(&inode_lock);

		if (need_iput_tmp)
			iput(need_iput_tmp);

		/* for each watch, send IN_UNMOUNT and then remove it */
		mutex_lock(&inode->inotify_mutex);
		watches = &inode->inotify_watches;
		list_for_each_entry_safe(watch, next_w, watches, i_list) {
			struct inotify_handle *ih = watch->ih;
			get_inotify_watch(watch);
			mutex_lock(&ih->mutex);
			ih->in_ops->handle_event(watch, watch->wd, IN_UNMOUNT, 0,
						 NULL, NULL);
			inotify_remove_watch_locked(ih, watch);
			mutex_unlock(&ih->mutex);
			put_inotify_watch(watch);
		}
		mutex_unlock(&inode->inotify_mutex);
		iput(inode);

		spin_lock(&inode_lock);
	}
}
EXPORT_SYMBOL_GPL(inotify_unmount_inodes);
/**
 * inotify_inode_is_dead - an inode has been deleted, cleanup any watches
 * @inode: inode that is about to be removed
 */
void inotify_inode_is_dead(struct inode *inode)
{
	struct inotify_watch *watch, *next;

	mutex_lock(&inode->inotify_mutex);
	list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) {
		struct inotify_handle *ih = watch->ih;
		mutex_lock(&ih->mutex);
		inotify_remove_watch_locked(ih, watch);
		mutex_unlock(&ih->mutex);
	}
	mutex_unlock(&inode->inotify_mutex);
}
EXPORT_SYMBOL_GPL(inotify_inode_is_dead);
/* Kernel Consumer API */
/**
 * inotify_init - allocate and initialize an inotify instance
 * @ops: caller's inotify operations
 */
struct inotify_handle *inotify_init(const struct inotify_operations *ops)
{
	struct inotify_handle *ih;

	ih = kmalloc(sizeof(struct inotify_handle), GFP_KERNEL);
	if (unlikely(!ih))
		return ERR_PTR(-ENOMEM);

	idr_init(&ih->idr);
	INIT_LIST_HEAD(&ih->watches);
	mutex_init(&ih->mutex);
	ih->last_wd = 0;
	ih->in_ops = ops;
	atomic_set(&ih->count, 0);
	get_inotify_handle(ih);

	return ih;
}
EXPORT_SYMBOL_GPL(inotify_init);
/**
 * inotify_init_watch - initialize an inotify watch
 * @watch: watch to initialize
 */
void inotify_init_watch(struct inotify_watch *watch)
{
	INIT_LIST_HEAD(&watch->h_list);
	INIT_LIST_HEAD(&watch->i_list);
	atomic_set(&watch->count, 0);
	get_inotify_watch(watch); /* initial get */
}
EXPORT_SYMBOL_GPL(inotify_init_watch);
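/*
 * Illustrative sketch (not part of the original file): minimal consumer
 * setup tying the pieces above together.  The "example_*" names (and the
 * example_watch container from the lifetime sketch earlier) are hypothetical;
 * error handling is trimmed to the essentials.
 *
 *	static void example_handle_event(struct inotify_watch *watch, u32 wd,
 *					 u32 mask, u32 cookie,
 *					 const char *name, struct inode *inode)
 *	{
 *		if (mask & IN_IGNORED)
 *			put_inotify_watch(watch);	(final put, see lifetime rules)
 *	}
 *
 *	static const struct inotify_operations example_ops = {
 *		.handle_event	= example_handle_event,
 *		.destroy_watch	= example_destroy_watch,
 *	};
 *
 *	ih = inotify_init(&example_ops);
 *	if (IS_ERR(ih))
 *		return PTR_ERR(ih);
 *	inotify_init_watch(&w->wdata);
 *	wd = inotify_add_watch(ih, &w->wdata, inode, IN_MODIFY | IN_DELETE_SELF);
 *	if (wd < 0)
 *		example_destroy_watch(&w->wdata);
 *
 * Note the failure path frees the container directly rather than calling
 * put_inotify_watch(): the watch was never installed, so watch->inode was
 * never set and the put path's iput() must not run.
 */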
/*
 * Watch removals suck violently.  To kick the watch out we need (in this
 * order) inode->inotify_mutex and ih->mutex.  That's fine if we have
 * a hold on inode; however, for all other cases we need to make damn sure
 * we don't race with umount.  We can *NOT* just grab a reference to a
 * watch - inotify_unmount_inodes() will happily sail past it and we'll end
 * with reference to inode potentially outliving its superblock.  Ideally
 * we just want to grab an active reference to superblock if we can; that
 * will make sure we won't go into inotify_unmount_inodes() until we are
 * done.  Cleanup is just deactivate_super().  However, that leaves a messy
 * case - what if we *are* racing with umount() and active references to
 * superblock can't be acquired anymore?  We can bump ->s_count, grab
 * ->s_umount, which will wait until the superblock is shut down and the
 * watch in question is pining for fjords.
 *
 * And yes, this is far beyond mere "not very pretty"; so's the entire
 * concept of inotify to start with.
 */
/**
 * pin_to_kill - pin the watch down for removal
 * @ih: inotify handle
 * @watch: watch to kill
 *
 * Called with ih->mutex held, drops it.  Possible return values:
 * 0 - nothing to do, it has died
 * 1 - remove it, drop the reference and deactivate_super()
 */
static int pin_to_kill(struct inotify_handle *ih, struct inotify_watch *watch)
{
	struct super_block *sb = watch->inode->i_sb;

	if (atomic_inc_not_zero(&sb->s_active)) {
		get_inotify_watch(watch);
		mutex_unlock(&ih->mutex);
		return 1;	/* the best outcome */
	}
	spin_lock(&sb_lock);
	sb->s_count++;
	spin_unlock(&sb_lock);
	mutex_unlock(&ih->mutex);	/* can't grab ->s_umount under it */
	down_read(&sb->s_umount);
	/* fs is already shut down; the watch is dead */
	drop_super(sb);
	return 0;
}
static void unpin_and_kill(struct inotify_watch *watch)
{
	struct super_block *sb = watch->inode->i_sb;
	put_inotify_watch(watch);
	deactivate_super(sb);
}
/**
 * inotify_destroy - clean up and destroy an inotify instance
 * @ih: inotify handle
 */
void inotify_destroy(struct inotify_handle *ih)
{
	/*
	 * Destroy all of the watches for this handle.  Unfortunately, not very
	 * pretty.  We cannot do a simple iteration over the list, because we
	 * do not know the inode until we iterate to the watch.  But we need to
	 * hold inode->inotify_mutex before ih->mutex.  The following works.
	 *
	 * AV: it had to become even uglier to start working ;-/
	 */
	while (1) {
		struct inotify_watch *watch;
		struct list_head *watches;
		struct super_block *sb;
		struct inode *inode;

		mutex_lock(&ih->mutex);
		watches = &ih->watches;
		if (list_empty(watches)) {
			mutex_unlock(&ih->mutex);
			break;
		}
		watch = list_first_entry(watches, struct inotify_watch, h_list);
		sb = watch->inode->i_sb;
		if (!pin_to_kill(ih, watch))
			continue;

		inode = watch->inode;
		mutex_lock(&inode->inotify_mutex);
		mutex_lock(&ih->mutex);

		/* make sure we didn't race with another list removal */
		if (likely(idr_find(&ih->idr, watch->wd))) {
			remove_watch_no_event(watch, ih);
			put_inotify_watch(watch);
		}

		mutex_unlock(&ih->mutex);
		mutex_unlock(&inode->inotify_mutex);
		unpin_and_kill(watch);
	}

	/* free this handle: the put matching the get in inotify_init() */
	put_inotify_handle(ih);
}
EXPORT_SYMBOL_GPL(inotify_destroy);
/**
 * inotify_find_watch - find an existing watch for an (ih,inode) pair
 * @ih: inotify handle
 * @inode: inode to watch
 * @watchp: pointer to existing inotify_watch
 *
 * Caller must pin given inode (via nameidata).
 */
s32 inotify_find_watch(struct inotify_handle *ih, struct inode *inode,
		       struct inotify_watch **watchp)
{
	struct inotify_watch *old;
	int ret = -ENOENT;

	mutex_lock(&inode->inotify_mutex);
	mutex_lock(&ih->mutex);

	old = inode_find_handle(inode, ih);
	if (unlikely(old)) {
		get_inotify_watch(old); /* caller must put watch */
		*watchp = old;
		ret = old->wd;
	}

	mutex_unlock(&ih->mutex);
	mutex_unlock(&inode->inotify_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(inotify_find_watch);
/**
 * inotify_find_update_watch - find and update the mask of an existing watch
 * @ih: inotify handle
 * @inode: inode's watch to update
 * @mask: mask of events to watch
 *
 * Caller must pin given inode (via nameidata).
 */
s32 inotify_find_update_watch(struct inotify_handle *ih, struct inode *inode,
			      u32 mask)
{
	struct inotify_watch *old;
	int mask_add = 0;
	int ret;

	if (mask & IN_MASK_ADD)
		mask_add = 1;

	/* don't allow invalid bits: we don't want flags set */
	mask &= IN_ALL_EVENTS | IN_ONESHOT;
	if (unlikely(!mask))
		return -EINVAL;

	mutex_lock(&inode->inotify_mutex);
	mutex_lock(&ih->mutex);

	/*
	 * Handle the case of re-adding a watch on an (inode,ih) pair that we
	 * are already watching.  We just update the mask and return its wd.
	 */
	old = inode_find_handle(inode, ih);
	if (unlikely(!old)) {
		ret = -ENOENT;
		goto out;
	}

	if (mask_add)
		old->mask |= mask;
	else
		old->mask = mask;
	ret = old->wd;
out:
	mutex_unlock(&ih->mutex);
	mutex_unlock(&inode->inotify_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(inotify_find_update_watch);
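/*
 * Illustrative sketch (not part of the original file): the usual consumer
 * "add or update" sequence, trying an update first and falling back to a
 * fresh watch.  The example_create_watch() helper is hypothetical.
 *
 *	ret = inotify_find_update_watch(ih, inode, mask);
 *	if (ret == -ENOENT)
 *		ret = example_create_watch(ih, inode, mask);
 */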
/**
 * inotify_add_watch - add a watch to an inotify instance
 * @ih: inotify handle
 * @watch: caller allocated watch structure
 * @inode: inode to watch
 * @mask: mask of events to watch
 *
 * Caller must pin given inode (via nameidata).
 * Caller must ensure it only calls inotify_add_watch() once per watch.
 * Calls inotify_handle_get_wd() so may sleep.
 */
s32 inotify_add_watch(struct inotify_handle *ih, struct inotify_watch *watch,
		      struct inode *inode, u32 mask)
{
	int ret = 0;
	int newly_watched;

	/* don't allow invalid bits: we don't want flags set */
	mask &= IN_ALL_EVENTS | IN_ONESHOT;
	if (unlikely(!mask))
		return -EINVAL;
	watch->mask = mask;

	mutex_lock(&inode->inotify_mutex);
	mutex_lock(&ih->mutex);

	/* Initialize a new watch */
	ret = inotify_handle_get_wd(ih, watch);
	if (unlikely(ret))
		goto out;
	ret = watch->wd;

	/* save a reference to handle and bump the count to make it official */
	get_inotify_handle(ih);
	watch->ih = ih;

	/*
	 * Save a reference to the inode and bump the ref count to make it
	 * official.  We hold a reference to nameidata, which makes this safe.
	 */
	watch->inode = igrab(inode);

	/* Add the watch to the handle's and the inode's list */
	newly_watched = !inotify_inode_watched(inode);
	list_add(&watch->h_list, &ih->watches);
	list_add(&watch->i_list, &inode->inotify_watches);
	/*
	 * Set child flags _after_ adding the watch, so there is no race
	 * windows where newly instantiated children could miss their parent's
	 * watched flag.
	 */
	if (newly_watched)
		set_dentry_child_flags(inode, 1);

out:
	mutex_unlock(&ih->mutex);
	mutex_unlock(&inode->inotify_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(inotify_add_watch);
/**
 * inotify_clone_watch - put the watch next to existing one
 * @old: already installed watch
 * @new: new watch
 *
 * Caller must hold the inotify_mutex of inode we are dealing with;
 * it is expected to remove the old watch before unlocking the inode.
 */
s32 inotify_clone_watch(struct inotify_watch *old, struct inotify_watch *new)
{
	struct inotify_handle *ih = old->ih;
	int ret = 0;

	new->mask = old->mask;
	new->ih = ih;

	mutex_lock(&ih->mutex);

	/* Initialize a new watch */
	ret = inotify_handle_get_wd(ih, new);
	if (unlikely(ret))
		goto out;
	ret = new->wd;

	get_inotify_handle(ih);

	new->inode = igrab(old->inode);

	list_add(&new->h_list, &ih->watches);
	list_add(&new->i_list, &old->inode->inotify_watches);
out:
	mutex_unlock(&ih->mutex);
	return ret;
}
void inotify_evict_watch(struct inotify_watch *watch)
{
	get_inotify_watch(watch);
	mutex_lock(&watch->ih->mutex);
	inotify_remove_watch_locked(watch->ih, watch);
	mutex_unlock(&watch->ih->mutex);
}
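/*
 * Illustrative note (not part of the original file): inotify_evict_watch()
 * takes its own reference before removal, so a typical caller drops that
 * reference once it is finished with the watch:
 *
 *	inotify_evict_watch(&w->wdata);
 *	...
 *	put_inotify_watch(&w->wdata);	(drop the get taken by evict)
 */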
/**
 * inotify_rm_wd - remove a watch from an inotify instance
 * @ih: inotify handle
 * @wd: watch descriptor to remove
 *
 * Can sleep.
 */
int inotify_rm_wd(struct inotify_handle *ih, u32 wd)
{
	struct inotify_watch *watch;
	struct super_block *sb;
	struct inode *inode;

	mutex_lock(&ih->mutex);
	watch = idr_find(&ih->idr, wd);
	if (unlikely(!watch)) {
		mutex_unlock(&ih->mutex);
		return -EINVAL;
	}
	sb = watch->inode->i_sb;
	if (!pin_to_kill(ih, watch))
		return 0;

	inode = watch->inode;

	mutex_lock(&inode->inotify_mutex);
	mutex_lock(&ih->mutex);

	/* make sure that we did not race */
	if (likely(idr_find(&ih->idr, wd) == watch))
		inotify_remove_watch_locked(ih, watch);

	mutex_unlock(&ih->mutex);
	mutex_unlock(&inode->inotify_mutex);
	unpin_and_kill(watch);

	return 0;
}
EXPORT_SYMBOL_GPL(inotify_rm_wd);
/**
 * inotify_rm_watch - remove a watch from an inotify instance
 * @ih: inotify handle
 * @watch: watch to remove
 *
 * Can sleep.
 */
int inotify_rm_watch(struct inotify_handle *ih,
		     struct inotify_watch *watch)
{
	return inotify_rm_wd(ih, watch->wd);
}
EXPORT_SYMBOL_GPL(inotify_rm_watch);
/*
 * inotify_setup - core initialization function
 */
static int __init inotify_setup(void)
{
	BUILD_BUG_ON(IN_ACCESS != FS_ACCESS);
	BUILD_BUG_ON(IN_MODIFY != FS_MODIFY);
	BUILD_BUG_ON(IN_ATTRIB != FS_ATTRIB);
	BUILD_BUG_ON(IN_CLOSE_WRITE != FS_CLOSE_WRITE);
	BUILD_BUG_ON(IN_CLOSE_NOWRITE != FS_CLOSE_NOWRITE);
	BUILD_BUG_ON(IN_OPEN != FS_OPEN);
	BUILD_BUG_ON(IN_MOVED_FROM != FS_MOVED_FROM);
	BUILD_BUG_ON(IN_MOVED_TO != FS_MOVED_TO);
	BUILD_BUG_ON(IN_CREATE != FS_CREATE);
	BUILD_BUG_ON(IN_DELETE != FS_DELETE);
	BUILD_BUG_ON(IN_DELETE_SELF != FS_DELETE_SELF);
	BUILD_BUG_ON(IN_MOVE_SELF != FS_MOVE_SELF);
	BUILD_BUG_ON(IN_Q_OVERFLOW != FS_Q_OVERFLOW);

	BUILD_BUG_ON(IN_UNMOUNT != FS_UNMOUNT);
	BUILD_BUG_ON(IN_ISDIR != FS_IN_ISDIR);
	BUILD_BUG_ON(IN_IGNORED != FS_IN_IGNORED);
	BUILD_BUG_ON(IN_ONESHOT != FS_IN_ONESHOT);

	atomic_set(&inotify_cookie, 0);

	return 0;
}

module_init(inotify_setup);