/*
 * fs/inotify.c - inode-based file event notifications
 *
 * Authors:
 *	John McCutchan <ttb@tentacle.dhs.org>
 *	Robert Love <rml@novell.com>
 *
 * Kernel API added by: Amy Griffis <amy.griffis@hp.com>
 *
 * Copyright (C) 2005 John McCutchan
 * Copyright 2006 Hewlett-Packard Development Company, L.P.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/writeback.h>
#include <linux/inotify.h>
static atomic_t inotify_cookie;
/*
 * Lock ordering:
 *
 * dentry->d_lock (used to keep d_move() away from dentry->d_parent)
 * iprune_mutex (synchronize shrink_icache_memory())
 * inode_lock (protects the super_block->s_inodes list)
 * inode->inotify_mutex (protects inode->inotify_watches and watches->i_list)
 * inotify_handle->mutex (protects inotify_handle and watches->h_list)
 *
 * The inode->inotify_mutex and inotify_handle->mutex are held during execution
 * of a caller's event handler.  Thus, the caller must not hold any locks
 * taken in their event handler while calling any of the published inotify
 * interfaces.
 */
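
/*
 * Editorial sketch (not part of the original file): a hypothetical consumer's
 * event handler, illustrating the rule above.  Both inode->inotify_mutex and
 * ih->mutex are held while handle_event() runs, so any lock the handler takes
 * (my_queue_lock here, an assumed name) nests inside the inotify locks.  That
 * same lock must therefore never be held around calls into the published
 * inotify interfaces such as inotify_rm_wd(), or the lock order would invert
 * and deadlock.
 */
#if 0	/* illustrative only */
static DEFINE_SPINLOCK(my_queue_lock);		/* hypothetical consumer lock */

static void my_handle_event(struct inotify_watch *watch, u32 wd, u32 mask,
			    u32 cookie, const char *name, struct inode *inode)
{
	spin_lock(&my_queue_lock);	/* nests inside the inotify locks */
	/* ... copy the event into the consumer's own queue ... */
	spin_unlock(&my_queue_lock);
}
#endif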
/*
 * Lifetimes of the three main data structures--inotify_handle, inode, and
 * inotify_watch--are managed by reference count.
 *
 * inotify_handle: Lifetime is from inotify_init() to inotify_destroy().
 * Additional references can bump the count via get_inotify_handle() and drop
 * the count via put_inotify_handle().
 *
 * inotify_watch: for inotify's purposes, lifetime is from inotify_add_watch()
 * to remove_watch_no_event().  Additional references can bump the count via
 * get_inotify_watch() and drop the count via put_inotify_watch().  The caller
 * is responsible for the final put after receiving IN_IGNORED, or when using
 * IN_ONESHOT after receiving the first event.  Inotify does the final put if
 * inotify_destroy() is called.
 *
 * inode: Pinned so long as the inode is associated with a watch, from
 * inotify_add_watch() to the final put_inotify_watch().
 */
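
/*
 * Editorial sketch (not part of the original file): the refcount discipline
 * above, as seen from the hypothetical consumer sketched earlier.  The watch
 * is created holding one reference (taken by inotify_init_watch()); the
 * consumer performs the final put once IN_IGNORED arrives, after which its
 * destroy_watch() op is called back to free the embedding object.
 */
#if 0	/* illustrative only */
static void my_handle_event(struct inotify_watch *watch, u32 wd, u32 mask,
			    u32 cookie, const char *name, struct inode *inode)
{
	/* ... consume the event ... */
	if (mask & IN_IGNORED)
		put_inotify_watch(watch);	/* drops the initial reference */
}
#endif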
/*
 * struct inotify_handle - represents an inotify instance
 *
 * This structure is protected by the mutex 'mutex'.
 */
struct inotify_handle {
	struct idr		idr;		/* idr mapping wd -> watch */
	struct mutex		mutex;		/* protects this bad boy */
	struct list_head	watches;	/* list of watches */
	atomic_t		count;		/* reference count */
	u32			last_wd;	/* the last wd allocated */
	const struct inotify_operations *in_ops; /* inotify caller operations */
};
static inline void get_inotify_handle(struct inotify_handle *ih)
{
	atomic_inc(&ih->count);
}
static inline void put_inotify_handle(struct inotify_handle *ih)
{
	if (atomic_dec_and_test(&ih->count)) {
		idr_destroy(&ih->idr);
		kfree(ih);
	}
}
/**
 * get_inotify_watch - grab a reference to an inotify_watch
 * @watch: watch to grab
 */
void get_inotify_watch(struct inotify_watch *watch)
{
	atomic_inc(&watch->count);
}
EXPORT_SYMBOL_GPL(get_inotify_watch);
/*
 * pin_inotify_watch - grab a reference to the watch plus an active reference
 * to its superblock, so the watch cannot outlive the filesystem.  Returns 1
 * on success and 0 if we raced with umount and the superblock is going away.
 */
int pin_inotify_watch(struct inotify_watch *watch)
{
	struct super_block *sb = watch->inode->i_sb;

	spin_lock(&sb_lock);
	if (sb->s_count >= S_BIAS) {
		atomic_inc(&sb->s_active);
		spin_unlock(&sb_lock);
		atomic_inc(&watch->count);
		return 1;
	}
	spin_unlock(&sb_lock);
	return 0;
}
/**
 * put_inotify_watch - decrements the ref count on a given watch.  cleans up
 * watch references if the count reaches zero.  inotify_watch is freed by
 * inotify callers via the destroy_watch() op.
 * @watch: watch to release
 */
void put_inotify_watch(struct inotify_watch *watch)
{
	if (atomic_dec_and_test(&watch->count)) {
		struct inotify_handle *ih = watch->ih;

		iput(watch->inode);	/* unpin the associated inode */
		ih->in_ops->destroy_watch(watch);
		put_inotify_handle(ih);
	}
}
EXPORT_SYMBOL_GPL(put_inotify_watch);
void unpin_inotify_watch(struct inotify_watch *watch)
{
	struct super_block *sb = watch->inode->i_sb;

	put_inotify_watch(watch);
	deactivate_super(sb);
}
/*
 * inotify_handle_get_wd - returns the next WD for use by the given handle
 *
 * Callers must hold ih->mutex.  This function can sleep.
 */
static int inotify_handle_get_wd(struct inotify_handle *ih,
				 struct inotify_watch *watch)
{
	int ret;

	do {
		/* idr_pre_get() may sleep while refilling the idr free list */
		if (unlikely(!idr_pre_get(&ih->idr, GFP_KERNEL)))
			return -ENOSPC;
		ret = idr_get_new_above(&ih->idr, watch, ih->last_wd+1,
					&watch->wd);
	} while (ret == -EAGAIN);

	if (likely(!ret))
		ih->last_wd = watch->wd;

	return ret;
}
/*
 * inotify_inode_watched - returns nonzero if there are watches on this inode
 * and zero otherwise.  We call this lockless, we do not care if we race.
 */
static inline int inotify_inode_watched(struct inode *inode)
{
	return !list_empty(&inode->inotify_watches);
}
/*
 * Get child dentry flag into sync with parent inode.
 * Flag should always be clear for negative dentries.
 */
static void set_dentry_child_flags(struct inode *inode, int watched)
{
	struct dentry *alias;

	spin_lock(&dcache_lock);
	list_for_each_entry(alias, &inode->i_dentry, d_alias) {
		struct dentry *child;

		list_for_each_entry(child, &alias->d_subdirs, d_u.d_child) {
			if (!child->d_inode)	/* skip negative dentries */
				continue;

			spin_lock(&child->d_lock);
			if (watched)
				child->d_flags |= DCACHE_INOTIFY_PARENT_WATCHED;
			else
				child->d_flags &= ~DCACHE_INOTIFY_PARENT_WATCHED;
			spin_unlock(&child->d_lock);
		}
	}
	spin_unlock(&dcache_lock);
}
/*
 * inode_find_handle - find the watch associated with the given inode and
 * handle
 *
 * Callers must hold inode->inotify_mutex.
 */
static struct inotify_watch *inode_find_handle(struct inode *inode,
					       struct inotify_handle *ih)
{
	struct inotify_watch *watch;

	list_for_each_entry(watch, &inode->inotify_watches, i_list) {
		if (watch->ih == ih)
			return watch;
	}

	return NULL;
}
/*
 * remove_watch_no_event - remove watch without the IN_IGNORED event.
 *
 * Callers must hold both inode->inotify_mutex and ih->mutex.
 */
static void remove_watch_no_event(struct inotify_watch *watch,
				  struct inotify_handle *ih)
{
	list_del(&watch->i_list);
	list_del(&watch->h_list);

	if (!inotify_inode_watched(watch->inode))
		set_dentry_child_flags(watch->inode, 0);

	idr_remove(&ih->idr, watch->wd);
}
/**
 * inotify_remove_watch_locked - Remove a watch from both the handle and the
 * inode.  Sends the IN_IGNORED event signifying that the inode is no longer
 * watched.  May be invoked from a caller's event handler.
 * @ih: inotify handle associated with watch
 * @watch: watch to remove
 *
 * Callers must hold both inode->inotify_mutex and ih->mutex.
 */
void inotify_remove_watch_locked(struct inotify_handle *ih,
				 struct inotify_watch *watch)
{
	remove_watch_no_event(watch, ih);
	ih->in_ops->handle_event(watch, watch->wd, IN_IGNORED, 0, NULL, NULL);
}
EXPORT_SYMBOL_GPL(inotify_remove_watch_locked);
/* Kernel API for producing events */
/*
 * inotify_d_instantiate - instantiate dcache entry for inode
 */
void inotify_d_instantiate(struct dentry *entry, struct inode *inode)
{
	struct dentry *parent;

	if (!inode)
		return;

	spin_lock(&entry->d_lock);
	parent = entry->d_parent;
	if (parent->d_inode && inotify_inode_watched(parent->d_inode))
		entry->d_flags |= DCACHE_INOTIFY_PARENT_WATCHED;
	spin_unlock(&entry->d_lock);
}
/*
 * inotify_d_move - dcache entry has been moved
 */
void inotify_d_move(struct dentry *entry)
{
	struct dentry *parent;

	parent = entry->d_parent;
	if (inotify_inode_watched(parent->d_inode))
		entry->d_flags |= DCACHE_INOTIFY_PARENT_WATCHED;
	else
		entry->d_flags &= ~DCACHE_INOTIFY_PARENT_WATCHED;
}
/**
 * inotify_inode_queue_event - queue an event to all watches on this inode
 * @inode: inode event is originating from
 * @mask: event mask describing this event
 * @cookie: cookie for synchronization, or zero
 * @name: filename, if any
 * @n_inode: inode associated with name
 */
void inotify_inode_queue_event(struct inode *inode, u32 mask, u32 cookie,
			       const char *name, struct inode *n_inode)
{
	struct inotify_watch *watch, *next;

	if (!inotify_inode_watched(inode))
		return;

	mutex_lock(&inode->inotify_mutex);
	list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) {
		u32 watch_mask = watch->mask;
		if (watch_mask & mask) {
			struct inotify_handle *ih = watch->ih;

			mutex_lock(&ih->mutex);
			if (watch_mask & IN_ONESHOT)
				remove_watch_no_event(watch, ih);
			ih->in_ops->handle_event(watch, watch->wd, mask, cookie,
						 name, n_inode);
			mutex_unlock(&ih->mutex);
		}
	}
	mutex_unlock(&inode->inotify_mutex);
}
EXPORT_SYMBOL_GPL(inotify_inode_queue_event);
/**
 * inotify_dentry_parent_queue_event - queue an event to a dentry's parent
 * @dentry: the dentry in question, we queue against this dentry's parent
 * @mask: event mask describing this event
 * @cookie: cookie for synchronization, or zero
 * @name: filename, if any
 */
void inotify_dentry_parent_queue_event(struct dentry *dentry, u32 mask,
				       u32 cookie, const char *name)
{
	struct dentry *parent;
	struct inode *inode;

	if (!(dentry->d_flags & DCACHE_INOTIFY_PARENT_WATCHED))
		return;

	spin_lock(&dentry->d_lock);
	parent = dentry->d_parent;
	inode = parent->d_inode;

	if (inotify_inode_watched(inode)) {
		dget(parent);
		spin_unlock(&dentry->d_lock);
		inotify_inode_queue_event(inode, mask, cookie, name,
					  dentry->d_inode);
		dput(parent);
	} else
		spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL_GPL(inotify_dentry_parent_queue_event);
/**
 * inotify_get_cookie - return a unique cookie for use in synchronizing events.
 */
u32 inotify_get_cookie(void)
{
	return atomic_inc_return(&inotify_cookie);
}
EXPORT_SYMBOL_GPL(inotify_get_cookie);
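
/*
 * Editorial sketch (not part of the original file): roughly how the VFS
 * pairs the two halves of a rename.  A single cookie ties the IN_MOVED_FROM
 * event on the old parent to the IN_MOVED_TO event on the new parent, so a
 * consumer can correlate them.  Simplified; the real caller also ORs in
 * IN_ISDIR for directories.
 */
#if 0	/* illustrative only */
static void example_notify_move(struct inode *old_dir, struct inode *new_dir,
				const char *old_name, const char *new_name,
				struct inode *source)
{
	u32 cookie = inotify_get_cookie();

	inotify_inode_queue_event(old_dir, IN_MOVED_FROM, cookie, old_name,
				  source);
	inotify_inode_queue_event(new_dir, IN_MOVED_TO, cookie, new_name,
				  source);
}
#endif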
/**
 * inotify_unmount_inodes - an sb is unmounting.  handle any watched inodes.
 * @list: list of inodes being unmounted (sb->s_inodes)
 *
 * Called with inode_lock held, protecting the unmounting super block's list
 * of inodes, and with iprune_mutex held, keeping shrink_icache_memory() at bay.
 * We temporarily drop inode_lock, however, and CAN block.
 */
void inotify_unmount_inodes(struct list_head *list)
{
	struct inode *inode, *next_i, *need_iput = NULL;

	list_for_each_entry_safe(inode, next_i, list, i_sb_list) {
		struct inotify_watch *watch, *next_w;
		struct inode *need_iput_tmp;
		struct list_head *watches;

		/*
		 * If i_count is zero, the inode cannot have any watches and
		 * doing an __iget/iput with MS_ACTIVE clear would actually
		 * evict all inodes with zero i_count from icache which is
		 * unnecessarily violent and may in fact be illegal to do.
		 */
		if (!atomic_read(&inode->i_count))
			continue;

		/*
		 * We cannot __iget() an inode in state I_CLEAR, I_FREEING, or
		 * I_WILL_FREE which is fine because by that point the inode
		 * cannot have any associated watches.
		 */
		if (inode->i_state & (I_CLEAR | I_FREEING | I_WILL_FREE))
			continue;

		need_iput_tmp = need_iput;
		need_iput = NULL;
		/* In case inotify_remove_watch_locked() drops a reference. */
		if (inode != need_iput_tmp)
			__iget(inode);
		else
			need_iput_tmp = NULL;
		/* In case the dropping of a reference would nuke next_i. */
		if ((&next_i->i_sb_list != list) &&
				atomic_read(&next_i->i_count) &&
				!(next_i->i_state & (I_CLEAR | I_FREEING |
					I_WILL_FREE))) {
			__iget(next_i);
			need_iput = next_i;
		}

		/*
		 * We can safely drop inode_lock here because we hold
		 * references on both inode and next_i.  Also no new inodes
		 * will be added since the umount has begun.  Finally,
		 * iprune_mutex keeps shrink_icache_memory() away.
		 */
		spin_unlock(&inode_lock);

		if (need_iput_tmp)
			iput(need_iput_tmp);

		/* for each watch, send IN_UNMOUNT and then remove it */
		mutex_lock(&inode->inotify_mutex);
		watches = &inode->inotify_watches;
		list_for_each_entry_safe(watch, next_w, watches, i_list) {
			struct inotify_handle *ih = watch->ih;

			get_inotify_watch(watch);
			mutex_lock(&ih->mutex);
			ih->in_ops->handle_event(watch, watch->wd, IN_UNMOUNT, 0,
						 NULL, NULL);
			inotify_remove_watch_locked(ih, watch);
			mutex_unlock(&ih->mutex);
			put_inotify_watch(watch);
		}
		mutex_unlock(&inode->inotify_mutex);
		iput(inode);

		spin_lock(&inode_lock);
	}
}
EXPORT_SYMBOL_GPL(inotify_unmount_inodes);
/**
 * inotify_inode_is_dead - an inode has been deleted, cleanup any watches
 * @inode: inode that is about to be removed
 */
void inotify_inode_is_dead(struct inode *inode)
{
	struct inotify_watch *watch, *next;

	mutex_lock(&inode->inotify_mutex);
	list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) {
		struct inotify_handle *ih = watch->ih;

		mutex_lock(&ih->mutex);
		inotify_remove_watch_locked(ih, watch);
		mutex_unlock(&ih->mutex);
	}
	mutex_unlock(&inode->inotify_mutex);
}
EXPORT_SYMBOL_GPL(inotify_inode_is_dead);

/* Kernel Consumer API */
/**
 * inotify_init - allocate and initialize an inotify instance
 * @ops: caller's inotify operations
 */
struct inotify_handle *inotify_init(const struct inotify_operations *ops)
{
	struct inotify_handle *ih;

	ih = kmalloc(sizeof(struct inotify_handle), GFP_KERNEL);
	if (unlikely(!ih))
		return ERR_PTR(-ENOMEM);

	idr_init(&ih->idr);
	INIT_LIST_HEAD(&ih->watches);
	mutex_init(&ih->mutex);
	ih->last_wd = 0;
	ih->in_ops = ops;
	atomic_set(&ih->count, 0);
	get_inotify_handle(ih);

	return ih;
}
EXPORT_SYMBOL_GPL(inotify_init);
/**
 * inotify_init_watch - initialize an inotify watch
 * @watch: watch to initialize
 */
void inotify_init_watch(struct inotify_watch *watch)
{
	INIT_LIST_HEAD(&watch->h_list);
	INIT_LIST_HEAD(&watch->i_list);
	atomic_set(&watch->count, 0);
	get_inotify_watch(watch); /* initial get */
}
EXPORT_SYMBOL_GPL(inotify_init_watch);
/*
 * Watch removals suck violently.  To kick the watch out we need (in this
 * order) inode->inotify_mutex and ih->mutex.  That's fine if we have
 * a hold on inode; however, for all other cases we need to make damn sure
 * we don't race with umount.  We can *NOT* just grab a reference to a
 * watch - inotify_unmount_inodes() will happily sail past it and we'll end
 * up with a reference to an inode potentially outliving its superblock.
 * Ideally we just want to grab an active reference to superblock if we can;
 * that will make sure we won't go into inotify_unmount_inodes() until we are
 * done.  Cleanup is just deactivate_super().  However, that leaves a messy
 * case - what if we *are* racing with umount() and active references to
 * superblock can't be acquired anymore?  We can bump ->s_count, grab
 * ->s_umount, which will almost certainly wait until the superblock is shut
 * down and the watch in question is pining for fjords.  That's fine, but
 * there is a problem - we might have hit the window between ->s_active
 * getting to 0 / ->s_count dropping below S_BIAS (i.e. the moment when the
 * superblock is past the point of no return and is heading for shutdown)
 * and the moment when deactivate_super() acquires ->s_umount.  We could just
 * do drop_super() yield() and retry, but that's rather antisocial and this
 * stuff is luser-triggerable.  OTOH, having grabbed ->s_umount and having
 * found that we'd got there first (i.e. that ->s_root is non-NULL) we know
 * that we won't race with inotify_unmount_inodes().  So we could grab a
 * reference to watch and do the rest as above, just with drop_super() instead
 * of deactivate_super(), right?  Wrong.  We had to drop ih->mutex before we
 * could grab ->s_umount.  So the watch could've been gone already.
 *
 * That still can be dealt with - we need to save watch->wd, do idr_find()
 * and compare its result with our pointer.  If they match, we either have
 * the damn thing still alive or we'd lost not one but two races at once,
 * the watch had been killed and a new one got created with the same ->wd
 * at the same address.  That couldn't have happened in inotify_destroy(),
 * but inotify_rm_wd() could run into that.  Still, "new one got created"
 * is not a problem - we have every right to kill it or leave it alone,
 * whatever's more convenient.
 *
 * So we can use idr_find(...) == watch && watch->inode->i_sb == sb as
 * "grab it and kill it" check.  If it's been our original watch, we are
 * fine, if it's a newcomer - nevermind, just pretend that we'd won the
 * race and kill the fscker anyway; we are safe since we know that its
 * superblock won't be going away.
 *
 * And yes, this is far beyond mere "not very pretty"; so's the entire
 * concept of inotify to start with.
 */
/**
 * pin_to_kill - pin the watch down for removal
 * @ih: inotify handle
 * @watch: watch to kill
 *
 * Called with ih->mutex held, drops it.  Possible return values:
 * 0 - nothing to do, it has died
 * 1 - remove it, drop the reference and deactivate_super()
 * 2 - remove it, drop the reference and drop_super(); we tried hard to avoid
 * that variant, since it involved a lot of PITA, but that's the best that
 * could've been done.
 */
static int pin_to_kill(struct inotify_handle *ih, struct inotify_watch *watch)
{
	struct super_block *sb = watch->inode->i_sb;
	s32 wd = watch->wd;

	spin_lock(&sb_lock);
	if (sb->s_count >= S_BIAS) {
		atomic_inc(&sb->s_active);
		spin_unlock(&sb_lock);
		get_inotify_watch(watch);
		mutex_unlock(&ih->mutex);
		return 1;	/* the best outcome */
	}
	sb->s_count++;
	spin_unlock(&sb_lock);
	mutex_unlock(&ih->mutex);	/* can't grab ->s_umount under it */
	down_read(&sb->s_umount);
	if (likely(!sb->s_root)) {
		/* fs is already shut down; the watch is dead */
		drop_super(sb);
		return 0;
	}
	/* raced with the final deactivate_super() */
	mutex_lock(&ih->mutex);
	if (idr_find(&ih->idr, wd) != watch || watch->inode->i_sb != sb) {
		/* the watch is dead */
		mutex_unlock(&ih->mutex);
		drop_super(sb);
		return 0;
	}
	/* still alive or freed and reused with the same sb and wd; kill */
	get_inotify_watch(watch);
	mutex_unlock(&ih->mutex);
	return 2;
}
static void unpin_and_kill(struct inotify_watch *watch, int how)
{
	struct super_block *sb = watch->inode->i_sb;

	put_inotify_watch(watch);
	switch (how) {
	case 1:
		deactivate_super(sb);
		break;
	case 2:
		drop_super(sb);
		break;
	}
}
/**
 * inotify_destroy - clean up and destroy an inotify instance
 * @ih: inotify handle
 */
void inotify_destroy(struct inotify_handle *ih)
{
	/*
	 * Destroy all of the watches for this handle.  Unfortunately, not very
	 * pretty.  We cannot do a simple iteration over the list, because we
	 * do not know the inode until we iterate to the watch.  But we need to
	 * hold inode->inotify_mutex before ih->mutex.  The following works.
	 *
	 * AV: it had to become even uglier to start working ;-/
	 */
	while (1) {
		struct inotify_watch *watch;
		struct list_head *watches;
		struct super_block *sb;
		struct inode *inode;
		int how;

		mutex_lock(&ih->mutex);
		watches = &ih->watches;
		if (list_empty(watches)) {
			mutex_unlock(&ih->mutex);
			break;
		}
		watch = list_first_entry(watches, struct inotify_watch, h_list);
		sb = watch->inode->i_sb;
		how = pin_to_kill(ih, watch);
		if (!how)
			continue;

		inode = watch->inode;
		mutex_lock(&inode->inotify_mutex);
		mutex_lock(&ih->mutex);

		/* make sure we didn't race with another list removal */
		if (likely(idr_find(&ih->idr, watch->wd))) {
			remove_watch_no_event(watch, ih);
			put_inotify_watch(watch);
		}

		mutex_unlock(&ih->mutex);
		mutex_unlock(&inode->inotify_mutex);
		unpin_and_kill(watch, how);
	}

	/* free this handle: the put matching the get in inotify_init() */
	put_inotify_handle(ih);
}
EXPORT_SYMBOL_GPL(inotify_destroy);
/**
 * inotify_find_watch - find an existing watch for an (ih,inode) pair
 * @ih: inotify handle
 * @inode: inode to watch
 * @watchp: pointer to existing inotify_watch
 *
 * Caller must pin given inode (via nameidata).
 */
s32 inotify_find_watch(struct inotify_handle *ih, struct inode *inode,
		       struct inotify_watch **watchp)
{
	struct inotify_watch *old;
	int ret = -ENOENT;

	mutex_lock(&inode->inotify_mutex);
	mutex_lock(&ih->mutex);

	old = inode_find_handle(inode, ih);
	if (unlikely(old)) {
		get_inotify_watch(old); /* caller must put watch */
		*watchp = old;
		ret = old->wd;
	}

	mutex_unlock(&ih->mutex);
	mutex_unlock(&inode->inotify_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(inotify_find_watch);
/**
 * inotify_find_update_watch - find and update the mask of an existing watch
 * @ih: inotify handle
 * @inode: inode's watch to update
 * @mask: mask of events to watch
 *
 * Caller must pin given inode (via nameidata).
 */
s32 inotify_find_update_watch(struct inotify_handle *ih, struct inode *inode,
			      u32 mask)
{
	struct inotify_watch *old;
	int mask_add = 0;
	int ret;

	if (mask & IN_MASK_ADD)
		mask_add = 1;

	/* don't allow invalid bits: we don't want flags set */
	mask &= IN_ALL_EVENTS | IN_ONESHOT;
	if (unlikely(!mask))
		return -EINVAL;

	mutex_lock(&inode->inotify_mutex);
	mutex_lock(&ih->mutex);

	/*
	 * Handle the case of re-adding a watch on an (inode,ih) pair that we
	 * are already watching.  We just update the mask and return its wd.
	 */
	old = inode_find_handle(inode, ih);
	if (unlikely(!old)) {
		ret = -ENOENT;
		goto out;
	}

	if (mask_add)
		old->mask |= mask;
	else
		old->mask = mask;
	ret = old->wd;
out:
	mutex_unlock(&ih->mutex);
	mutex_unlock(&inode->inotify_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(inotify_find_update_watch);
/**
 * inotify_add_watch - add a watch to an inotify instance
 * @ih: inotify handle
 * @watch: caller allocated watch structure
 * @inode: inode to watch
 * @mask: mask of events to watch
 *
 * Caller must pin given inode (via nameidata).
 * Caller must ensure it only calls inotify_add_watch() once per watch.
 * Calls inotify_handle_get_wd() so may sleep.
 */
s32 inotify_add_watch(struct inotify_handle *ih, struct inotify_watch *watch,
		      struct inode *inode, u32 mask)
{
	int ret = 0;
	int newly_watched;

	/* don't allow invalid bits: we don't want flags set */
	mask &= IN_ALL_EVENTS | IN_ONESHOT;
	if (unlikely(!mask))
		return -EINVAL;
	watch->mask = mask;

	mutex_lock(&inode->inotify_mutex);
	mutex_lock(&ih->mutex);

	/* Initialize a new watch */
	ret = inotify_handle_get_wd(ih, watch);
	if (unlikely(ret))
		goto out;
	ret = watch->wd;

	/* save a reference to handle and bump the count to make it official */
	get_inotify_handle(ih);
	watch->ih = ih;

	/*
	 * Save a reference to the inode and bump the ref count to make it
	 * official.  We hold a reference to nameidata, which makes this safe.
	 */
	watch->inode = igrab(inode);

	/* Add the watch to the handle's and the inode's list */
	newly_watched = !inotify_inode_watched(inode);
	list_add(&watch->h_list, &ih->watches);
	list_add(&watch->i_list, &inode->inotify_watches);
	/*
	 * Set child flags _after_ adding the watch, so there is no race
	 * window where newly instantiated children could miss their parent's
	 * watched flag.
	 */
	if (newly_watched)
		set_dentry_child_flags(inode, 1);

out:
	mutex_unlock(&ih->mutex);
	mutex_unlock(&inode->inotify_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(inotify_add_watch);
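
/*
 * Editorial sketch (not part of the original file): the minimal consumer
 * pattern for the API above.  A caller embeds struct inotify_watch in its own
 * object, supplies handle_event()/destroy_watch() operations, and wires them
 * together with inotify_init(), inotify_init_watch() and inotify_add_watch().
 * All names prefixed "my_" are assumptions.
 */
#if 0	/* illustrative only */
struct my_watch {
	struct inotify_watch	wdata;	/* embedded, never pointed to */
	/* ... consumer-private state ... */
};

static void my_handle_event(struct inotify_watch *watch, u32 wd, u32 mask,
			    u32 cookie, const char *name, struct inode *inode)
{
	struct my_watch *mw = container_of(watch, struct my_watch, wdata);
	/* ... consume the event; final put on IN_IGNORED, see above ... */
}

static void my_destroy_watch(struct inotify_watch *watch)
{
	/* called back from the final put_inotify_watch() */
	kfree(container_of(watch, struct my_watch, wdata));
}

static const struct inotify_operations my_ops = {
	.handle_event	= my_handle_event,
	.destroy_watch	= my_destroy_watch,
};

/* ih would come from inotify_init(&my_ops) */
static s32 my_watch_inode(struct inotify_handle *ih, struct inode *inode)
{
	struct my_watch *mw;
	s32 wd;

	mw = kzalloc(sizeof(*mw), GFP_KERNEL);
	if (!mw)
		return -ENOMEM;

	inotify_init_watch(&mw->wdata);	/* takes the initial reference */
	wd = inotify_add_watch(ih, &mw->wdata, inode, IN_MODIFY | IN_DELETE);
	if (wd < 0)
		kfree(mw);	/* never added, so no destroy_watch() callback */
	return wd;
}
#endif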
/**
 * inotify_clone_watch - put the watch next to existing one
 * @old: already installed watch
 * @new: new watch
 *
 * Caller must hold the inotify_mutex of inode we are dealing with;
 * it is expected to remove the old watch before unlocking the inode.
 */
s32 inotify_clone_watch(struct inotify_watch *old, struct inotify_watch *new)
{
	struct inotify_handle *ih = old->ih;
	int ret = 0;

	new->mask = old->mask;
	new->ih = ih;

	mutex_lock(&ih->mutex);

	/* Initialize a new watch */
	ret = inotify_handle_get_wd(ih, new);
	if (unlikely(ret))
		goto out;
	ret = new->wd;

	get_inotify_handle(ih);

	new->inode = igrab(old->inode);

	list_add(&new->h_list, &ih->watches);
	list_add(&new->i_list, &old->inode->inotify_watches);
out:
	mutex_unlock(&ih->mutex);
	return ret;
}
/*
 * inotify_evict_watch - remove a watch, sending IN_IGNORED via
 * inotify_remove_watch_locked(); takes a reference first so the watch stays
 * pinned across the removal.
 */
void inotify_evict_watch(struct inotify_watch *watch)
{
	get_inotify_watch(watch);
	mutex_lock(&watch->ih->mutex);
	inotify_remove_watch_locked(watch->ih, watch);
	mutex_unlock(&watch->ih->mutex);
}
/**
 * inotify_rm_wd - remove a watch from an inotify instance
 * @ih: inotify handle
 * @wd: watch descriptor to remove
 *
 * Can sleep.
 */
int inotify_rm_wd(struct inotify_handle *ih, u32 wd)
{
	struct inotify_watch *watch;
	struct super_block *sb;
	struct inode *inode;
	int how;

	mutex_lock(&ih->mutex);
	watch = idr_find(&ih->idr, wd);
	if (unlikely(!watch)) {
		mutex_unlock(&ih->mutex);
		return -EINVAL;
	}
	sb = watch->inode->i_sb;
	how = pin_to_kill(ih, watch);
	if (!how)
		return 0;

	inode = watch->inode;

	mutex_lock(&inode->inotify_mutex);
	mutex_lock(&ih->mutex);

	/* make sure that we did not race */
	if (likely(idr_find(&ih->idr, wd) == watch))
		inotify_remove_watch_locked(ih, watch);

	mutex_unlock(&ih->mutex);
	mutex_unlock(&inode->inotify_mutex);
	unpin_and_kill(watch, how);

	return 0;
}
EXPORT_SYMBOL_GPL(inotify_rm_wd);
/**
 * inotify_rm_watch - remove a watch from an inotify instance
 * @ih: inotify handle
 * @watch: watch to remove
 *
 * Can sleep.
 */
int inotify_rm_watch(struct inotify_handle *ih,
		     struct inotify_watch *watch)
{
	return inotify_rm_wd(ih, watch->wd);
}
EXPORT_SYMBOL_GPL(inotify_rm_watch);
/*
 * inotify_setup - core initialization function
 */
static int __init inotify_setup(void)
{
	atomic_set(&inotify_cookie, 0);

	return 0;
}

module_init(inotify_setup);