2 * fs/inotify_user.c - inotify support for userspace
5 * John McCutchan <ttb@tentacle.dhs.org>
6 * Robert Love <rml@novell.com>
8 * Copyright (C) 2005 John McCutchan
9 * Copyright 2006 Hewlett-Packard Development Company, L.P.
11 * Copyright (C) 2009 Eric Paris <Red Hat Inc>
12 * inotify was largely rewritten to make use of the fsnotify infrastructure
14 * This program is free software; you can redistribute it and/or modify it
15 * under the terms of the GNU General Public License as published by the
16 * Free Software Foundation; either version 2, or (at your option) any
19 * This program is distributed in the hope that it will be useful, but
20 * WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
22 * General Public License for more details.
25 #include <linux/fs.h> /* struct inode */
26 #include <linux/fsnotify_backend.h>
27 #include <linux/inotify.h>
28 #include <linux/path.h> /* struct path */
29 #include <linux/slab.h> /* kmem_* */
30 #include <linux/types.h>
34 static int inotify_handle_event(struct fsnotify_group
*group
, struct fsnotify_event
*event
)
36 struct fsnotify_mark_entry
*entry
;
37 struct inotify_inode_mark_entry
*ientry
;
38 struct inode
*to_tell
;
39 struct inotify_event_private_data
*event_priv
;
40 struct fsnotify_event_private_data
*fsn_event_priv
;
43 to_tell
= event
->to_tell
;
45 spin_lock(&to_tell
->i_lock
);
46 entry
= fsnotify_find_mark_entry(group
, to_tell
);
47 spin_unlock(&to_tell
->i_lock
);
48 /* race with watch removal? We already passes should_send */
51 ientry
= container_of(entry
, struct inotify_inode_mark_entry
,
55 event_priv
= kmem_cache_alloc(event_priv_cachep
, GFP_KERNEL
);
56 if (unlikely(!event_priv
))
59 fsn_event_priv
= &event_priv
->fsnotify_event_priv_data
;
61 fsn_event_priv
->group
= group
;
64 ret
= fsnotify_add_notify_event(group
, event
, fsn_event_priv
);
66 inotify_free_event_priv(fsn_event_priv
);
67 /* EEXIST says we tail matched, EOVERFLOW isn't something
68 * to report up the stack. */
69 if ((ret
== -EEXIST
) ||
75 * If we hold the entry until after the event is on the queue
76 * IN_IGNORED won't be able to pass this event in the queue
78 fsnotify_put_mark(entry
);
/*
 * fsnotify callback invoked when a mark is being destroyed: send the
 * IN_IGNORED event to userspace and release the mark's idr slot.
 */
static void inotify_freeing_mark(struct fsnotify_mark_entry *entry, struct fsnotify_group *group)
{
	inotify_ignored_and_remove_idr(entry, group);
}
88 static bool inotify_should_send_event(struct fsnotify_group
*group
, struct inode
*inode
, __u32 mask
)
90 struct fsnotify_mark_entry
*entry
;
93 spin_lock(&inode
->i_lock
);
94 entry
= fsnotify_find_mark_entry(group
, inode
);
95 spin_unlock(&inode
->i_lock
);
99 mask
= (mask
& ~FS_EVENT_ON_CHILD
);
100 send
= (entry
->mask
& mask
);
102 /* find took a reference */
103 fsnotify_put_mark(entry
);
/*
 * This is never supposed to run: every mark should have been removed from
 * the idr before the group's private data is freed.  If we get here with
 * entries still present, the refcounting is broken — complain loudly.
 * NOTE(review): body reconstructed from the "won't hit the BUG in the
 * callback" comment in inotify_free_group_priv — confirm against upstream.
 */
static int idr_callback(int id, void *p, void *data)
{
	BUG();
	return 0;
}
114 static void inotify_free_group_priv(struct fsnotify_group
*group
)
116 /* ideally the idr is empty and we won't hit the BUG in teh callback */
117 idr_for_each(&group
->inotify_data
.idr
, idr_callback
, NULL
);
118 idr_remove_all(&group
->inotify_data
.idr
);
119 idr_destroy(&group
->inotify_data
.idr
);
122 void inotify_free_event_priv(struct fsnotify_event_private_data
*fsn_event_priv
)
124 struct inotify_event_private_data
*event_priv
;
127 event_priv
= container_of(fsn_event_priv
, struct inotify_event_private_data
,
128 fsnotify_event_priv_data
);
130 kmem_cache_free(event_priv_cachep
, event_priv
);
133 const struct fsnotify_ops inotify_fsnotify_ops
= {
134 .handle_event
= inotify_handle_event
,
135 .should_send_event
= inotify_should_send_event
,
136 .free_group_priv
= inotify_free_group_priv
,
137 .free_event_priv
= inotify_free_event_priv
,
138 .freeing_mark
= inotify_freeing_mark
,