/*
 * fs/inotify_user.c - inotify support for userspace
 *
 * Authors:
 *	John McCutchan	<ttb@tentacle.dhs.org>
 *	Robert Love	<rml@novell.com>
 *
 * Copyright (C) 2005 John McCutchan
 * Copyright 2006 Hewlett-Packard Development Company, L.P.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/inotify.h>
#include <linux/syscalls.h>
#include <linux/magic.h>

#include <asm/ioctls.h>
static struct kmem_cache *watch_cachep __read_mostly;
static struct kmem_cache *event_cachep __read_mostly;

static struct vfsmount *inotify_mnt __read_mostly;

/* these are configurable via /proc/sys/fs/inotify/ */
int inotify_max_user_instances __read_mostly;
int inotify_max_user_watches __read_mostly;
int inotify_max_queued_events __read_mostly;
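/*
 * Illustrative shell usage of these knobs (the values shown are the
 * defaults assigned in inotify_user_setup() below):
 *
 *	# cat /proc/sys/fs/inotify/max_user_watches
 *	8192
 *	# echo 16384 > /proc/sys/fs/inotify/max_user_watches
 */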
/*
 * Lock ordering:
 *
 * inotify_dev->up_mutex (ensures we don't re-add the same watch)
 *	inode->inotify_mutex (protects inode's watch list)
 *		inotify_handle->mutex (protects inotify_handle's watch list)
 *			inotify_dev->ev_mutex (protects device's event queue)
 */
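/*
 * For example, sys_inotify_add_watch() below takes dev->up_mutex first;
 * the core inotify code it calls then takes inode->inotify_mutex and
 * the handle's mutex, and event delivery takes dev->ev_mutex last.
 */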
/*
 * Lifetimes of the main data structures:
 *
 * inotify_device: Lifetime is managed by reference count, from
 * sys_inotify_init() until release.  Additional references can bump the count
 * via get_inotify_dev() and drop the count via put_inotify_dev().
 *
 * inotify_user_watch: Lifetime is from create_watch() to the receipt of an
 * IN_IGNORED event from inotify, or when using IN_ONESHOT, to receipt of the
 * first event, or to inotify_destroy().
 */
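/*
 * Concretely: each watch created in create_watch() below pins its device
 * with get_inotify_dev(), and that reference is dropped again in
 * free_inotify_user_watch() when the watch dies.
 */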
/*
 * struct inotify_device - represents an inotify instance
 *
 * This structure is protected by the mutexes 'ev_mutex' and 'up_mutex'.
 */
struct inotify_device {
	wait_queue_head_t	wq;		/* wait queue for i/o */
	struct mutex		ev_mutex;	/* protects event queue */
	struct mutex		up_mutex;	/* synchronizes watch updates */
	struct list_head	events;		/* list of queued events */
	atomic_t		count;		/* reference count */
	struct user_struct	*user;		/* user who opened this dev */
	struct inotify_handle	*ih;		/* inotify handle */
	unsigned int		queue_size;	/* size of the queue (bytes) */
	unsigned int		event_count;	/* number of pending events */
	unsigned int		max_events;	/* maximum number of events */
};
/*
 * struct inotify_kernel_event - An inotify event, originating from a watch and
 * queued for user-space.  A list of these is attached to each instance of the
 * device.  In read(), this list is walked and all events that can fit in the
 * buffer are returned.
 *
 * Protected by dev->ev_mutex of the device in which we are queued.
 */
struct inotify_kernel_event {
	struct inotify_event	event;	/* the user-space event */
	struct list_head	list;	/* entry in inotify_device's list */
	char			*name;	/* filename, if any */
};
/*
 * struct inotify_user_watch - our version of an inotify_watch, we add
 * a reference to the associated inotify_device.
 */
struct inotify_user_watch {
	struct inotify_device	*dev;	/* associated device */
	struct inotify_watch	wdata;	/* inotify watch data */
};
#ifdef CONFIG_SYSCTL

#include <linux/sysctl.h>

static int zero;

ctl_table inotify_table[] = {
	{
		.ctl_name	= INOTIFY_MAX_USER_INSTANCES,
		.procname	= "max_user_instances",
		.data		= &inotify_max_user_instances,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1		= &zero,
	},
	{
		.ctl_name	= INOTIFY_MAX_USER_WATCHES,
		.procname	= "max_user_watches",
		.data		= &inotify_max_user_watches,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1		= &zero,
	},
	{
		.ctl_name	= INOTIFY_MAX_QUEUED_EVENTS,
		.procname	= "max_queued_events",
		.data		= &inotify_max_queued_events,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1		= &zero
	},
	{ .ctl_name = 0 }
};
#endif /* CONFIG_SYSCTL */
static inline void get_inotify_dev(struct inotify_device *dev)
{
	atomic_inc(&dev->count);
}

static inline void put_inotify_dev(struct inotify_device *dev)
{
	if (atomic_dec_and_test(&dev->count)) {
		atomic_dec(&dev->user->inotify_devs);
		free_uid(dev->user);
		kfree(dev);
	}
}
/*
 * free_inotify_user_watch - cleans up the watch and its references
 */
static void free_inotify_user_watch(struct inotify_watch *w)
{
	struct inotify_user_watch *watch;
	struct inotify_device *dev;

	watch = container_of(w, struct inotify_user_watch, wdata);
	dev = watch->dev;

	atomic_dec(&dev->user->inotify_watches);
	put_inotify_dev(dev);
	kmem_cache_free(watch_cachep, watch);
}
/*
 * kernel_event - create a new kernel event with the given parameters
 *
 * This function can sleep.
 */
static struct inotify_kernel_event * kernel_event(s32 wd, u32 mask, u32 cookie,
						  const char *name)
{
	struct inotify_kernel_event *kevent;

	kevent = kmem_cache_alloc(event_cachep, GFP_NOFS);
	if (unlikely(!kevent))
		return NULL;

	/* we hand this out to user-space, so zero it just in case */
	memset(&kevent->event, 0, sizeof(struct inotify_event));

	kevent->event.wd = wd;
	kevent->event.mask = mask;
	kevent->event.cookie = cookie;

	INIT_LIST_HEAD(&kevent->list);

	if (name) {
		size_t len, rem, event_size = sizeof(struct inotify_event);

		/*
		 * We need to pad the filename so as to properly align an
		 * array of inotify_event structures.  Because the structure is
		 * small and the common case is a small filename, we just round
		 * up to the next multiple of the structure's sizeof.  This is
		 * simple and safe for all architectures.
		 */
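		/*
		 * Worked example (illustrative): on an architecture where
		 * sizeof(struct inotify_event) is 16, the name "foo" gives
		 * len = 4 and rem = 12, so 16 bytes are allocated; a name of
		 * 17 characters gives len = 18 and rem = 14, for 32 bytes.
		 */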
		len = strlen(name) + 1;
		rem = event_size - len;
		if (len > event_size) {
			rem = event_size - (len % event_size);
			if (len % event_size == 0)
				rem = 0;
		}

		kevent->name = kmalloc(len + rem, GFP_KERNEL);
		if (unlikely(!kevent->name)) {
			kmem_cache_free(event_cachep, kevent);
			return NULL;
		}
		memcpy(kevent->name, name, len);
		memset(kevent->name + len, 0, rem);
		kevent->event.len = len + rem;
	} else {
		kevent->event.len = 0;
		kevent->name = NULL;
	}

	return kevent;
}
/*
 * inotify_dev_get_event - return the next event in the given dev's queue
 *
 * Caller must hold dev->ev_mutex.
 */
static inline struct inotify_kernel_event *
inotify_dev_get_event(struct inotify_device *dev)
{
	return list_entry(dev->events.next, struct inotify_kernel_event, list);
}
/*
 * inotify_dev_queue_event - event handler registered with core inotify, adds
 * a new event to the given device
 *
 * Can sleep (calls kernel_event()).
 */
static void inotify_dev_queue_event(struct inotify_watch *w, u32 wd, u32 mask,
				    u32 cookie, const char *name,
				    struct inode *ignored)
{
	struct inotify_user_watch *watch;
	struct inotify_device *dev;
	struct inotify_kernel_event *kevent, *last;

	watch = container_of(w, struct inotify_user_watch, wdata);
	dev = watch->dev;

	mutex_lock(&dev->ev_mutex);

	/* we can safely put the watch as we don't reference it while
	 * generating the event
	 */
	if (mask & IN_IGNORED || mask & IN_ONESHOT)
		put_inotify_watch(w); /* final put */

	/* coalescing: drop this event if it is a dupe of the previous */
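	/*
	 * E.g. a burst of identical IN_MODIFY events for the same wd and
	 * name can reach user-space as a single queued event.
	 */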
	last = inotify_dev_get_event(dev);
	if (last && last->event.mask == mask && last->event.wd == wd &&
			last->event.cookie == cookie) {
		const char *lastname = last->name;

		if (!name && !lastname)
			goto out;
		if (name && lastname && !strcmp(lastname, name))
			goto out;
	}

	/* the queue overflowed and we already sent the Q_OVERFLOW event */
	if (unlikely(dev->event_count > dev->max_events))
		goto out;

	/* if the queue overflows, we need to notify user space */
	if (unlikely(dev->event_count == dev->max_events))
		kevent = kernel_event(-1, IN_Q_OVERFLOW, cookie, NULL);
	else
		kevent = kernel_event(wd, mask, cookie, name);

	if (unlikely(!kevent))
		goto out;

	/* queue the event and wake up anyone waiting */
	dev->event_count++;
	dev->queue_size += sizeof(struct inotify_event) + kevent->event.len;
	list_add_tail(&kevent->list, &dev->events);
	wake_up_interruptible(&dev->wq);

out:
	mutex_unlock(&dev->ev_mutex);
}
/*
 * remove_kevent - cleans up and ultimately frees the given kevent
 *
 * Caller must hold dev->ev_mutex.
 */
static void remove_kevent(struct inotify_device *dev,
			  struct inotify_kernel_event *kevent)
{
	list_del(&kevent->list);

	dev->event_count--;
	dev->queue_size -= sizeof(struct inotify_event) + kevent->event.len;

	kfree(kevent->name);
	kmem_cache_free(event_cachep, kevent);
}
/*
 * inotify_dev_event_dequeue - destroy an event on the given device
 *
 * Caller must hold dev->ev_mutex.
 */
static void inotify_dev_event_dequeue(struct inotify_device *dev)
{
	if (!list_empty(&dev->events)) {
		struct inotify_kernel_event *kevent;
		kevent = inotify_dev_get_event(dev);
		remove_kevent(dev, kevent);
	}
}
/*
 * find_inode - resolve a user-given path to a specific inode and return a nd
 */
static int find_inode(const char __user *dirname, struct nameidata *nd,
		      unsigned flags)
{
	int error;

	error = __user_walk(dirname, flags, nd);
	if (error)
		return error;
	/* you can only watch an inode if you have read permissions on it */
	error = vfs_permission(nd, MAY_READ);
	if (error)
		path_release(nd);
	return error;
}
/*
 * create_watch - creates a watch on the given device.
 *
 * Callers must hold dev->up_mutex.
 */
static int create_watch(struct inotify_device *dev, struct inode *inode,
			u32 mask)
{
	struct inotify_user_watch *watch;
	int ret;

	if (atomic_read(&dev->user->inotify_watches) >=
			inotify_max_user_watches)
		return -ENOSPC;

	watch = kmem_cache_alloc(watch_cachep, GFP_KERNEL);
	if (unlikely(!watch))
		return -ENOMEM;

	/* save a reference to device and bump the count to make it official */
	get_inotify_dev(dev);
	watch->dev = dev;

	atomic_inc(&dev->user->inotify_watches);

	inotify_init_watch(&watch->wdata);
	ret = inotify_add_watch(dev->ih, &watch->wdata, inode, mask);
	if (ret < 0)
		free_inotify_user_watch(&watch->wdata);

	return ret;
}
/* Device Interface */

static unsigned int inotify_poll(struct file *file, poll_table *wait)
{
	struct inotify_device *dev = file->private_data;
	int ret = 0;

	poll_wait(file, &dev->wq, wait);
	mutex_lock(&dev->ev_mutex);
	if (!list_empty(&dev->events))
		ret = POLLIN | POLLRDNORM;
	mutex_unlock(&dev->ev_mutex);

	return ret;
}
static ssize_t inotify_read(struct file *file, char __user *buf,
			    size_t count, loff_t *pos)
{
	size_t event_size = sizeof (struct inotify_event);
	struct inotify_device *dev;
	char __user *start;
	int ret;
	DEFINE_WAIT(wait);

	start = buf;
	dev = file->private_data;

	while (1) {
		int events;

		prepare_to_wait(&dev->wq, &wait, TASK_INTERRUPTIBLE);

		mutex_lock(&dev->ev_mutex);
		events = !list_empty(&dev->events);
		mutex_unlock(&dev->ev_mutex);
		if (events) {
			ret = 0;
			break;
		}

		if (file->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		schedule();
	}

	finish_wait(&dev->wq, &wait);
	if (ret)
		return ret;

	mutex_lock(&dev->ev_mutex);
	while (1) {
		struct inotify_kernel_event *kevent;

		ret = buf - start;
		if (list_empty(&dev->events))
			break;

		kevent = inotify_dev_get_event(dev);
		if (event_size + kevent->event.len > count) {
			if (ret == 0 && count > 0) {
				/*
				 * could not get a single event because we
				 * didn't have enough buffer space.
				 */
				ret = -EINVAL;
			}
			break;
		}

		if (copy_to_user(buf, &kevent->event, event_size)) {
			ret = -EFAULT;
			break;
		}
		buf += event_size;
		count -= event_size;

		if (kevent->name) {
			if (copy_to_user(buf, kevent->name, kevent->event.len)){
				ret = -EFAULT;
				break;
			}
			buf += kevent->event.len;
			count -= kevent->event.len;
		}

		remove_kevent(dev, kevent);
	}
	mutex_unlock(&dev->ev_mutex);

	return ret;
}
static int inotify_release(struct inode *ignored, struct file *file)
{
	struct inotify_device *dev = file->private_data;

	inotify_destroy(dev->ih);

	/* destroy all of the events on this device */
	mutex_lock(&dev->ev_mutex);
	while (!list_empty(&dev->events))
		inotify_dev_event_dequeue(dev);
	mutex_unlock(&dev->ev_mutex);

	/* free this device: the put matching the get in inotify_init() */
	put_inotify_dev(dev);

	return 0;
}
static long inotify_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	struct inotify_device *dev;
	void __user *p;
	int ret = -ENOTTY;

	dev = file->private_data;
	p = (void __user *) arg;

	switch (cmd) {
	case FIONREAD:
		ret = put_user(dev->queue_size, (int __user *) p);
		break;
	}

	return ret;
}
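/*
 * Illustrative user-space use of the ioctl above: FIONREAD reports the
 * number of queued bytes, so an application can size its buffer to drain
 * the whole queue in a single read(), e.g.:
 *
 *	unsigned int queued;
 *	ioctl(fd, FIONREAD, &queued);
 */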
static const struct file_operations inotify_fops = {
	.poll		= inotify_poll,
	.read		= inotify_read,
	.release	= inotify_release,
	.unlocked_ioctl	= inotify_ioctl,
	.compat_ioctl	= inotify_ioctl,
};
static const struct inotify_operations inotify_user_ops = {
	.handle_event	= inotify_dev_queue_event,
	.destroy_watch	= free_inotify_user_watch,
};
asmlinkage long sys_inotify_init(void)
{
	struct inotify_device *dev;
	struct inotify_handle *ih;
	struct user_struct *user;
	struct file *filp;
	int fd, ret;

	fd = get_unused_fd();
	if (fd < 0)
		return fd;

	filp = get_empty_filp();
	if (!filp) {
		ret = -ENFILE;
		goto out_put_fd;
	}

	user = get_uid(current->user);
	if (unlikely(atomic_read(&user->inotify_devs) >=
			inotify_max_user_instances)) {
		ret = -EMFILE;
		goto out_free_uid;
	}

	dev = kmalloc(sizeof(struct inotify_device), GFP_KERNEL);
	if (unlikely(!dev)) {
		ret = -ENOMEM;
		goto out_free_uid;
	}

	ih = inotify_init(&inotify_user_ops);
	if (unlikely(IS_ERR(ih))) {
		ret = PTR_ERR(ih);
		goto out_free_dev;
	}
	dev->ih = ih;

	filp->f_op = &inotify_fops;
	filp->f_path.mnt = mntget(inotify_mnt);
	filp->f_path.dentry = dget(inotify_mnt->mnt_root);
	filp->f_mapping = filp->f_path.dentry->d_inode->i_mapping;
	filp->f_mode = FMODE_READ;
	filp->f_flags = O_RDONLY;
	filp->private_data = dev;

	INIT_LIST_HEAD(&dev->events);
	init_waitqueue_head(&dev->wq);
	mutex_init(&dev->ev_mutex);
	mutex_init(&dev->up_mutex);
	dev->event_count = 0;
	dev->queue_size = 0;
	dev->max_events = inotify_max_queued_events;
	dev->user = user;
	atomic_set(&dev->count, 0);

	get_inotify_dev(dev);
	atomic_inc(&user->inotify_devs);
	fd_install(fd, filp);

	return fd;
out_free_dev:
	kfree(dev);
out_free_uid:
	free_uid(user);
	put_filp(filp);
out_put_fd:
	put_unused_fd(fd);
	return ret;
}
asmlinkage long sys_inotify_add_watch(int fd, const char __user *path, u32 mask)
{
	struct inode *inode;
	struct inotify_device *dev;
	struct nameidata nd;
	struct file *filp;
	int ret, fput_needed;
	unsigned flags = 0;

	filp = fget_light(fd, &fput_needed);
	if (unlikely(!filp))
		return -EBADF;

	/* verify that this is indeed an inotify instance */
	if (unlikely(filp->f_op != &inotify_fops)) {
		ret = -EINVAL;
		goto fput_and_out;
	}

	if (!(mask & IN_DONT_FOLLOW))
		flags |= LOOKUP_FOLLOW;
	if (mask & IN_ONLYDIR)
		flags |= LOOKUP_DIRECTORY;

	ret = find_inode(path, &nd, flags);
	if (unlikely(ret))
		goto fput_and_out;

	/* inode held in place by reference to nd; dev by fget on fd */
	inode = nd.dentry->d_inode;
	dev = filp->private_data;

	mutex_lock(&dev->up_mutex);
	ret = inotify_find_update_watch(dev->ih, inode, mask);
	if (ret == -ENOENT)
		ret = create_watch(dev, inode, mask);
	mutex_unlock(&dev->up_mutex);

	path_release(&nd);
fput_and_out:
	fput_light(filp, fput_needed);
	return ret;
}
asmlinkage long sys_inotify_rm_watch(int fd, u32 wd)
{
	struct file *filp;
	struct inotify_device *dev;
	int ret, fput_needed;

	filp = fget_light(fd, &fput_needed);
	if (unlikely(!filp))
		return -EBADF;

	/* verify that this is indeed an inotify instance */
	if (unlikely(filp->f_op != &inotify_fops)) {
		ret = -EINVAL;
		goto out;
	}

	dev = filp->private_data;

	/* we free our watch data when we get IN_IGNORED */
	ret = inotify_rm_wd(dev->ih, wd);

out:
	fput_light(filp, fput_needed);
	return ret;
}
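/*
 * Illustrative user-space usage of the three syscalls above (glibc
 * wrappers assumed; error handling omitted):
 *
 *	int fd = inotify_init();
 *	int wd = inotify_add_watch(fd, "/tmp", IN_CREATE | IN_DELETE);
 *	char buf[4096];
 *	ssize_t len = read(fd, buf, sizeof(buf));
 *	struct inotify_event *ev = (struct inotify_event *) buf;
 *	printf("wd=%d mask=%x name=%s\n", ev->wd, ev->mask,
 *	       ev->len ? ev->name : "");
 *	inotify_rm_watch(fd, wd);
 *	close(fd);
 */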
static int
inotify_get_sb(struct file_system_type *fs_type, int flags,
	       const char *dev_name, void *data, struct vfsmount *mnt)
{
	return get_sb_pseudo(fs_type, "inotify", NULL,
			     INOTIFYFS_SUPER_MAGIC, mnt);
}
static struct file_system_type inotify_fs_type = {
	.name		= "inotifyfs",
	.get_sb		= inotify_get_sb,
	.kill_sb	= kill_anon_super,
};
/*
 * inotify_user_setup - Our initialization function.  Note that we cannot return
 * error because we have compiled-in VFS hooks.  So an (unlikely) failure here
 * must result in panic().
 */
static int __init inotify_user_setup(void)
{
	int ret;

	ret = register_filesystem(&inotify_fs_type);
	if (unlikely(ret))
		panic("inotify: register_filesystem returned %d!\n", ret);

	inotify_mnt = kern_mount(&inotify_fs_type);
	if (IS_ERR(inotify_mnt))
		panic("inotify: kern_mount ret %ld!\n", PTR_ERR(inotify_mnt));

	inotify_max_queued_events = 16384;
	inotify_max_user_instances = 128;
	inotify_max_user_watches = 8192;

	watch_cachep = kmem_cache_create("inotify_watch_cache",
					 sizeof(struct inotify_user_watch),
					 0, SLAB_PANIC, NULL);
	event_cachep = kmem_cache_create("inotify_event_cache",
					 sizeof(struct inotify_kernel_event),
					 0, SLAB_PANIC, NULL);

	return 0;
}

module_init(inotify_user_setup);