/*
 * fs/inotify.c - inode-based file event notifications
 *
 * Authors:
 *	John McCutchan	<ttb@tentacle.dhs.org>
 *	Robert Love	<rml@novell.com>
 *
 * Copyright (C) 2005 John McCutchan
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/writeback.h>
#include <linux/inotify.h>
#include <linux/syscalls.h>

#include <asm/ioctls.h>

static atomic_t inotify_cookie;
static atomic_t inotify_watches;

static kmem_cache_t *watch_cachep;
static kmem_cache_t *event_cachep;

static struct vfsmount *inotify_mnt;

/* these are configurable via /proc/sys/fs/inotify/ */
int inotify_max_user_instances;
int inotify_max_user_watches;
int inotify_max_queued_events;

/*
 * Lock ordering:
 *
 * dentry->d_lock (used to keep d_move() away from dentry->d_parent)
 * iprune_sem (synchronizes shrink_icache_memory())
 * inode_lock (protects the super_block->s_inodes list)
 * inode->inotify_sem (protects inode->inotify_watches and watches->i_list)
 * inotify_dev->sem (protects inotify_device and watches->d_list)
 */

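/*
 * For example, any path that needs both an inode's and a device's semaphore
 * must nest them in the order listed above, taking inode->inotify_sem first
 * (as inotify_ignore() does below); a sketch:
 *
 *	down(&inode->inotify_sem);
 *	down(&dev->sem);
 *	...
 *	up(&dev->sem);
 *	up(&inode->inotify_sem);
 */
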
/*
 * Lifetimes of the three main data structures--inotify_device, inode, and
 * inotify_watch--are managed by reference count.
 *
 * inotify_device: Lifetime is from inotify_init() until release.  Additional
 * references can bump the count via get_inotify_dev() and drop the count via
 * put_inotify_dev().
 *
 * inotify_watch: Lifetime is from create_watch() to destroy_watch().
 * Additional references can bump the count via get_inotify_watch() and drop
 * the count via put_inotify_watch().
 *
 * inode: Pinned so long as the inode is associated with a watch, from
 * create_watch() to put_inotify_watch().
 */

/*
 * struct inotify_device - represents an inotify instance
 *
 * This structure is protected by the semaphore 'sem'.
 */
struct inotify_device {
        wait_queue_head_t wq;           /* wait queue for i/o */
        struct idr idr;                 /* idr mapping wd -> watch */
        struct semaphore sem;           /* protects this bad boy */
        struct list_head events;        /* list of queued events */
        struct list_head watches;       /* list of watches */
        atomic_t count;                 /* reference count */
        struct user_struct *user;       /* user who opened this dev */
        unsigned int queue_size;        /* size of the queue (bytes) */
        unsigned int event_count;       /* number of pending events */
        unsigned int max_events;        /* maximum number of events */
        u32 last_wd;                    /* the last wd allocated */
};

/*
 * struct inotify_kernel_event - An inotify event, originating from a watch and
 * queued for user-space.  A list of these is attached to each instance of the
 * device.  In read(), this list is walked and all events that can fit in the
 * buffer are returned.
 *
 * Protected by dev->sem of the device in which we are queued.
 */
struct inotify_kernel_event {
        struct inotify_event event;     /* the user-space event */
        struct list_head list;          /* entry in inotify_device's list */
        char *name;                     /* filename, if any */
};

/*
 * struct inotify_watch - represents a watch request on a specific inode
 *
 * d_list is protected by dev->sem of the associated watch->dev.
 * i_list and mask are protected by inode->inotify_sem of the associated inode.
 * dev, inode, and wd are never written to once the watch is created.
 */
struct inotify_watch {
        struct list_head d_list;        /* entry in inotify_device's list */
        struct list_head i_list;        /* entry in inode's list */
        atomic_t count;                 /* reference count */
        struct inotify_device *dev;     /* associated device */
        struct inode *inode;            /* associated inode */
        s32 wd;                         /* watch descriptor */
        u32 mask;                       /* event mask for this watch */
};

#ifdef CONFIG_SYSCTL

#include <linux/sysctl.h>

static int zero;

ctl_table inotify_table[] = {
        {
                .ctl_name     = INOTIFY_MAX_USER_INSTANCES,
                .procname     = "max_user_instances",
                .data         = &inotify_max_user_instances,
                .maxlen       = sizeof(int),
                .mode         = 0644,
                .proc_handler = &proc_dointvec_minmax,
                .strategy     = &sysctl_intvec,
                .extra1       = &zero,
        },
        {
                .ctl_name     = INOTIFY_MAX_USER_WATCHES,
                .procname     = "max_user_watches",
                .data         = &inotify_max_user_watches,
                .maxlen       = sizeof(int),
                .mode         = 0644,
                .proc_handler = &proc_dointvec_minmax,
                .strategy     = &sysctl_intvec,
                .extra1       = &zero,
        },
        {
                .ctl_name     = INOTIFY_MAX_QUEUED_EVENTS,
                .procname     = "max_queued_events",
                .data         = &inotify_max_queued_events,
                .maxlen       = sizeof(int),
                .mode         = 0644,
                .proc_handler = &proc_dointvec_minmax,
                .strategy     = &sysctl_intvec,
                .extra1       = &zero
        },
        { .ctl_name = 0 }
};

#endif /* CONFIG_SYSCTL */

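/*
 * The table above surfaces these limits under /proc/sys/fs/inotify/.  For
 * example, an administrator could raise the per-user watch limit at run
 * time with something like (illustrative value):
 *
 *	echo 16384 > /proc/sys/fs/inotify/max_user_watches
 */
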
static inline void get_inotify_dev(struct inotify_device *dev)
{
        atomic_inc(&dev->count);
}

static inline void put_inotify_dev(struct inotify_device *dev)
{
        if (atomic_dec_and_test(&dev->count)) {
                atomic_dec(&dev->user->inotify_devs);
                free_uid(dev->user);
                idr_destroy(&dev->idr);
                kfree(dev);
        }
}

static inline void get_inotify_watch(struct inotify_watch *watch)
{
        atomic_inc(&watch->count);
}

/*
 * put_inotify_watch - decrements the ref count on a given watch.  cleans up
 * the watch and its references if the count reaches zero.
 */
static inline void put_inotify_watch(struct inotify_watch *watch)
{
        if (atomic_dec_and_test(&watch->count)) {
                put_inotify_dev(watch->dev);
                iput(watch->inode);
                kmem_cache_free(watch_cachep, watch);
        }
}

/*
 * kernel_event - create a new kernel event with the given parameters
 *
 * This function can sleep.
 */
static struct inotify_kernel_event * kernel_event(s32 wd, u32 mask, u32 cookie,
                                                  const char *name)
{
        struct inotify_kernel_event *kevent;

        kevent = kmem_cache_alloc(event_cachep, GFP_KERNEL);
        if (unlikely(!kevent))
                return NULL;

        /* we hand this out to user-space, so zero it just in case */
        memset(&kevent->event, 0, sizeof(struct inotify_event));

        kevent->event.wd = wd;
        kevent->event.mask = mask;
        kevent->event.cookie = cookie;

        INIT_LIST_HEAD(&kevent->list);

        if (name) {
                size_t len, rem, event_size = sizeof(struct inotify_event);

                /*
                 * We need to pad the filename so as to properly align an
                 * array of inotify_event structures.  Because the structure
                 * is small and the common case is a small filename, we just
                 * round up to the next multiple of the structure's sizeof.
                 * This is simple and safe for all architectures.
                 */
                len = strlen(name) + 1;
                rem = event_size - len;
                if (len > event_size) {
                        rem = event_size - (len % event_size);
                        if (len % event_size == 0)
                                rem = 0;
                }
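
                /*
                 * Worked example of the rounding above, assuming the common
                 * 16-byte sizeof(struct inotify_event): for name "foo",
                 * len = 4 and rem = 12, so event.len = 16.  For a 20-char
                 * name, len = 21 takes the len > event_size branch and
                 * rem = 16 - (21 % 16) = 11, giving event.len = 32 -- again
                 * a multiple of the structure size.
                 */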

                kevent->name = kmalloc(len + rem, GFP_KERNEL);
                if (unlikely(!kevent->name)) {
                        kmem_cache_free(event_cachep, kevent);
                        return NULL;
                }
                memcpy(kevent->name, name, len);
                if (rem)
                        memset(kevent->name + len, 0, rem);
                kevent->event.len = len + rem;
        } else {
                kevent->event.len = 0;
                kevent->name = NULL;
        }

        return kevent;
}

/*
 * inotify_dev_get_event - return the next event in the given dev's queue
 *
 * Caller must hold dev->sem and must not call this on an empty queue: with
 * no events queued, the returned pointer is bogus, as it is computed from
 * the list head itself rather than from a real event.
 */
static inline struct inotify_kernel_event *
inotify_dev_get_event(struct inotify_device *dev)
{
        return list_entry(dev->events.next, struct inotify_kernel_event, list);
}

/*
 * inotify_dev_queue_event - add a new event to the given device
 *
 * Caller must hold dev->sem.  Can sleep (calls kernel_event()).
 */
static void inotify_dev_queue_event(struct inotify_device *dev,
                                    struct inotify_watch *watch, u32 mask,
                                    u32 cookie, const char *name)
{
        struct inotify_kernel_event *kevent, *last;

        /*
         * Coalescing: drop this event if it is a dupe of the previous one.
         * The most recently queued event sits at the tail of the list, and
         * the comparison is only meaningful when the queue is non-empty.
         */
        if (!list_empty(&dev->events)) {
                last = list_entry(dev->events.prev,
                                  struct inotify_kernel_event, list);
                if (last->event.mask == mask && last->event.wd == watch->wd &&
                    last->event.cookie == cookie) {
                        const char *lastname = last->name;

                        if (!name && !lastname)
                                return;
                        if (name && lastname && !strcmp(lastname, name))
                                return;
                }
        }

        /* the queue overflowed and we already sent the Q_OVERFLOW event */
        if (unlikely(dev->event_count > dev->max_events))
                return;

        /* if the queue overflows, we need to notify user space */
        if (unlikely(dev->event_count == dev->max_events))
                kevent = kernel_event(-1, IN_Q_OVERFLOW, cookie, NULL);
        else
                kevent = kernel_event(watch->wd, mask, cookie, name);

        if (unlikely(!kevent))
                return;

        /* queue the event and wake up anyone waiting */
        dev->event_count++;
        dev->queue_size += sizeof(struct inotify_event) + kevent->event.len;
        list_add_tail(&kevent->list, &dev->events);
        wake_up_interruptible(&dev->wq);
}

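/*
 * A concrete illustration of the coalescing above: if a watched file
 * generates two back-to-back IN_MODIFY events, the second call finds an
 * identical event (same mask, wd, cookie, and name) at the tail of the
 * queue and returns early, so user-space reads one IN_MODIFY, not two.
 */
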
/*
 * remove_kevent - cleans up and ultimately frees the given kevent
 *
 * Caller must hold dev->sem.
 */
static void remove_kevent(struct inotify_device *dev,
                          struct inotify_kernel_event *kevent)
{
        list_del(&kevent->list);

        dev->event_count--;
        dev->queue_size -= sizeof(struct inotify_event) + kevent->event.len;

        kfree(kevent->name);
        kmem_cache_free(event_cachep, kevent);
}

/*
 * inotify_dev_event_dequeue - destroy an event on the given device
 *
 * Caller must hold dev->sem.
 */
static void inotify_dev_event_dequeue(struct inotify_device *dev)
{
        if (!list_empty(&dev->events)) {
                struct inotify_kernel_event *kevent;
                kevent = inotify_dev_get_event(dev);
                remove_kevent(dev, kevent);
        }
}

/*
 * inotify_dev_get_wd - returns the next WD for use by the given dev
 *
 * Callers must hold dev->sem.  This function can sleep.
 */
static int inotify_dev_get_wd(struct inotify_device *dev,
                              struct inotify_watch *watch)
{
        int ret;
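
        /*
         * The loop below is the standard idr idiom: idr_pre_get()
         * preallocates into the idr's internal free list and fails only on
         * OOM, while the insertion itself returns -EAGAIN when that
         * preallocation proves insufficient, in which case we preallocate
         * again and retry.
         */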
        do {
                if (unlikely(!idr_pre_get(&dev->idr, GFP_KERNEL)))
                        return -ENOSPC;
                ret = idr_get_new_above(&dev->idr, watch, dev->last_wd+1,
                                        &watch->wd);
        } while (ret == -EAGAIN);

        return ret;
}

/*
 * find_inode - resolve a user-given path to a specific inode and return a nd
 */
static int find_inode(const char __user *dirname, struct nameidata *nd,
                      unsigned flags)
{
        int error;

        error = __user_walk(dirname, flags, nd);
        if (error)
                return error;
        /* you can only watch an inode if you have read permissions on it */
        error = vfs_permission(nd, MAY_READ);
        if (error)
                path_release(nd);
        return error;
}

/*
 * create_watch - creates a watch on the given device.
 *
 * Callers must hold dev->sem.  Calls inotify_dev_get_wd() so may sleep.
 * Both 'dev' and 'inode' (by way of nameidata) need to be pinned.
 */
static struct inotify_watch *create_watch(struct inotify_device *dev,
                                          u32 mask, struct inode *inode)
{
        struct inotify_watch *watch;
        int ret;

        if (atomic_read(&dev->user->inotify_watches) >=
                        inotify_max_user_watches)
                return ERR_PTR(-ENOSPC);

        watch = kmem_cache_alloc(watch_cachep, GFP_KERNEL);
        if (unlikely(!watch))
                return ERR_PTR(-ENOMEM);

        ret = inotify_dev_get_wd(dev, watch);
        if (unlikely(ret)) {
                kmem_cache_free(watch_cachep, watch);
                return ERR_PTR(ret);
        }

        dev->last_wd = watch->wd;
        watch->mask = mask;
        atomic_set(&watch->count, 0);
        INIT_LIST_HEAD(&watch->d_list);
        INIT_LIST_HEAD(&watch->i_list);

        /* save a reference to device and bump the count to make it official */
        get_inotify_dev(dev);
        watch->dev = dev;

        /*
         * Save a reference to the inode and bump the ref count to make it
         * official.  We hold a reference to nameidata, which makes this safe.
         */
        watch->inode = igrab(inode);

        /* bump our own count, corresponding to our entry in dev->watches */
        get_inotify_watch(watch);

        atomic_inc(&dev->user->inotify_watches);
        atomic_inc(&inotify_watches);

        return watch;
}

/*
 * inode_find_dev - find the watch associated with the given inode and dev
 *
 * Callers must hold inode->inotify_sem.
 */
static struct inotify_watch *inode_find_dev(struct inode *inode,
                                            struct inotify_device *dev)
{
        struct inotify_watch *watch;

        list_for_each_entry(watch, &inode->inotify_watches, i_list) {
                if (watch->dev == dev)
                        return watch;
        }

        return NULL;
}

/*
 * remove_watch_no_event - remove_watch() without the IN_IGNORED event.
 */
static void remove_watch_no_event(struct inotify_watch *watch,
                                  struct inotify_device *dev)
{
        list_del(&watch->i_list);
        list_del(&watch->d_list);

        atomic_dec(&dev->user->inotify_watches);
        atomic_dec(&inotify_watches);
        idr_remove(&dev->idr, watch->wd);
        put_inotify_watch(watch);
}

/*
 * remove_watch - Remove a watch from both the device and the inode.  Sends
 * the IN_IGNORED event to the given device signifying that the inode is no
 * longer watched.
 *
 * Callers must hold both inode->inotify_sem and dev->sem.  The watch's
 * reference to the inode (and possibly the inode itself, via iput()) is
 * dropped by the put_inotify_watch() inside remove_watch_no_event().
 */
static void remove_watch(struct inotify_watch *watch,
                         struct inotify_device *dev)
{
        inotify_dev_queue_event(dev, watch, IN_IGNORED, 0, NULL);
        remove_watch_no_event(watch, dev);
}

/*
 * inotify_inode_watched - returns nonzero if there are watches on this inode
 * and zero otherwise.  We call this lockless; we do not care if we race.
 */
static inline int inotify_inode_watched(struct inode *inode)
{
        return !list_empty(&inode->inotify_watches);
}

/* Kernel API */

/**
 * inotify_inode_queue_event - queue an event to all watches on this inode
 * @inode: inode event is originating from
 * @mask: event mask describing this event
 * @cookie: cookie for synchronization, or zero
 * @name: filename, if any
 */
void inotify_inode_queue_event(struct inode *inode, u32 mask, u32 cookie,
                               const char *name)
{
        struct inotify_watch *watch, *next;

        if (!inotify_inode_watched(inode))
                return;

        down(&inode->inotify_sem);
        list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) {
                u32 watch_mask = watch->mask;
                if (watch_mask & mask) {
                        struct inotify_device *dev = watch->dev;
                        get_inotify_watch(watch);
                        down(&dev->sem);
                        inotify_dev_queue_event(dev, watch, mask, cookie, name);
                        if (watch_mask & IN_ONESHOT)
                                remove_watch_no_event(watch, dev);
                        up(&dev->sem);
                        put_inotify_watch(watch);
                }
        }
        up(&inode->inotify_sem);
}
EXPORT_SYMBOL_GPL(inotify_inode_queue_event);

/**
 * inotify_dentry_parent_queue_event - queue an event to a dentry's parent
 * @dentry: the dentry in question, we queue against this dentry's parent
 * @mask: event mask describing this event
 * @cookie: cookie for synchronization, or zero
 * @name: filename, if any
 */
void inotify_dentry_parent_queue_event(struct dentry *dentry, u32 mask,
                                       u32 cookie, const char *name)
{
        struct dentry *parent;
        struct inode *inode;

        if (!atomic_read(&inotify_watches))
                return;

        spin_lock(&dentry->d_lock);
        parent = dentry->d_parent;
        inode = parent->d_inode;

        if (inotify_inode_watched(inode)) {
                dget(parent);
                spin_unlock(&dentry->d_lock);
                inotify_inode_queue_event(inode, mask, cookie, name);
                dput(parent);
        } else
                spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL_GPL(inotify_dentry_parent_queue_event);

/**
 * inotify_get_cookie - return a unique cookie for use in synchronizing events.
 */
u32 inotify_get_cookie(void)
{
        return atomic_inc_return(&inotify_cookie);
}
EXPORT_SYMBOL_GPL(inotify_get_cookie);

/**
 * inotify_unmount_inodes - an sb is unmounting.  handle any watched inodes.
 * @list: list of inodes being unmounted (sb->s_inodes)
 *
 * Called with inode_lock held, protecting the unmounting super block's list
 * of inodes, and with iprune_sem held, keeping shrink_icache_memory() at bay.
 * We temporarily drop inode_lock, however, and CAN block.
 */
void inotify_unmount_inodes(struct list_head *list)
{
        struct inode *inode, *next_i, *need_iput = NULL;

        list_for_each_entry_safe(inode, next_i, list, i_sb_list) {
                struct inotify_watch *watch, *next_w;
                struct inode *need_iput_tmp;
                struct list_head *watches;

                /*
                 * If i_count is zero, the inode cannot have any watches and
                 * doing an __iget/iput with MS_ACTIVE clear would actually
                 * evict all inodes with zero i_count from icache which is
                 * unnecessarily violent and may in fact be illegal to do.
                 */
                if (!atomic_read(&inode->i_count))
                        continue;

                /*
                 * We cannot __iget() an inode in state I_CLEAR, I_FREEING, or
                 * I_WILL_FREE which is fine because by that point the inode
                 * cannot have any associated watches.
                 */
                if (inode->i_state & (I_CLEAR | I_FREEING | I_WILL_FREE))
                        continue;

                need_iput_tmp = need_iput;
                need_iput = NULL;
                /* In case the remove_watch() drops a reference. */
                if (inode != need_iput_tmp)
                        __iget(inode);
                else
                        need_iput_tmp = NULL;
                /* In case the dropping of a reference would nuke next_i. */
                if ((&next_i->i_sb_list != list) &&
                                atomic_read(&next_i->i_count) &&
                                !(next_i->i_state & (I_CLEAR | I_FREEING |
                                        I_WILL_FREE))) {
                        __iget(next_i);
                        need_iput = next_i;
                }

                /*
                 * We can safely drop inode_lock here because we hold
                 * references on both inode and next_i.  Also no new inodes
                 * will be added since the umount has begun.  Finally,
                 * iprune_sem keeps shrink_icache_memory() away.
                 */
                spin_unlock(&inode_lock);

                if (need_iput_tmp)
                        iput(need_iput_tmp);

                /* for each watch, send IN_UNMOUNT and then remove it */
                down(&inode->inotify_sem);
                watches = &inode->inotify_watches;
                list_for_each_entry_safe(watch, next_w, watches, i_list) {
                        struct inotify_device *dev = watch->dev;
                        down(&dev->sem);
                        inotify_dev_queue_event(dev, watch, IN_UNMOUNT, 0, NULL);
                        remove_watch(watch, dev);
                        up(&dev->sem);
                }
                up(&inode->inotify_sem);
                iput(inode);

                spin_lock(&inode_lock);
        }
}
EXPORT_SYMBOL_GPL(inotify_unmount_inodes);

/**
 * inotify_inode_is_dead - an inode has been deleted, cleanup any watches
 * @inode: inode that is about to be removed
 */
void inotify_inode_is_dead(struct inode *inode)
{
        struct inotify_watch *watch, *next;

        down(&inode->inotify_sem);
        list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) {
                struct inotify_device *dev = watch->dev;
                down(&dev->sem);
                remove_watch(watch, dev);
                up(&dev->sem);
        }
        up(&inode->inotify_sem);
}
EXPORT_SYMBOL_GPL(inotify_inode_is_dead);

/* Device Interface */

static unsigned int inotify_poll(struct file *file, poll_table *wait)
{
        struct inotify_device *dev = file->private_data;
        int ret = 0;

        poll_wait(file, &dev->wq, wait);
        down(&dev->sem);
        if (!list_empty(&dev->events))
                ret = POLLIN | POLLRDNORM;
        up(&dev->sem);

        return ret;
}

static ssize_t inotify_read(struct file *file, char __user *buf,
                            size_t count, loff_t *pos)
{
        size_t event_size = sizeof(struct inotify_event);
        struct inotify_device *dev;
        char __user *start;
        int ret;
        DEFINE_WAIT(wait);

        start = buf;
        dev = file->private_data;

        while (1) {
                int events;

                prepare_to_wait(&dev->wq, &wait, TASK_INTERRUPTIBLE);

                down(&dev->sem);
                events = !list_empty(&dev->events);
                up(&dev->sem);
                if (events) {
                        ret = 0;
                        break;
                }

                if (file->f_flags & O_NONBLOCK) {
                        ret = -EAGAIN;
                        break;
                }

                if (signal_pending(current)) {
                        ret = -EINTR;
                        break;
                }

                schedule();
        }

        finish_wait(&dev->wq, &wait);
        if (ret)
                return ret;

        down(&dev->sem);
        while (1) {
                struct inotify_kernel_event *kevent;

                ret = buf - start;
                if (list_empty(&dev->events))
                        break;

                kevent = inotify_dev_get_event(dev);
                if (event_size + kevent->event.len > count)
                        break;

                if (copy_to_user(buf, &kevent->event, event_size)) {
                        ret = -EFAULT;
                        break;
                }
                buf += event_size;
                count -= event_size;

                if (kevent->name) {
                        if (copy_to_user(buf, kevent->name, kevent->event.len)) {
                                ret = -EFAULT;
                                break;
                        }
                        buf += kevent->event.len;
                        count -= kevent->event.len;
                }

                remove_kevent(dev, kevent);
        }
        up(&dev->sem);

        return ret;
}

static int inotify_release(struct inode *ignored, struct file *file)
{
        struct inotify_device *dev = file->private_data;

        /*
         * Destroy all of the watches on this device.  Unfortunately, not very
         * pretty.  We cannot do a simple iteration over the list, because we
         * do not know the inode until we iterate to the watch.  But we need to
         * hold inode->inotify_sem before dev->sem.  The following works.
         */
        while (1) {
                struct inotify_watch *watch;
                struct list_head *watches;
                struct inode *inode;

                down(&dev->sem);
                watches = &dev->watches;
                if (list_empty(watches)) {
                        up(&dev->sem);
                        break;
                }
                watch = list_entry(watches->next, struct inotify_watch, d_list);
                get_inotify_watch(watch);
                up(&dev->sem);

                inode = watch->inode;
                down(&inode->inotify_sem);
                down(&dev->sem);
                remove_watch_no_event(watch, dev);
                up(&dev->sem);
                up(&inode->inotify_sem);
                put_inotify_watch(watch);
        }

        /* destroy all of the events on this device */
        down(&dev->sem);
        while (!list_empty(&dev->events))
                inotify_dev_event_dequeue(dev);
        up(&dev->sem);

        /* free this device: the put matching the get in inotify_init() */
        put_inotify_dev(dev);

        return 0;
}

/*
 * inotify_ignore - remove a given wd from this inotify instance.
 *
 * Can sleep.
 */
static int inotify_ignore(struct inotify_device *dev, s32 wd)
{
        struct inotify_watch *watch;
        struct inode *inode;

        down(&dev->sem);
        watch = idr_find(&dev->idr, wd);
        if (unlikely(!watch)) {
                up(&dev->sem);
                return -EINVAL;
        }
        get_inotify_watch(watch);
        inode = watch->inode;
        up(&dev->sem);

        down(&inode->inotify_sem);
        down(&dev->sem);

        /*
         * Make sure that we did not race: the watch may have been removed
         * (or the wd even reused) while we dropped dev->sem above.  We keep
         * our own reference, so the put_inotify_watch() below is safe in
         * either case.
         */
        if (likely(idr_find(&dev->idr, wd) == watch))
                remove_watch(watch, dev);

        up(&dev->sem);
        up(&inode->inotify_sem);
        put_inotify_watch(watch);

        return 0;
}

static long inotify_ioctl(struct file *file, unsigned int cmd,
                          unsigned long arg)
{
        struct inotify_device *dev;
        void __user *p;
        int ret = -ENOTTY;

        dev = file->private_data;
        p = (void __user *) arg;

        switch (cmd) {
        case FIONREAD:
                ret = put_user(dev->queue_size, (int __user *) p);
                break;
        }

        return ret;
}

static struct file_operations inotify_fops = {
        .poll           = inotify_poll,
        .read           = inotify_read,
        .release        = inotify_release,
        .unlocked_ioctl = inotify_ioctl,
        .compat_ioctl   = inotify_ioctl,
};

asmlinkage long sys_inotify_init(void)
{
        struct inotify_device *dev;
        struct user_struct *user;
        struct file *filp;
        int fd, ret;

        fd = get_unused_fd();
        if (fd < 0)
                return fd;

        filp = get_empty_filp();
        if (!filp) {
                ret = -ENFILE;
                goto out_put_fd;
        }

        user = get_uid(current->user);
        if (unlikely(atomic_read(&user->inotify_devs) >=
                        inotify_max_user_instances)) {
                ret = -EMFILE;
                goto out_free_uid;
        }

        dev = kmalloc(sizeof(struct inotify_device), GFP_KERNEL);
        if (unlikely(!dev)) {
                ret = -ENOMEM;
                goto out_free_uid;
        }

        filp->f_op = &inotify_fops;
        filp->f_vfsmnt = mntget(inotify_mnt);
        filp->f_dentry = dget(inotify_mnt->mnt_root);
        filp->f_mapping = filp->f_dentry->d_inode->i_mapping;
        filp->f_mode = FMODE_READ;
        filp->f_flags = O_RDONLY;
        filp->private_data = dev;

        idr_init(&dev->idr);
        INIT_LIST_HEAD(&dev->events);
        INIT_LIST_HEAD(&dev->watches);
        init_waitqueue_head(&dev->wq);
        sema_init(&dev->sem, 1);
        dev->event_count = 0;
        dev->queue_size = 0;
        dev->max_events = inotify_max_queued_events;
        dev->user = user;
        dev->last_wd = 0;
        atomic_set(&dev->count, 0);

        get_inotify_dev(dev);
        atomic_inc(&user->inotify_devs);
        fd_install(fd, filp);

        return fd;
out_free_uid:
        free_uid(user);
        put_filp(filp);
out_put_fd:
        put_unused_fd(fd);
        return ret;
}

asmlinkage long sys_inotify_add_watch(int fd, const char __user *path, u32 mask)
{
        struct inotify_watch *watch, *old;
        struct inode *inode;
        struct inotify_device *dev;
        struct nameidata nd;
        struct file *filp;
        int ret, fput_needed;
        int mask_add = 0;
        unsigned flags = 0;

        filp = fget_light(fd, &fput_needed);
        if (unlikely(!filp))
                return -EBADF;

        /* verify that this is indeed an inotify instance */
        if (unlikely(filp->f_op != &inotify_fops)) {
                ret = -EINVAL;
                goto fput_and_out;
        }

        if (!(mask & IN_DONT_FOLLOW))
                flags |= LOOKUP_FOLLOW;
        if (mask & IN_ONLYDIR)
                flags |= LOOKUP_DIRECTORY;

        ret = find_inode(path, &nd, flags);
        if (unlikely(ret))
                goto fput_and_out;

        /* inode held in place by reference to nd; dev by fget on fd */
        inode = nd.dentry->d_inode;
        dev = filp->private_data;

        down(&inode->inotify_sem);
        down(&dev->sem);

        if (mask & IN_MASK_ADD)
                mask_add = 1;

        /* don't let user-space set invalid bits: we don't want flags set */
        mask &= IN_ALL_EVENTS | IN_ONESHOT;
        if (unlikely(!mask)) {
                ret = -EINVAL;
                goto out;
        }

        /*
         * Handle the case of re-adding a watch on an (inode,dev) pair that we
         * are already watching.  We just update the mask and return its wd.
         */
        old = inode_find_dev(inode, dev);
        if (unlikely(old)) {
                if (mask_add)
                        old->mask |= mask;
                else
                        old->mask = mask;
                ret = old->wd;
                goto out;
        }

        watch = create_watch(dev, mask, inode);
        if (unlikely(IS_ERR(watch))) {
                ret = PTR_ERR(watch);
                goto out;
        }

        /* Add the watch to the device's and the inode's list */
        list_add(&watch->d_list, &dev->watches);
        list_add(&watch->i_list, &inode->inotify_watches);
        ret = watch->wd;
out:
        up(&dev->sem);
        up(&inode->inotify_sem);
        path_release(&nd);
fput_and_out:
        fput_light(filp, fput_needed);
        return ret;
}

asmlinkage long sys_inotify_rm_watch(int fd, u32 wd)
{
        struct file *filp;
        struct inotify_device *dev;
        int ret, fput_needed;

        filp = fget_light(fd, &fput_needed);
        if (unlikely(!filp))
                return -EBADF;

        /* verify that this is indeed an inotify instance */
        if (unlikely(filp->f_op != &inotify_fops)) {
                ret = -EINVAL;
                goto out;
        }

        dev = filp->private_data;
        ret = inotify_ignore(dev, wd);

out:
        fput_light(filp, fput_needed);
        return ret;
}

static struct super_block *
inotify_get_sb(struct file_system_type *fs_type, int flags,
               const char *dev_name, void *data)
{
        return get_sb_pseudo(fs_type, "inotify", NULL, 0xBAD1DEA);
}

static struct file_system_type inotify_fs_type = {
        .name    = "inotifyfs",
        .get_sb  = inotify_get_sb,
        .kill_sb = kill_anon_super,
};

/*
 * inotify_setup - Our initialization function.  Note that we cannot return
 * error because we have compiled-in VFS hooks.  So an (unlikely) failure here
 * must result in panic().
 */
static int __init inotify_setup(void)
{
        int ret;

        ret = register_filesystem(&inotify_fs_type);
        if (unlikely(ret))
                panic("inotify: register_filesystem returned %d!\n", ret);

        inotify_mnt = kern_mount(&inotify_fs_type);
        if (IS_ERR(inotify_mnt))
                panic("inotify: kern_mount ret %ld!\n", PTR_ERR(inotify_mnt));

        inotify_max_queued_events = 16384;
        inotify_max_user_instances = 128;
        inotify_max_user_watches = 8192;

        atomic_set(&inotify_cookie, 0);
        atomic_set(&inotify_watches, 0);

        watch_cachep = kmem_cache_create("inotify_watch_cache",
                                         sizeof(struct inotify_watch),
                                         0, SLAB_PANIC, NULL, NULL);
        event_cachep = kmem_cache_create("inotify_event_cache",
                                         sizeof(struct inotify_kernel_event),
                                         0, SLAB_PANIC, NULL, NULL);

        return 0;
}

module_init(inotify_setup);

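/*
 * Illustrative user-space consumer of the interface implemented above --
 * a sketch only, kept out of the build via #if 0.  It assumes a libc that
 * exposes the inotify_init()/inotify_add_watch() wrappers through
 * <sys/inotify.h>; on systems without the wrappers, the raw syscalls would
 * be invoked instead.  Each read() returns as many whole events as fit in
 * the buffer, exactly as inotify_read() above copies them out, with each
 * event's total size being sizeof(struct inotify_event) + event->len.
 */
#if 0
#include <stdio.h>
#include <unistd.h>
#include <sys/inotify.h>

int main(int argc, char *argv[])
{
        /* align the buffer so casting into struct inotify_event is safe */
        char buf[16 * (sizeof(struct inotify_event) + 16)]
                __attribute__ ((aligned(__alignof__(struct inotify_event))));
        ssize_t len, i;
        int fd, wd;

        fd = inotify_init();
        if (fd < 0)
                return 1;

        /* watch the given directory (or cwd) for all event types */
        wd = inotify_add_watch(fd, argc > 1 ? argv[1] : ".", IN_ALL_EVENTS);
        if (wd < 0)
                return 1;

        /* blocks until at least one event is queued */
        len = read(fd, buf, sizeof(buf));

        i = 0;
        while (i + (ssize_t) sizeof(struct inotify_event) <= len) {
                struct inotify_event *ev = (struct inotify_event *) &buf[i];

                printf("wd=%d mask=%#x len=%u name=%s\n", ev->wd, ev->mask,
                       ev->len, ev->len ? ev->name : "");
                i += sizeof(struct inotify_event) + ev->len;
        }

        close(fd);
        return 0;
}
#endif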