/*
 * fs/inotify.c - inode-based file event notifications
 *
 * Authors:
 *	John McCutchan	<ttb@tentacle.dhs.org>
 *	Robert Love	<rml@novell.com>
 *
 * Copyright (C) 2005 John McCutchan
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/writeback.h>
#include <linux/inotify.h>

#include <asm/ioctls.h>

static atomic_t inotify_cookie;
static atomic_t inotify_watches;

static kmem_cache_t *watch_cachep;
static kmem_cache_t *event_cachep;

static struct vfsmount *inotify_mnt;

/* these are configurable via /proc/sys/fs/inotify/ */
int inotify_max_user_instances;
int inotify_max_user_watches;
int inotify_max_queued_events;

/*
 * Lock ordering:
 *
 * dentry->d_lock (used to keep d_move() away from dentry->d_parent)
 * iprune_sem (synchronize shrink_icache_memory())
 * inode_lock (protects the super_block->s_inodes list)
 * inode->inotify_sem (protects inode->inotify_watches and watches->i_list)
 * inotify_dev->sem (protects inotify_device and watches->d_list)
 */

/*
 * Lifetimes of the three main data structures--inotify_device, inode, and
 * inotify_watch--are managed by reference counts.
 *
 * inotify_device: Lifetime is from inotify_init() until release.  Additional
 * references can bump the count via get_inotify_dev() and drop the count via
 * put_inotify_dev().
 *
 * inotify_watch: Lifetime is from create_watch() to the final
 * put_inotify_watch().  Additional references can bump the count via
 * get_inotify_watch() and drop the count via put_inotify_watch().
 *
 * inode: Pinned so long as the inode is associated with a watch, from
 * create_watch() to put_inotify_watch().
 */

/*
 * struct inotify_device - represents an inotify instance
 *
 * This structure is protected by the semaphore 'sem'.
 */
struct inotify_device {
	wait_queue_head_t	wq;		/* wait queue for i/o */
	struct idr		idr;		/* idr mapping wd -> watch */
	struct semaphore	sem;		/* protects this bad boy */
	struct list_head	events;		/* list of queued events */
	struct list_head	watches;	/* list of watches */
	atomic_t		count;		/* reference count */
	struct user_struct	*user;		/* user who opened this dev */
	unsigned int		queue_size;	/* size of the queue (bytes) */
	unsigned int		event_count;	/* number of pending events */
	unsigned int		max_events;	/* maximum number of events */
	u32			last_wd;	/* the last wd allocated */
};

/*
 * struct inotify_kernel_event - An inotify event, originating from a watch and
 * queued for user-space.  A list of these is attached to each instance of the
 * device.  In read(), this list is walked and all events that can fit in the
 * buffer are returned.
 *
 * Protected by dev->sem of the device in which we are queued.
 */
struct inotify_kernel_event {
	struct inotify_event	event;	/* the user-space event */
	struct list_head	list;	/* entry in inotify_device's list */
	char			*name;	/* filename, if any */
};

/*
 * struct inotify_watch - represents a watch request on a specific inode
 *
 * d_list is protected by dev->sem of the associated watch->dev.
 * i_list and mask are protected by inode->inotify_sem of the associated inode.
 * dev, inode, and wd are never written to once the watch is created.
 */
struct inotify_watch {
	struct list_head	d_list;	/* entry in inotify_device's list */
	struct list_head	i_list;	/* entry in inode's list */
	atomic_t		count;	/* reference count */
	struct inotify_device	*dev;	/* associated device */
	struct inode		*inode;	/* associated inode */
	s32			wd;	/* watch descriptor */
	u32			mask;	/* event mask for this watch */
};

#ifdef CONFIG_SYSCTL

#include <linux/sysctl.h>

static int zero;

ctl_table inotify_table[] = {
	{
		.ctl_name	= INOTIFY_MAX_USER_INSTANCES,
		.procname	= "max_user_instances",
		.data		= &inotify_max_user_instances,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1		= &zero,
	},
	{
		.ctl_name	= INOTIFY_MAX_USER_WATCHES,
		.procname	= "max_user_watches",
		.data		= &inotify_max_user_watches,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1		= &zero,
	},
	{
		.ctl_name	= INOTIFY_MAX_QUEUED_EVENTS,
		.procname	= "max_queued_events",
		.data		= &inotify_max_queued_events,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1		= &zero
	},
	{ .ctl_name = 0 }
};
#endif /* CONFIG_SYSCTL */

static inline void get_inotify_dev(struct inotify_device *dev)
{
	atomic_inc(&dev->count);
}

static inline void put_inotify_dev(struct inotify_device *dev)
{
	if (atomic_dec_and_test(&dev->count)) {
		atomic_dec(&dev->user->inotify_devs);
		free_uid(dev->user);
		idr_destroy(&dev->idr);
		kfree(dev);
	}
}

static inline void get_inotify_watch(struct inotify_watch *watch)
{
	atomic_inc(&watch->count);
}

/*
 * put_inotify_watch - decrements the ref count on a given watch.  Cleans up
 * the watch and its references if the count reaches zero.
 */
static inline void put_inotify_watch(struct inotify_watch *watch)
{
	if (atomic_dec_and_test(&watch->count)) {
		put_inotify_dev(watch->dev);
		iput(watch->inode);
		kmem_cache_free(watch_cachep, watch);
	}
}

/*
 * kernel_event - create a new kernel event with the given parameters
 *
 * This function can sleep.
 */
static struct inotify_kernel_event * kernel_event(s32 wd, u32 mask, u32 cookie,
						  const char *name)
{
	struct inotify_kernel_event *kevent;

	kevent = kmem_cache_alloc(event_cachep, GFP_KERNEL);
	if (unlikely(!kevent))
		return NULL;

	/* we hand this out to user-space, so zero it just in case */
	memset(&kevent->event, 0, sizeof(struct inotify_event));

	kevent->event.wd = wd;
	kevent->event.mask = mask;
	kevent->event.cookie = cookie;

	INIT_LIST_HEAD(&kevent->list);

	if (name) {
		size_t len, rem, event_size = sizeof(struct inotify_event);

		/*
		 * We need to pad the filename so as to properly align an
		 * array of inotify_event structures.  Because the structure is
		 * small and the common case is a small filename, we just round
		 * up to the next multiple of the structure's sizeof.  This is
		 * simple and safe for all architectures.
		 */
		len = strlen(name) + 1;
		rem = event_size - len;
		if (len > event_size) {
			rem = event_size - (len % event_size);
			if (len % event_size == 0)
				rem = 0;
		}
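
		/*
		 * For example, assuming a 16-byte struct inotify_event:
		 * a 4-character name gives len 5 and rem 11 (16 bytes total),
		 * while a 19-character name gives len 20 and rem 12 (32 total).
		 */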

		kevent->name = kmalloc(len + rem, GFP_KERNEL);
		if (unlikely(!kevent->name)) {
			kmem_cache_free(event_cachep, kevent);
			return NULL;
		}
		memcpy(kevent->name, name, len);
		if (rem)
			memset(kevent->name + len, 0, rem);
		kevent->event.len = len + rem;
	} else {
		kevent->event.len = 0;
		kevent->name = NULL;
	}

	return kevent;
}

/*
 * inotify_dev_get_event - return the next event in the given dev's queue
 *
 * Caller must hold dev->sem.
 */
static inline struct inotify_kernel_event *
inotify_dev_get_event(struct inotify_device *dev)
{
	return list_entry(dev->events.next, struct inotify_kernel_event, list);
}

/*
 * inotify_dev_queue_event - add a new event to the given device
 *
 * Caller must hold dev->sem.  Can sleep (calls kernel_event()).
 */
static void inotify_dev_queue_event(struct inotify_device *dev,
				    struct inotify_watch *watch, u32 mask,
				    u32 cookie, const char *name)
{
	struct inotify_kernel_event *kevent, *last;

	/* coalescing: drop this event if it is a dupe of the previous */
	last = inotify_dev_get_event(dev);
	if (last && last->event.mask == mask && last->event.wd == watch->wd &&
			last->event.cookie == cookie) {
		const char *lastname = last->name;

		if (!name && !lastname)
			return;
		if (name && lastname && !strcmp(lastname, name))
			return;
	}
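
	/*
	 * Note: the coalescing above means, e.g., that two back-to-back
	 * IN_MODIFY events on the same wd with identical (or absent) names
	 * reach user-space as a single queued event.
	 */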

	/* the queue overflowed and we already sent the Q_OVERFLOW event */
	if (unlikely(dev->event_count > dev->max_events))
		return;

	/* if the queue overflows, we need to notify user space */
	if (unlikely(dev->event_count == dev->max_events))
		kevent = kernel_event(-1, IN_Q_OVERFLOW, cookie, NULL);
	else
		kevent = kernel_event(watch->wd, mask, cookie, name);

	if (unlikely(!kevent))
		return;

	/* queue the event and wake up anyone waiting */
	dev->event_count++;
	dev->queue_size += sizeof(struct inotify_event) + kevent->event.len;
	list_add_tail(&kevent->list, &dev->events);
	wake_up_interruptible(&dev->wq);
}

/*
 * remove_kevent - cleans up and ultimately frees the given kevent
 *
 * Caller must hold dev->sem.
 */
static void remove_kevent(struct inotify_device *dev,
			  struct inotify_kernel_event *kevent)
{
	list_del(&kevent->list);

	dev->event_count--;
	dev->queue_size -= sizeof(struct inotify_event) + kevent->event.len;

	kfree(kevent->name);
	kmem_cache_free(event_cachep, kevent);
}

/*
 * inotify_dev_event_dequeue - destroy an event on the given device
 *
 * Caller must hold dev->sem.
 */
static void inotify_dev_event_dequeue(struct inotify_device *dev)
{
	if (!list_empty(&dev->events)) {
		struct inotify_kernel_event *kevent;
		kevent = inotify_dev_get_event(dev);
		remove_kevent(dev, kevent);
	}
}

/*
 * inotify_dev_get_wd - returns the next WD for use by the given dev
 *
 * Callers must hold dev->sem.  This function can sleep.
 */
static int inotify_dev_get_wd(struct inotify_device *dev,
			      struct inotify_watch *watch)
{
	int ret;
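
	/*
	 * idr_get_new_above() returns -EAGAIN when the memory preallocated
	 * by idr_pre_get() has been consumed, in which case we simply
	 * preallocate again and retry.
	 */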
	do {
		if (unlikely(!idr_pre_get(&dev->idr, GFP_KERNEL)))
			return -ENOSPC;
		ret = idr_get_new_above(&dev->idr, watch, dev->last_wd+1, &watch->wd);
	} while (ret == -EAGAIN);

	return ret;
}

/*
 * find_inode - resolve a user-given path to a specific inode and return a nd
 */
static int find_inode(const char __user *dirname, struct nameidata *nd)
{
	int error;

	error = __user_walk(dirname, LOOKUP_FOLLOW, nd);
	if (error)
		return error;
	/* you can only watch an inode if you have read permissions on it */
	error = permission(nd->dentry->d_inode, MAY_READ, NULL);
	if (error)
		path_release(nd);
	return error;
}

/*
 * create_watch - creates a watch on the given device.
 *
 * Callers must hold dev->sem.  Calls inotify_dev_get_wd() so may sleep.
 * Both 'dev' and 'inode' (by way of nameidata) need to be pinned.
 */
static struct inotify_watch *create_watch(struct inotify_device *dev,
					  u32 mask, struct inode *inode)
{
	struct inotify_watch *watch;
	int ret;

	if (atomic_read(&dev->user->inotify_watches) >=
			inotify_max_user_watches)
		return ERR_PTR(-ENOSPC);

	watch = kmem_cache_alloc(watch_cachep, GFP_KERNEL);
	if (unlikely(!watch))
		return ERR_PTR(-ENOMEM);

	ret = inotify_dev_get_wd(dev, watch);
	if (unlikely(ret)) {
		kmem_cache_free(watch_cachep, watch);
		return ERR_PTR(ret);
	}

	dev->last_wd = watch->wd;
	watch->mask = mask;
	atomic_set(&watch->count, 0);
	INIT_LIST_HEAD(&watch->d_list);
	INIT_LIST_HEAD(&watch->i_list);

	/* save a reference to device and bump the count to make it official */
	get_inotify_dev(dev);
	watch->dev = dev;

	/*
	 * Save a reference to the inode and bump the ref count to make it
	 * official.  We hold a reference to nameidata, which makes this safe.
	 */
	watch->inode = igrab(inode);

	/* bump our own count, corresponding to our entry in dev->watches */
	get_inotify_watch(watch);

	atomic_inc(&dev->user->inotify_watches);
	atomic_inc(&inotify_watches);

	return watch;
}

/*
 * inode_find_dev - find the watch associated with the given inode and dev
 *
 * Callers must hold inode->inotify_sem.
 */
static struct inotify_watch *inode_find_dev(struct inode *inode,
					    struct inotify_device *dev)
{
	struct inotify_watch *watch;

	list_for_each_entry(watch, &inode->inotify_watches, i_list) {
		if (watch->dev == dev)
			return watch;
	}

	return NULL;
}

/*
 * remove_watch_no_event - remove_watch() without the IN_IGNORED event.
 */
static void remove_watch_no_event(struct inotify_watch *watch,
				  struct inotify_device *dev)
{
	list_del(&watch->i_list);
	list_del(&watch->d_list);

	atomic_dec(&dev->user->inotify_watches);
	atomic_dec(&inotify_watches);
	idr_remove(&dev->idr, watch->wd);
	put_inotify_watch(watch);
}

/*
 * remove_watch - Remove a watch from both the device and the inode.  Sends
 * the IN_IGNORED event to the given device signifying that the inode is no
 * longer watched.
 *
 * Callers must hold both inode->inotify_sem and dev->sem.  We drop the
 * watch's reference here; the corresponding iput() of the inode happens in
 * put_inotify_watch() once the last reference is gone, so nothing is iput()
 * directly by this function.
 */
static void remove_watch(struct inotify_watch *watch,
			 struct inotify_device *dev)
{
	inotify_dev_queue_event(dev, watch, IN_IGNORED, 0, NULL);
	remove_watch_no_event(watch, dev);
}

/*
 * inotify_inode_watched - returns nonzero if there are watches on this inode
 * and zero otherwise.  We call this lockless; we do not care if we race.
 */
static inline int inotify_inode_watched(struct inode *inode)
{
	return !list_empty(&inode->inotify_watches);
}

/* Kernel API */

/**
 * inotify_inode_queue_event - queue an event to all watches on this inode
 * @inode: inode event is originating from
 * @mask: event mask describing this event
 * @cookie: cookie for synchronization, or zero
 * @name: filename, if any
 */
void inotify_inode_queue_event(struct inode *inode, u32 mask, u32 cookie,
			       const char *name)
{
	struct inotify_watch *watch, *next;

	if (!inotify_inode_watched(inode))
		return;

	down(&inode->inotify_sem);
	list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) {
		u32 watch_mask = watch->mask;
		if (watch_mask & mask) {
			struct inotify_device *dev = watch->dev;
			get_inotify_watch(watch);
			down(&dev->sem);
			inotify_dev_queue_event(dev, watch, mask, cookie, name);
			if (watch_mask & IN_ONESHOT)
				remove_watch_no_event(watch, dev);
			up(&dev->sem);
			put_inotify_watch(watch);
		}
	}
	up(&inode->inotify_sem);
}
EXPORT_SYMBOL_GPL(inotify_inode_queue_event);

/**
 * inotify_dentry_parent_queue_event - queue an event to a dentry's parent
 * @dentry: the dentry in question, we queue against this dentry's parent
 * @mask: event mask describing this event
 * @cookie: cookie for synchronization, or zero
 * @name: filename, if any
 */
void inotify_dentry_parent_queue_event(struct dentry *dentry, u32 mask,
				       u32 cookie, const char *name)
{
	struct dentry *parent;
	struct inode *inode;

	if (!atomic_read(&inotify_watches))
		return;
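
	/*
	 * Taking d_lock here keeps d_move() from changing d_parent under us
	 * (see the lock ordering comment at the top of this file), so the
	 * parent we dget() below is the one we actually queue against.
	 */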
	spin_lock(&dentry->d_lock);
	parent = dentry->d_parent;
	inode = parent->d_inode;

	if (inotify_inode_watched(inode)) {
		dget(parent);
		spin_unlock(&dentry->d_lock);
		inotify_inode_queue_event(inode, mask, cookie, name);
		dput(parent);
	} else
		spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL_GPL(inotify_dentry_parent_queue_event);

/**
 * inotify_get_cookie - return a unique cookie for use in synchronizing events.
 */
u32 inotify_get_cookie(void)
{
	return atomic_inc_return(&inotify_cookie);
}
EXPORT_SYMBOL_GPL(inotify_get_cookie);

/**
 * inotify_unmount_inodes - an sb is unmounting; handle any watched inodes.
 * @list: list of inodes being unmounted (sb->s_inodes)
 *
 * Called with inode_lock held, protecting the unmounting super block's list
 * of inodes, and with iprune_sem held, keeping shrink_icache_memory() at bay.
 * We temporarily drop inode_lock, however, and CAN block.
 */
void inotify_unmount_inodes(struct list_head *list)
{
	struct inode *inode, *next_i, *need_iput = NULL;

	list_for_each_entry_safe(inode, next_i, list, i_sb_list) {
		struct inotify_watch *watch, *next_w;
		struct inode *need_iput_tmp;
		struct list_head *watches;

		/*
		 * If i_count is zero, the inode cannot have any watches and
		 * doing an __iget/iput with MS_ACTIVE clear would actually
		 * evict all inodes with zero i_count from icache which is
		 * unnecessarily violent and may in fact be illegal to do.
		 */
		if (!atomic_read(&inode->i_count))
			continue;

		/*
		 * We cannot __iget() an inode in state I_CLEAR, I_FREEING, or
		 * I_WILL_FREE which is fine because by that point the inode
		 * cannot have any associated watches.
		 */
		if (inode->i_state & (I_CLEAR | I_FREEING | I_WILL_FREE))
			continue;

		need_iput_tmp = need_iput;
		need_iput = NULL;
		/* In case the remove_watch() drops a reference. */
		if (inode != need_iput_tmp)
			__iget(inode);
		else
			need_iput_tmp = NULL;
		/* In case the dropping of a reference would nuke next_i. */
		if ((&next_i->i_sb_list != list) &&
				atomic_read(&next_i->i_count) &&
				!(next_i->i_state & (I_CLEAR | I_FREEING |
					I_WILL_FREE))) {
			__iget(next_i);
			need_iput = next_i;
		}

		/*
		 * We can safely drop inode_lock here because we hold
		 * references on both inode and next_i.  Also no new inodes
		 * will be added since the umount has begun.  Finally,
		 * iprune_sem keeps shrink_icache_memory() away.
		 */
		spin_unlock(&inode_lock);

		if (need_iput_tmp)
			iput(need_iput_tmp);

		/* for each watch, send IN_UNMOUNT and then remove it */
		down(&inode->inotify_sem);
		watches = &inode->inotify_watches;
		list_for_each_entry_safe(watch, next_w, watches, i_list) {
			struct inotify_device *dev = watch->dev;
			down(&dev->sem);
			inotify_dev_queue_event(dev, watch, IN_UNMOUNT, 0, NULL);
			remove_watch(watch, dev);
			up(&dev->sem);
		}
		up(&inode->inotify_sem);
		iput(inode);

		spin_lock(&inode_lock);
	}
}
EXPORT_SYMBOL_GPL(inotify_unmount_inodes);

/**
 * inotify_inode_is_dead - an inode has been deleted, cleanup any watches
 * @inode: inode that is about to be removed
 */
void inotify_inode_is_dead(struct inode *inode)
{
	struct inotify_watch *watch, *next;

	down(&inode->inotify_sem);
	list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) {
		struct inotify_device *dev = watch->dev;
		down(&dev->sem);
		remove_watch(watch, dev);
		up(&dev->sem);
	}
	up(&inode->inotify_sem);
}
EXPORT_SYMBOL_GPL(inotify_inode_is_dead);

/* Device Interface */

static unsigned int inotify_poll(struct file *file, poll_table *wait)
{
	struct inotify_device *dev = file->private_data;
	int ret = 0;

	poll_wait(file, &dev->wq, wait);
	down(&dev->sem);
	if (!list_empty(&dev->events))
		ret = POLLIN | POLLRDNORM;
	up(&dev->sem);

	return ret;
}

static ssize_t inotify_read(struct file *file, char __user *buf,
			    size_t count, loff_t *pos)
{
	size_t event_size = sizeof (struct inotify_event);
	struct inotify_device *dev;
	char __user *start;
	int ret;
	DEFINE_WAIT(wait);

	start = buf;
	dev = file->private_data;
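
	/*
	 * Standard wait-queue pattern: prepare_to_wait() sets
	 * TASK_INTERRUPTIBLE before we re-check the event list, so a wakeup
	 * that arrives between the check and schedule() is not lost.
	 */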
	while (1) {
		int events;

		prepare_to_wait(&dev->wq, &wait, TASK_INTERRUPTIBLE);

		down(&dev->sem);
		events = !list_empty(&dev->events);
		up(&dev->sem);
		if (events) {
			ret = 0;
			break;
		}

		if (file->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		schedule();
	}

	finish_wait(&dev->wq, &wait);
	if (ret)
		return ret;

	down(&dev->sem);
	while (1) {
		struct inotify_kernel_event *kevent;

		ret = buf - start;
		if (list_empty(&dev->events))
			break;

		kevent = inotify_dev_get_event(dev);
		if (event_size + kevent->event.len > count)
			break;

		if (copy_to_user(buf, &kevent->event, event_size)) {
			ret = -EFAULT;
			break;
		}
		buf += event_size;
		count -= event_size;

		if (kevent->name) {
			if (copy_to_user(buf, kevent->name, kevent->event.len)) {
				ret = -EFAULT;
				break;
			}
			buf += kevent->event.len;
			count -= kevent->event.len;
		}

		remove_kevent(dev, kevent);
	}
	up(&dev->sem);

	return ret;
}

static int inotify_release(struct inode *ignored, struct file *file)
{
	struct inotify_device *dev = file->private_data;

	/*
	 * Destroy all of the watches on this device.  Unfortunately, not very
	 * pretty.  We cannot do a simple iteration over the list, because we
	 * do not know the inode until we iterate to the watch.  But we need to
	 * hold inode->inotify_sem before dev->sem.  The following works.
	 */
	while (1) {
		struct inotify_watch *watch;
		struct list_head *watches;
		struct inode *inode;

		down(&dev->sem);
		watches = &dev->watches;
		if (list_empty(watches)) {
			up(&dev->sem);
			break;
		}
		watch = list_entry(watches->next, struct inotify_watch, d_list);
		get_inotify_watch(watch);
		up(&dev->sem);

		inode = watch->inode;
		down(&inode->inotify_sem);
		down(&dev->sem);
		remove_watch_no_event(watch, dev);
		up(&dev->sem);
		up(&inode->inotify_sem);
		put_inotify_watch(watch);
	}

	/* destroy all of the events on this device */
	down(&dev->sem);
	while (!list_empty(&dev->events))
		inotify_dev_event_dequeue(dev);
	up(&dev->sem);

	/* free this device: the put matching the get in inotify_init() */
	put_inotify_dev(dev);

	return 0;
}

/*
 * inotify_ignore - remove a given wd from this inotify instance.
 *
 * Can sleep.
 */
static int inotify_ignore(struct inotify_device *dev, s32 wd)
{
	struct inotify_watch *watch;
	struct inode *inode;

	down(&dev->sem);
	watch = idr_find(&dev->idr, wd);
	if (unlikely(!watch)) {
		up(&dev->sem);
		return -EINVAL;
	}
	get_inotify_watch(watch);
	inode = watch->inode;
	up(&dev->sem);
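
	/*
	 * Lock ordering requires inode->inotify_sem to be taken before
	 * dev->sem, so we dropped dev->sem above and now re-acquire both in
	 * the documented order.
	 */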
	down(&inode->inotify_sem);
	down(&dev->sem);

	/* make sure that we did not race */
	watch = idr_find(&dev->idr, wd);
	if (likely(watch))
		remove_watch(watch, dev);

	up(&dev->sem);
	up(&inode->inotify_sem);
	put_inotify_watch(watch);

	return 0;
}

static long inotify_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	struct inotify_device *dev;
	void __user *p;
	int ret = -ENOTTY;

	dev = file->private_data;
	p = (void __user *) arg;

	switch (cmd) {
	case FIONREAD:
		ret = put_user(dev->queue_size, (int __user *) p);
		break;
	}

	return ret;
}

static struct file_operations inotify_fops = {
	.poll		= inotify_poll,
	.read		= inotify_read,
	.release	= inotify_release,
	.unlocked_ioctl	= inotify_ioctl,
	.compat_ioctl	= inotify_ioctl,
};

asmlinkage long sys_inotify_init(void)
{
	struct inotify_device *dev;
	struct user_struct *user;
	struct file *filp;
	int fd, ret;

	fd = get_unused_fd();
	if (fd < 0)
		return fd;

	filp = get_empty_filp();
	if (!filp) {
		ret = -ENFILE;
		goto out_put_fd;
	}

	user = get_uid(current->user);
	if (unlikely(atomic_read(&user->inotify_devs) >=
			inotify_max_user_instances)) {
		ret = -EMFILE;
		goto out_free_uid;
	}

	dev = kmalloc(sizeof(struct inotify_device), GFP_KERNEL);
	if (unlikely(!dev)) {
		ret = -ENOMEM;
		goto out_free_uid;
	}

	filp->f_op = &inotify_fops;
	filp->f_vfsmnt = mntget(inotify_mnt);
	filp->f_dentry = dget(inotify_mnt->mnt_root);
	filp->f_mapping = filp->f_dentry->d_inode->i_mapping;
	filp->f_mode = FMODE_READ;
	filp->f_flags = O_RDONLY;
	filp->private_data = dev;

	idr_init(&dev->idr);
	INIT_LIST_HEAD(&dev->events);
	INIT_LIST_HEAD(&dev->watches);
	init_waitqueue_head(&dev->wq);
	sema_init(&dev->sem, 1);
	dev->event_count = 0;
	dev->queue_size = 0;
	dev->max_events = inotify_max_queued_events;
	dev->user = user;
	dev->last_wd = 0;
	atomic_set(&dev->count, 0);
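
	/*
	 * Take the initial reference on behalf of the file; it is dropped by
	 * the matching put_inotify_dev() in inotify_release().
	 */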
	get_inotify_dev(dev);
	atomic_inc(&user->inotify_devs);
	fd_install(fd, filp);

	return fd;
out_free_uid:
	free_uid(user);
	put_filp(filp);
out_put_fd:
	put_unused_fd(fd);
	return ret;
}

asmlinkage long sys_inotify_add_watch(int fd, const char __user *path, u32 mask)
{
	struct inotify_watch *watch, *old;
	struct inode *inode;
	struct inotify_device *dev;
	struct nameidata nd;
	struct file *filp;
	int ret, fput_needed;
	int mask_add = 0;

	filp = fget_light(fd, &fput_needed);
	if (unlikely(!filp))
		return -EBADF;

	/* verify that this is indeed an inotify instance */
	if (unlikely(filp->f_op != &inotify_fops)) {
		ret = -EINVAL;
		goto fput_and_out;
	}

	ret = find_inode(path, &nd);
	if (unlikely(ret))
		goto fput_and_out;

	/* inode held in place by reference to nd; dev by fget on fd */
	inode = nd.dentry->d_inode;
	dev = filp->private_data;
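
	/*
	 * Per the lock ordering documented at the top of this file, take
	 * inode->inotify_sem before dev->sem.
	 */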
	down(&inode->inotify_sem);
	down(&dev->sem);

	if (mask & IN_MASK_ADD)
		mask_add = 1;

	/* don't let user-space set invalid bits: we don't want flags set */
	mask &= IN_ALL_EVENTS;
	if (unlikely(!mask)) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * Handle the case of re-adding a watch on an (inode,dev) pair that we
	 * are already watching.  We just update the mask and return its wd.
	 */
	old = inode_find_dev(inode, dev);
	if (unlikely(old)) {
		if (mask_add)
			old->mask |= mask;
		else
			old->mask = mask;
		ret = old->wd;
		goto out;
	}

	watch = create_watch(dev, mask, inode);
	if (unlikely(IS_ERR(watch))) {
		ret = PTR_ERR(watch);
		goto out;
	}

	/* Add the watch to the device's and the inode's list */
	list_add(&watch->d_list, &dev->watches);
	list_add(&watch->i_list, &inode->inotify_watches);
	ret = watch->wd;
out:
	up(&dev->sem);
	up(&inode->inotify_sem);
	path_release(&nd);
fput_and_out:
	fput_light(filp, fput_needed);
	return ret;
}

asmlinkage long sys_inotify_rm_watch(int fd, u32 wd)
{
	struct file *filp;
	struct inotify_device *dev;
	int ret, fput_needed;

	filp = fget_light(fd, &fput_needed);
	if (unlikely(!filp))
		return -EBADF;

	/* verify that this is indeed an inotify instance */
	if (unlikely(filp->f_op != &inotify_fops)) {
		ret = -EINVAL;
		goto out;
	}

	dev = filp->private_data;
	ret = inotify_ignore(dev, wd);

out:
	fput_light(filp, fput_needed);
	return ret;
}

static struct super_block *
inotify_get_sb(struct file_system_type *fs_type, int flags,
	       const char *dev_name, void *data)
{
	return get_sb_pseudo(fs_type, "inotify", NULL, 0xBAD1DEA);
}

static struct file_system_type inotify_fs_type = {
	.name		= "inotifyfs",
	.get_sb		= inotify_get_sb,
	.kill_sb	= kill_anon_super,
};

/*
 * inotify_setup - Our initialization function.  Note that we cannot return
 * error because we have compiled-in VFS hooks.  So an (unlikely) failure here
 * must result in panic().
 */
static int __init inotify_setup(void)
{
	int ret;

	ret = register_filesystem(&inotify_fs_type);
	if (unlikely(ret))
		panic("inotify: register_filesystem returned %d!\n", ret);

	inotify_mnt = kern_mount(&inotify_fs_type);
	if (IS_ERR(inotify_mnt))
		panic("inotify: kern_mount ret %ld!\n", PTR_ERR(inotify_mnt));

	inotify_max_queued_events = 16384;
	inotify_max_user_instances = 128;
	inotify_max_user_watches = 8192;

	atomic_set(&inotify_cookie, 0);
	atomic_set(&inotify_watches, 0);

	watch_cachep = kmem_cache_create("inotify_watch_cache",
					 sizeof(struct inotify_watch),
					 0, SLAB_PANIC, NULL, NULL);
	event_cachep = kmem_cache_create("inotify_event_cache",
					 sizeof(struct inotify_kernel_event),
					 0, SLAB_PANIC, NULL, NULL);

	return 0;
}

module_init(inotify_setup);