Revert "fsnotify: store struct file not struct path"
fs/notify/fanotify/fanotify_user.c
#include <linux/fanotify.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/fsnotify_backend.h>
#include <linux/init.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/poll.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>

#include <asm/ioctls.h>
extern const struct fsnotify_ops fanotify_fsnotify_ops;

static struct kmem_cache *fanotify_mark_cache __read_mostly;
static struct kmem_cache *fanotify_response_event_cache __read_mostly;

struct fanotify_response_event {
	struct list_head list;
	__s32 fd;
	struct fsnotify_event *event;
};
/*
 * Get an fsnotify notification event if one exists and is small
 * enough to fit in "count". Return an error pointer if the count
 * is not large enough.
 *
 * Called with the group->notification_mutex held.
 */
static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
					    size_t count)
{
	BUG_ON(!mutex_is_locked(&group->notification_mutex));

	pr_debug("%s: group=%p count=%zd\n", __func__, group, count);

	if (fsnotify_notify_queue_is_empty(group))
		return NULL;

	if (FAN_EVENT_METADATA_LEN > count)
		return ERR_PTR(-EINVAL);

	/* held the notification_mutex the whole time, so this is the
	 * same event we peeked above */
	return fsnotify_remove_notify_event(group);
}
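/*
 * Open the object the event occurred on with the group's event_f_flags and
 * install the new file in an unused fd for the listening process.  Returns
 * the fd, or a negative errno (overflow events carry no path and yield
 * -EOVERFLOW).
 */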
static int create_fd(struct fsnotify_group *group, struct fsnotify_event *event)
{
	int client_fd;
	struct dentry *dentry;
	struct vfsmount *mnt;
	struct file *new_file;

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	client_fd = get_unused_fd();
	if (client_fd < 0)
		return client_fd;

	if (event->data_type != FSNOTIFY_EVENT_PATH) {
		WARN_ON(1);
		put_unused_fd(client_fd);
		return -EINVAL;
	}

	/*
	 * we need a new file handle for the userspace program so it can read
	 * even if it was originally opened O_WRONLY.
	 */
	dentry = dget(event->path.dentry);
	mnt = mntget(event->path.mnt);
	/* it's possible this event was an overflow event.  in that case dentry
	 * and mnt are NULL; that's fine, just don't call dentry_open() */
	if (dentry && mnt)
		new_file = dentry_open(dentry, mnt,
				       group->fanotify_data.f_flags | FMODE_NONOTIFY,
				       current_cred());
	else
		new_file = ERR_PTR(-EOVERFLOW);
	if (IS_ERR(new_file)) {
		/*
		 * we still send an event even if we can't open the file.  this
		 * can happen when say tasks are gone and we try to open their
		 * /proc files or we try to open a WRONLY file like in sysfs;
		 * we just send the errno to userspace since there isn't much
		 * else we can do.
		 */
		put_unused_fd(client_fd);
		client_fd = PTR_ERR(new_file);
	} else {
		fd_install(client_fd, new_file);
	}

	return client_fd;
}
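/*
 * Fill in the fixed-size metadata record handed to userspace for one event,
 * including the freshly created event fd.  Returns that fd, which is a
 * negative errno if the file could not be opened.
 */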
static ssize_t fill_event_metadata(struct fsnotify_group *group,
				   struct fanotify_event_metadata *metadata,
				   struct fsnotify_event *event)
{
	pr_debug("%s: group=%p metadata=%p event=%p\n", __func__,
		 group, metadata, event);

	metadata->event_len = FAN_EVENT_METADATA_LEN;
	metadata->vers = FANOTIFY_METADATA_VERSION;
	metadata->mask = event->mask & FAN_ALL_OUTGOING_EVENTS;
	metadata->pid = pid_vnr(event->tgid);
	metadata->fd = create_fd(group, event);

	return metadata->fd;
}
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
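/*
 * Find the pending response entry for "fd" on the group's access_list,
 * unlink it and return it; returns NULL if no matching entry exists.
 */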
static struct fanotify_response_event *dequeue_re(struct fsnotify_group *group,
						  __s32 fd)
{
	struct fanotify_response_event *re, *return_re = NULL;

	mutex_lock(&group->fanotify_data.access_mutex);
	list_for_each_entry(re, &group->fanotify_data.access_list, list) {
		if (re->fd != fd)
			continue;

		list_del_init(&re->list);
		return_re = re;
		break;
	}
	mutex_unlock(&group->fanotify_data.access_mutex);

	pr_debug("%s: found return_re=%p\n", __func__, return_re);

	return return_re;
}
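/*
 * Handle a FAN_ALLOW/FAN_DENY answer written by userspace: record the
 * verdict on the matching permission event and wake anyone waiting on the
 * group's access_waitq.
 */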
static int process_access_response(struct fsnotify_group *group,
				   struct fanotify_response *response_struct)
{
	struct fanotify_response_event *re;
	__s32 fd = response_struct->fd;
	__u32 response = response_struct->response;

	pr_debug("%s: group=%p fd=%d response=%d\n", __func__, group,
		 fd, response);
	/*
	 * make sure the response is valid, if invalid we do nothing and either
	 * userspace can send a valid response or we will clean it up after the
	 * timeout
	 */
	switch (response) {
	case FAN_ALLOW:
	case FAN_DENY:
		break;
	default:
		return -EINVAL;
	}

	if (fd < 0)
		return -EINVAL;

	re = dequeue_re(group, fd);
	if (!re)
		return -ENOENT;

	re->event->response = response;

	wake_up(&group->fanotify_data.access_waitq);

	kmem_cache_free(fanotify_response_event_cache, re);

	return 0;
}
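/*
 * For permission events, queue an fd/event pair on the group's access_list
 * before the metadata is copied out, so a later write() from userspace can
 * be matched back to this event.
 */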
static int prepare_for_access_response(struct fsnotify_group *group,
				       struct fsnotify_event *event,
				       __s32 fd)
{
	struct fanotify_response_event *re;

	if (!(event->mask & FAN_ALL_PERM_EVENTS))
		return 0;

	re = kmem_cache_alloc(fanotify_response_event_cache, GFP_KERNEL);
	if (!re)
		return -ENOMEM;

	re->event = event;
	re->fd = fd;

	mutex_lock(&group->fanotify_data.access_mutex);
	list_add_tail(&re->list, &group->fanotify_data.access_list);
	mutex_unlock(&group->fanotify_data.access_mutex);

	return 0;
}

static void remove_access_response(struct fsnotify_group *group,
				   struct fsnotify_event *event,
				   __s32 fd)
{
	struct fanotify_response_event *re;

	if (!(event->mask & FAN_ALL_PERM_EVENTS))
		return;

	re = dequeue_re(group, fd);
	if (!re)
		return;

	BUG_ON(re->event != event);

	kmem_cache_free(fanotify_response_event_cache, re);

	return;
}
#else
static int prepare_for_access_response(struct fsnotify_group *group,
				       struct fsnotify_event *event,
				       __s32 fd)
{
	return 0;
}

static void remove_access_response(struct fsnotify_group *group,
				   struct fsnotify_event *event,
				   __s32 fd)
{
	return;
}
#endif
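/*
 * Turn one fsnotify event into a fanotify_event_metadata record, open the
 * event fd and copy the record to the user buffer.  On failure the fd is
 * closed again and any queued access response entry is removed.
 */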
static ssize_t copy_event_to_user(struct fsnotify_group *group,
				  struct fsnotify_event *event,
				  char __user *buf)
{
	struct fanotify_event_metadata fanotify_event_metadata;
	int fd, ret;

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	fd = fill_event_metadata(group, &fanotify_event_metadata, event);
	if (fd < 0)
		return fd;

	ret = prepare_for_access_response(group, event, fd);
	if (ret)
		goto out_close_fd;

	ret = -EFAULT;
	if (copy_to_user(buf, &fanotify_event_metadata, FAN_EVENT_METADATA_LEN))
		goto out_kill_access_response;

	return FAN_EVENT_METADATA_LEN;

out_kill_access_response:
	remove_access_response(group, event, fd);
out_close_fd:
	sys_close(fd);
	return ret;
}
/* fanotify userspace file descriptor functions */
static unsigned int fanotify_poll(struct file *file, poll_table *wait)
{
	struct fsnotify_group *group = file->private_data;
	int ret = 0;

	poll_wait(file, &group->notification_waitq, wait);
	mutex_lock(&group->notification_mutex);
	if (!fsnotify_notify_queue_is_empty(group))
		ret = POLLIN | POLLRDNORM;
	mutex_unlock(&group->notification_mutex);

	return ret;
}
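/*
 * Pop events off the notification queue and copy as many metadata records
 * as fit into the user buffer.  Blocks until at least one event has been
 * copied unless the group fd was opened O_NONBLOCK.
 */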
static ssize_t fanotify_read(struct file *file, char __user *buf,
			     size_t count, loff_t *pos)
{
	struct fsnotify_group *group;
	struct fsnotify_event *kevent;
	char __user *start;
	int ret;
	DEFINE_WAIT(wait);

	start = buf;
	group = file->private_data;

	pr_debug("%s: group=%p\n", __func__, group);

	while (1) {
		prepare_to_wait(&group->notification_waitq, &wait, TASK_INTERRUPTIBLE);

		mutex_lock(&group->notification_mutex);
		kevent = get_one_event(group, count);
		mutex_unlock(&group->notification_mutex);

		if (kevent) {
			ret = PTR_ERR(kevent);
			if (IS_ERR(kevent))
				break;
			ret = copy_event_to_user(group, kevent, buf);
			fsnotify_put_event(kevent);
			if (ret < 0)
				break;
			buf += ret;
			count -= ret;
			continue;
		}

		ret = -EAGAIN;
		if (file->f_flags & O_NONBLOCK)
			break;
		ret = -EINTR;
		if (signal_pending(current))
			break;

		if (start != buf)
			break;

		schedule();
	}

	finish_wait(&group->notification_waitq, &wait);
	if (start != buf && ret != -EFAULT)
		ret = buf - start;
	return ret;
}
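/*
 * Userspace answers permission events by writing struct fanotify_response
 * records to the group fd; without CONFIG_FANOTIFY_ACCESS_PERMISSIONS any
 * write is rejected.
 */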
static ssize_t fanotify_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
{
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	struct fanotify_response response = { .fd = -1, .response = -1 };
	struct fsnotify_group *group;
	int ret;

	group = file->private_data;

	if (count > sizeof(response))
		count = sizeof(response);

	pr_debug("%s: group=%p count=%zu\n", __func__, group, count);

	if (copy_from_user(&response, buf, count))
		return -EFAULT;

	ret = process_access_response(group, &response);
	if (ret < 0)
		count = ret;

	return count;
#else
	return -EINVAL;
#endif
}
static int fanotify_release(struct inode *ignored, struct file *file)
{
	struct fsnotify_group *group = file->private_data;

	pr_debug("%s: file=%p group=%p\n", __func__, file, group);

	/* matches the fanotify_init->fsnotify_alloc_group */
	fsnotify_put_group(group);

	return 0;
}
static long fanotify_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct fsnotify_group *group;
	struct fsnotify_event_holder *holder;
	void __user *p;
	int ret = -ENOTTY;
	size_t send_len = 0;

	group = file->private_data;

	p = (void __user *) arg;

	switch (cmd) {
	case FIONREAD:
		mutex_lock(&group->notification_mutex);
		list_for_each_entry(holder, &group->notification_list, event_list)
			send_len += FAN_EVENT_METADATA_LEN;
		mutex_unlock(&group->notification_mutex);
		ret = put_user(send_len, (int __user *) p);
		break;
	}

	return ret;
}
static const struct file_operations fanotify_fops = {
	.poll		= fanotify_poll,
	.read		= fanotify_read,
	.write		= fanotify_write,
	.fasync		= NULL,
	.release	= fanotify_release,
	.unlocked_ioctl	= fanotify_ioctl,
	.compat_ioctl	= fanotify_ioctl,
};
static void fanotify_free_mark(struct fsnotify_mark *fsn_mark)
{
	kmem_cache_free(fanotify_mark_cache, fsn_mark);
}
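/*
 * Resolve dfd/filename into a struct path, honouring FAN_MARK_DONT_FOLLOW
 * and FAN_MARK_ONLYDIR, and verify the caller has read permission on the
 * resulting inode.  On success a path reference is held for the caller.
 */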
static int fanotify_find_path(int dfd, const char __user *filename,
			      struct path *path, unsigned int flags)
{
	int ret;

	pr_debug("%s: dfd=%d filename=%p flags=%x\n", __func__,
		 dfd, filename, flags);

	if (filename == NULL) {
		struct file *file;
		int fput_needed;

		ret = -EBADF;
		file = fget_light(dfd, &fput_needed);
		if (!file)
			goto out;

		ret = -ENOTDIR;
		if ((flags & FAN_MARK_ONLYDIR) &&
		    !(S_ISDIR(file->f_path.dentry->d_inode->i_mode))) {
			fput_light(file, fput_needed);
			goto out;
		}

		*path = file->f_path;
		path_get(path);
		fput_light(file, fput_needed);
	} else {
		unsigned int lookup_flags = 0;

		if (!(flags & FAN_MARK_DONT_FOLLOW))
			lookup_flags |= LOOKUP_FOLLOW;
		if (flags & FAN_MARK_ONLYDIR)
			lookup_flags |= LOOKUP_DIRECTORY;

		ret = user_path_at(dfd, filename, lookup_flags, path);
		if (ret)
			goto out;
	}

	/* you can only watch an inode if you have read permissions on it */
	ret = inode_permission(path->dentry->d_inode, MAY_READ);
	if (ret)
		path_put(path);
out:
	return ret;
}
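/*
 * Clear "mask" bits from the mark's event mask (or its ignored mask when
 * FAN_MARK_IGNORED_MASK is set), destroying the mark when no bits of that
 * mask remain.  Returns the bits that were actually removed.
 */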
static __u32 fanotify_mark_remove_from_mask(struct fsnotify_mark *fsn_mark,
					    __u32 mask,
					    unsigned int flags)
{
	__u32 oldmask;

	spin_lock(&fsn_mark->lock);
	if (!(flags & FAN_MARK_IGNORED_MASK)) {
		oldmask = fsn_mark->mask;
		fsnotify_set_mark_mask_locked(fsn_mark, (oldmask & ~mask));
	} else {
		oldmask = fsn_mark->ignored_mask;
		fsnotify_set_mark_ignored_mask_locked(fsn_mark, (oldmask & ~mask));
	}
	spin_unlock(&fsn_mark->lock);

	if (!(oldmask & ~mask))
		fsnotify_destroy_mark(fsn_mark);

	return mask & oldmask;
}

static int fanotify_remove_vfsmount_mark(struct fsnotify_group *group,
					 struct vfsmount *mnt, __u32 mask,
					 unsigned int flags)
{
	struct fsnotify_mark *fsn_mark = NULL;
	__u32 removed;

	fsn_mark = fsnotify_find_vfsmount_mark(group, mnt);
	if (!fsn_mark)
		return -ENOENT;

	removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags);
	fsnotify_put_mark(fsn_mark);
	if (removed & mnt->mnt_fsnotify_mask)
		fsnotify_recalc_vfsmount_mask(mnt);

	return 0;
}
static int fanotify_remove_inode_mark(struct fsnotify_group *group,
				      struct inode *inode, __u32 mask,
				      unsigned int flags)
{
	struct fsnotify_mark *fsn_mark = NULL;
	__u32 removed;

	fsn_mark = fsnotify_find_inode_mark(group, inode);
	if (!fsn_mark)
		return -ENOENT;

	removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags);
	/* matches the fsnotify_find_inode_mark() */
	fsnotify_put_mark(fsn_mark);
	if (removed & inode->i_fsnotify_mask)
		fsnotify_recalc_inode_mask(inode);

	return 0;
}
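/*
 * Set "mask" bits in the mark's event mask (or its ignored mask when
 * FAN_MARK_IGNORED_MASK is set).  Returns the bits that were newly added.
 */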
static __u32 fanotify_mark_add_to_mask(struct fsnotify_mark *fsn_mark,
				       __u32 mask,
				       unsigned int flags)
{
	__u32 oldmask;

	spin_lock(&fsn_mark->lock);
	if (!(flags & FAN_MARK_IGNORED_MASK)) {
		oldmask = fsn_mark->mask;
		fsnotify_set_mark_mask_locked(fsn_mark, (oldmask | mask));
	} else {
		oldmask = fsn_mark->ignored_mask;
		fsnotify_set_mark_ignored_mask_locked(fsn_mark, (oldmask | mask));
		if (flags & FAN_MARK_IGNORED_SURV_MODIFY)
			fsn_mark->flags |= FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY;
	}
	spin_unlock(&fsn_mark->lock);

	return mask & ~oldmask;
}

static int fanotify_add_vfsmount_mark(struct fsnotify_group *group,
				      struct vfsmount *mnt, __u32 mask,
				      unsigned int flags)
{
	struct fsnotify_mark *fsn_mark;
	__u32 added;

	fsn_mark = fsnotify_find_vfsmount_mark(group, mnt);
	if (!fsn_mark) {
		int ret;

		fsn_mark = kmem_cache_alloc(fanotify_mark_cache, GFP_KERNEL);
		if (!fsn_mark)
			return -ENOMEM;

		fsnotify_init_mark(fsn_mark, fanotify_free_mark);
		ret = fsnotify_add_mark(fsn_mark, group, NULL, mnt, 0);
		if (ret) {
			fanotify_free_mark(fsn_mark);
			return ret;
		}
	}
	added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);
	fsnotify_put_mark(fsn_mark);
	if (added & ~mnt->mnt_fsnotify_mask)
		fsnotify_recalc_vfsmount_mask(mnt);

	return 0;
}
static int fanotify_add_inode_mark(struct fsnotify_group *group,
				   struct inode *inode, __u32 mask,
				   unsigned int flags)
{
	struct fsnotify_mark *fsn_mark;
	__u32 added;

	pr_debug("%s: group=%p inode=%p\n", __func__, group, inode);

	fsn_mark = fsnotify_find_inode_mark(group, inode);
	if (!fsn_mark) {
		int ret;

		fsn_mark = kmem_cache_alloc(fanotify_mark_cache, GFP_KERNEL);
		if (!fsn_mark)
			return -ENOMEM;

		fsnotify_init_mark(fsn_mark, fanotify_free_mark);
		ret = fsnotify_add_mark(fsn_mark, group, inode, NULL, 0);
		if (ret) {
			fanotify_free_mark(fsn_mark);
			return ret;
		}
	}
	added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);
	fsnotify_put_mark(fsn_mark);
	if (added & ~inode->i_fsnotify_mask)
		fsnotify_recalc_inode_mask(inode);
	return 0;
}
/* fanotify syscalls */
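/*
 * fanotify_init - create a new fanotify group and return a file descriptor
 * for it.  "flags" selects FAN_CLOEXEC/FAN_NONBLOCK behaviour of the group
 * fd, "event_f_flags" are the open flags used for the per-event fds.
 * Requires CAP_SYS_ADMIN.
 */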
SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
{
	struct fsnotify_group *group;
	int f_flags, fd;

	pr_debug("%s: flags=%d event_f_flags=%d\n",
		 __func__, flags, event_f_flags);

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (flags & ~FAN_ALL_INIT_FLAGS)
		return -EINVAL;

	f_flags = O_RDWR | FMODE_NONOTIFY;
	if (flags & FAN_CLOEXEC)
		f_flags |= O_CLOEXEC;
	if (flags & FAN_NONBLOCK)
		f_flags |= O_NONBLOCK;

	/* fsnotify_alloc_group takes a ref.  Dropped in fanotify_release */
	group = fsnotify_alloc_group(&fanotify_fsnotify_ops);
	if (IS_ERR(group))
		return PTR_ERR(group);

	group->fanotify_data.f_flags = event_f_flags;
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	mutex_init(&group->fanotify_data.access_mutex);
	init_waitqueue_head(&group->fanotify_data.access_waitq);
	INIT_LIST_HEAD(&group->fanotify_data.access_list);
#endif

	fd = anon_inode_getfd("[fanotify]", &fanotify_fops, group, f_flags);
	if (fd < 0)
		goto out_put_group;

	return fd;

out_put_group:
	fsnotify_put_group(group);
	return fd;
}
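/*
 * fanotify_mark - add, remove or flush marks on an inode or a whole mount
 * for the group behind fanotify_fd.  "flags" selects the operation and the
 * lookup behaviour, "mask" the events of interest, and dfd/pathname name
 * the object to mark.
 */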
SYSCALL_DEFINE(fanotify_mark)(int fanotify_fd, unsigned int flags,
			      __u64 mask, int dfd,
			      const char __user *pathname)
{
	struct inode *inode = NULL;
	struct vfsmount *mnt = NULL;
	struct fsnotify_group *group;
	struct file *filp;
	struct path path;
	int ret, fput_needed;

	pr_debug("%s: fanotify_fd=%d flags=%x dfd=%d pathname=%p mask=%llx\n",
		 __func__, fanotify_fd, flags, dfd, pathname, mask);

	/* we only use the lower 32 bits as of right now. */
	if (mask & ((__u64)0xffffffff << 32))
		return -EINVAL;

	if (flags & ~FAN_ALL_MARK_FLAGS)
		return -EINVAL;
	switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) {
	case FAN_MARK_ADD:
	case FAN_MARK_REMOVE:
	case FAN_MARK_FLUSH:
		break;
	default:
		return -EINVAL;
	}
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	if (mask & ~(FAN_ALL_EVENTS | FAN_ALL_PERM_EVENTS | FAN_EVENT_ON_CHILD))
#else
	if (mask & ~(FAN_ALL_EVENTS | FAN_EVENT_ON_CHILD))
#endif
		return -EINVAL;

	filp = fget_light(fanotify_fd, &fput_needed);
	if (unlikely(!filp))
		return -EBADF;

	/* verify that this is indeed an fanotify instance */
	ret = -EINVAL;
	if (unlikely(filp->f_op != &fanotify_fops))
		goto fput_and_out;

	ret = fanotify_find_path(dfd, pathname, &path, flags);
	if (ret)
		goto fput_and_out;

	/* inode held in place by reference to path; group by fget on fd */
	if (!(flags & FAN_MARK_MOUNT))
		inode = path.dentry->d_inode;
	else
		mnt = path.mnt;
	group = filp->private_data;
	/* create/update an inode or vfsmount mark */
	switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) {
	case FAN_MARK_ADD:
		if (flags & FAN_MARK_MOUNT)
			ret = fanotify_add_vfsmount_mark(group, mnt, mask, flags);
		else
			ret = fanotify_add_inode_mark(group, inode, mask, flags);
		break;
	case FAN_MARK_REMOVE:
		if (flags & FAN_MARK_MOUNT)
			ret = fanotify_remove_vfsmount_mark(group, mnt, mask, flags);
		else
			ret = fanotify_remove_inode_mark(group, inode, mask, flags);
		break;
	case FAN_MARK_FLUSH:
		if (flags & FAN_MARK_MOUNT)
			fsnotify_clear_vfsmount_marks_by_group(group);
		else
			fsnotify_clear_inode_marks_by_group(group);
		break;
	default:
		ret = -EINVAL;
	}

	path_put(&path);
fput_and_out:
	fput_light(filp, fput_needed);
	return ret;
}
#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
asmlinkage long SyS_fanotify_mark(long fanotify_fd, long flags, __u64 mask,
				  long dfd, long pathname)
{
	return SYSC_fanotify_mark((int) fanotify_fd, (unsigned int) flags,
				  mask, (int) dfd,
				  (const char __user *) pathname);
}
SYSCALL_ALIAS(sys_fanotify_mark, SyS_fanotify_mark);
#endif
/*
 * fanotify_user_setup - Our initialization function.  Note that we cannot return
 * error because we have compiled-in VFS hooks.  So an (unlikely) failure here
 * must result in panic().
 */
static int __init fanotify_user_setup(void)
{
	fanotify_mark_cache = KMEM_CACHE(fsnotify_mark, SLAB_PANIC);
	fanotify_response_event_cache = KMEM_CACHE(fanotify_response_event,
						   SLAB_PANIC);

	return 0;
}
device_initcall(fanotify_user_setup);