fanotify: sanitize failure exits in copy_event_to_user()
linux-2.6/libata-dev.git: fs/notify/fanotify/fanotify_user.c
#include <linux/fanotify.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/fsnotify_backend.h>
#include <linux/init.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/poll.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>

#include <asm/ioctls.h>

#include "../../mount.h"

#define FANOTIFY_DEFAULT_MAX_EVENTS	16384
#define FANOTIFY_DEFAULT_MAX_MARKS	8192
#define FANOTIFY_DEFAULT_MAX_LISTENERS	128

extern const struct fsnotify_ops fanotify_fsnotify_ops;

static struct kmem_cache *fanotify_mark_cache __read_mostly;
static struct kmem_cache *fanotify_response_event_cache __read_mostly;

struct fanotify_response_event {
	struct list_head list;
	__s32 fd;
	struct fsnotify_event *event;
};
/*
 * Get an fsnotify notification event if one exists and is small
 * enough to fit in "count". Return an error pointer if the count
 * is not large enough.
 *
 * Called with the group->notification_mutex held.
 */
static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
					    size_t count)
{
	BUG_ON(!mutex_is_locked(&group->notification_mutex));

	pr_debug("%s: group=%p count=%zd\n", __func__, group, count);

	if (fsnotify_notify_queue_is_empty(group))
		return NULL;

	if (FAN_EVENT_METADATA_LEN > count)
		return ERR_PTR(-EINVAL);

	/* held the notification_mutex the whole time, so this is the
	 * same event we peeked above */
	return fsnotify_remove_notify_event(group);
}
static int create_fd(struct fsnotify_group *group,
		     struct fsnotify_event *event,
		     struct file **file)
{
	int client_fd;
	struct file *new_file;

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	client_fd = get_unused_fd();
	if (client_fd < 0)
		return client_fd;

	if (event->data_type != FSNOTIFY_EVENT_PATH) {
		WARN_ON(1);
		put_unused_fd(client_fd);
		return -EINVAL;
	}

	/*
	 * we need a new file handle for the userspace program so it can read even if it was
	 * originally opened O_WRONLY.
	 */
	/* it's possible this event was an overflow event.  in that case dentry and mnt
	 * are NULL;  That's fine, just don't call dentry open */
	if (event->path.dentry && event->path.mnt)
		new_file = dentry_open(&event->path,
				       group->fanotify_data.f_flags | FMODE_NONOTIFY,
				       current_cred());
	else
		new_file = ERR_PTR(-EOVERFLOW);
	if (IS_ERR(new_file)) {
		/*
		 * we still send an event even if we can't open the file.  this
		 * can happen when say tasks are gone and we try to open their
		 * /proc files or we try to open a WRONLY file like in sysfs
		 * we just send the errno to userspace since there isn't much
		 * else we can do.
		 */
		put_unused_fd(client_fd);
		client_fd = PTR_ERR(new_file);
	} else {
		*file = new_file;
	}

	return client_fd;
}
static int fill_event_metadata(struct fsnotify_group *group,
			       struct fanotify_event_metadata *metadata,
			       struct fsnotify_event *event,
			       struct file **file)
{
	int ret = 0;

	pr_debug("%s: group=%p metadata=%p event=%p\n", __func__,
		 group, metadata, event);

	*file = NULL;
	metadata->event_len = FAN_EVENT_METADATA_LEN;
	metadata->metadata_len = FAN_EVENT_METADATA_LEN;
	metadata->vers = FANOTIFY_METADATA_VERSION;
	metadata->mask = event->mask & FAN_ALL_OUTGOING_EVENTS;
	metadata->pid = pid_vnr(event->tgid);
	if (unlikely(event->mask & FAN_Q_OVERFLOW))
		metadata->fd = FAN_NOFD;
	else {
		metadata->fd = create_fd(group, event, file);
		if (metadata->fd < 0)
			ret = metadata->fd;
	}

	return ret;
}
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
static struct fanotify_response_event *dequeue_re(struct fsnotify_group *group,
						  __s32 fd)
{
	struct fanotify_response_event *re, *return_re = NULL;

	mutex_lock(&group->fanotify_data.access_mutex);
	list_for_each_entry(re, &group->fanotify_data.access_list, list) {
		if (re->fd != fd)
			continue;

		list_del_init(&re->list);
		return_re = re;
		break;
	}
	mutex_unlock(&group->fanotify_data.access_mutex);

	pr_debug("%s: found return_re=%p\n", __func__, return_re);

	return return_re;
}

static int process_access_response(struct fsnotify_group *group,
				   struct fanotify_response *response_struct)
{
	struct fanotify_response_event *re;
	__s32 fd = response_struct->fd;
	__u32 response = response_struct->response;

	pr_debug("%s: group=%p fd=%d response=%d\n", __func__, group,
		 fd, response);
	/*
	 * make sure the response is valid, if invalid we do nothing and either
	 * userspace can send a valid response or we will clean it up after the
	 * timeout
	 */
	switch (response) {
	case FAN_ALLOW:
	case FAN_DENY:
		break;
	default:
		return -EINVAL;
	}

	if (fd < 0)
		return -EINVAL;

	re = dequeue_re(group, fd);
	if (!re)
		return -ENOENT;

	re->event->response = response;

	wake_up(&group->fanotify_data.access_waitq);

	kmem_cache_free(fanotify_response_event_cache, re);

	return 0;
}
static int prepare_for_access_response(struct fsnotify_group *group,
				       struct fsnotify_event *event,
				       __s32 fd)
{
	struct fanotify_response_event *re;

	if (!(event->mask & FAN_ALL_PERM_EVENTS))
		return 0;

	re = kmem_cache_alloc(fanotify_response_event_cache, GFP_KERNEL);
	if (!re)
		return -ENOMEM;

	re->event = event;
	re->fd = fd;

	mutex_lock(&group->fanotify_data.access_mutex);

	if (atomic_read(&group->fanotify_data.bypass_perm)) {
		mutex_unlock(&group->fanotify_data.access_mutex);
		kmem_cache_free(fanotify_response_event_cache, re);
		event->response = FAN_ALLOW;
		return 0;
	}

	list_add_tail(&re->list, &group->fanotify_data.access_list);
	mutex_unlock(&group->fanotify_data.access_mutex);

	return 0;
}

#else
static int prepare_for_access_response(struct fsnotify_group *group,
				       struct fsnotify_event *event,
				       __s32 fd)
{
	return 0;
}

#endif
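
/*
 * Illustrative only (not part of the original file): a minimal userspace
 * sketch of the permission-response protocol that process_access_response()
 * and fanotify_write() above implement. After reading a permission event
 * (FAN_OPEN_PERM/FAN_ACCESS_PERM), the listener writes a struct
 * fanotify_response carrying the event's fd and FAN_ALLOW or FAN_DENY back
 * to the fanotify file descriptor. The helper name answer_permission_event()
 * and the allow-everything policy are assumptions made for the example.
 *
 *	#include <sys/fanotify.h>
 *	#include <unistd.h>
 *
 *	static int answer_permission_event(int fanotify_fd,
 *					   const struct fanotify_event_metadata *md)
 *	{
 *		struct fanotify_response resp;
 *
 *		resp.fd = md->fd;		// the fd create_fd() installed
 *		resp.response = FAN_ALLOW;	// or FAN_DENY
 *		if (write(fanotify_fd, &resp, sizeof(resp)) != sizeof(resp))
 *			return -1;
 *		return 0;
 *	}
 */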
static ssize_t copy_event_to_user(struct fsnotify_group *group,
				  struct fsnotify_event *event,
				  char __user *buf)
{
	struct fanotify_event_metadata fanotify_event_metadata;
	struct file *f;
	int fd, ret;

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	ret = fill_event_metadata(group, &fanotify_event_metadata, event, &f);
	if (ret < 0)
		goto out;

	fd = fanotify_event_metadata.fd;
	ret = -EFAULT;
	if (copy_to_user(buf, &fanotify_event_metadata,
			 fanotify_event_metadata.event_len))
		goto out_close_fd;

	ret = prepare_for_access_response(group, event, fd);
	if (ret)
		goto out_close_fd;

	fd_install(fd, f);
	return fanotify_event_metadata.event_len;

out_close_fd:
	if (fd != FAN_NOFD) {
		put_unused_fd(fd);
		fput(f);
	}
out:
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	if (event->mask & FAN_ALL_PERM_EVENTS) {
		event->response = FAN_DENY;
		wake_up(&group->fanotify_data.access_waitq);
	}
#endif
	return ret;
}
/* fanotify userspace file descriptor functions */
static unsigned int fanotify_poll(struct file *file, poll_table *wait)
{
	struct fsnotify_group *group = file->private_data;
	int ret = 0;

	poll_wait(file, &group->notification_waitq, wait);
	mutex_lock(&group->notification_mutex);
	if (!fsnotify_notify_queue_is_empty(group))
		ret = POLLIN | POLLRDNORM;
	mutex_unlock(&group->notification_mutex);

	return ret;
}

static ssize_t fanotify_read(struct file *file, char __user *buf,
			     size_t count, loff_t *pos)
{
	struct fsnotify_group *group;
	struct fsnotify_event *kevent;
	char __user *start;
	int ret;
	DEFINE_WAIT(wait);

	start = buf;
	group = file->private_data;

	pr_debug("%s: group=%p\n", __func__, group);

	while (1) {
		prepare_to_wait(&group->notification_waitq, &wait, TASK_INTERRUPTIBLE);

		mutex_lock(&group->notification_mutex);
		kevent = get_one_event(group, count);
		mutex_unlock(&group->notification_mutex);

		if (kevent) {
			ret = PTR_ERR(kevent);
			if (IS_ERR(kevent))
				break;
			ret = copy_event_to_user(group, kevent, buf);
			fsnotify_put_event(kevent);
			if (ret < 0)
				break;
			buf += ret;
			count -= ret;
			continue;
		}

		ret = -EAGAIN;
		if (file->f_flags & O_NONBLOCK)
			break;
		ret = -ERESTARTSYS;
		if (signal_pending(current))
			break;

		if (start != buf)
			break;

		schedule();
	}

	finish_wait(&group->notification_waitq, &wait);
	if (start != buf && ret != -EFAULT)
		ret = buf - start;
	return ret;
}
static ssize_t fanotify_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
{
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	struct fanotify_response response = { .fd = -1, .response = -1 };
	struct fsnotify_group *group;
	int ret;

	group = file->private_data;

	if (count > sizeof(response))
		count = sizeof(response);

	pr_debug("%s: group=%p count=%zu\n", __func__, group, count);

	if (copy_from_user(&response, buf, count))
		return -EFAULT;

	ret = process_access_response(group, &response);
	if (ret < 0)
		count = ret;

	return count;
#else
	return -EINVAL;
#endif
}

static int fanotify_release(struct inode *ignored, struct file *file)
{
	struct fsnotify_group *group = file->private_data;

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	struct fanotify_response_event *re, *lre;

	mutex_lock(&group->fanotify_data.access_mutex);

	atomic_inc(&group->fanotify_data.bypass_perm);

	list_for_each_entry_safe(re, lre, &group->fanotify_data.access_list, list) {
		pr_debug("%s: found group=%p re=%p event=%p\n", __func__, group,
			 re, re->event);

		list_del_init(&re->list);
		re->event->response = FAN_ALLOW;

		kmem_cache_free(fanotify_response_event_cache, re);
	}
	mutex_unlock(&group->fanotify_data.access_mutex);

	wake_up(&group->fanotify_data.access_waitq);
#endif
	/* matches the fanotify_init->fsnotify_alloc_group */
	fsnotify_put_group(group);

	return 0;
}
static long fanotify_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct fsnotify_group *group;
	struct fsnotify_event_holder *holder;
	void __user *p;
	int ret = -ENOTTY;
	size_t send_len = 0;

	group = file->private_data;

	p = (void __user *) arg;

	switch (cmd) {
	case FIONREAD:
		mutex_lock(&group->notification_mutex);
		list_for_each_entry(holder, &group->notification_list, event_list)
			send_len += FAN_EVENT_METADATA_LEN;
		mutex_unlock(&group->notification_mutex);
		ret = put_user(send_len, (int __user *) p);
		break;
	}

	return ret;
}

static const struct file_operations fanotify_fops = {
	.poll		= fanotify_poll,
	.read		= fanotify_read,
	.write		= fanotify_write,
	.fasync		= NULL,
	.release	= fanotify_release,
	.unlocked_ioctl	= fanotify_ioctl,
	.compat_ioctl	= fanotify_ioctl,
	.llseek		= noop_llseek,
};
static void fanotify_free_mark(struct fsnotify_mark *fsn_mark)
{
	kmem_cache_free(fanotify_mark_cache, fsn_mark);
}

static int fanotify_find_path(int dfd, const char __user *filename,
			      struct path *path, unsigned int flags)
{
	int ret;

	pr_debug("%s: dfd=%d filename=%p flags=%x\n", __func__,
		 dfd, filename, flags);

	if (filename == NULL) {
		struct file *file;
		int fput_needed;

		ret = -EBADF;
		file = fget_light(dfd, &fput_needed);
		if (!file)
			goto out;

		ret = -ENOTDIR;
		if ((flags & FAN_MARK_ONLYDIR) &&
		    !(S_ISDIR(file->f_path.dentry->d_inode->i_mode))) {
			fput_light(file, fput_needed);
			goto out;
		}

		*path = file->f_path;
		path_get(path);
		fput_light(file, fput_needed);
	} else {
		unsigned int lookup_flags = 0;

		if (!(flags & FAN_MARK_DONT_FOLLOW))
			lookup_flags |= LOOKUP_FOLLOW;
		if (flags & FAN_MARK_ONLYDIR)
			lookup_flags |= LOOKUP_DIRECTORY;

		ret = user_path_at(dfd, filename, lookup_flags, path);
		if (ret)
			goto out;
	}

	/* you can only watch an inode if you have read permissions on it */
	ret = inode_permission(path->dentry->d_inode, MAY_READ);
	if (ret)
		path_put(path);
out:
	return ret;
}
static __u32 fanotify_mark_remove_from_mask(struct fsnotify_mark *fsn_mark,
					    __u32 mask,
					    unsigned int flags)
{
	__u32 oldmask;

	spin_lock(&fsn_mark->lock);
	if (!(flags & FAN_MARK_IGNORED_MASK)) {
		oldmask = fsn_mark->mask;
		fsnotify_set_mark_mask_locked(fsn_mark, (oldmask & ~mask));
	} else {
		oldmask = fsn_mark->ignored_mask;
		fsnotify_set_mark_ignored_mask_locked(fsn_mark, (oldmask & ~mask));
	}
	spin_unlock(&fsn_mark->lock);

	if (!(oldmask & ~mask))
		fsnotify_destroy_mark(fsn_mark);

	return mask & oldmask;
}

static int fanotify_remove_vfsmount_mark(struct fsnotify_group *group,
					 struct vfsmount *mnt, __u32 mask,
					 unsigned int flags)
{
	struct fsnotify_mark *fsn_mark = NULL;
	__u32 removed;

	fsn_mark = fsnotify_find_vfsmount_mark(group, mnt);
	if (!fsn_mark)
		return -ENOENT;

	removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags);
	fsnotify_put_mark(fsn_mark);
	if (removed & real_mount(mnt)->mnt_fsnotify_mask)
		fsnotify_recalc_vfsmount_mask(mnt);

	return 0;
}

static int fanotify_remove_inode_mark(struct fsnotify_group *group,
				      struct inode *inode, __u32 mask,
				      unsigned int flags)
{
	struct fsnotify_mark *fsn_mark = NULL;
	__u32 removed;

	fsn_mark = fsnotify_find_inode_mark(group, inode);
	if (!fsn_mark)
		return -ENOENT;

	removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags);
	/* matches the fsnotify_find_inode_mark() */
	fsnotify_put_mark(fsn_mark);
	if (removed & inode->i_fsnotify_mask)
		fsnotify_recalc_inode_mask(inode);

	return 0;
}
static __u32 fanotify_mark_add_to_mask(struct fsnotify_mark *fsn_mark,
				       __u32 mask,
				       unsigned int flags)
{
	__u32 oldmask = -1;

	spin_lock(&fsn_mark->lock);
	if (!(flags & FAN_MARK_IGNORED_MASK)) {
		oldmask = fsn_mark->mask;
		fsnotify_set_mark_mask_locked(fsn_mark, (oldmask | mask));
	} else {
		__u32 tmask = fsn_mark->ignored_mask | mask;
		fsnotify_set_mark_ignored_mask_locked(fsn_mark, tmask);
		if (flags & FAN_MARK_IGNORED_SURV_MODIFY)
			fsn_mark->flags |= FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY;
	}

	if (!(flags & FAN_MARK_ONDIR)) {
		__u32 tmask = fsn_mark->ignored_mask | FAN_ONDIR;
		fsnotify_set_mark_ignored_mask_locked(fsn_mark, tmask);
	}

	spin_unlock(&fsn_mark->lock);

	return mask & ~oldmask;
}

static int fanotify_add_vfsmount_mark(struct fsnotify_group *group,
				      struct vfsmount *mnt, __u32 mask,
				      unsigned int flags)
{
	struct fsnotify_mark *fsn_mark;
	__u32 added;
	int ret = 0;

	fsn_mark = fsnotify_find_vfsmount_mark(group, mnt);
	if (!fsn_mark) {
		if (atomic_read(&group->num_marks) > group->fanotify_data.max_marks)
			return -ENOSPC;

		fsn_mark = kmem_cache_alloc(fanotify_mark_cache, GFP_KERNEL);
		if (!fsn_mark)
			return -ENOMEM;

		fsnotify_init_mark(fsn_mark, fanotify_free_mark);
		ret = fsnotify_add_mark(fsn_mark, group, NULL, mnt, 0);
		if (ret)
			goto err;
	}
	added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);

	if (added & ~real_mount(mnt)->mnt_fsnotify_mask)
		fsnotify_recalc_vfsmount_mask(mnt);
err:
	fsnotify_put_mark(fsn_mark);
	return ret;
}
static int fanotify_add_inode_mark(struct fsnotify_group *group,
				   struct inode *inode, __u32 mask,
				   unsigned int flags)
{
	struct fsnotify_mark *fsn_mark;
	__u32 added;
	int ret = 0;

	pr_debug("%s: group=%p inode=%p\n", __func__, group, inode);

	/*
	 * If some other task has this inode open for write we should not add
	 * an ignored mark, unless that ignored mark is supposed to survive
	 * modification changes anyway.
	 */
	if ((flags & FAN_MARK_IGNORED_MASK) &&
	    !(flags & FAN_MARK_IGNORED_SURV_MODIFY) &&
	    (atomic_read(&inode->i_writecount) > 0))
		return 0;

	fsn_mark = fsnotify_find_inode_mark(group, inode);
	if (!fsn_mark) {
		if (atomic_read(&group->num_marks) > group->fanotify_data.max_marks)
			return -ENOSPC;

		fsn_mark = kmem_cache_alloc(fanotify_mark_cache, GFP_KERNEL);
		if (!fsn_mark)
			return -ENOMEM;

		fsnotify_init_mark(fsn_mark, fanotify_free_mark);
		ret = fsnotify_add_mark(fsn_mark, group, inode, NULL, 0);
		if (ret)
			goto err;
	}
	added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);

	if (added & ~inode->i_fsnotify_mask)
		fsnotify_recalc_inode_mask(inode);
err:
	fsnotify_put_mark(fsn_mark);
	return ret;
}
/* fanotify syscalls */
SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
{
	struct fsnotify_group *group;
	int f_flags, fd;
	struct user_struct *user;

	pr_debug("%s: flags=%d event_f_flags=%d\n",
		 __func__, flags, event_f_flags);

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (flags & ~FAN_ALL_INIT_FLAGS)
		return -EINVAL;

	user = get_current_user();
	if (atomic_read(&user->fanotify_listeners) > FANOTIFY_DEFAULT_MAX_LISTENERS) {
		free_uid(user);
		return -EMFILE;
	}

	f_flags = O_RDWR | FMODE_NONOTIFY;
	if (flags & FAN_CLOEXEC)
		f_flags |= O_CLOEXEC;
	if (flags & FAN_NONBLOCK)
		f_flags |= O_NONBLOCK;

	/* fsnotify_alloc_group takes a ref.  Dropped in fanotify_release */
	group = fsnotify_alloc_group(&fanotify_fsnotify_ops);
	if (IS_ERR(group)) {
		free_uid(user);
		return PTR_ERR(group);
	}

	group->fanotify_data.user = user;
	atomic_inc(&user->fanotify_listeners);

	group->fanotify_data.f_flags = event_f_flags;
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	mutex_init(&group->fanotify_data.access_mutex);
	init_waitqueue_head(&group->fanotify_data.access_waitq);
	INIT_LIST_HEAD(&group->fanotify_data.access_list);
	atomic_set(&group->fanotify_data.bypass_perm, 0);
#endif
	switch (flags & FAN_ALL_CLASS_BITS) {
	case FAN_CLASS_NOTIF:
		group->priority = FS_PRIO_0;
		break;
	case FAN_CLASS_CONTENT:
		group->priority = FS_PRIO_1;
		break;
	case FAN_CLASS_PRE_CONTENT:
		group->priority = FS_PRIO_2;
		break;
	default:
		fd = -EINVAL;
		goto out_put_group;
	}

	if (flags & FAN_UNLIMITED_QUEUE) {
		fd = -EPERM;
		if (!capable(CAP_SYS_ADMIN))
			goto out_put_group;
		group->max_events = UINT_MAX;
	} else {
		group->max_events = FANOTIFY_DEFAULT_MAX_EVENTS;
	}

	if (flags & FAN_UNLIMITED_MARKS) {
		fd = -EPERM;
		if (!capable(CAP_SYS_ADMIN))
			goto out_put_group;
		group->fanotify_data.max_marks = UINT_MAX;
	} else {
		group->fanotify_data.max_marks = FANOTIFY_DEFAULT_MAX_MARKS;
	}

	fd = anon_inode_getfd("[fanotify]", &fanotify_fops, group, f_flags);
	if (fd < 0)
		goto out_put_group;

	return fd;

out_put_group:
	fsnotify_put_group(group);
	return fd;
}
SYSCALL_DEFINE(fanotify_mark)(int fanotify_fd, unsigned int flags,
			      __u64 mask, int dfd,
			      const char __user *pathname)
{
	struct inode *inode = NULL;
	struct vfsmount *mnt = NULL;
	struct fsnotify_group *group;
	struct file *filp;
	struct path path;
	int ret, fput_needed;

	pr_debug("%s: fanotify_fd=%d flags=%x dfd=%d pathname=%p mask=%llx\n",
		 __func__, fanotify_fd, flags, dfd, pathname, mask);

	/* we only use the lower 32 bits as of right now. */
	if (mask & ((__u64)0xffffffff << 32))
		return -EINVAL;

	if (flags & ~FAN_ALL_MARK_FLAGS)
		return -EINVAL;
	switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) {
	case FAN_MARK_ADD:		/* fallthrough */
	case FAN_MARK_REMOVE:
		if (!mask)
			return -EINVAL;
	case FAN_MARK_FLUSH:
		break;
	default:
		return -EINVAL;
	}

	if (mask & FAN_ONDIR) {
		flags |= FAN_MARK_ONDIR;
		mask &= ~FAN_ONDIR;
	}

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	if (mask & ~(FAN_ALL_EVENTS | FAN_ALL_PERM_EVENTS | FAN_EVENT_ON_CHILD))
#else
	if (mask & ~(FAN_ALL_EVENTS | FAN_EVENT_ON_CHILD))
#endif
		return -EINVAL;

	filp = fget_light(fanotify_fd, &fput_needed);
	if (unlikely(!filp))
		return -EBADF;

	/* verify that this is indeed an fanotify instance */
	ret = -EINVAL;
	if (unlikely(filp->f_op != &fanotify_fops))
		goto fput_and_out;
	group = filp->private_data;

	/*
	 * group->priority == FS_PRIO_0 == FAN_CLASS_NOTIF.  These are not
	 * allowed to set permissions events.
	 */
	ret = -EINVAL;
	if (mask & FAN_ALL_PERM_EVENTS &&
	    group->priority == FS_PRIO_0)
		goto fput_and_out;

	ret = fanotify_find_path(dfd, pathname, &path, flags);
	if (ret)
		goto fput_and_out;

	/* inode held in place by reference to path; group by fget on fd */
	if (!(flags & FAN_MARK_MOUNT))
		inode = path.dentry->d_inode;
	else
		mnt = path.mnt;

	/* create/update an inode mark */
	switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) {
	case FAN_MARK_ADD:
		if (flags & FAN_MARK_MOUNT)
			ret = fanotify_add_vfsmount_mark(group, mnt, mask, flags);
		else
			ret = fanotify_add_inode_mark(group, inode, mask, flags);
		break;
	case FAN_MARK_REMOVE:
		if (flags & FAN_MARK_MOUNT)
			ret = fanotify_remove_vfsmount_mark(group, mnt, mask, flags);
		else
			ret = fanotify_remove_inode_mark(group, inode, mask, flags);
		break;
	case FAN_MARK_FLUSH:
		if (flags & FAN_MARK_MOUNT)
			fsnotify_clear_vfsmount_marks_by_group(group);
		else
			fsnotify_clear_inode_marks_by_group(group);
		break;
	default:
		ret = -EINVAL;
	}

	path_put(&path);
fput_and_out:
	fput_light(filp, fput_needed);
	return ret;
}
#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
asmlinkage long SyS_fanotify_mark(long fanotify_fd, long flags, __u64 mask,
				  long dfd, long pathname)
{
	return SYSC_fanotify_mark((int) fanotify_fd, (unsigned int) flags,
				  mask, (int) dfd,
				  (const char __user *) pathname);
}
SYSCALL_ALIAS(sys_fanotify_mark, SyS_fanotify_mark);
#endif
/*
 * fanotify_user_setup - Our initialization function.  Note that we cannot return
 * error because we have compiled-in VFS hooks.  So an (unlikely) failure here
 * must result in panic().
 */
static int __init fanotify_user_setup(void)
{
	fanotify_mark_cache = KMEM_CACHE(fsnotify_mark, SLAB_PANIC);
	fanotify_response_event_cache = KMEM_CACHE(fanotify_response_event,
						   SLAB_PANIC);

	return 0;
}
device_initcall(fanotify_user_setup);
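
/*
 * Illustrative only (not part of the original file): a minimal userspace
 * consumer of the fanotify_init()/fanotify_mark()/read() interface
 * implemented above. The watched mount point "/mnt", the chosen event mask,
 * and the omitted error handling are assumptions made for the example; the
 * fanotify_init(), fanotify_mark(), FAN_EVENT_OK() and FAN_EVENT_NEXT()
 * names are the standard userspace API from <sys/fanotify.h>.
 *
 *	#include <sys/fanotify.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		char buf[4096];
 *		ssize_t len;
 *		int fd;
 *
 *		// plain notification listener; event fds are opened O_RDONLY
 *		fd = fanotify_init(FAN_CLOEXEC | FAN_CLASS_NOTIF, O_RDONLY);
 *
 *		// watch open/close/access/modify across the whole mount
 *		fanotify_mark(fd, FAN_MARK_ADD | FAN_MARK_MOUNT,
 *			      FAN_OPEN | FAN_CLOSE | FAN_ACCESS | FAN_MODIFY,
 *			      AT_FDCWD, "/mnt");
 *
 *		while ((len = read(fd, buf, sizeof(buf))) > 0) {
 *			struct fanotify_event_metadata *md;
 *
 *			md = (struct fanotify_event_metadata *)buf;
 *			while (FAN_EVENT_OK(md, len)) {
 *				printf("mask=%llx pid=%d\n",
 *				       (unsigned long long)md->mask, md->pid);
 *				if (md->fd >= 0)
 *					close(md->fd);	// fd from create_fd()
 *				md = FAN_EVENT_NEXT(md, len);
 *			}
 *		}
 *		return 0;
 *	}
 */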