fanotify: do not recalculate the mask if the ignored mask changed
fs/notify/fanotify/fanotify_user.c
#include <linux/fanotify.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/fsnotify_backend.h>
#include <linux/init.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/poll.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>

#include <asm/ioctls.h>

#define FANOTIFY_DEFAULT_MAX_EVENTS	16384
#define FANOTIFY_DEFAULT_MAX_MARKS	8192
#define FANOTIFY_DEFAULT_MAX_LISTENERS	128

extern const struct fsnotify_ops fanotify_fsnotify_ops;

static struct kmem_cache *fanotify_mark_cache __read_mostly;
static struct kmem_cache *fanotify_response_event_cache __read_mostly;

struct fanotify_response_event {
	struct list_head list;
	__s32 fd;
	struct fsnotify_event *event;
};

/*
 * Get an fsnotify notification event if one exists and is small
 * enough to fit in "count". Return an error pointer if the count
 * is not large enough.
 *
 * Called with the group->notification_mutex held.
 */
static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
					    size_t count)
{
	BUG_ON(!mutex_is_locked(&group->notification_mutex));

	pr_debug("%s: group=%p count=%zd\n", __func__, group, count);

	if (fsnotify_notify_queue_is_empty(group))
		return NULL;

	if (FAN_EVENT_METADATA_LEN > count)
		return ERR_PTR(-EINVAL);

	/* held the notification_mutex the whole time, so this is the
	 * same event we peeked above */
	return fsnotify_remove_notify_event(group);
}

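/*
 * Open a new file descriptor in the current task for the object that
 * generated @event, so userspace can inspect the file the event refers to.
 */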
static int create_fd(struct fsnotify_group *group, struct fsnotify_event *event)
{
	int client_fd;
	struct dentry *dentry;
	struct vfsmount *mnt;
	struct file *new_file;

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	client_fd = get_unused_fd();
	if (client_fd < 0)
		return client_fd;

	if (event->data_type != FSNOTIFY_EVENT_PATH) {
		WARN_ON(1);
		put_unused_fd(client_fd);
		return -EINVAL;
	}

	/*
	 * we need a new file handle for the userspace program so it can read
	 * even if it was originally opened O_WRONLY.
	 */
	dentry = dget(event->path.dentry);
	mnt = mntget(event->path.mnt);
	/* it's possible this event was an overflow event.  in that case
	 * dentry and mnt are NULL;  that's fine, just don't call dentry_open */
	if (dentry && mnt)
		new_file = dentry_open(dentry, mnt,
				       group->fanotify_data.f_flags | FMODE_NONOTIFY,
				       current_cred());
	else
		new_file = ERR_PTR(-EOVERFLOW);
	if (IS_ERR(new_file)) {
		/*
		 * we still send an event even if we can't open the file.  this
		 * can happen when say tasks are gone and we try to open their
		 * /proc files or we try to open a WRONLY file like in sysfs.
		 * we just send the errno to userspace since there isn't much
		 * else we can do.
		 */
		put_unused_fd(client_fd);
		client_fd = PTR_ERR(new_file);
	} else {
		fd_install(client_fd, new_file);
	}

	return client_fd;
}

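/*
 * Fill in the fixed-size metadata for @event, including a freshly opened
 * file descriptor.  Returns that fd, or a negative errno from create_fd().
 */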
static ssize_t fill_event_metadata(struct fsnotify_group *group,
				   struct fanotify_event_metadata *metadata,
				   struct fsnotify_event *event)
{
	pr_debug("%s: group=%p metadata=%p event=%p\n", __func__,
		 group, metadata, event);

	metadata->event_len = FAN_EVENT_METADATA_LEN;
	metadata->vers = FANOTIFY_METADATA_VERSION;
	metadata->mask = event->mask & FAN_ALL_OUTGOING_EVENTS;
	metadata->pid = pid_vnr(event->tgid);
	metadata->fd = create_fd(group, event);

	return metadata->fd;
}

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
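/*
 * Remove and return the response event queued for file descriptor @fd, or
 * NULL if no matching entry is on the group's access list.
 */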
static struct fanotify_response_event *dequeue_re(struct fsnotify_group *group,
						  __s32 fd)
{
	struct fanotify_response_event *re, *return_re = NULL;

	mutex_lock(&group->fanotify_data.access_mutex);
	list_for_each_entry(re, &group->fanotify_data.access_list, list) {
		if (re->fd != fd)
			continue;

		list_del_init(&re->list);
		return_re = re;
		break;
	}
	mutex_unlock(&group->fanotify_data.access_mutex);

	pr_debug("%s: found return_re=%p\n", __func__, return_re);

	return return_re;
}

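/*
 * Apply a FAN_ALLOW/FAN_DENY verdict from userspace to the pending
 * permission event it refers to and wake up the waiter.
 */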
static int process_access_response(struct fsnotify_group *group,
				   struct fanotify_response *response_struct)
{
	struct fanotify_response_event *re;
	__s32 fd = response_struct->fd;
	__u32 response = response_struct->response;

	pr_debug("%s: group=%p fd=%d response=%d\n", __func__, group,
		 fd, response);
	/*
	 * make sure the response is valid, if invalid we do nothing and either
	 * userspace can send a valid response or we will clean it up after the
	 * timeout
	 */
	switch (response) {
	case FAN_ALLOW:
	case FAN_DENY:
		break;
	default:
		return -EINVAL;
	}

	if (fd < 0)
		return -EINVAL;

	re = dequeue_re(group, fd);
	if (!re)
		return -ENOENT;

	re->event->response = response;

	wake_up(&group->fanotify_data.access_waitq);

	kmem_cache_free(fanotify_response_event_cache, re);

	return 0;
}

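/*
 * For permission events, queue a response event so that a later write()
 * from userspace can be matched back to @event via @fd.  If the group is
 * already bypassing permission checks, allow the event immediately.
 */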
static int prepare_for_access_response(struct fsnotify_group *group,
				       struct fsnotify_event *event,
				       __s32 fd)
{
	struct fanotify_response_event *re;

	if (!(event->mask & FAN_ALL_PERM_EVENTS))
		return 0;

	re = kmem_cache_alloc(fanotify_response_event_cache, GFP_KERNEL);
	if (!re)
		return -ENOMEM;

	re->event = event;
	re->fd = fd;

	mutex_lock(&group->fanotify_data.access_mutex);

	if (group->fanotify_data.bypass_perm) {
		mutex_unlock(&group->fanotify_data.access_mutex);
		kmem_cache_free(fanotify_response_event_cache, re);
		event->response = FAN_ALLOW;
		return 0;
	}

	list_add_tail(&re->list, &group->fanotify_data.access_list);
	mutex_unlock(&group->fanotify_data.access_mutex);

	return 0;
}

static void remove_access_response(struct fsnotify_group *group,
				   struct fsnotify_event *event,
				   __s32 fd)
{
	struct fanotify_response_event *re;

	if (!(event->mask & FAN_ALL_PERM_EVENTS))
		return;

	re = dequeue_re(group, fd);
	if (!re)
		return;

	BUG_ON(re->event != event);

	kmem_cache_free(fanotify_response_event_cache, re);

	return;
}
#else
static int prepare_for_access_response(struct fsnotify_group *group,
				       struct fsnotify_event *event,
				       __s32 fd)
{
	return 0;
}

static void remove_access_response(struct fsnotify_group *group,
				   struct fsnotify_event *event,
				   __s32 fd)
{
	return;
}
#endif

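/*
 * Convert @event into on-wire metadata and copy it to the userspace
 * buffer, installing a new file descriptor for it along the way.
 */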
static ssize_t copy_event_to_user(struct fsnotify_group *group,
				  struct fsnotify_event *event,
				  char __user *buf)
{
	struct fanotify_event_metadata fanotify_event_metadata;
	int fd, ret;

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	fd = fill_event_metadata(group, &fanotify_event_metadata, event);
	if (fd < 0)
		return fd;

	ret = prepare_for_access_response(group, event, fd);
	if (ret)
		goto out_close_fd;

	ret = -EFAULT;
	if (copy_to_user(buf, &fanotify_event_metadata, FAN_EVENT_METADATA_LEN))
		goto out_kill_access_response;

	return FAN_EVENT_METADATA_LEN;

out_kill_access_response:
	remove_access_response(group, event, fd);
out_close_fd:
	sys_close(fd);
	return ret;
}

/* fanotify userspace file descriptor functions */
static unsigned int fanotify_poll(struct file *file, poll_table *wait)
{
	struct fsnotify_group *group = file->private_data;
	int ret = 0;

	poll_wait(file, &group->notification_waitq, wait);
	mutex_lock(&group->notification_mutex);
	if (!fsnotify_notify_queue_is_empty(group))
		ret = POLLIN | POLLRDNORM;
	mutex_unlock(&group->notification_mutex);

	return ret;
}

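/*
 * Copy as many queued events to the userspace buffer as will fit,
 * blocking while the queue is empty unless O_NONBLOCK was set.
 */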
static ssize_t fanotify_read(struct file *file, char __user *buf,
			     size_t count, loff_t *pos)
{
	struct fsnotify_group *group;
	struct fsnotify_event *kevent;
	char __user *start;
	int ret;
	DEFINE_WAIT(wait);

	start = buf;
	group = file->private_data;

	pr_debug("%s: group=%p\n", __func__, group);

	while (1) {
		prepare_to_wait(&group->notification_waitq, &wait, TASK_INTERRUPTIBLE);

		mutex_lock(&group->notification_mutex);
		kevent = get_one_event(group, count);
		mutex_unlock(&group->notification_mutex);

		if (kevent) {
			ret = PTR_ERR(kevent);
			if (IS_ERR(kevent))
				break;
			ret = copy_event_to_user(group, kevent, buf);
			fsnotify_put_event(kevent);
			if (ret < 0)
				break;
			buf += ret;
			count -= ret;
			continue;
		}

		ret = -EAGAIN;
		if (file->f_flags & O_NONBLOCK)
			break;
		ret = -EINTR;
		if (signal_pending(current))
			break;

		if (start != buf)
			break;

		schedule();
	}

	finish_wait(&group->notification_waitq, &wait);
	if (start != buf && ret != -EFAULT)
		ret = buf - start;
	return ret;
}

static ssize_t fanotify_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
{
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	struct fanotify_response response = { .fd = -1, .response = -1 };
	struct fsnotify_group *group;
	int ret;

	group = file->private_data;

	if (count > sizeof(response))
		count = sizeof(response);

	pr_debug("%s: group=%p count=%zu\n", __func__, group, count);

	if (copy_from_user(&response, buf, count))
		return -EFAULT;

	ret = process_access_response(group, &response);
	if (ret < 0)
		count = ret;

	return count;
#else
	return -EINVAL;
#endif
}

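/*
 * On final close, allow all pending permission events so that waiters do
 * not hang, then drop the group reference taken in fanotify_init.
 */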
static int fanotify_release(struct inode *ignored, struct file *file)
{
	struct fsnotify_group *group = file->private_data;
	struct fanotify_response_event *re, *lre;

	pr_debug("%s: file=%p group=%p\n", __func__, file, group);

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	mutex_lock(&group->fanotify_data.access_mutex);

	group->fanotify_data.bypass_perm = true;

	list_for_each_entry_safe(re, lre, &group->fanotify_data.access_list, list) {
		pr_debug("%s: found group=%p re=%p event=%p\n", __func__, group,
			 re, re->event);

		list_del_init(&re->list);
		re->event->response = FAN_ALLOW;

		kmem_cache_free(fanotify_response_event_cache, re);
	}
	mutex_unlock(&group->fanotify_data.access_mutex);

	wake_up(&group->fanotify_data.access_waitq);
#endif
	/* matches the fanotify_init->fsnotify_alloc_group */
	fsnotify_put_group(group);

	return 0;
}

static long fanotify_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct fsnotify_group *group;
	struct fsnotify_event_holder *holder;
	void __user *p;
	int ret = -ENOTTY;
	size_t send_len = 0;

	group = file->private_data;

	p = (void __user *) arg;

	switch (cmd) {
	case FIONREAD:
		mutex_lock(&group->notification_mutex);
		list_for_each_entry(holder, &group->notification_list, event_list)
			send_len += FAN_EVENT_METADATA_LEN;
		mutex_unlock(&group->notification_mutex);
		ret = put_user(send_len, (int __user *) p);
		break;
	}

	return ret;
}

static const struct file_operations fanotify_fops = {
	.poll		= fanotify_poll,
	.read		= fanotify_read,
	.write		= fanotify_write,
	.fasync		= NULL,
	.release	= fanotify_release,
	.unlocked_ioctl	= fanotify_ioctl,
	.compat_ioctl	= fanotify_ioctl,
	.llseek		= noop_llseek,
};

static void fanotify_free_mark(struct fsnotify_mark *fsn_mark)
{
	kmem_cache_free(fanotify_mark_cache, fsn_mark);
}

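/*
 * Resolve dfd/filename to the path to be marked; the caller must have
 * read permission on the target inode.
 */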
static int fanotify_find_path(int dfd, const char __user *filename,
			      struct path *path, unsigned int flags)
{
	int ret;

	pr_debug("%s: dfd=%d filename=%p flags=%x\n", __func__,
		 dfd, filename, flags);

	if (filename == NULL) {
		struct file *file;
		int fput_needed;

		ret = -EBADF;
		file = fget_light(dfd, &fput_needed);
		if (!file)
			goto out;

		ret = -ENOTDIR;
		if ((flags & FAN_MARK_ONLYDIR) &&
		    !(S_ISDIR(file->f_path.dentry->d_inode->i_mode))) {
			fput_light(file, fput_needed);
			goto out;
		}

		*path = file->f_path;
		path_get(path);
		fput_light(file, fput_needed);
	} else {
		unsigned int lookup_flags = 0;

		if (!(flags & FAN_MARK_DONT_FOLLOW))
			lookup_flags |= LOOKUP_FOLLOW;
		if (flags & FAN_MARK_ONLYDIR)
			lookup_flags |= LOOKUP_DIRECTORY;

		ret = user_path_at(dfd, filename, lookup_flags, path);
		if (ret)
			goto out;
	}

	/* you can only watch an inode if you have read permissions on it */
	ret = inode_permission(path->dentry->d_inode, MAY_READ);
	if (ret)
		path_put(path);
out:
	return ret;
}

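/*
 * Clear @mask bits from the mark's event mask, or from its ignored mask
 * with FAN_MARK_IGNORED_MASK; destroy the mark once the affected mask is
 * empty.  Returns the bits that were actually removed.
 */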
static __u32 fanotify_mark_remove_from_mask(struct fsnotify_mark *fsn_mark,
					    __u32 mask,
					    unsigned int flags)
{
	__u32 oldmask;

	spin_lock(&fsn_mark->lock);
	if (!(flags & FAN_MARK_IGNORED_MASK)) {
		oldmask = fsn_mark->mask;
		fsnotify_set_mark_mask_locked(fsn_mark, (oldmask & ~mask));
	} else {
		oldmask = fsn_mark->ignored_mask;
		fsnotify_set_mark_ignored_mask_locked(fsn_mark, (oldmask & ~mask));
	}
	spin_unlock(&fsn_mark->lock);

	if (!(oldmask & ~mask))
		fsnotify_destroy_mark(fsn_mark);

	return mask & oldmask;
}

static int fanotify_remove_vfsmount_mark(struct fsnotify_group *group,
					 struct vfsmount *mnt, __u32 mask,
					 unsigned int flags)
{
	struct fsnotify_mark *fsn_mark = NULL;
	__u32 removed;

	fsn_mark = fsnotify_find_vfsmount_mark(group, mnt);
	if (!fsn_mark)
		return -ENOENT;

	removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags);
	fsnotify_put_mark(fsn_mark);
	if (removed & mnt->mnt_fsnotify_mask)
		fsnotify_recalc_vfsmount_mask(mnt);

	return 0;
}

static int fanotify_remove_inode_mark(struct fsnotify_group *group,
				      struct inode *inode, __u32 mask,
				      unsigned int flags)
{
	struct fsnotify_mark *fsn_mark = NULL;
	__u32 removed;

	fsn_mark = fsnotify_find_inode_mark(group, inode);
	if (!fsn_mark)
		return -ENOENT;

	removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags);
	/* matches the fsnotify_find_inode_mark() */
	fsnotify_put_mark(fsn_mark);
	if (removed & inode->i_fsnotify_mask)
		fsnotify_recalc_inode_mask(inode);

	return 0;
}

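/*
 * OR @mask into the mark's event mask, or into its ignored mask with
 * FAN_MARK_IGNORED_MASK.  Returns the event-mask bits newly added; for
 * ignored-mask updates this is 0, so callers skip the mask recalculation.
 */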
static __u32 fanotify_mark_add_to_mask(struct fsnotify_mark *fsn_mark,
				       __u32 mask,
				       unsigned int flags)
{
	__u32 oldmask = -1;

	spin_lock(&fsn_mark->lock);
	if (!(flags & FAN_MARK_IGNORED_MASK)) {
		oldmask = fsn_mark->mask;
		fsnotify_set_mark_mask_locked(fsn_mark, (oldmask | mask));
	} else {
		__u32 tmask = fsn_mark->ignored_mask | mask;
		fsnotify_set_mark_ignored_mask_locked(fsn_mark, tmask);
		if (flags & FAN_MARK_IGNORED_SURV_MODIFY)
			fsn_mark->flags |= FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY;
	}

	if (!(flags & FAN_MARK_ONDIR)) {
		__u32 tmask = fsn_mark->ignored_mask | FAN_ONDIR;
		fsnotify_set_mark_ignored_mask_locked(fsn_mark, tmask);
	}

	spin_unlock(&fsn_mark->lock);

	return mask & ~oldmask;
}

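/*
 * Add to (allocating on first use) the mark this group holds on @mnt and
 * recalculate the mount's mask only if new event bits appeared.
 */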
static int fanotify_add_vfsmount_mark(struct fsnotify_group *group,
				      struct vfsmount *mnt, __u32 mask,
				      unsigned int flags)
{
	struct fsnotify_mark *fsn_mark;
	__u32 added;

	fsn_mark = fsnotify_find_vfsmount_mark(group, mnt);
	if (!fsn_mark) {
		int ret;

		if (atomic_read(&group->num_marks) > group->fanotify_data.max_marks)
			return -ENOSPC;

		fsn_mark = kmem_cache_alloc(fanotify_mark_cache, GFP_KERNEL);
		if (!fsn_mark)
			return -ENOMEM;

		fsnotify_init_mark(fsn_mark, fanotify_free_mark);
		ret = fsnotify_add_mark(fsn_mark, group, NULL, mnt, 0);
		if (ret) {
			fanotify_free_mark(fsn_mark);
			return ret;
		}
	}
	added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);
	fsnotify_put_mark(fsn_mark);
	if (added & ~mnt->mnt_fsnotify_mask)
		fsnotify_recalc_vfsmount_mask(mnt);

	return 0;
}

static int fanotify_add_inode_mark(struct fsnotify_group *group,
				   struct inode *inode, __u32 mask,
				   unsigned int flags)
{
	struct fsnotify_mark *fsn_mark;
	__u32 added;

	pr_debug("%s: group=%p inode=%p\n", __func__, group, inode);

	/*
	 * If some other task has this inode open for write we should not add
	 * an ignored mark, unless that ignored mark is supposed to survive
	 * modification changes anyway.
	 */
	if ((flags & FAN_MARK_IGNORED_MASK) &&
	    !(flags & FAN_MARK_IGNORED_SURV_MODIFY) &&
	    (atomic_read(&inode->i_writecount) > 0))
		return 0;

	fsn_mark = fsnotify_find_inode_mark(group, inode);
	if (!fsn_mark) {
		int ret;

		if (atomic_read(&group->num_marks) > group->fanotify_data.max_marks)
			return -ENOSPC;

		fsn_mark = kmem_cache_alloc(fanotify_mark_cache, GFP_KERNEL);
		if (!fsn_mark)
			return -ENOMEM;

		fsnotify_init_mark(fsn_mark, fanotify_free_mark);
		ret = fsnotify_add_mark(fsn_mark, group, inode, NULL, 0);
		if (ret) {
			fanotify_free_mark(fsn_mark);
			return ret;
		}
	}
	added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);
	fsnotify_put_mark(fsn_mark);
	if (added & ~inode->i_fsnotify_mask)
		fsnotify_recalc_inode_mask(inode);

	return 0;
}

/* fanotify syscalls */
SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
{
	struct fsnotify_group *group;
	int f_flags, fd;
	struct user_struct *user;

	pr_debug("%s: flags=%d event_f_flags=%d\n",
		 __func__, flags, event_f_flags);

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (flags & ~FAN_ALL_INIT_FLAGS)
		return -EINVAL;

	user = get_current_user();
	if (atomic_read(&user->fanotify_listeners) > FANOTIFY_DEFAULT_MAX_LISTENERS) {
		free_uid(user);
		return -EMFILE;
	}

	f_flags = O_RDWR | FMODE_NONOTIFY;
	if (flags & FAN_CLOEXEC)
		f_flags |= O_CLOEXEC;
	if (flags & FAN_NONBLOCK)
		f_flags |= O_NONBLOCK;

	/* fsnotify_alloc_group takes a ref.  Dropped in fanotify_release */
	group = fsnotify_alloc_group(&fanotify_fsnotify_ops);
	if (IS_ERR(group))
		return PTR_ERR(group);

	group->fanotify_data.user = user;
	atomic_inc(&user->fanotify_listeners);

	group->fanotify_data.f_flags = event_f_flags;
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	mutex_init(&group->fanotify_data.access_mutex);
	init_waitqueue_head(&group->fanotify_data.access_waitq);
	INIT_LIST_HEAD(&group->fanotify_data.access_list);
#endif
	switch (flags & FAN_ALL_CLASS_BITS) {
	case FAN_CLASS_NOTIF:
		group->priority = FS_PRIO_0;
		break;
	case FAN_CLASS_CONTENT:
		group->priority = FS_PRIO_1;
		break;
	case FAN_CLASS_PRE_CONTENT:
		group->priority = FS_PRIO_2;
		break;
	default:
		fd = -EINVAL;
		goto out_put_group;
	}

	if (flags & FAN_UNLIMITED_QUEUE) {
		fd = -EPERM;
		if (!capable(CAP_SYS_ADMIN))
			goto out_put_group;
		group->max_events = UINT_MAX;
	} else {
		group->max_events = FANOTIFY_DEFAULT_MAX_EVENTS;
	}

	if (flags & FAN_UNLIMITED_MARKS) {
		fd = -EPERM;
		if (!capable(CAP_SYS_ADMIN))
			goto out_put_group;
		group->fanotify_data.max_marks = UINT_MAX;
	} else {
		group->fanotify_data.max_marks = FANOTIFY_DEFAULT_MAX_MARKS;
	}

	fd = anon_inode_getfd("[fanotify]", &fanotify_fops, group, f_flags);
	if (fd < 0)
		goto out_put_group;

	return fd;

out_put_group:
	fsnotify_put_group(group);
	return fd;
}

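/*
 * Add, remove, or flush marks on the inode or mount named by dfd and
 * pathname, after validating mask and flags against the group's class.
 */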
SYSCALL_DEFINE(fanotify_mark)(int fanotify_fd, unsigned int flags,
			      __u64 mask, int dfd,
			      const char  __user * pathname)
{
	struct inode *inode = NULL;
	struct vfsmount *mnt = NULL;
	struct fsnotify_group *group;
	struct file *filp;
	struct path path;
	int ret, fput_needed;

	pr_debug("%s: fanotify_fd=%d flags=%x dfd=%d pathname=%p mask=%llx\n",
		 __func__, fanotify_fd, flags, dfd, pathname, mask);

	/* we only use the lower 32 bits as of right now. */
	if (mask & ((__u64)0xffffffff << 32))
		return -EINVAL;

	if (flags & ~FAN_ALL_MARK_FLAGS)
		return -EINVAL;
	switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) {
	case FAN_MARK_ADD:
	case FAN_MARK_REMOVE:
	case FAN_MARK_FLUSH:
		break;
	default:
		return -EINVAL;
	}

	if (mask & FAN_ONDIR) {
		flags |= FAN_MARK_ONDIR;
		mask &= ~FAN_ONDIR;
	}

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	if (mask & ~(FAN_ALL_EVENTS | FAN_ALL_PERM_EVENTS | FAN_EVENT_ON_CHILD))
#else
	if (mask & ~(FAN_ALL_EVENTS | FAN_EVENT_ON_CHILD))
#endif
		return -EINVAL;

	filp = fget_light(fanotify_fd, &fput_needed);
	if (unlikely(!filp))
		return -EBADF;

	/* verify that this is indeed an fanotify instance */
	ret = -EINVAL;
	if (unlikely(filp->f_op != &fanotify_fops))
		goto fput_and_out;
	group = filp->private_data;

	/*
	 * group->priority == FS_PRIO_0 == FAN_CLASS_NOTIF.  These are not
	 * allowed to set permissions events.
	 */
	ret = -EINVAL;
	if (mask & FAN_ALL_PERM_EVENTS &&
	    group->priority == FS_PRIO_0)
		goto fput_and_out;

	ret = fanotify_find_path(dfd, pathname, &path, flags);
	if (ret)
		goto fput_and_out;

	/* inode held in place by reference to path; group by fget on fd */
	if (!(flags & FAN_MARK_MOUNT))
		inode = path.dentry->d_inode;
	else
		mnt = path.mnt;

	/* create/update an inode mark */
	switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) {
	case FAN_MARK_ADD:
		if (flags & FAN_MARK_MOUNT)
			ret = fanotify_add_vfsmount_mark(group, mnt, mask, flags);
		else
			ret = fanotify_add_inode_mark(group, inode, mask, flags);
		break;
	case FAN_MARK_REMOVE:
		if (flags & FAN_MARK_MOUNT)
			ret = fanotify_remove_vfsmount_mark(group, mnt, mask, flags);
		else
			ret = fanotify_remove_inode_mark(group, inode, mask, flags);
		break;
	case FAN_MARK_FLUSH:
		if (flags & FAN_MARK_MOUNT)
			fsnotify_clear_vfsmount_marks_by_group(group);
		else
			fsnotify_clear_inode_marks_by_group(group);
		break;
	default:
		ret = -EINVAL;
	}

	path_put(&path);
fput_and_out:
	fput_light(filp, fput_needed);
	return ret;
}

#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
asmlinkage long SyS_fanotify_mark(long fanotify_fd, long flags, __u64 mask,
				  long dfd, long pathname)
{
	return SYSC_fanotify_mark((int) fanotify_fd, (unsigned int) flags,
				  mask, (int) dfd,
				  (const char  __user *) pathname);
}
SYSCALL_ALIAS(sys_fanotify_mark, SyS_fanotify_mark);
#endif

/*
 * fanotify_user_setup - Our initialization function.  Note that we cannot return
 * error because we have compiled-in VFS hooks.  So an (unlikely) failure here
 * must result in panic().
 */
static int __init fanotify_user_setup(void)
{
	fanotify_mark_cache = KMEM_CACHE(fsnotify_mark, SLAB_PANIC);
	fanotify_response_event_cache = KMEM_CACHE(fanotify_response_event,
						   SLAB_PANIC);

	return 0;
}
device_initcall(fanotify_user_setup);