/*
 *  linux/fs/fcntl.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */
#include <linux/syscalls.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/capability.h>
#include <linux/dnotify.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pipe_fs_i.h>
#include <linux/security.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/rcupdate.h>
#include <linux/pid_namespace.h>
#include <linux/user_namespace.h>

#include <asm/poll.h>
#include <asm/siginfo.h>
#include <asm/uaccess.h>
void set_close_on_exec(unsigned int fd, int flag)
{
        struct files_struct *files = current->files;
        struct fdtable *fdt;

        spin_lock(&files->file_lock);
        fdt = files_fdtable(files);
        if (flag)
                __set_close_on_exec(fd, fdt);
        else
                __clear_close_on_exec(fd, fdt);
        spin_unlock(&files->file_lock);
}
static bool get_close_on_exec(unsigned int fd)
{
        struct files_struct *files = current->files;
        struct fdtable *fdt;
        bool res;

        rcu_read_lock();
        fdt = files_fdtable(files);
        res = close_on_exec(fd, fdt);
        rcu_read_unlock();
        return res;
}
SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
{
        int err = -EBADF;
        struct file *file, *tofree;
        struct files_struct *files = current->files;
        struct fdtable *fdt;

        if ((flags & ~O_CLOEXEC) != 0)
                return -EINVAL;

        if (unlikely(oldfd == newfd))
                return -EINVAL;

        spin_lock(&files->file_lock);
        err = expand_files(files, newfd);
        file = fcheck(oldfd);
        if (unlikely(!file))
                goto Ebadf;
        if (unlikely(err < 0)) {
                if (err == -EMFILE)
                        goto Ebadf;
                goto out_unlock;
        }
        /*
         * We need to detect attempts to do dup2() over allocated but still
         * not finished descriptor.  NB: OpenBSD avoids that at the price of
         * extra work in their equivalent of fget() - they insert struct
         * file immediately after grabbing descriptor, mark it larval if
         * more work (e.g. actual opening) is needed and make sure that
         * fget() treats larval files as absent.  Potentially interesting,
         * but while extra work in fget() is trivial, locking implications
         * and amount of surgery on open()-related paths in VFS are not.
         * FreeBSD fails with -EBADF in the same situation, NetBSD "solution"
         * deadlocks in rather amusing ways, AFAICS.  All of that is out of
         * scope of POSIX or SUS, since neither considers shared descriptor
         * tables and this condition does not arise without those.
         */
        err = -EBUSY;
        fdt = files_fdtable(files);
        tofree = fdt->fd[newfd];
        if (!tofree && fd_is_open(newfd, fdt))
                goto out_unlock;
        get_file(file);
        rcu_assign_pointer(fdt->fd[newfd], file);
        __set_open_fd(newfd, fdt);
        if (flags & O_CLOEXEC)
                __set_close_on_exec(newfd, fdt);
        else
                __clear_close_on_exec(newfd, fdt);
        spin_unlock(&files->file_lock);

        if (tofree)
                filp_close(tofree, files);

        return newfd;

Ebadf:
        err = -EBADF;
out_unlock:
        spin_unlock(&files->file_lock);
        return err;
}
SYSCALL_DEFINE2(dup2, unsigned int, oldfd, unsigned int, newfd)
{
        if (unlikely(newfd == oldfd)) { /* corner case */
                struct files_struct *files = current->files;
                int retval = oldfd;

                rcu_read_lock();
                if (!fcheck_files(files, oldfd))
                        retval = -EBADF;
                rcu_read_unlock();
                return retval;
        }
        return sys_dup3(oldfd, newfd, 0);
}
SYSCALL_DEFINE1(dup, unsigned int, fildes)
{
        int ret = -EBADF;
        struct file *file = fget_raw(fildes);

        if (file) {
                ret = get_unused_fd();
                if (ret >= 0)
                        fd_install(ret, file);
                else
                        fput(file);
        }
        return ret;
}
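/*
 * Illustrative userspace sketch (not part of the kernel build), assuming a
 * glibc toolchain with _GNU_SOURCE so that dup3(2) is declared: dup2() on
 * identical descriptors only validates oldfd, while dup3() rejects
 * oldfd == newfd and accepts only O_CLOEXEC in flags, matching the checks
 * in sys_dup3()/sys_dup2() above.
 */
#if 0   /* example only, not compiled */
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/tmp/demo.txt", O_CREAT | O_WRONLY, 0600);
        if (fd < 0)
                return 1;

        /* dup2() over the same descriptor only checks that it is open. */
        if (dup2(fd, fd) == fd)
                printf("dup2(fd, fd) is a validated no-op\n");

        /* dup3() refuses identical descriptors with EINVAL. */
        if (dup3(fd, fd, O_CLOEXEC) < 0 && errno == EINVAL)
                printf("dup3(fd, fd, ...) fails as expected\n");

        /* Duplicate onto descriptor 10 and mark the copy close-on-exec. */
        if (dup3(fd, 10, O_CLOEXEC) == 10)
                printf("fd duplicated to 10 with the close-on-exec flag set\n");

        close(10);
        close(fd);
        return 0;
}
#endif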
#define SETFL_MASK (O_APPEND | O_NONBLOCK | O_NDELAY | O_DIRECT | O_NOATIME)
static int setfl(int fd, struct file *filp, unsigned long arg)
{
        struct inode *inode = filp->f_path.dentry->d_inode;
        int error = 0;

        /*
         * O_APPEND cannot be cleared if the file is marked as append-only
         * and the file is open for write.
         */
        if (((arg ^ filp->f_flags) & O_APPEND) && IS_APPEND(inode))
                return -EPERM;

        /* O_NOATIME can only be set by the owner or superuser */
        if ((arg & O_NOATIME) && !(filp->f_flags & O_NOATIME))
                if (!inode_owner_or_capable(inode))
                        return -EPERM;

        /* required for strict SunOS emulation */
        if (O_NONBLOCK != O_NDELAY)
                if (arg & O_NDELAY)
                        arg |= O_NONBLOCK;

        if (arg & O_DIRECT) {
                if (!filp->f_mapping || !filp->f_mapping->a_ops ||
                    !filp->f_mapping->a_ops->direct_IO)
                        return -EINVAL;
        }

        if (filp->f_op && filp->f_op->check_flags)
                error = filp->f_op->check_flags(arg);
        if (error)
                return error;

        /*
         * ->fasync() is responsible for setting the FASYNC bit.
         */
        if (((arg ^ filp->f_flags) & FASYNC) && filp->f_op &&
            filp->f_op->fasync) {
                error = filp->f_op->fasync(fd, filp, (arg & FASYNC) != 0);
                if (error < 0)
                        goto out;
                if (error > 0)
                        error = 0;
        }
        spin_lock(&filp->f_lock);
        filp->f_flags = (arg & SETFL_MASK) | (filp->f_flags & ~SETFL_MASK);
        spin_unlock(&filp->f_lock);

out:
        return error;
}
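/*
 * Illustrative userspace sketch (not part of the kernel build): F_SETFL only
 * changes the bits in SETFL_MASK (O_APPEND, O_NONBLOCK, O_NDELAY, O_DIRECT,
 * O_NOATIME); the access mode and creation flags passed here are ignored, so
 * the usual read-modify-write with F_GETFL is safe.
 */
#if 0   /* example only, not compiled */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Toggle O_NONBLOCK on an already-open descriptor via F_GETFL/F_SETFL. */
static int set_nonblock(int fd, int on)
{
        int flags = fcntl(fd, F_GETFL);

        if (flags < 0)
                return -1;
        if (on)
                flags |= O_NONBLOCK;
        else
                flags &= ~O_NONBLOCK;
        return fcntl(fd, F_SETFL, flags);
}

int main(void)
{
        if (set_nonblock(STDIN_FILENO, 1) == 0)
                printf("stdin is now non-blocking\n");
        return 0;
}
#endif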
static void f_modown(struct file *filp, struct pid *pid, enum pid_type type,
                     int force)
{
        write_lock_irq(&filp->f_owner.lock);
        if (force || !filp->f_owner.pid) {
                put_pid(filp->f_owner.pid);
                filp->f_owner.pid = get_pid(pid);
                filp->f_owner.pid_type = type;

                if (pid) {
                        const struct cred *cred = current_cred();
                        filp->f_owner.uid = cred->uid;
                        filp->f_owner.euid = cred->euid;
                }
        }
        write_unlock_irq(&filp->f_owner.lock);
}
int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
               int force)
{
        int err;

        err = security_file_set_fowner(filp);
        if (err)
                return err;

        f_modown(filp, pid, type, force);
        return 0;
}
EXPORT_SYMBOL(__f_setown);
int f_setown(struct file *filp, unsigned long arg, int force)
{
        enum pid_type type;
        struct pid *pid;
        int who = arg;
        int result;

        type = PIDTYPE_PID;
        if (who < 0) {
                type = PIDTYPE_PGID;
                who = -who;
        }
        rcu_read_lock();
        pid = find_vpid(who);
        result = __f_setown(filp, pid, type, force);
        rcu_read_unlock();
        return result;
}
EXPORT_SYMBOL(f_setown);
void f_delown(struct file *filp)
{
        f_modown(filp, NULL, PIDTYPE_PID, 1);
}
pid_t f_getown(struct file *filp)
{
        pid_t pid;

        read_lock(&filp->f_owner.lock);
        pid = pid_vnr(filp->f_owner.pid);
        if (filp->f_owner.pid_type == PIDTYPE_PGID)
                pid = -pid;
        read_unlock(&filp->f_owner.lock);
        return pid;
}
static int f_setown_ex(struct file *filp, unsigned long arg)
{
        struct f_owner_ex * __user owner_p = (void * __user)arg;
        struct f_owner_ex owner;
        struct pid *pid;
        int type;
        int ret;

        ret = copy_from_user(&owner, owner_p, sizeof(owner));
        if (ret)
                return -EFAULT;

        switch (owner.type) {
        case F_OWNER_TID:
                type = PIDTYPE_MAX;
                break;

        case F_OWNER_PID:
                type = PIDTYPE_PID;
                break;

        case F_OWNER_PGRP:
                type = PIDTYPE_PGID;
                break;

        default:
                return -EINVAL;
        }

        rcu_read_lock();
        pid = find_vpid(owner.pid);
        if (owner.pid && !pid)
                ret = -ESRCH;
        else
                ret = __f_setown(filp, pid, type, 1);
        rcu_read_unlock();

        return ret;
}
static int f_getown_ex(struct file *filp, unsigned long arg)
{
        struct f_owner_ex * __user owner_p = (void * __user)arg;
        struct f_owner_ex owner;
        int ret = 0;

        read_lock(&filp->f_owner.lock);
        owner.pid = pid_vnr(filp->f_owner.pid);
        switch (filp->f_owner.pid_type) {
        case PIDTYPE_MAX:
                owner.type = F_OWNER_TID;
                break;

        case PIDTYPE_PID:
                owner.type = F_OWNER_PID;
                break;

        case PIDTYPE_PGID:
                owner.type = F_OWNER_PGRP;
                break;

        default:
                ret = -EINVAL;
                break;
        }
        read_unlock(&filp->f_owner.lock);

        if (!ret) {
                ret = copy_to_user(owner_p, &owner, sizeof(owner));
                if (ret)
                        ret = -EFAULT;
        }
        return ret;
}
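/*
 * Illustrative userspace sketch (not part of the kernel build), assuming
 * _GNU_SOURCE so that F_SETOWN_EX/F_GETOWN_EX and struct f_owner_ex are
 * visible: the extended interface lets a caller direct SIGIO/SIGURG at a
 * specific thread, process, or process group instead of the plain
 * F_SETOWN pid/-pgrp encoding handled by f_setown() above.
 */
#if 0   /* example only, not compiled */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        struct f_owner_ex owner;
        struct f_owner_ex cur;

        owner.type = F_OWNER_PID;       /* F_OWNER_TID / F_OWNER_PGRP also valid */
        owner.pid  = getpid();

        /* Route I/O signals for stdin to this process specifically. */
        if (fcntl(STDIN_FILENO, F_SETOWN_EX, &owner) < 0) {
                perror("F_SETOWN_EX");
                return 1;
        }

        /* Read the setting back. */
        if (fcntl(STDIN_FILENO, F_GETOWN_EX, &cur) == 0)
                printf("owner type=%d pid=%d\n", cur.type, cur.pid);
        return 0;
}
#endif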
#ifdef CONFIG_CHECKPOINT_RESTORE
static int f_getowner_uids(struct file *filp, unsigned long arg)
{
        struct user_namespace *user_ns = current_user_ns();
        uid_t * __user dst = (void * __user)arg;
        uid_t src[2];
        int err;

        read_lock(&filp->f_owner.lock);
        src[0] = from_kuid(user_ns, filp->f_owner.uid);
        src[1] = from_kuid(user_ns, filp->f_owner.euid);
        read_unlock(&filp->f_owner.lock);

        err  = put_user(src[0], &dst[0]);
        err |= put_user(src[1], &dst[1]);

        return err;
}
#else
static int f_getowner_uids(struct file *filp, unsigned long arg)
{
        return -EINVAL;
}
#endif
static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
                     struct file *filp)
{
        long err = -EINVAL;

        switch (cmd) {
        case F_DUPFD:
        case F_DUPFD_CLOEXEC:
                if (arg >= rlimit(RLIMIT_NOFILE))
                        break;
                err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
                if (err >= 0) {
                        get_file(filp);
                        fd_install(err, filp);
                }
                break;
        case F_GETFD:
                err = get_close_on_exec(fd) ? FD_CLOEXEC : 0;
                break;
        case F_SETFD:
                err = 0;
                set_close_on_exec(fd, arg & FD_CLOEXEC);
                break;
        case F_GETFL:
                err = filp->f_flags;
                break;
        case F_SETFL:
                err = setfl(fd, filp, arg);
                break;
        case F_GETLK:
                err = fcntl_getlk(filp, (struct flock __user *) arg);
                break;
        case F_SETLK:
        case F_SETLKW:
                err = fcntl_setlk(fd, filp, cmd, (struct flock __user *) arg);
                break;
        case F_GETOWN:
                /*
                 * XXX If f_owner is a process group, the
                 * negative return value will get converted
                 * into an error.  Oops.  If we keep the
                 * current syscall conventions, the only way
                 * to fix this will be in libc.
                 */
                err = f_getown(filp);
                force_successful_syscall_return();
                break;
        case F_SETOWN:
                err = f_setown(filp, arg, 1);
                break;
        case F_GETOWN_EX:
                err = f_getown_ex(filp, arg);
                break;
        case F_SETOWN_EX:
                err = f_setown_ex(filp, arg);
                break;
        case F_GETOWNER_UIDS:
                err = f_getowner_uids(filp, arg);
                break;
        case F_GETSIG:
                err = filp->f_owner.signum;
                break;
        case F_SETSIG:
                /* arg == 0 restores default behaviour. */
                if (!valid_signal(arg))
                        break;
                err = 0;
                filp->f_owner.signum = arg;
                break;
        case F_GETLEASE:
                err = fcntl_getlease(filp);
                break;
        case F_SETLEASE:
                err = fcntl_setlease(fd, filp, arg);
                break;
        case F_NOTIFY:
                err = fcntl_dirnotify(fd, filp, arg);
                break;
        case F_SETPIPE_SZ:
        case F_GETPIPE_SZ:
                err = pipe_fcntl(filp, cmd, arg);
                break;
        default:
                break;
        }
        return err;
}
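/*
 * Illustrative userspace sketch (not part of the kernel build): F_DUPFD and
 * F_DUPFD_CLOEXEC allocate the lowest free descriptor greater than or equal
 * to arg (the alloc_fd() call above), and the CLOEXEC variant additionally
 * sets the close-on-exec flag on the copy.
 */
#if 0   /* example only, not compiled */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        /* Duplicate stdin onto the lowest free descriptor >= 100,
         * with the close-on-exec flag already set on the copy. */
        int copy = fcntl(STDIN_FILENO, F_DUPFD_CLOEXEC, 100);
        int fdflags;

        if (copy < 0) {
                perror("F_DUPFD_CLOEXEC");
                return 1;
        }

        fdflags = fcntl(copy, F_GETFD);
        printf("new fd %d, FD_CLOEXEC %s\n", copy,
               (fdflags & FD_CLOEXEC) ? "set" : "clear");

        close(copy);
        return 0;
}
#endif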
static int check_fcntl_cmd(unsigned cmd)
{
        switch (cmd) {
        case F_DUPFD:
        case F_DUPFD_CLOEXEC:
        case F_GETFD:
        case F_SETFD:
        case F_GETFL:
                return 1;
        }
        return 0;
}
SYSCALL_DEFINE3(fcntl, unsigned int, fd, unsigned int, cmd, unsigned long, arg)
{
        struct file *filp;
        int fput_needed;
        long err = -EBADF;

        filp = fget_raw_light(fd, &fput_needed);
        if (!filp)
                goto out;

        if (unlikely(filp->f_mode & FMODE_PATH)) {
                if (!check_fcntl_cmd(cmd))
                        goto out1;
        }

        err = security_file_fcntl(filp, cmd, arg);
        if (!err)
                err = do_fcntl(fd, cmd, arg, filp);

out1:
        fput_light(filp, fput_needed);
out:
        return err;
}
#if BITS_PER_LONG == 32
SYSCALL_DEFINE3(fcntl64, unsigned int, fd, unsigned int, cmd,
                unsigned long, arg)
{
        struct file *filp;
        int fput_needed;
        long err = -EBADF;

        filp = fget_raw_light(fd, &fput_needed);
        if (!filp)
                goto out;

        if (unlikely(filp->f_mode & FMODE_PATH)) {
                if (!check_fcntl_cmd(cmd))
                        goto out1;
        }

        err = security_file_fcntl(filp, cmd, arg);
        if (err)
                goto out1;

        switch (cmd) {
        case F_GETLK64:
                err = fcntl_getlk64(filp, (struct flock64 __user *) arg);
                break;
        case F_SETLK64:
        case F_SETLKW64:
                err = fcntl_setlk64(fd, filp, cmd,
                                (struct flock64 __user *) arg);
                break;
        default:
                err = do_fcntl(fd, cmd, arg, filp);
                break;
        }
out1:
        fput_light(filp, fput_needed);
out:
        return err;
}
#endif
/* Table to convert sigio signal codes into poll band bitmaps */

static const long band_table[NSIGPOLL] = {
        POLLIN | POLLRDNORM,                    /* POLL_IN */
        POLLOUT | POLLWRNORM | POLLWRBAND,      /* POLL_OUT */
        POLLIN | POLLRDNORM | POLLMSG,          /* POLL_MSG */
        POLLERR,                                /* POLL_ERR */
        POLLPRI | POLLRDBAND,                   /* POLL_PRI */
        POLLHUP | POLLERR                       /* POLL_HUP */
};
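/*
 * Illustrative userspace sketch (not part of the kernel build), assuming
 * _GNU_SOURCE for F_SETSIG and O_ASYNC: only when a queued (real-time)
 * signal is chosen with F_SETSIG does send_sigio_to_task() below deliver a
 * full siginfo, with si_fd naming the descriptor and si_band filled from
 * band_table[]; the default SIGIO carries no extra information.
 */
#if 0   /* example only, not compiled */
#define _GNU_SOURCE
#include <fcntl.h>
#include <poll.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void io_handler(int sig, siginfo_t *si, void *ctx)
{
        (void)sig;
        (void)ctx;
        /* si_fd and si_band correspond to fa_fd and band_table[] above. */
        if (si->si_band & (POLLIN | POLLRDNORM))
                write(STDOUT_FILENO, "readable\n", 9);
}

int main(void)
{
        struct sigaction sa;

        sa.sa_sigaction = io_handler;
        sa.sa_flags = SA_SIGINFO;
        sigemptyset(&sa.sa_mask);
        sigaction(SIGRTMIN, &sa, NULL);

        /* Direct I/O signals for stdin at this process, pick a queued
         * real-time signal, then enable asynchronous notification. */
        fcntl(STDIN_FILENO, F_SETOWN, getpid());
        fcntl(STDIN_FILENO, F_SETSIG, SIGRTMIN);
        fcntl(STDIN_FILENO, F_SETFL,
              fcntl(STDIN_FILENO, F_GETFL) | O_ASYNC);

        pause();        /* wait for the first notification */
        return 0;
}
#endif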
static inline int sigio_perm(struct task_struct *p,
                             struct fown_struct *fown, int sig)
{
        const struct cred *cred;
        int ret;

        rcu_read_lock();
        cred = __task_cred(p);
        ret = ((uid_eq(fown->euid, GLOBAL_ROOT_UID) ||
                uid_eq(fown->euid, cred->suid) || uid_eq(fown->euid, cred->uid) ||
                uid_eq(fown->uid,  cred->suid) || uid_eq(fown->uid,  cred->uid)) &&
               !security_file_send_sigiotask(p, fown, sig));
        rcu_read_unlock();
        return ret;
}
static void send_sigio_to_task(struct task_struct *p,
                               struct fown_struct *fown,
                               int fd, int reason, int group)
{
        /*
         * F_SETSIG can change ->signum lockless in parallel, make
         * sure we read it once and use the same value throughout.
         */
        int signum = ACCESS_ONCE(fown->signum);

        if (!sigio_perm(p, fown, signum))
                return;

        switch (signum) {
                siginfo_t si;
        default:
                /* Queue a rt signal with the appropriate fd as its
                   value.  We use SI_SIGIO as the source, not
                   SI_KERNEL, since kernel signals always get
                   delivered even if we can't queue.  Failure to
                   queue in this case _should_ be reported; we fall
                   back to SIGIO in that case. --sct */
                si.si_signo = signum;
                si.si_errno = 0;
                si.si_code  = reason;
                /* Make sure we are called with one of the POLL_*
                   reasons, otherwise we could leak kernel stack into
                   userspace.  */
                BUG_ON((reason & __SI_MASK) != __SI_POLL);
                if (reason - POLL_IN >= NSIGPOLL)
                        si.si_band = ~0L;
                else
                        si.si_band = band_table[reason - POLL_IN];
                si.si_fd = fd;
                if (!do_send_sig_info(signum, &si, p, group))
                        break;
        /* fall-through: fall back on the old plain SIGIO signal */
        case 0:
                do_send_sig_info(SIGIO, SEND_SIG_PRIV, p, group);
        }
}
void send_sigio(struct fown_struct *fown, int fd, int band)
{
        struct task_struct *p;
        enum pid_type type;
        struct pid *pid;
        int group = 1;

        read_lock(&fown->lock);

        type = fown->pid_type;
        if (type == PIDTYPE_MAX) {
                group = 0;
                type = PIDTYPE_PID;
        }

        pid = fown->pid;
        if (!pid)
                goto out_unlock_fown;

        read_lock(&tasklist_lock);
        do_each_pid_task(pid, type, p) {
                send_sigio_to_task(p, fown, fd, band, group);
        } while_each_pid_task(pid, type, p);
        read_unlock(&tasklist_lock);
out_unlock_fown:
        read_unlock(&fown->lock);
}
static void send_sigurg_to_task(struct task_struct *p,
                                struct fown_struct *fown, int group)
{
        if (sigio_perm(p, fown, SIGURG))
                do_send_sig_info(SIGURG, SEND_SIG_PRIV, p, group);
}
int send_sigurg(struct fown_struct *fown)
{
        struct task_struct *p;
        enum pid_type type;
        struct pid *pid;
        int group = 1;
        int ret = 0;

        read_lock(&fown->lock);

        type = fown->pid_type;
        if (type == PIDTYPE_MAX) {
                group = 0;
                type = PIDTYPE_PID;
        }

        pid = fown->pid;
        if (!pid)
                goto out_unlock_fown;

        ret = 1;

        read_lock(&tasklist_lock);
        do_each_pid_task(pid, type, p) {
                send_sigurg_to_task(p, fown, group);
        } while_each_pid_task(pid, type, p);
        read_unlock(&tasklist_lock);
out_unlock_fown:
        read_unlock(&fown->lock);
        return ret;
}
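/*
 * Illustrative userspace counterpart (not part of the kernel build): the
 * socket layer calls send_sigurg() when TCP urgent data arrives, and the
 * signal goes to whatever owner was registered with F_SETOWN; unlike SIGIO
 * it needs no F_SETSIG setup.  watch_oob() and the `sock` descriptor are
 * hypothetical names used only for this sketch.
 */
#if 0   /* example only, not compiled */
#include <fcntl.h>
#include <signal.h>
#include <sys/socket.h>
#include <unistd.h>

static void urg_handler(int sig)
{
        (void)sig;      /* out-of-band data is pending on the socket */
}

/* After connecting a TCP socket `sock`, ask the kernel to deliver SIGURG
 * to this process when urgent (out-of-band) data arrives; the byte itself
 * is then read with recv(sock, &c, 1, MSG_OOB). */
static void watch_oob(int sock)
{
        signal(SIGURG, urg_handler);
        fcntl(sock, F_SETOWN, getpid());
}
#endif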
static DEFINE_SPINLOCK(fasync_lock);
static struct kmem_cache *fasync_cache __read_mostly;
static void fasync_free_rcu(struct rcu_head *head)
{
        kmem_cache_free(fasync_cache,
                        container_of(head, struct fasync_struct, fa_rcu));
}
/*
 * Remove a fasync entry. If successfully removed, return
 * positive and clear the FASYNC flag. If no entry exists,
 * do nothing and return 0.
 *
 * NOTE! It is very important that the FASYNC flag always
 * match the state "is the filp on a fasync list".
 */
int fasync_remove_entry(struct file *filp, struct fasync_struct **fapp)
{
        struct fasync_struct *fa, **fp;
        int result = 0;

        spin_lock(&filp->f_lock);
        spin_lock(&fasync_lock);
        for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) {
                if (fa->fa_file != filp)
                        continue;

                spin_lock_irq(&fa->fa_lock);
                fa->fa_file = NULL;
                spin_unlock_irq(&fa->fa_lock);

                *fp = fa->fa_next;
                call_rcu(&fa->fa_rcu, fasync_free_rcu);
                filp->f_flags &= ~FASYNC;
                result = 1;
                break;
        }
        spin_unlock(&fasync_lock);
        spin_unlock(&filp->f_lock);
        return result;
}
struct fasync_struct *fasync_alloc(void)
{
        return kmem_cache_alloc(fasync_cache, GFP_KERNEL);
}
/*
 * NOTE! This can be used only for unused fasync entries:
 * entries that actually got inserted on the fasync list
 * need to be released by rcu - see fasync_remove_entry.
 */
void fasync_free(struct fasync_struct *new)
{
        kmem_cache_free(fasync_cache, new);
}
/*
 * Insert a new entry into the fasync list.  Return the pointer to the
 * old one if we didn't use the new one.
 *
 * NOTE! It is very important that the FASYNC flag always
 * match the state "is the filp on a fasync list".
 */
struct fasync_struct *fasync_insert_entry(int fd, struct file *filp, struct fasync_struct **fapp, struct fasync_struct *new)
{
        struct fasync_struct *fa, **fp;

        spin_lock(&filp->f_lock);
        spin_lock(&fasync_lock);
        for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) {
                if (fa->fa_file != filp)
                        continue;

                spin_lock_irq(&fa->fa_lock);
                fa->fa_fd = fd;
                spin_unlock_irq(&fa->fa_lock);
                goto out;
        }

        spin_lock_init(&new->fa_lock);
        new->magic = FASYNC_MAGIC;
        new->fa_file = filp;
        new->fa_fd = fd;
        new->fa_next = *fapp;
        rcu_assign_pointer(*fapp, new);
        filp->f_flags |= FASYNC;

out:
        spin_unlock(&fasync_lock);
        spin_unlock(&filp->f_lock);
        return fa;
}
/*
 * Add a fasync entry. Return negative on error, positive if
 * added, and zero if did nothing but change an existing one.
 */
static int fasync_add_entry(int fd, struct file *filp, struct fasync_struct **fapp)
{
        struct fasync_struct *new;

        new = fasync_alloc();
        if (!new)
                return -ENOMEM;

        /*
         * fasync_insert_entry() returns the old (update) entry if
         * it existed.
         *
         * So free the (unused) new entry and return 0 to let the
         * caller know that we didn't add any new fasync entries.
         */
        if (fasync_insert_entry(fd, filp, fapp, new)) {
                fasync_free(new);
                return 0;
        }

        return 1;
}
/*
 * fasync_helper() is used by almost all character device drivers
 * to set up the fasync queue, and for regular files by the file
 * lease code. It returns negative on error, 0 if it did no changes
 * and positive if it added/deleted the entry.
 */
int fasync_helper(int fd, struct file *filp, int on, struct fasync_struct **fapp)
{
        if (!on)
                return fasync_remove_entry(filp, fapp);
        return fasync_add_entry(fd, filp, fapp);
}

EXPORT_SYMBOL(fasync_helper);
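/*
 * Illustrative driver-side sketch (not part of this file) of the pattern the
 * comment above describes; my_async_queue, my_fasync() and my_data_ready()
 * are hypothetical names: the ->fasync file operation forwards to
 * fasync_helper(), and the driver calls kill_fasync() when new data is
 * ready, which ends up in kill_fasync_rcu()/send_sigio() below.
 */
#if 0   /* example only, not compiled */
#include <linux/fs.h>
#include <linux/module.h>

/* Head of this (hypothetical) device's fasync list. */
static struct fasync_struct *my_async_queue;

/* ->fasync file operation: add or remove the caller according to `on`. */
static int my_fasync(int fd, struct file *filp, int on)
{
        return fasync_helper(fd, filp, on, &my_async_queue);
}

/* Called by the driver when new data becomes readable: raises SIGIO
 * (band POLL_IN) for every registered owner. */
static void my_data_ready(void)
{
        kill_fasync(&my_async_queue, SIGIO, POLL_IN);
}

static const struct file_operations my_fops = {
        .owner  = THIS_MODULE,
        .fasync = my_fasync,
        /* .read, .poll, ... */
};
#endif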
/*
 * rcu_read_lock() is held
 */
static void kill_fasync_rcu(struct fasync_struct *fa, int sig, int band)
{
        while (fa) {
                struct fown_struct *fown;
                unsigned long flags;

                if (fa->magic != FASYNC_MAGIC) {
                        printk(KERN_ERR "kill_fasync: bad magic number in "
                               "fasync_struct!\n");
                        return;
                }
                spin_lock_irqsave(&fa->fa_lock, flags);
                if (fa->fa_file) {
                        fown = &fa->fa_file->f_owner;
                        /* Don't send SIGURG to processes which have not set a
                           queued signum: SIGURG has its own default signalling
                           mechanism. */
                        if (!(sig == SIGURG && fown->signum == 0))
                                send_sigio(fown, fa->fa_fd, band);
                }
                spin_unlock_irqrestore(&fa->fa_lock, flags);
                fa = rcu_dereference(fa->fa_next);
        }
}
void kill_fasync(struct fasync_struct **fp, int sig, int band)
{
        /* First a quick test without locking: usually
         * the list is empty.
         */
        if (*fp) {
                rcu_read_lock();
                kill_fasync_rcu(rcu_dereference(*fp), sig, band);
                rcu_read_unlock();
        }
}
EXPORT_SYMBOL(kill_fasync);
static int __init fcntl_init(void)
{
        /*
         * Please add new bits here to ensure allocation uniqueness.
         * Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY
         * is defined as O_NONBLOCK on some platforms and not on others.
         */
        BUILD_BUG_ON(19 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
                O_RDONLY        | O_WRONLY      | O_RDWR        |
                O_CREAT         | O_EXCL        | O_NOCTTY      |
                O_TRUNC         | O_APPEND      | /* O_NONBLOCK | */
                __O_SYNC        | O_DSYNC       | FASYNC        |
                O_DIRECT        | O_LARGEFILE   | O_DIRECTORY   |
                O_NOFOLLOW      | O_NOATIME     | O_CLOEXEC     |
                __FMODE_EXEC    | O_PATH
                ));

        fasync_cache = kmem_cache_create("fasync_cache",
                sizeof(struct fasync_struct), 0, SLAB_PANIC, NULL);
        return 0;
}

module_init(fcntl_init)