/*
 * Copyright (C) 1991, 1992  Linus Torvalds
 */
#include <linux/export.h>
#include <linux/utsname.h>
#include <linux/mman.h>
#include <linux/reboot.h>
#include <linux/prctl.h>
#include <linux/highuid.h>
#include <linux/kmod.h>
#include <linux/perf_event.h>
#include <linux/resource.h>
#include <linux/kernel.h>
#include <linux/kexec.h>
#include <linux/workqueue.h>
#include <linux/capability.h>
#include <linux/device.h>
#include <linux/key.h>
#include <linux/times.h>
#include <linux/posix-timers.h>
#include <linux/security.h>
#include <linux/dcookies.h>
#include <linux/suspend.h>
#include <linux/tty.h>
#include <linux/signal.h>
#include <linux/cn_proc.h>
#include <linux/getcpu.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/seccomp.h>
#include <linux/cpu.h>
#include <linux/personality.h>
#include <linux/ptrace.h>
#include <linux/fs_struct.h>
#include <linux/gfp.h>
#include <linux/syscore_ops.h>
#include <linux/version.h>
#include <linux/ctype.h>
#include <linux/compat.h>
#include <linux/syscalls.h>
#include <linux/kprobes.h>
#include <linux/user_namespace.h>

#include <linux/kmsg_dump.h>
/* Move somewhere else to avoid recompiling? */
#include <generated/utsrelease.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#ifndef SET_UNALIGN_CTL
# define SET_UNALIGN_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_UNALIGN_CTL
# define GET_UNALIGN_CTL(a,b)	(-EINVAL)
#endif
#ifndef SET_FPEMU_CTL
# define SET_FPEMU_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_FPEMU_CTL
# define GET_FPEMU_CTL(a,b)	(-EINVAL)
#endif
#ifndef SET_FPEXC_CTL
# define SET_FPEXC_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_FPEXC_CTL
# define GET_FPEXC_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_ENDIAN
# define GET_ENDIAN(a,b)	(-EINVAL)
#endif
#ifndef SET_ENDIAN
# define SET_ENDIAN(a,b)	(-EINVAL)
#endif
#ifndef GET_TSC_CTL
# define GET_TSC_CTL(a)		(-EINVAL)
#endif
#ifndef SET_TSC_CTL
# define SET_TSC_CTL(a)		(-EINVAL)
#endif
/*
 * this is where the system-wide overflow UID and GID are defined, for
 * architectures that now have 32-bit UID/GID but didn't in the past
 */

int overflowuid = DEFAULT_OVERFLOWUID;
int overflowgid = DEFAULT_OVERFLOWGID;

EXPORT_SYMBOL(overflowuid);
EXPORT_SYMBOL(overflowgid);

/*
 * the same as above, but for filesystems which can only store a 16-bit
 * UID and GID. as such, this is needed on all architectures
 */

int fs_overflowuid = DEFAULT_FS_OVERFLOWUID;
int fs_overflowgid = DEFAULT_FS_OVERFLOWUID;

EXPORT_SYMBOL(fs_overflowuid);
EXPORT_SYMBOL(fs_overflowgid);
/*
 * this indicates whether you can reboot with ctrl-alt-del: the default is yes
 */

int C_A_D = 1;
struct pid *cad_pid;
EXPORT_SYMBOL(cad_pid);

/*
 * If set, this is used for preparing the system to power off.
 */

void (*pm_power_off_prepare)(void);
/*
 * Returns true if current's euid is same as p's uid or euid,
 * or has CAP_SYS_NICE to p's user_ns.
 *
 * Called with rcu_read_lock, creds are safe
 */
static bool set_one_prio_perm(struct task_struct *p)
{
	const struct cred *cred = current_cred(), *pcred = __task_cred(p);

	if (uid_eq(pcred->uid,  cred->euid) ||
	    uid_eq(pcred->euid, cred->euid))
		return true;
	if (ns_capable(pcred->user_ns, CAP_SYS_NICE))
		return true;
	return false;
}

/*
 * set the priority of a task
 * - the caller must hold the RCU read lock
 */
static int set_one_prio(struct task_struct *p, int niceval, int error)
{
	int no_nice;

	if (!set_one_prio_perm(p)) {
		error = -EPERM;
		goto out;
	}
	if (niceval < task_nice(p) && !can_nice(p, niceval)) {
		error = -EACCES;
		goto out;
	}
	no_nice = security_task_setnice(p, niceval);
	if (no_nice) {
		error = no_nice;
		goto out;
	}
	if (error == -ESRCH)
		error = 0;
	set_user_nice(p, niceval);
out:
	return error;
}
SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval)
{
	struct task_struct *g, *p;
	struct user_struct *user;
	const struct cred *cred = current_cred();
	int error = -EINVAL;
	struct pid *pgrp;
	kuid_t uid;

	if (which > PRIO_USER || which < PRIO_PROCESS)
		goto out;

	/* normalize: avoid signed division (rounding problems) */
	error = -ESRCH;
	if (niceval < -20)
		niceval = -20;
	if (niceval > 19)
		niceval = 19;

	rcu_read_lock();
	read_lock(&tasklist_lock);
	switch (which) {
	case PRIO_PROCESS:
		if (who)
			p = find_task_by_vpid(who);
		else
			p = current;
		if (p)
			error = set_one_prio(p, niceval, error);
		break;
	case PRIO_PGRP:
		if (who)
			pgrp = find_vpid(who);
		else
			pgrp = task_pgrp(current);
		do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
			error = set_one_prio(p, niceval, error);
		} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
		break;
	case PRIO_USER:
		uid = make_kuid(cred->user_ns, who);
		user = cred->user;
		if (!who)
			uid = cred->uid;
		else if (!uid_eq(uid, cred->uid) &&
			 !(user = find_user(uid)))
			goto out_unlock;	/* No processes for this user */

		do_each_thread(g, p) {
			if (uid_eq(task_uid(p), uid))
				error = set_one_prio(p, niceval, error);
		} while_each_thread(g, p);
		if (!uid_eq(uid, cred->uid))
			free_uid(user);		/* For find_user() */
		break;
	}
out_unlock:
	read_unlock(&tasklist_lock);
	rcu_read_unlock();
out:
	return error;
}
/*
 * Ugh. To avoid negative return values, "getpriority()" will
 * not return the normal nice-value, but a negated value that
 * has been offset by 20 (ie it returns 40..1 instead of -20..19)
 * to stay compatible.
 */
SYSCALL_DEFINE2(getpriority, int, which, int, who)
{
	struct task_struct *g, *p;
	struct user_struct *user;
	const struct cred *cred = current_cred();
	long niceval, retval = -ESRCH;
	struct pid *pgrp;
	kuid_t uid;

	if (which > PRIO_USER || which < PRIO_PROCESS)
		return -EINVAL;

	rcu_read_lock();
	read_lock(&tasklist_lock);
	switch (which) {
	case PRIO_PROCESS:
		if (who)
			p = find_task_by_vpid(who);
		else
			p = current;
		if (p) {
			niceval = 20 - task_nice(p);
			if (niceval > retval)
				retval = niceval;
		}
		break;
	case PRIO_PGRP:
		if (who)
			pgrp = find_vpid(who);
		else
			pgrp = task_pgrp(current);
		do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
			niceval = 20 - task_nice(p);
			if (niceval > retval)
				retval = niceval;
		} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
		break;
	case PRIO_USER:
		uid = make_kuid(cred->user_ns, who);
		user = cred->user;
		if (!who)
			uid = cred->uid;
		else if (!uid_eq(uid, cred->uid) &&
			 !(user = find_user(uid)))
			goto out_unlock;	/* No processes for this user */

		do_each_thread(g, p) {
			if (uid_eq(task_uid(p), uid)) {
				niceval = 20 - task_nice(p);
				if (niceval > retval)
					retval = niceval;
			}
		} while_each_thread(g, p);
		if (!uid_eq(uid, cred->uid))
			free_uid(user);		/* for find_user() */
		break;
	}
out_unlock:
	read_unlock(&tasklist_lock);
	rcu_read_unlock();

	return retval;
}
/**
 *	emergency_restart - reboot the system
 *
 *	Without shutting down any hardware or taking any locks
 *	reboot the system.  This is called when we know we are in
 *	trouble so this is our best effort to reboot.  This is
 *	safe to call in interrupt context.
 */
void emergency_restart(void)
{
	kmsg_dump(KMSG_DUMP_EMERG);
	machine_emergency_restart();
}
EXPORT_SYMBOL_GPL(emergency_restart);

void kernel_restart_prepare(char *cmd)
{
	blocking_notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd);
	system_state = SYSTEM_RESTART;
	usermodehelper_disable();
	device_shutdown();
	syscore_shutdown();
}
/**
 *	register_reboot_notifier - Register function to be called at reboot time
 *	@nb: Info about notifier function to be called
 *
 *	Registers a function with the list of functions
 *	to be called at reboot time.
 *
 *	Currently always returns zero, as blocking_notifier_chain_register()
 *	always returns zero.
 */
int register_reboot_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&reboot_notifier_list, nb);
}
EXPORT_SYMBOL(register_reboot_notifier);

/**
 *	unregister_reboot_notifier - Unregister previously registered reboot notifier
 *	@nb: Hook to be unregistered
 *
 *	Unregisters a previously registered reboot
 *	notifier function.
 *
 *	Returns zero on success, or %-ENOENT on failure.
 */
int unregister_reboot_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&reboot_notifier_list, nb);
}
EXPORT_SYMBOL(unregister_reboot_notifier);
/**
 *	kernel_restart - reboot the system
 *	@cmd: pointer to buffer containing command to execute for restart
 *		or %NULL
 *
 *	Shutdown everything and perform a clean reboot.
 *	This is not safe to call in interrupt context.
 */
void kernel_restart(char *cmd)
{
	kernel_restart_prepare(cmd);
	if (!cmd)
		printk(KERN_EMERG "Restarting system.\n");
	else
		printk(KERN_EMERG "Restarting system with command '%s'.\n", cmd);
	kmsg_dump(KMSG_DUMP_RESTART);
	machine_restart(cmd);
}
EXPORT_SYMBOL_GPL(kernel_restart);
static void kernel_shutdown_prepare(enum system_states state)
{
	blocking_notifier_call_chain(&reboot_notifier_list,
		(state == SYSTEM_HALT) ? SYS_HALT : SYS_POWER_OFF, NULL);
	system_state = state;
	usermodehelper_disable();
	device_shutdown();
}
/**
 *	kernel_halt - halt the system
 *
 *	Shutdown everything and perform a clean system halt.
 */
void kernel_halt(void)
{
	kernel_shutdown_prepare(SYSTEM_HALT);
	syscore_shutdown();
	printk(KERN_EMERG "System halted.\n");
	kmsg_dump(KMSG_DUMP_HALT);
	machine_halt();
}
EXPORT_SYMBOL_GPL(kernel_halt);

/**
 *	kernel_power_off - power_off the system
 *
 *	Shutdown everything and perform a clean system power_off.
 */
void kernel_power_off(void)
{
	kernel_shutdown_prepare(SYSTEM_POWER_OFF);
	if (pm_power_off_prepare)
		pm_power_off_prepare();
	disable_nonboot_cpus();
	syscore_shutdown();
	printk(KERN_EMERG "Power down.\n");
	kmsg_dump(KMSG_DUMP_POWEROFF);
	machine_power_off();
}
EXPORT_SYMBOL_GPL(kernel_power_off);
static DEFINE_MUTEX(reboot_mutex);

/*
 * Reboot system call: for obvious reasons only root may call it,
 * and even root needs to set up some magic numbers in the registers
 * so that some mistake won't make this reboot the whole machine.
 * You can also set the meaning of the ctrl-alt-del-key here.
 *
 * reboot doesn't sync: do that yourself before calling this.
 */
SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd,
		void __user *, arg)
{
	char buffer[256];
	int ret = 0;

	/* We only trust the superuser with rebooting the system. */
	if (!capable(CAP_SYS_BOOT))
		return -EPERM;

	/* For safety, we require "magic" arguments. */
	if (magic1 != LINUX_REBOOT_MAGIC1 ||
	    (magic2 != LINUX_REBOOT_MAGIC2 &&
	     magic2 != LINUX_REBOOT_MAGIC2A &&
	     magic2 != LINUX_REBOOT_MAGIC2B &&
	     magic2 != LINUX_REBOOT_MAGIC2C))
		return -EINVAL;

	/*
	 * If pid namespaces are enabled and the current task is in a child
	 * pid_namespace, the command is handled by reboot_pid_ns() which will
	 * call do_exit().
	 */
	ret = reboot_pid_ns(task_active_pid_ns(current), cmd);
	if (ret)
		return ret;

	/* Instead of trying to make the power_off code look like
	 * halt when pm_power_off is not set do it the easy way.
	 */
	if ((cmd == LINUX_REBOOT_CMD_POWER_OFF) && !pm_power_off)
		cmd = LINUX_REBOOT_CMD_HALT;

	mutex_lock(&reboot_mutex);
	switch (cmd) {
	case LINUX_REBOOT_CMD_RESTART:
		kernel_restart(NULL);
		break;

	case LINUX_REBOOT_CMD_CAD_ON:
		C_A_D = 1;
		break;

	case LINUX_REBOOT_CMD_CAD_OFF:
		C_A_D = 0;
		break;

	case LINUX_REBOOT_CMD_HALT:
		kernel_halt();
		do_exit(0);
		panic("cannot halt");

	case LINUX_REBOOT_CMD_POWER_OFF:
		kernel_power_off();
		do_exit(0);
		break;

	case LINUX_REBOOT_CMD_RESTART2:
		if (strncpy_from_user(&buffer[0], arg, sizeof(buffer) - 1) < 0) {
			ret = -EFAULT;
			break;
		}
		buffer[sizeof(buffer) - 1] = '\0';

		kernel_restart(buffer);
		break;

#ifdef CONFIG_KEXEC
	case LINUX_REBOOT_CMD_KEXEC:
		ret = kernel_kexec();
		break;
#endif

#ifdef CONFIG_HIBERNATION
	case LINUX_REBOOT_CMD_SW_SUSPEND:
		ret = hibernate();
		break;
#endif

	default:
		ret = -EINVAL;
		break;
	}
	mutex_unlock(&reboot_mutex);
	return ret;
}
static void deferred_cad(struct work_struct *dummy)
{
	kernel_restart(NULL);
}

/*
 * This function gets called by ctrl-alt-del - ie the keyboard interrupt.
 * As it's called within an interrupt, it may NOT sync: the only choice
 * is whether to reboot at once, or just ignore the ctrl-alt-del.
 */
void ctrl_alt_del(void)
{
	static DECLARE_WORK(cad_work, deferred_cad);

	if (C_A_D)
		schedule_work(&cad_work);
	else
		kill_cad_pid(SIGINT, 1);
}
/*
 * Unprivileged users may change the real gid to the effective gid
 * or vice versa.  (BSD-style)
 *
 * If you set the real gid at all, or set the effective gid to a value not
 * equal to the real gid, then the saved gid is set to the new effective gid.
 *
 * This makes it possible for a setgid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit over a program.
 *
 * The general idea is that a program which uses just setregid() will be
 * 100% compatible with BSD.  A program which uses just setgid() will be
 * 100% compatible with POSIX with saved IDs.
 *
 * SMP: There are no races; the GIDs are checked only by filesystem
 *      operations (as far as semantic preservation is concerned).
 */
SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kgid_t krgid, kegid;

	krgid = make_kgid(ns, rgid);
	kegid = make_kgid(ns, egid);

	if ((rgid != (gid_t) -1) && !gid_valid(krgid))
		return -EINVAL;
	if ((egid != (gid_t) -1) && !gid_valid(kegid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (rgid != (gid_t) -1) {
		if (gid_eq(old->gid, krgid) ||
		    gid_eq(old->egid, krgid) ||
		    nsown_capable(CAP_SETGID))
			new->gid = krgid;
		else
			goto error;
	}
	if (egid != (gid_t) -1) {
		if (gid_eq(old->gid, kegid) ||
		    gid_eq(old->egid, kegid) ||
		    gid_eq(old->sgid, kegid) ||
		    nsown_capable(CAP_SETGID))
			new->egid = kegid;
		else
			goto error;
	}

	if (rgid != (gid_t) -1 ||
	    (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
		new->sgid = new->egid;
	new->fsgid = new->egid;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}
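/*
 * Editor's illustration (not part of the original file): a set-gid program
 * permanently dropping its elevated group under the BSD-style rules above.
 * Because the real gid is set (rgid != -1), the saved gid also becomes the
 * new effective gid, so the elevated group cannot be regained.
 */
#if 0
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	gid_t rgid = getgid();	/* the invoking user's real gid */

	if (setregid(rgid, rgid) != 0) {
		perror("setregid");
		return EXIT_FAILURE;
	}
	printf("now running with gid=%d egid=%d\n",
	       (int)getgid(), (int)getegid());
	return EXIT_SUCCESS;
}
#endif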
/*
 * setgid() is implemented like SysV w/ SAVED_IDS
 *
 * SMP: Same implicit races as above.
 */
SYSCALL_DEFINE1(setgid, gid_t, gid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kgid_t kgid;

	kgid = make_kgid(ns, gid);
	if (!gid_valid(kgid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (nsown_capable(CAP_SETGID))
		new->gid = new->egid = new->sgid = new->fsgid = kgid;
	else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
		new->egid = new->fsgid = kgid;
	else
		goto error;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}
/*
 * change the user struct in a credentials set to match the new UID
 */
static int set_user(struct cred *new)
{
	struct user_struct *new_user;

	new_user = alloc_uid(new->uid);
	if (!new_user)
		return -EAGAIN;

	/*
	 * We don't fail in case of NPROC limit excess here because too many
	 * poorly written programs don't check set*uid() return code, assuming
	 * it never fails if called by root.  We may still enforce NPROC limit
	 * for programs doing set*uid()+execve() by harmlessly deferring the
	 * failure to the execve() stage.
	 */
	if (atomic_read(&new_user->processes) >= rlimit(RLIMIT_NPROC) &&
			new_user != INIT_USER)
		current->flags |= PF_NPROC_EXCEEDED;
	else
		current->flags &= ~PF_NPROC_EXCEEDED;

	free_uid(new->user);
	new->user = new_user;
	return 0;
}
/*
 * Unprivileged users may change the real uid to the effective uid
 * or vice versa.  (BSD-style)
 *
 * If you set the real uid at all, or set the effective uid to a value not
 * equal to the real uid, then the saved uid is set to the new effective uid.
 *
 * This makes it possible for a setuid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit over a program.
 *
 * The general idea is that a program which uses just setreuid() will be
 * 100% compatible with BSD.  A program which uses just setuid() will be
 * 100% compatible with POSIX with saved IDs.
 */
SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kuid_t kruid, keuid;

	kruid = make_kuid(ns, ruid);
	keuid = make_kuid(ns, euid);

	if ((ruid != (uid_t) -1) && !uid_valid(kruid))
		return -EINVAL;
	if ((euid != (uid_t) -1) && !uid_valid(keuid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (ruid != (uid_t) -1) {
		new->uid = kruid;
		if (!uid_eq(old->uid, kruid) &&
		    !uid_eq(old->euid, kruid) &&
		    !nsown_capable(CAP_SETUID))
			goto error;
	}

	if (euid != (uid_t) -1) {
		new->euid = keuid;
		if (!uid_eq(old->uid, keuid) &&
		    !uid_eq(old->euid, keuid) &&
		    !uid_eq(old->suid, keuid) &&
		    !nsown_capable(CAP_SETUID))
			goto error;
	}

	if (!uid_eq(new->uid, old->uid)) {
		retval = set_user(new);
		if (retval < 0)
			goto error;
	}
	if (ruid != (uid_t) -1 ||
	    (euid != (uid_t) -1 && !uid_eq(keuid, old->uid)))
		new->suid = new->euid;
	new->fsuid = new->euid;

	retval = security_task_fix_setuid(new, old, LSM_SETID_RE);
	if (retval < 0)
		goto error;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}
/*
 * setuid() is implemented like SysV with SAVED_IDS
 *
 * Note that SAVED_IDS is deficient in that a setuid root program
 * like sendmail, for example, cannot set its uid to be a normal
 * user and then switch back, because if you're root, setuid() sets
 * the saved uid too.  If you don't like this, blame the bright people
 * in the POSIX committee and/or USG.  Note that the BSD-style setreuid()
 * will allow a root program to temporarily drop privileges and be able to
 * regain them by swapping the real and effective uid.
 */
SYSCALL_DEFINE1(setuid, uid_t, uid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kuid_t kuid;

	kuid = make_kuid(ns, uid);
	if (!uid_valid(kuid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (nsown_capable(CAP_SETUID)) {
		new->suid = new->uid = kuid;
		if (!uid_eq(kuid, old->uid)) {
			retval = set_user(new);
			if (retval < 0)
				goto error;
		}
	} else if (!uid_eq(kuid, old->uid) && !uid_eq(kuid, new->suid)) {
		goto error;
	}

	new->fsuid = new->euid = kuid;

	retval = security_task_fix_setuid(new, old, LSM_SETID_ID);
	if (retval < 0)
		goto error;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}
/*
 * This function implements a generic ability to update ruid, euid,
 * and suid.  This allows you to implement the 4.4 compatible seteuid().
 */
SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kuid_t kruid, keuid, ksuid;

	kruid = make_kuid(ns, ruid);
	keuid = make_kuid(ns, euid);
	ksuid = make_kuid(ns, suid);

	if ((ruid != (uid_t) -1) && !uid_valid(kruid))
		return -EINVAL;

	if ((euid != (uid_t) -1) && !uid_valid(keuid))
		return -EINVAL;

	if ((suid != (uid_t) -1) && !uid_valid(ksuid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;

	old = current_cred();

	retval = -EPERM;
	if (!nsown_capable(CAP_SETUID)) {
		if (ruid != (uid_t) -1 && !uid_eq(kruid, old->uid) &&
		    !uid_eq(kruid, old->euid) && !uid_eq(kruid, old->suid))
			goto error;
		if (euid != (uid_t) -1 && !uid_eq(keuid, old->uid) &&
		    !uid_eq(keuid, old->euid) && !uid_eq(keuid, old->suid))
			goto error;
		if (suid != (uid_t) -1 && !uid_eq(ksuid, old->uid) &&
		    !uid_eq(ksuid, old->euid) && !uid_eq(ksuid, old->suid))
			goto error;
	}

	if (ruid != (uid_t) -1) {
		new->uid = kruid;
		if (!uid_eq(kruid, old->uid)) {
			retval = set_user(new);
			if (retval < 0)
				goto error;
		}
	}
	if (euid != (uid_t) -1)
		new->euid = keuid;
	if (suid != (uid_t) -1)
		new->suid = ksuid;
	new->fsuid = new->euid;

	retval = security_task_fix_setuid(new, old, LSM_SETID_RES);
	if (retval < 0)
		goto error;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}
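/*
 * Editor's illustration (not part of the original file): a setuid-root helper
 * temporarily dropping privileges with setresuid() and regaining them later,
 * which plain setuid() cannot do because it also overwrites the saved uid
 * (see the comment above setuid()).
 */
#if 0
#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uid_t real = getuid();

	/* Drop: keep uid 0 only in the saved uid so it can be regained. */
	if (setresuid(real, real, 0) != 0) {
		perror("setresuid(drop)");
		return 1;
	}
	/* ... do unprivileged work as the real user ... */

	/* Regain: move the saved uid 0 back into the effective uid. */
	if (setresuid(-1, 0, -1) != 0) {
		perror("setresuid(regain)");
		return 1;
	}
	return 0;
}
#endif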
SYSCALL_DEFINE3(getresuid, uid_t __user *, ruidp, uid_t __user *, euidp, uid_t __user *, suidp)
{
	const struct cred *cred = current_cred();
	int retval;
	uid_t ruid, euid, suid;

	ruid = from_kuid_munged(cred->user_ns, cred->uid);
	euid = from_kuid_munged(cred->user_ns, cred->euid);
	suid = from_kuid_munged(cred->user_ns, cred->suid);

	if (!(retval = put_user(ruid, ruidp)) &&
	    !(retval = put_user(euid, euidp)))
		retval = put_user(suid, suidp);

	return retval;
}
/*
 * Same as above, but for rgid, egid, sgid.
 */
SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kgid_t krgid, kegid, ksgid;

	krgid = make_kgid(ns, rgid);
	kegid = make_kgid(ns, egid);
	ksgid = make_kgid(ns, sgid);

	if ((rgid != (gid_t) -1) && !gid_valid(krgid))
		return -EINVAL;
	if ((egid != (gid_t) -1) && !gid_valid(kegid))
		return -EINVAL;
	if ((sgid != (gid_t) -1) && !gid_valid(ksgid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (!nsown_capable(CAP_SETGID)) {
		if (rgid != (gid_t) -1 && !gid_eq(krgid, old->gid) &&
		    !gid_eq(krgid, old->egid) && !gid_eq(krgid, old->sgid))
			goto error;
		if (egid != (gid_t) -1 && !gid_eq(kegid, old->gid) &&
		    !gid_eq(kegid, old->egid) && !gid_eq(kegid, old->sgid))
			goto error;
		if (sgid != (gid_t) -1 && !gid_eq(ksgid, old->gid) &&
		    !gid_eq(ksgid, old->egid) && !gid_eq(ksgid, old->sgid))
			goto error;
	}

	if (rgid != (gid_t) -1)
		new->gid = krgid;
	if (egid != (gid_t) -1)
		new->egid = kegid;
	if (sgid != (gid_t) -1)
		new->sgid = ksgid;
	new->fsgid = new->egid;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}
SYSCALL_DEFINE3(getresgid, gid_t __user *, rgidp, gid_t __user *, egidp, gid_t __user *, sgidp)
{
	const struct cred *cred = current_cred();
	int retval;
	gid_t rgid, egid, sgid;

	rgid = from_kgid_munged(cred->user_ns, cred->gid);
	egid = from_kgid_munged(cred->user_ns, cred->egid);
	sgid = from_kgid_munged(cred->user_ns, cred->sgid);

	if (!(retval = put_user(rgid, rgidp)) &&
	    !(retval = put_user(egid, egidp)))
		retval = put_user(sgid, sgidp);

	return retval;
}
/*
 * "setfsuid()" sets the fsuid - the uid used for filesystem checks. This
 * is used for "access()" and for the NFS daemon (letting nfsd stay at
 * whatever uid it wants to). It normally shadows "euid", except when
 * explicitly set by setfsuid() or for access..
 */
SYSCALL_DEFINE1(setfsuid, uid_t, uid)
{
	const struct cred *old;
	struct cred *new;
	uid_t old_fsuid;
	kuid_t kuid;

	old = current_cred();
	old_fsuid = from_kuid_munged(old->user_ns, old->fsuid);

	kuid = make_kuid(old->user_ns, uid);
	if (!uid_valid(kuid))
		return old_fsuid;

	new = prepare_creds();
	if (!new)
		return old_fsuid;

	if (uid_eq(kuid, old->uid)  || uid_eq(kuid, old->euid) ||
	    uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
	    nsown_capable(CAP_SETUID)) {
		if (!uid_eq(kuid, old->fsuid)) {
			new->fsuid = kuid;
			if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
				goto change_okay;
		}
	}

	abort_creds(new);
	return old_fsuid;

change_okay:
	commit_creds(new);
	return old_fsuid;
}
/*
 * Samma på svenska.. ("ditto, in Swedish") - same as setfsuid(), but for the
 * filesystem gid.
 */
SYSCALL_DEFINE1(setfsgid, gid_t, gid)
{
	const struct cred *old;
	struct cred *new;
	gid_t old_fsgid;
	kgid_t kgid;

	old = current_cred();
	old_fsgid = from_kgid_munged(old->user_ns, old->fsgid);

	kgid = make_kgid(old->user_ns, gid);
	if (!gid_valid(kgid))
		return old_fsgid;

	new = prepare_creds();
	if (!new)
		return old_fsgid;

	if (gid_eq(kgid, old->gid)  || gid_eq(kgid, old->egid) ||
	    gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
	    nsown_capable(CAP_SETGID)) {
		if (!gid_eq(kgid, old->fsgid)) {
			new->fsgid = kgid;
			goto change_okay;
		}
	}

	abort_creds(new);
	return old_fsgid;

change_okay:
	commit_creds(new);
	return old_fsgid;
}
void do_sys_times(struct tms *tms)
{
	cputime_t tgutime, tgstime, cutime, cstime;

	spin_lock_irq(&current->sighand->siglock);
	thread_group_times(current, &tgutime, &tgstime);
	cutime = current->signal->cutime;
	cstime = current->signal->cstime;
	spin_unlock_irq(&current->sighand->siglock);
	tms->tms_utime = cputime_to_clock_t(tgutime);
	tms->tms_stime = cputime_to_clock_t(tgstime);
	tms->tms_cutime = cputime_to_clock_t(cutime);
	tms->tms_cstime = cputime_to_clock_t(cstime);
}

SYSCALL_DEFINE1(times, struct tms __user *, tbuf)
{
	if (tbuf) {
		struct tms tmp;

		do_sys_times(&tmp);
		if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
			return -EFAULT;
	}
	force_successful_syscall_return();
	return (long) jiffies_64_to_clock_t(get_jiffies_64());
}
/*
 * This needs some heavy checking ...
 * I just haven't the stomach for it. I also don't fully
 * understand sessions/pgrp etc. Let somebody who does explain it.
 *
 * OK, I think I have the protection semantics right.... this is really
 * only important on a multi-user system anyway, to make sure one user
 * can't send a signal to a process owned by another.  -TYT, 12/12/91
 *
 * Auch. Had to add the 'did_exec' flag to conform completely to POSIX.
 */
SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid)
{
	struct task_struct *p;
	struct task_struct *group_leader = current->group_leader;
	struct pid *pgrp;
	int err;

	if (!pid)
		pid = task_pid_vnr(group_leader);
	if (!pgid)
		pgid = pid;
	if (pgid < 0)
		return -EINVAL;
	rcu_read_lock();

	/* From this point forward we keep holding onto the tasklist lock
	 * so that our parent does not change from under us. -DaveM
	 */
	write_lock_irq(&tasklist_lock);

	err = -ESRCH;
	p = find_task_by_vpid(pid);
	if (!p)
		goto out;

	err = -EINVAL;
	if (!thread_group_leader(p))
		goto out;

	if (same_thread_group(p->real_parent, group_leader)) {
		err = -EPERM;
		if (task_session(p) != task_session(group_leader))
			goto out;
		err = -EACCES;
		if (!p->did_exec)
			goto out;
	} else {
		err = -ESRCH;
		if (p != group_leader)
			goto out;
	}

	err = -EPERM;
	if (p->signal->leader)
		goto out;

	pgrp = task_pid(p);
	if (pgid != pid) {
		struct task_struct *g;

		pgrp = find_vpid(pgid);
		g = pid_task(pgrp, PIDTYPE_PGID);
		if (!g || task_session(g) != task_session(group_leader))
			goto out;
	}

	err = security_task_setpgid(p, pgid);
	if (err)
		goto out;

	if (task_pgrp(p) != pgrp)
		change_pid(p, PIDTYPE_PGID, pgrp);

	err = 0;
out:
	/* All paths lead to here, thus we are safe. -DaveM */
	write_unlock_irq(&tasklist_lock);
	rcu_read_unlock();
	return err;
}
SYSCALL_DEFINE1(getpgid, pid_t, pid)
{
	struct task_struct *p;
	struct pid *grp;
	int retval;

	rcu_read_lock();
	if (!pid)
		grp = task_pgrp(current);
	else {
		retval = -ESRCH;
		p = find_task_by_vpid(pid);
		if (!p)
			goto out;
		grp = task_pgrp(p);
		if (!grp)
			goto out;

		retval = security_task_getpgid(p);
		if (retval)
			goto out;
	}
	retval = pid_vnr(grp);
out:
	rcu_read_unlock();
	return retval;
}

#ifdef __ARCH_WANT_SYS_GETPGRP

SYSCALL_DEFINE0(getpgrp)
{
	return sys_getpgid(0);
}

#endif
SYSCALL_DEFINE1(getsid, pid_t, pid)
{
	struct task_struct *p;
	struct pid *sid;
	int retval;

	rcu_read_lock();
	if (!pid)
		sid = task_session(current);
	else {
		retval = -ESRCH;
		p = find_task_by_vpid(pid);
		if (!p)
			goto out;
		sid = task_session(p);
		if (!sid)
			goto out;

		retval = security_task_getsid(p);
		if (retval)
			goto out;
	}
	retval = pid_vnr(sid);
out:
	rcu_read_unlock();
	return retval;
}
SYSCALL_DEFINE0(setsid)
{
	struct task_struct *group_leader = current->group_leader;
	struct pid *sid = task_pid(group_leader);
	pid_t session = pid_vnr(sid);
	int err = -EPERM;

	write_lock_irq(&tasklist_lock);
	/* Fail if I am already a session leader */
	if (group_leader->signal->leader)
		goto out;

	/* Fail if a process group id already exists that equals the
	 * proposed session id.
	 */
	if (pid_task(sid, PIDTYPE_PGID))
		goto out;

	group_leader->signal->leader = 1;
	__set_special_pids(sid);

	proc_clear_tty(group_leader);

	err = session;
out:
	write_unlock_irq(&tasklist_lock);
	if (err > 0) {
		proc_sid_connector(group_leader);
		sched_autogroup_create_attach(group_leader);
	}
	return err;
}
DECLARE_RWSEM(uts_sem);

#ifdef COMPAT_UTS_MACHINE
#define override_architecture(name) \
	(personality(current->personality) == PER_LINUX32 && \
	 copy_to_user(name->machine, COMPAT_UTS_MACHINE, \
		      sizeof(COMPAT_UTS_MACHINE)))
#else
#define override_architecture(name)	0
#endif
/*
 * Work around broken programs that cannot handle "Linux 3.0".
 * Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40
 */
static int override_release(char __user *release, int len)
{
	int ret = 0;
	char buf[65];

	if (current->personality & UNAME26) {
		char *rest = UTS_RELEASE;
		int ndots = 0;
		unsigned v;

		while (*rest) {
			if (*rest == '.' && ++ndots >= 3)
				break;
			if (!isdigit(*rest) && *rest != '.')
				break;
			rest++;
		}
		v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40;
		snprintf(buf, len, "2.6.%u%s", v, rest);
		ret = copy_to_user(release, buf, len);
	}
	return ret;
}
SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name)
{
	int errno = 0;

	down_read(&uts_sem);
	if (copy_to_user(name, utsname(), sizeof *name))
		errno = -EFAULT;
	up_read(&uts_sem);

	if (!errno && override_release(name->release, sizeof(name->release)))
		errno = -EFAULT;
	if (!errno && override_architecture(name))
		errno = -EFAULT;
	return errno;
}
#ifdef __ARCH_WANT_SYS_OLD_UNAME

SYSCALL_DEFINE1(uname, struct old_utsname __user *, name)
{
	int error = 0;

	if (!name)
		return -EFAULT;

	down_read(&uts_sem);
	if (copy_to_user(name, utsname(), sizeof(*name)))
		error = -EFAULT;
	up_read(&uts_sem);

	if (!error && override_release(name->release, sizeof(name->release)))
		error = -EFAULT;
	if (!error && override_architecture(name))
		error = -EFAULT;
	return error;
}
SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
{
	int error;

	if (!name)
		return -EFAULT;
	if (!access_ok(VERIFY_WRITE, name, sizeof(struct oldold_utsname)))
		return -EFAULT;

	down_read(&uts_sem);
	error = __copy_to_user(&name->sysname, &utsname()->sysname,
			       __OLD_UTS_LEN);
	error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
	error |= __copy_to_user(&name->nodename, &utsname()->nodename,
				__OLD_UTS_LEN);
	error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
	error |= __copy_to_user(&name->release, &utsname()->release,
				__OLD_UTS_LEN);
	error |= __put_user(0, name->release + __OLD_UTS_LEN);
	error |= __copy_to_user(&name->version, &utsname()->version,
				__OLD_UTS_LEN);
	error |= __put_user(0, name->version + __OLD_UTS_LEN);
	error |= __copy_to_user(&name->machine, &utsname()->machine,
				__OLD_UTS_LEN);
	error |= __put_user(0, name->machine + __OLD_UTS_LEN);
	up_read(&uts_sem);

	if (!error && override_architecture(name))
		error = -EFAULT;
	if (!error && override_release(name->release, sizeof(name->release)))
		error = -EFAULT;
	return error ? -EFAULT : 0;
}
#endif
SYSCALL_DEFINE2(sethostname, char __user *, name, int, len)
{
	int errno;
	char tmp[__NEW_UTS_LEN];

	if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	if (len < 0 || len > __NEW_UTS_LEN)
		return -EINVAL;
	down_write(&uts_sem);
	errno = -EFAULT;
	if (!copy_from_user(tmp, name, len)) {
		struct new_utsname *u = utsname();

		memcpy(u->nodename, tmp, len);
		memset(u->nodename + len, 0, sizeof(u->nodename) - len);
		errno = 0;
		uts_proc_notify(UTS_PROC_HOSTNAME);
	}
	up_write(&uts_sem);
	return errno;
}
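/*
 * Editor's illustration (not part of the original file): setting the host
 * name from userspace.  The caller needs CAP_SYS_ADMIN in the user namespace
 * that owns the uts namespace, as checked above.  The name is hypothetical.
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *name = "example-host";	/* hypothetical host name */

	if (sethostname(name, strlen(name)) != 0) {
		perror("sethostname");
		return 1;
	}
	return 0;
}
#endif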
#ifdef __ARCH_WANT_SYS_GETHOSTNAME

SYSCALL_DEFINE2(gethostname, char __user *, name, int, len)
{
	int i, errno;
	struct new_utsname *u;

	if (len < 0)
		return -EINVAL;
	down_read(&uts_sem);
	u = utsname();
	i = 1 + strlen(u->nodename);
	if (i > len)
		i = len;
	errno = 0;
	if (copy_to_user(name, u->nodename, i))
		errno = -EFAULT;
	up_read(&uts_sem);
	return errno;
}

#endif
/*
 * Only setdomainname; getdomainname can be implemented by calling
 * uname()
 */
SYSCALL_DEFINE2(setdomainname, char __user *, name, int, len)
{
	int errno;
	char tmp[__NEW_UTS_LEN];

	if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN))
		return -EPERM;
	if (len < 0 || len > __NEW_UTS_LEN)
		return -EINVAL;

	down_write(&uts_sem);
	errno = -EFAULT;
	if (!copy_from_user(tmp, name, len)) {
		struct new_utsname *u = utsname();

		memcpy(u->domainname, tmp, len);
		memset(u->domainname + len, 0, sizeof(u->domainname) - len);
		errno = 0;
		uts_proc_notify(UTS_PROC_DOMAINNAME);
	}
	up_write(&uts_sem);
	return errno;
}
SYSCALL_DEFINE2(getrlimit, unsigned int, resource, struct rlimit __user *, rlim)
{
	struct rlimit value;
	int ret;

	ret = do_prlimit(current, resource, NULL, &value);
	if (!ret)
		ret = copy_to_user(rlim, &value, sizeof(*rlim)) ? -EFAULT : 0;

	return ret;
}
#ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT

/*
 *	Back compatibility for getrlimit. Needed for some apps.
 */
SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
		struct rlimit __user *, rlim)
{
	struct rlimit x;

	if (resource >= RLIM_NLIMITS)
		return -EINVAL;

	task_lock(current->group_leader);
	x = current->signal->rlim[resource];
	task_unlock(current->group_leader);
	if (x.rlim_cur > 0x7FFFFFFF)
		x.rlim_cur = 0x7FFFFFFF;
	if (x.rlim_max > 0x7FFFFFFF)
		x.rlim_max = 0x7FFFFFFF;
	return copy_to_user(rlim, &x, sizeof(x)) ? -EFAULT : 0;
}

#endif
static inline bool rlim64_is_infinity(__u64 rlim64)
{
#if BITS_PER_LONG < 64
	return rlim64 >= ULONG_MAX;
#else
	return rlim64 == RLIM64_INFINITY;
#endif
}

static void rlim_to_rlim64(const struct rlimit *rlim, struct rlimit64 *rlim64)
{
	if (rlim->rlim_cur == RLIM_INFINITY)
		rlim64->rlim_cur = RLIM64_INFINITY;
	else
		rlim64->rlim_cur = rlim->rlim_cur;
	if (rlim->rlim_max == RLIM_INFINITY)
		rlim64->rlim_max = RLIM64_INFINITY;
	else
		rlim64->rlim_max = rlim->rlim_max;
}

static void rlim64_to_rlim(const struct rlimit64 *rlim64, struct rlimit *rlim)
{
	if (rlim64_is_infinity(rlim64->rlim_cur))
		rlim->rlim_cur = RLIM_INFINITY;
	else
		rlim->rlim_cur = (unsigned long)rlim64->rlim_cur;
	if (rlim64_is_infinity(rlim64->rlim_max))
		rlim->rlim_max = RLIM_INFINITY;
	else
		rlim->rlim_max = (unsigned long)rlim64->rlim_max;
}
/* make sure you are allowed to change @tsk limits before calling this */
int do_prlimit(struct task_struct *tsk, unsigned int resource,
		struct rlimit *new_rlim, struct rlimit *old_rlim)
{
	struct rlimit *rlim;
	int retval = 0;

	if (resource >= RLIM_NLIMITS)
		return -EINVAL;
	if (new_rlim) {
		if (new_rlim->rlim_cur > new_rlim->rlim_max)
			return -EINVAL;
		if (resource == RLIMIT_NOFILE &&
				new_rlim->rlim_max > sysctl_nr_open)
			return -EPERM;
	}

	/* protect tsk->signal and tsk->sighand from disappearing */
	read_lock(&tasklist_lock);
	if (!tsk->sighand) {
		retval = -ESRCH;
		goto out;
	}

	rlim = tsk->signal->rlim + resource;
	task_lock(tsk->group_leader);
	if (new_rlim) {
		/* Keep the capable check against init_user_ns until
		   cgroups can contain all limits */
		if (new_rlim->rlim_max > rlim->rlim_max &&
				!capable(CAP_SYS_RESOURCE))
			retval = -EPERM;
		if (!retval)
			retval = security_task_setrlimit(tsk->group_leader,
					resource, new_rlim);
		if (resource == RLIMIT_CPU && new_rlim->rlim_cur == 0) {
			/*
			 * The caller is asking for an immediate RLIMIT_CPU
			 * expiry.  But we use the zero value to mean "it was
			 * never set".  So let's cheat and make it one second
			 * instead
			 */
			new_rlim->rlim_cur = 1;
		}
	}
	if (!retval) {
		if (old_rlim)
			*old_rlim = *rlim;
		if (new_rlim)
			*rlim = *new_rlim;
	}
	task_unlock(tsk->group_leader);

	/*
	 * RLIMIT_CPU handling.   Note that the kernel fails to return an error
	 * code if it rejected the user's attempt to set RLIMIT_CPU.  This is a
	 * very long-standing error, and fixing it now risks breakage of
	 * applications, so we live with it
	 */
	if (!retval && new_rlim && resource == RLIMIT_CPU &&
	    new_rlim->rlim_cur != RLIM_INFINITY)
		update_rlimit_cpu(tsk, new_rlim->rlim_cur);
out:
	read_unlock(&tasklist_lock);
	return retval;
}
/* rcu lock must be held */
static int check_prlimit_permission(struct task_struct *task)
{
	const struct cred *cred = current_cred(), *tcred;

	if (current == task)
		return 0;

	tcred = __task_cred(task);
	if (uid_eq(cred->uid, tcred->euid) &&
	    uid_eq(cred->uid, tcred->suid) &&
	    uid_eq(cred->uid, tcred->uid)  &&
	    gid_eq(cred->gid, tcred->egid) &&
	    gid_eq(cred->gid, tcred->sgid) &&
	    gid_eq(cred->gid, tcred->gid))
		return 0;
	if (ns_capable(tcred->user_ns, CAP_SYS_RESOURCE))
		return 0;

	return -EPERM;
}
SYSCALL_DEFINE4(prlimit64, pid_t, pid, unsigned int, resource,
		const struct rlimit64 __user *, new_rlim,
		struct rlimit64 __user *, old_rlim)
{
	struct rlimit64 old64, new64;
	struct rlimit old, new;
	struct task_struct *tsk;
	int ret;

	if (new_rlim) {
		if (copy_from_user(&new64, new_rlim, sizeof(new64)))
			return -EFAULT;
		rlim64_to_rlim(&new64, &new);
	}

	rcu_read_lock();
	tsk = pid ? find_task_by_vpid(pid) : current;
	if (!tsk) {
		rcu_read_unlock();
		return -ESRCH;
	}
	ret = check_prlimit_permission(tsk);
	if (ret) {
		rcu_read_unlock();
		return ret;
	}
	get_task_struct(tsk);
	rcu_read_unlock();

	ret = do_prlimit(tsk, resource, new_rlim ? &new : NULL,
			old_rlim ? &old : NULL);

	if (!ret && old_rlim) {
		rlim_to_rlim64(&old, &old64);
		if (copy_to_user(old_rlim, &old64, sizeof(old64)))
			ret = -EFAULT;
	}

	put_task_struct(tsk);
	return ret;
}
SYSCALL_DEFINE2(setrlimit, unsigned int, resource, struct rlimit __user *, rlim)
{
	struct rlimit new_rlim;

	if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
		return -EFAULT;
	return do_prlimit(current, resource, &new_rlim, NULL);
}
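/*
 * Editor's illustration (not part of the original file): reading and raising
 * another process's RLIMIT_NOFILE through the prlimit64 path above, here via
 * glibc's prlimit() wrapper (glibc >= 2.13, _GNU_SOURCE).  The caller must
 * match the target's uids/gids or hold CAP_SYS_RESOURCE, as enforced by
 * check_prlimit_permission().  The limit values are arbitrary.
 */
#if 0
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/resource.h>

int main(int argc, char **argv)
{
	pid_t pid = (argc > 1) ? (pid_t)atoi(argv[1]) : 0;	/* 0 = self */
	struct rlimit new = { .rlim_cur = 4096, .rlim_max = 4096 };
	struct rlimit old;

	if (prlimit(pid, RLIMIT_NOFILE, &new, &old) != 0) {
		perror("prlimit");
		return 1;
	}
	printf("old soft=%llu hard=%llu\n",
	       (unsigned long long)old.rlim_cur,
	       (unsigned long long)old.rlim_max);
	return 0;
}
#endif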
/*
 * It would make sense to put struct rusage in the task_struct,
 * except that would make the task_struct be *really big*.  After
 * task_struct gets moved into malloc'ed memory, it would
 * make sense to do this.  It will make moving the rest of the information
 * a lot simpler!  (Which we're not doing right now because we're not
 * measuring them yet).
 *
 * When sampling multiple threads for RUSAGE_SELF, under SMP we might have
 * races with threads incrementing their own counters.  But since word
 * reads are atomic, we either get new values or old values and we don't
 * care which for the sums.  We always take the siglock to protect reading
 * the c* fields from p->signal from races with exit.c updating those
 * fields when reaping, so a sample either gets all the additions of a
 * given child after it's reaped, or none so this sample is before reaping.
 *
 * Locking:
 * We need to take the siglock for CHILDREN, SELF and BOTH
 * for the cases current multithreaded, non-current single threaded and
 * non-current multithreaded.  Thread traversal is now safe with
 * the siglock held.
 * Strictly speaking, we do not need to take the siglock if we are current and
 * single threaded, as no one else can take our signal_struct away, no one
 * else can reap the children to update signal->c* counters, and no one else
 * can race with the signal-> fields. If we do not take any lock, the
 * signal-> fields could be read out of order while another thread was just
 * exiting. So we should place a read memory barrier when we avoid the lock.
 * On the writer side, write memory barrier is implied in __exit_signal
 * as __exit_signal releases the siglock spinlock after updating the signal->
 * fields. But we don't do this yet to keep things simple.
 */
static void accumulate_thread_rusage(struct task_struct *t, struct rusage *r)
{
	r->ru_nvcsw += t->nvcsw;
	r->ru_nivcsw += t->nivcsw;
	r->ru_minflt += t->min_flt;
	r->ru_majflt += t->maj_flt;
	r->ru_inblock += task_io_get_inblock(t);
	r->ru_oublock += task_io_get_oublock(t);
}
static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
{
	struct task_struct *t;
	unsigned long flags;
	cputime_t tgutime, tgstime, utime, stime;
	unsigned long maxrss = 0;

	memset((char *) r, 0, sizeof *r);
	utime = stime = 0;

	if (who == RUSAGE_THREAD) {
		task_times(current, &utime, &stime);
		accumulate_thread_rusage(p, r);
		maxrss = p->signal->maxrss;
		goto out;
	}

	if (!lock_task_sighand(p, &flags))
		return;

	switch (who) {
	case RUSAGE_BOTH:
	case RUSAGE_CHILDREN:
		utime = p->signal->cutime;
		stime = p->signal->cstime;
		r->ru_nvcsw = p->signal->cnvcsw;
		r->ru_nivcsw = p->signal->cnivcsw;
		r->ru_minflt = p->signal->cmin_flt;
		r->ru_majflt = p->signal->cmaj_flt;
		r->ru_inblock = p->signal->cinblock;
		r->ru_oublock = p->signal->coublock;
		maxrss = p->signal->cmaxrss;

		if (who == RUSAGE_CHILDREN)
			break;

	case RUSAGE_SELF:
		thread_group_times(p, &tgutime, &tgstime);
		utime += tgutime;
		stime += tgstime;
		r->ru_nvcsw += p->signal->nvcsw;
		r->ru_nivcsw += p->signal->nivcsw;
		r->ru_minflt += p->signal->min_flt;
		r->ru_majflt += p->signal->maj_flt;
		r->ru_inblock += p->signal->inblock;
		r->ru_oublock += p->signal->oublock;
		if (maxrss < p->signal->maxrss)
			maxrss = p->signal->maxrss;
		t = p;
		do {
			accumulate_thread_rusage(t, r);
			t = next_thread(t);
		} while (t != p);
		break;

	default:
		BUG();
	}
	unlock_task_sighand(p, &flags);

out:
	cputime_to_timeval(utime, &r->ru_utime);
	cputime_to_timeval(stime, &r->ru_stime);

	if (who != RUSAGE_CHILDREN) {
		struct mm_struct *mm = get_task_mm(p);

		if (mm) {
			setmax_mm_hiwater_rss(&maxrss, mm);
			mmput(mm);
		}
	}
	r->ru_maxrss = maxrss * (PAGE_SIZE / 1024); /* convert pages to KBs */
}
int getrusage(struct task_struct *p, int who, struct rusage __user *ru)
{
	struct rusage r;

	k_getrusage(p, who, &r);
	return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;
}

SYSCALL_DEFINE2(getrusage, int, who, struct rusage __user *, ru)
{
	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN &&
	    who != RUSAGE_THREAD)
		return -EINVAL;
	return getrusage(current, who, ru);
}
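/*
 * Editor's illustration (not part of the original file): typical userspace
 * use of getrusage().  Note that ru_maxrss is reported in kilobytes, matching
 * the PAGE_SIZE / 1024 conversion in k_getrusage() above.
 */
#if 0
#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
	struct rusage ru;

	if (getrusage(RUSAGE_SELF, &ru) != 0) {
		perror("getrusage");
		return 1;
	}
	printf("user %ld.%06lds, sys %ld.%06lds, maxrss %ld kB\n",
	       (long)ru.ru_utime.tv_sec, (long)ru.ru_utime.tv_usec,
	       (long)ru.ru_stime.tv_sec, (long)ru.ru_stime.tv_usec,
	       ru.ru_maxrss);
	return 0;
}
#endif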
SYSCALL_DEFINE1(umask, int, mask)
{
	mask = xchg(&current->fs->umask, mask & S_IRWXUGO);
	return mask;
}
#ifdef CONFIG_CHECKPOINT_RESTORE
static int prctl_set_mm(int opt, unsigned long addr,
			unsigned long arg4, unsigned long arg5)
{
	unsigned long rlim = rlimit(RLIMIT_DATA);
	unsigned long vm_req_flags;
	unsigned long vm_bad_flags;
	struct vm_area_struct *vma;
	int error = 0;
	struct mm_struct *mm = current->mm;

	if (arg4 | arg5)
		return -EINVAL;

	if (!capable(CAP_SYS_RESOURCE))
		return -EPERM;

	if (addr >= TASK_SIZE)
		return -EINVAL;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, addr);

	if (opt != PR_SET_MM_START_BRK && opt != PR_SET_MM_BRK) {
		/* It must be existing VMA */
		if (!vma || vma->vm_start > addr)
			goto out;
	}

	error = -EINVAL;
	switch (opt) {
	case PR_SET_MM_START_CODE:
	case PR_SET_MM_END_CODE:
		vm_req_flags = VM_READ | VM_EXEC;
		vm_bad_flags = VM_WRITE | VM_MAYSHARE;

		if ((vma->vm_flags & vm_req_flags) != vm_req_flags ||
		    (vma->vm_flags & vm_bad_flags))
			goto out;

		if (opt == PR_SET_MM_START_CODE)
			mm->start_code = addr;
		else
			mm->end_code = addr;
		break;

	case PR_SET_MM_START_DATA:
	case PR_SET_MM_END_DATA:
		vm_req_flags = VM_READ | VM_WRITE;
		vm_bad_flags = VM_EXEC | VM_MAYSHARE;

		if ((vma->vm_flags & vm_req_flags) != vm_req_flags ||
		    (vma->vm_flags & vm_bad_flags))
			goto out;

		if (opt == PR_SET_MM_START_DATA)
			mm->start_data = addr;
		else
			mm->end_data = addr;
		break;

	case PR_SET_MM_START_STACK:

#ifdef CONFIG_STACK_GROWSUP
		vm_req_flags = VM_READ | VM_WRITE | VM_GROWSUP;
#else
		vm_req_flags = VM_READ | VM_WRITE | VM_GROWSDOWN;
#endif
		if ((vma->vm_flags & vm_req_flags) != vm_req_flags)
			goto out;

		mm->start_stack = addr;
		break;

	case PR_SET_MM_START_BRK:
		if (addr <= mm->end_data)
			goto out;

		if (rlim < RLIM_INFINITY &&
		    (mm->brk - addr) +
		    (mm->end_data - mm->start_data) > rlim)
			goto out;

		mm->start_brk = addr;
		break;

	case PR_SET_MM_BRK:
		if (addr <= mm->end_data)
			goto out;

		if (rlim < RLIM_INFINITY &&
		    (addr - mm->start_brk) +
		    (mm->end_data - mm->start_data) > rlim)
			goto out;

		mm->brk = addr;
		break;

	default:
		error = -EINVAL;
		goto out;
	}

	error = 0;

out:
	up_read(&mm->mmap_sem);

	return error;
}
#else /* CONFIG_CHECKPOINT_RESTORE */
static int prctl_set_mm(int opt, unsigned long addr,
			unsigned long arg4, unsigned long arg5)
{
	return -EINVAL;
}
#endif
SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
		unsigned long, arg4, unsigned long, arg5)
{
	struct task_struct *me = current;
	unsigned char comm[sizeof(me->comm)];
	long error;

	error = security_task_prctl(option, arg2, arg3, arg4, arg5);
	if (error != -ENOSYS)
		return error;

	error = 0;
	switch (option) {
	case PR_SET_PDEATHSIG:
		if (!valid_signal(arg2)) {
			error = -EINVAL;
			break;
		}
		me->pdeath_signal = arg2;
		break;
	case PR_GET_PDEATHSIG:
		error = put_user(me->pdeath_signal, (int __user *)arg2);
		break;
	case PR_GET_DUMPABLE:
		error = get_dumpable(me->mm);
		break;
	case PR_SET_DUMPABLE:
		if (arg2 < 0 || arg2 > 1) {
			error = -EINVAL;
			break;
		}
		set_dumpable(me->mm, arg2);
		break;

	case PR_SET_UNALIGN:
		error = SET_UNALIGN_CTL(me, arg2);
		break;
	case PR_GET_UNALIGN:
		error = GET_UNALIGN_CTL(me, arg2);
		break;
	case PR_SET_FPEMU:
		error = SET_FPEMU_CTL(me, arg2);
		break;
	case PR_GET_FPEMU:
		error = GET_FPEMU_CTL(me, arg2);
		break;
	case PR_SET_FPEXC:
		error = SET_FPEXC_CTL(me, arg2);
		break;
	case PR_GET_FPEXC:
		error = GET_FPEXC_CTL(me, arg2);
		break;
	case PR_GET_TIMING:
		error = PR_TIMING_STATISTICAL;
		break;
	case PR_SET_TIMING:
		if (arg2 != PR_TIMING_STATISTICAL)
			error = -EINVAL;
		break;
	case PR_SET_NAME:
		comm[sizeof(me->comm)-1] = 0;
		if (strncpy_from_user(comm, (char __user *)arg2,
				      sizeof(me->comm) - 1) < 0)
			return -EFAULT;
		set_task_comm(me, comm);
		proc_comm_connector(me);
		break;
	case PR_GET_NAME:
		get_task_comm(comm, me);
		if (copy_to_user((char __user *)arg2, comm,
				 sizeof(comm)))
			return -EFAULT;
		break;
	case PR_GET_ENDIAN:
		error = GET_ENDIAN(me, arg2);
		break;
	case PR_SET_ENDIAN:
		error = SET_ENDIAN(me, arg2);
		break;
	case PR_GET_SECCOMP:
		error = prctl_get_seccomp();
		break;
	case PR_SET_SECCOMP:
		error = prctl_set_seccomp(arg2, (char __user *)arg3);
		break;
	case PR_GET_TSC:
		error = GET_TSC_CTL(arg2);
		break;
	case PR_SET_TSC:
		error = SET_TSC_CTL(arg2);
		break;
	case PR_TASK_PERF_EVENTS_DISABLE:
		error = perf_event_task_disable();
		break;
	case PR_TASK_PERF_EVENTS_ENABLE:
		error = perf_event_task_enable();
		break;
	case PR_GET_TIMERSLACK:
		error = current->timer_slack_ns;
		break;
	case PR_SET_TIMERSLACK:
		if (arg2 <= 0)
			current->timer_slack_ns =
					current->default_timer_slack_ns;
		else
			current->timer_slack_ns = arg2;
		break;
	case PR_MCE_KILL:
		if (arg4 | arg5)
			return -EINVAL;
		switch (arg2) {
		case PR_MCE_KILL_CLEAR:
			if (arg3 != 0)
				return -EINVAL;
			current->flags &= ~PF_MCE_PROCESS;
			break;
		case PR_MCE_KILL_SET:
			current->flags |= PF_MCE_PROCESS;
			if (arg3 == PR_MCE_KILL_EARLY)
				current->flags |= PF_MCE_EARLY;
			else if (arg3 == PR_MCE_KILL_LATE)
				current->flags &= ~PF_MCE_EARLY;
			else if (arg3 == PR_MCE_KILL_DEFAULT)
				current->flags &=
						~(PF_MCE_EARLY|PF_MCE_PROCESS);
			else
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}
		break;
	case PR_MCE_KILL_GET:
		if (arg2 | arg3 | arg4 | arg5)
			return -EINVAL;
		if (current->flags & PF_MCE_PROCESS)
			error = (current->flags & PF_MCE_EARLY) ?
				PR_MCE_KILL_EARLY : PR_MCE_KILL_LATE;
		else
			error = PR_MCE_KILL_DEFAULT;
		break;
	case PR_SET_MM:
		error = prctl_set_mm(arg2, arg3, arg4, arg5);
		break;
	case PR_SET_CHILD_SUBREAPER:
		me->signal->is_child_subreaper = !!arg2;
		break;
	case PR_GET_CHILD_SUBREAPER:
		error = put_user(me->signal->is_child_subreaper,
				 (int __user *) arg2);
		break;
	case PR_SET_NO_NEW_PRIVS:
		if (arg2 != 1 || arg3 || arg4 || arg5)
			return -EINVAL;

		current->no_new_privs = 1;
		break;
	case PR_GET_NO_NEW_PRIVS:
		if (arg2 || arg3 || arg4 || arg5)
			return -EINVAL;
		return current->no_new_privs ? 1 : 0;
	default:
		error = -EINVAL;
		break;
	}
	return error;
}
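/*
 * Editor's illustration (not part of the original file): setting the
 * no_new_privs bit from userspace, typically before installing a seccomp
 * filter.  Once set, the flag cannot be cleared and execve() will not grant
 * new privileges.  The fallback constants are only for old prctl.h headers.
 */
#if 0
#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SET_NO_NEW_PRIVS
#define PR_SET_NO_NEW_PRIVS 38
#define PR_GET_NO_NEW_PRIVS 39
#endif

int main(void)
{
	if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) != 0) {
		perror("prctl(PR_SET_NO_NEW_PRIVS)");
		return 1;
	}
	printf("no_new_privs = %d\n", prctl(PR_GET_NO_NEW_PRIVS, 0, 0, 0, 0));
	return 0;
}
#endif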
SYSCALL_DEFINE3(getcpu, unsigned __user *, cpup, unsigned __user *, nodep,
		struct getcpu_cache __user *, unused)
{
	int err = 0;
	int cpu = raw_smp_processor_id();
	if (cpup)
		err |= put_user(cpu, cpup);
	if (nodep)
		err |= put_user(cpu_to_node(cpu), nodep);
	return err ? -EFAULT : 0;
}
char poweroff_cmd[POWEROFF_CMD_PATH_LEN] = "/sbin/poweroff";

static void argv_cleanup(struct subprocess_info *info)
{
	argv_free(info->argv);
}

/**
 * orderly_poweroff - Trigger an orderly system poweroff
 * @force: force poweroff if command execution fails
 *
 * This may be called from any context to trigger a system shutdown.
 * If the orderly shutdown fails, it will force an immediate shutdown.
 */
int orderly_poweroff(bool force)
{
	int argc;
	char **argv = argv_split(GFP_ATOMIC, poweroff_cmd, &argc);
	static char *envp[] = {
		"HOME=/",
		"PATH=/sbin:/bin:/usr/sbin:/usr/bin",
		NULL
	};
	int ret = -ENOMEM;
	struct subprocess_info *info;

	if (argv == NULL) {
		printk(KERN_WARNING "%s failed to allocate memory for \"%s\"\n",
		       __func__, poweroff_cmd);
		goto out;
	}

	info = call_usermodehelper_setup(argv[0], argv, envp, GFP_ATOMIC);
	if (info == NULL) {
		argv_free(argv);
		goto out;
	}

	call_usermodehelper_setfns(info, NULL, argv_cleanup, NULL);

	ret = call_usermodehelper_exec(info, UMH_NO_WAIT);

out:
	if (ret && force) {
		printk(KERN_WARNING "Failed to start orderly shutdown: "
		       "forcing the issue\n");

		/* I guess this should try to kick off some daemon to
		   sync and poweroff asap.  Or not even bother syncing
		   if we're doing an emergency shutdown? */
		emergency_sync();
		kernel_power_off();
	}

	return ret;
}
EXPORT_SYMBOL_GPL(orderly_poweroff);
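/*
 * Editor's illustration (not part of the original file): a sketch of how a
 * driver might use orderly_poweroff(), e.g. on a critical thermal trip.  The
 * function name is hypothetical; passing force = true falls back to an
 * immediate kernel_power_off() if running the userspace helper fails.
 */
#if 0
#include <linux/printk.h>
#include <linux/reboot.h>

static void example_critical_overheat(void)
{
	pr_emerg("example: critical temperature reached, shutting down\n");
	orderly_poweroff(true);
}
#endif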