/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/utsname.h>
#include <linux/mman.h>
#include <linux/smp_lock.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/prctl.h>
#include <linux/init.h>
#include <linux/highuid.h>
#include <linux/kernel.h>
#include <linux/kexec.h>
#include <linux/workqueue.h>
#include <linux/capability.h>
#include <linux/device.h>
#include <linux/key.h>
#include <linux/times.h>
#include <linux/posix-timers.h>
#include <linux/security.h>
#include <linux/dcookies.h>
#include <linux/suspend.h>
#include <linux/tty.h>
#include <linux/signal.h>
#include <linux/cn_proc.h>
#include <linux/compat.h>
#include <linux/syscalls.h>
#include <linux/kprobes.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#ifndef SET_UNALIGN_CTL
# define SET_UNALIGN_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_UNALIGN_CTL
# define GET_UNALIGN_CTL(a,b)	(-EINVAL)
#endif
#ifndef SET_FPEMU_CTL
# define SET_FPEMU_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_FPEMU_CTL
# define GET_FPEMU_CTL(a,b)	(-EINVAL)
#endif
#ifndef SET_FPEXC_CTL
# define SET_FPEXC_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_FPEXC_CTL
# define GET_FPEXC_CTL(a,b)	(-EINVAL)
#endif
/*
 * this is where the system-wide overflow UID and GID are defined, for
 * architectures that now have 32-bit UID/GID but didn't in the past
 */

int overflowuid = DEFAULT_OVERFLOWUID;
int overflowgid = DEFAULT_OVERFLOWGID;

EXPORT_SYMBOL(overflowuid);
EXPORT_SYMBOL(overflowgid);

/*
 * the same as above, but for filesystems which can only store a 16-bit
 * UID and GID. as such, this is needed on all architectures
 */

int fs_overflowuid = DEFAULT_FS_OVERFLOWUID;
int fs_overflowgid = DEFAULT_FS_OVERFLOWUID;

EXPORT_SYMBOL(fs_overflowuid);
EXPORT_SYMBOL(fs_overflowgid);
/*
 * this indicates whether you can reboot with ctrl-alt-del: the default is yes
 */

/*
 *	Notifier list for kernel code which wants to be called
 *	at shutdown. This is used to stop any idling DMA operations.
 */

static struct notifier_block *reboot_notifier_list;
static DEFINE_RWLOCK(notifier_lock);
/**
 *	notifier_chain_register - Add notifier to a notifier chain
 *	@list: Pointer to root list pointer
 *	@n: New entry in notifier chain
 *
 *	Adds a notifier to a notifier chain.
 *
 *	Currently always returns zero.
 */

int notifier_chain_register(struct notifier_block **list, struct notifier_block *n)
{
	write_lock(&notifier_lock);
	while (*list) {
		if (n->priority > (*list)->priority)
			break;
		list = &((*list)->next);
	}
	n->next = *list;
	*list = n;
	write_unlock(&notifier_lock);
	return 0;
}

EXPORT_SYMBOL(notifier_chain_register);
/**
 *	notifier_chain_unregister - Remove notifier from a notifier chain
 *	@nl: Pointer to root list pointer
 *	@n: New entry in notifier chain
 *
 *	Removes a notifier from a notifier chain.
 *
 *	Returns zero on success, or %-ENOENT on failure.
 */

int notifier_chain_unregister(struct notifier_block **nl, struct notifier_block *n)
{
	write_lock(&notifier_lock);
	while (*nl) {
		if (*nl == n) {
			*nl = n->next;
			write_unlock(&notifier_lock);
			return 0;
		}
		nl = &((*nl)->next);
	}
	write_unlock(&notifier_lock);
	return -ENOENT;
}

EXPORT_SYMBOL(notifier_chain_unregister);
/**
 *	notifier_call_chain - Call functions in a notifier chain
 *	@n: Pointer to root pointer of notifier chain
 *	@val: Value passed unmodified to notifier function
 *	@v: Pointer passed unmodified to notifier function
 *
 *	Calls each function in a notifier chain in turn.
 *
 *	If the return value of the notifier can be and'd
 *	with %NOTIFY_STOP_MASK, then notifier_call_chain
 *	will return immediately, with the return value of
 *	the notifier function which halted execution.
 *	Otherwise, the return value is the return value
 *	of the last notifier function called.
 */

int __kprobes notifier_call_chain(struct notifier_block **n, unsigned long val, void *v)
{
	int ret = NOTIFY_DONE;
	struct notifier_block *nb = *n;

	while (nb) {
		ret = nb->notifier_call(nb, val, v);
		if (ret & NOTIFY_STOP_MASK)
			return ret;
		nb = nb->next;
	}
	return ret;
}

EXPORT_SYMBOL(notifier_call_chain);
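/*
 * Illustrative sketch of the chain semantics described above (not code that
 * is built as part of this file; the callback name and the event it tests
 * are made up, while the NOTIFY_* values come from <linux/notifier.h>):
 *
 *	static int example_callback(struct notifier_block *self,
 *				    unsigned long event, void *data)
 *	{
 *		if (event != SYS_RESTART)
 *			return NOTIFY_DONE;	- uninterested, chain keeps walking
 *		return NOTIFY_OK;		- handled, chain still keeps walking,
 *						  since NOTIFY_OK has no STOP bit set
 *	}
 *
 * A callback that returns NOTIFY_BAD (which contains NOTIFY_STOP_MASK)
 * makes notifier_call_chain() return immediately with that value.
 */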
/**
 *	register_reboot_notifier - Register function to be called at reboot time
 *	@nb: Info about notifier function to be called
 *
 *	Registers a function with the list of functions
 *	to be called at reboot time.
 *
 *	Currently always returns zero, as notifier_chain_register
 *	always returns zero.
 */

int register_reboot_notifier(struct notifier_block * nb)
{
	return notifier_chain_register(&reboot_notifier_list, nb);
}

EXPORT_SYMBOL(register_reboot_notifier);
/**
 *	unregister_reboot_notifier - Unregister previously registered reboot notifier
 *	@nb: Hook to be unregistered
 *
 *	Unregisters a previously registered reboot
 *	notifier function.
 *
 *	Returns zero on success, or %-ENOENT on failure.
 */

int unregister_reboot_notifier(struct notifier_block * nb)
{
	return notifier_chain_unregister(&reboot_notifier_list, nb);
}

EXPORT_SYMBOL(unregister_reboot_notifier);
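/*
 * Usage sketch for the two helpers above (illustrative only; the driver
 * names and the quiesce helper are hypothetical, the notifier_block fields
 * and the register/unregister calls are the real API exported here):
 *
 *	static int mydrv_reboot(struct notifier_block *nb,
 *				unsigned long code, void *unused)
 *	{
 *		mydrv_quiesce_dma();		- hypothetical device quiesce
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block mydrv_reboot_nb = {
 *		.notifier_call	= mydrv_reboot,
 *		.priority	= 0,
 *	};
 *
 *	at probe time:	register_reboot_notifier(&mydrv_reboot_nb);
 *	at remove time:	unregister_reboot_notifier(&mydrv_reboot_nb);
 */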
#ifndef CONFIG_SECURITY
int capable(int cap)
{
	if (cap_raised(current->cap_effective, cap)) {
		current->flags |= PF_SUPERPRIV;
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(capable);
#endif
static int set_one_prio(struct task_struct *p, int niceval, int error)
{
	if (p->uid != current->euid &&
	    p->euid != current->euid && !capable(CAP_SYS_NICE)) {
	if (niceval < task_nice(p) && !can_nice(p, niceval)) {
	no_nice = security_task_setnice(p, niceval);
	set_user_nice(p, niceval);
asmlinkage long sys_setpriority(int which, int who, int niceval)
{
	struct task_struct *g, *p;
	struct user_struct *user;

	if (which > 2 || which < 0)

	/* normalize: avoid signed division (rounding problems) */

	read_lock(&tasklist_lock);
	p = find_task_by_pid(who);
	error = set_one_prio(p, niceval, error);
	who = process_group(current);
	do_each_task_pid(who, PIDTYPE_PGID, p) {
		error = set_one_prio(p, niceval, error);
	} while_each_task_pid(who, PIDTYPE_PGID, p);
	user = current->user;
	if ((who != current->uid) && !(user = find_user(who)))
		goto out_unlock;	/* No processes for this user */
	error = set_one_prio(p, niceval, error);
	while_each_thread(g, p);
	if (who != current->uid)
		free_uid(user);		/* For find_user() */
	read_unlock(&tasklist_lock);
/*
 * Ugh. To avoid negative return values, "getpriority()" will
 * not return the normal nice-value, but a negated value that
 * has been offset by 20 (ie it returns 40..1 instead of -20..19)
 * to stay compatible.
 */
asmlinkage long sys_getpriority(int which, int who)
{
	struct task_struct *g, *p;
	struct user_struct *user;
	long niceval, retval = -ESRCH;

	if (which > 2 || which < 0)

	read_lock(&tasklist_lock);
	p = find_task_by_pid(who);
	niceval = 20 - task_nice(p);
	if (niceval > retval)
	who = process_group(current);
	do_each_task_pid(who, PIDTYPE_PGID, p) {
		niceval = 20 - task_nice(p);
		if (niceval > retval)
	} while_each_task_pid(who, PIDTYPE_PGID, p);
	user = current->user;
	if ((who != current->uid) && !(user = find_user(who)))
		goto out_unlock;	/* No processes for this user */
	niceval = 20 - task_nice(p);
	if (niceval > retval)
	while_each_thread(g, p);
	if (who != current->uid)
		free_uid(user);		/* for find_user() */
	read_unlock(&tasklist_lock);
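/*
 * Worked example of the +20 offset described above (illustrative only):
 * for a task at nice -5 the syscall returns 20 - (-5) = 25, and for nice
 * +19 it returns 1, so every value is positive and distinguishable from an
 * error.  Library wrappers undo the offset by computing 20 - return_value
 * before handing the nice value back to the caller.
 */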
/**
 *	emergency_restart - reboot the system
 *
 *	Without shutting down any hardware or taking any locks
 *	reboot the system.  This is called when we know we are in
 *	trouble so this is our best effort to reboot.  This is
 *	safe to call in interrupt context.
 */
void emergency_restart(void)
{
	machine_emergency_restart();
}
EXPORT_SYMBOL_GPL(emergency_restart);
void kernel_restart_prepare(char *cmd)
{
	notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd);
	system_state = SYSTEM_RESTART;
/**
 *	kernel_restart - reboot the system
 *	@cmd: pointer to buffer containing command to execute for restart
 *
 *	Shutdown everything and perform a clean reboot.
 *	This is not safe to call in interrupt context.
 */
void kernel_restart(char *cmd)
{
	kernel_restart_prepare(cmd);
	if (!cmd) {
		printk(KERN_EMERG "Restarting system.\n");
	} else {
		printk(KERN_EMERG "Restarting system with command '%s'.\n", cmd);
	}
	machine_restart(cmd);
}
EXPORT_SYMBOL_GPL(kernel_restart);
/**
 *	kernel_kexec - reboot the system
 *
 *	Move into place and start executing a preloaded standalone
 *	executable.  If nothing was preloaded return an error.
 */
void kernel_kexec(void)
{
	struct kimage *image;
	image = xchg(&kexec_image, 0);
	if (!image)
		return;
	kernel_restart_prepare(NULL);
	printk(KERN_EMERG "Starting new kernel\n");
	machine_kexec(image);
EXPORT_SYMBOL_GPL(kernel_kexec);
/**
 *	kernel_halt - halt the system
 *
 *	Shutdown everything and perform a clean system halt.
 */
void kernel_halt_prepare(void)
{
	notifier_call_chain(&reboot_notifier_list, SYS_HALT, NULL);
	system_state = SYSTEM_HALT;

void kernel_halt(void)
{
	kernel_halt_prepare();
	printk(KERN_EMERG "System halted.\n");
EXPORT_SYMBOL_GPL(kernel_halt);
/**
 *	kernel_power_off - power_off the system
 *
 *	Shutdown everything and perform a clean system power_off.
 */
void kernel_power_off_prepare(void)
{
	notifier_call_chain(&reboot_notifier_list, SYS_POWER_OFF, NULL);
	system_state = SYSTEM_POWER_OFF;

void kernel_power_off(void)
{
	kernel_power_off_prepare();
	printk(KERN_EMERG "Power down.\n");
EXPORT_SYMBOL_GPL(kernel_power_off);
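/*
 * Illustrative sketch (not part of this file): the power-off path ends up
 * in machine_power_off(), which on most architectures calls the global
 * pm_power_off hook if a platform has installed one.  A hypothetical board
 * driver would wire it up roughly like this; the function name and the
 * register write are made up, only pm_power_off itself is the real hook:
 *
 *	static void myboard_power_off(void)
 *	{
 *		writel(0x1, myboard_pmu_base + 0x04);	- hypothetical PMU poke
 *	}
 *
 *	pm_power_off = myboard_power_off;	- e.g. from the board's init code
 */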
/*
 * Reboot system call: for obvious reasons only root may call it,
 * and even root needs to set up some magic numbers in the registers
 * so that some mistake won't make this reboot the whole machine.
 * You can also set the meaning of the ctrl-alt-del-key here.
 *
 * reboot doesn't sync: do that yourself before calling this.
 */
asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void __user * arg)
{
	/* We only trust the superuser with rebooting the system. */
	if (!capable(CAP_SYS_BOOT))

	/* For safety, we require "magic" arguments. */
	if (magic1 != LINUX_REBOOT_MAGIC1 ||
	    (magic2 != LINUX_REBOOT_MAGIC2 &&
	     magic2 != LINUX_REBOOT_MAGIC2A &&
	     magic2 != LINUX_REBOOT_MAGIC2B &&
	     magic2 != LINUX_REBOOT_MAGIC2C))

	/* Instead of trying to make the power_off code look like
	 * halt when pm_power_off is not set do it the easy way.
	 */
	if ((cmd == LINUX_REBOOT_CMD_POWER_OFF) && !pm_power_off)
		cmd = LINUX_REBOOT_CMD_HALT;

	switch (cmd) {
	case LINUX_REBOOT_CMD_RESTART:
		kernel_restart(NULL);

	case LINUX_REBOOT_CMD_CAD_ON:

	case LINUX_REBOOT_CMD_CAD_OFF:

	case LINUX_REBOOT_CMD_HALT:

	case LINUX_REBOOT_CMD_POWER_OFF:

	case LINUX_REBOOT_CMD_RESTART2:
		if (strncpy_from_user(&buffer[0], arg, sizeof(buffer) - 1) < 0) {
		buffer[sizeof(buffer) - 1] = '\0';
		kernel_restart(buffer);

	case LINUX_REBOOT_CMD_KEXEC:

#ifdef CONFIG_SOFTWARE_SUSPEND
	case LINUX_REBOOT_CMD_SW_SUSPEND:
		int ret = software_suspend();
static void deferred_cad(void *dummy)
{
	kernel_restart(NULL);
}

/*
 * This function gets called by ctrl-alt-del - ie the keyboard interrupt.
 * As it's called within an interrupt, it may NOT sync: the only choice
 * is whether to reboot at once, or just ignore the ctrl-alt-del.
 */
void ctrl_alt_del(void)
{
	static DECLARE_WORK(cad_work, deferred_cad, NULL);

	if (C_A_D)
		schedule_work(&cad_work);
	else
		kill_proc(cad_pid, SIGINT, 1);
}
/*
 * Unprivileged users may change the real gid to the effective gid
 * or vice versa.  (BSD-style)
 *
 * If you set the real gid at all, or set the effective gid to a value not
 * equal to the real gid, then the saved gid is set to the new effective gid.
 *
 * This makes it possible for a setgid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit over a program.
 *
 * The general idea is that a program which uses just setregid() will be
 * 100% compatible with BSD.  A program which uses just setgid() will be
 * 100% compatible with POSIX with saved IDs.
 *
 * SMP: There are no races, the GIDs are checked only by filesystem
 *      operations (as far as semantic preservation is concerned).
 */
asmlinkage long sys_setregid(gid_t rgid, gid_t egid)
{
	int old_rgid = current->gid;
	int old_egid = current->egid;
	int new_rgid = old_rgid;
	int new_egid = old_egid;

	retval = security_task_setgid(rgid, egid, (gid_t)-1, LSM_SETID_RE);

	if (rgid != (gid_t) -1) {
		if ((old_rgid == rgid) ||
		    (current->egid == rgid) ||
	if (egid != (gid_t) -1) {
		if ((old_rgid == egid) ||
		    (current->egid == egid) ||
		    (current->sgid == egid) ||
	if (new_egid != old_egid)
		current->mm->dumpable = suid_dumpable;
	if (rgid != (gid_t) -1 ||
	    (egid != (gid_t) -1 && egid != old_rgid))
		current->sgid = new_egid;
	current->fsgid = new_egid;
	current->egid = new_egid;
	current->gid = new_rgid;
	key_fsgid_changed(current);
	proc_id_connector(current, PROC_EVENT_GID);
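/*
 * Usage sketch for the BSD semantics documented above (illustrative
 * userspace fragment, not kernel code): a setgid helper that wants to shed
 * its elevated group permanently can do
 *
 *	gid_t rgid = getgid();
 *	if (setregid(rgid, rgid) < 0)	- real and effective both set, so the
 *		abort();		  saved gid follows the new egid
 *
 * after which neither the effective nor the saved gid can be switched back
 * to the original setgid group.
 */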
/*
 * setgid() is implemented like SysV w/ SAVED_IDS
 *
 * SMP: Same implicit races as above.
 */
asmlinkage long sys_setgid(gid_t gid)
{
	int old_egid = current->egid;

	retval = security_task_setgid(gid, (gid_t)-1, (gid_t)-1, LSM_SETID_ID);

	if (capable(CAP_SETGID))
		current->mm->dumpable = suid_dumpable;
		current->gid = current->egid = current->sgid = current->fsgid = gid;
	else if ((gid == current->gid) || (gid == current->sgid))
		current->mm->dumpable = suid_dumpable;
		current->egid = current->fsgid = gid;

	key_fsgid_changed(current);
	proc_id_connector(current, PROC_EVENT_GID);
static int set_user(uid_t new_ruid, int dumpclear)
{
	struct user_struct *new_user;

	new_user = alloc_uid(new_ruid);
	if (atomic_read(&new_user->processes) >=
	    current->signal->rlim[RLIMIT_NPROC].rlim_cur &&
	    new_user != &root_user) {
	switch_uid(new_user);
	current->mm->dumpable = suid_dumpable;
	current->uid = new_ruid;
/*
 * Unprivileged users may change the real uid to the effective uid
 * or vice versa.  (BSD-style)
 *
 * If you set the real uid at all, or set the effective uid to a value not
 * equal to the real uid, then the saved uid is set to the new effective uid.
 *
 * This makes it possible for a setuid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit over a program.
 *
 * The general idea is that a program which uses just setreuid() will be
 * 100% compatible with BSD.  A program which uses just setuid() will be
 * 100% compatible with POSIX with saved IDs.
 */
asmlinkage long sys_setreuid(uid_t ruid, uid_t euid)
{
	int old_ruid, old_euid, old_suid, new_ruid, new_euid;

	retval = security_task_setuid(ruid, euid, (uid_t)-1, LSM_SETID_RE);

	new_ruid = old_ruid = current->uid;
	new_euid = old_euid = current->euid;
	old_suid = current->suid;

	if (ruid != (uid_t) -1) {
		if ((old_ruid != ruid) &&
		    (current->euid != ruid) &&
		    !capable(CAP_SETUID))
	if (euid != (uid_t) -1) {
		if ((old_ruid != euid) &&
		    (current->euid != euid) &&
		    (current->suid != euid) &&
		    !capable(CAP_SETUID))
	if (new_ruid != old_ruid && set_user(new_ruid, new_euid != old_euid) < 0)

	if (new_euid != old_euid)
		current->mm->dumpable = suid_dumpable;
	current->fsuid = current->euid = new_euid;
	if (ruid != (uid_t) -1 ||
	    (euid != (uid_t) -1 && euid != old_ruid))
		current->suid = current->euid;
	current->fsuid = current->euid;

	key_fsuid_changed(current);
	proc_id_connector(current, PROC_EVENT_UID);

	return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_RE);
/*
 * setuid() is implemented like SysV with SAVED_IDS
 *
 * Note that SAVED_ID's is deficient in that a setuid root program
 * like sendmail, for example, cannot set its uid to be a normal
 * user and then switch back, because if you're root, setuid() sets
 * the saved uid too.  If you don't like this, blame the bright people
 * in the POSIX committee and/or USG.  Note that the BSD-style setreuid()
 * will allow a root program to temporarily drop privileges and be able to
 * regain them by swapping the real and effective uid.
 */
asmlinkage long sys_setuid(uid_t uid)
{
	int old_euid = current->euid;
	int old_ruid, old_suid, new_ruid, new_suid;

	retval = security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_ID);

	old_ruid = new_ruid = current->uid;
	old_suid = current->suid;

	if (capable(CAP_SETUID)) {
		if (uid != old_ruid && set_user(uid, old_euid != uid) < 0)
	} else if ((uid != current->uid) && (uid != new_suid))

	current->mm->dumpable = suid_dumpable;
	current->fsuid = current->euid = uid;
	current->suid = new_suid;

	key_fsuid_changed(current);
	proc_id_connector(current, PROC_EVENT_UID);

	return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_ID);
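/*
 * Usage sketch for the setreuid() remark above (illustrative userspace
 * fragment, not kernel code): a setuid-root program started with real uid
 * = user and effective uid = 0 can shed and regain root by swapping the
 * two, something plain setuid() cannot do because as root it would also
 * overwrite the saved uid:
 *
 *	setreuid(geteuid(), getuid());	- now euid = user, ruid = 0
 *	... do work as the invoking user ...
 *	setreuid(geteuid(), getuid());	- swap back: euid = 0 again
 */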
/*
 * This function implements a generic ability to update ruid, euid,
 * and suid.  This allows you to implement the 4.4 compatible seteuid().
 */
asmlinkage long sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
{
	int old_ruid = current->uid;
	int old_euid = current->euid;
	int old_suid = current->suid;

	retval = security_task_setuid(ruid, euid, suid, LSM_SETID_RES);

	if (!capable(CAP_SETUID)) {
		if ((ruid != (uid_t) -1) && (ruid != current->uid) &&
		    (ruid != current->euid) && (ruid != current->suid))
		if ((euid != (uid_t) -1) && (euid != current->uid) &&
		    (euid != current->euid) && (euid != current->suid))
		if ((suid != (uid_t) -1) && (suid != current->uid) &&
		    (suid != current->euid) && (suid != current->suid))
	if (ruid != (uid_t) -1) {
		if (ruid != current->uid && set_user(ruid, euid != current->euid) < 0)
	if (euid != (uid_t) -1) {
		if (euid != current->euid)
			current->mm->dumpable = suid_dumpable;
		current->euid = euid;
	current->fsuid = current->euid;
	if (suid != (uid_t) -1)
		current->suid = suid;

	key_fsuid_changed(current);
	proc_id_connector(current, PROC_EVENT_UID);

	return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_RES);
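/*
 * Sketch of the 4.4-compatible seteuid() mentioned above (illustrative
 * only): a C library can express it as a setresuid() call that leaves the
 * real and saved uids untouched,
 *
 *	int seteuid(uid_t euid)
 *	{
 *		return setresuid((uid_t) -1, euid, (uid_t) -1);
 *	}
 *
 * which is essentially how Linux C libraries implement it.
 */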
asmlinkage long sys_getresuid(uid_t __user *ruid, uid_t __user *euid, uid_t __user *suid)
{
	if (!(retval = put_user(current->uid, ruid)) &&
	    !(retval = put_user(current->euid, euid)))
		retval = put_user(current->suid, suid);
/*
 * Same as above, but for rgid, egid, sgid.
 */
asmlinkage long sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
{
	retval = security_task_setgid(rgid, egid, sgid, LSM_SETID_RES);

	if (!capable(CAP_SETGID)) {
		if ((rgid != (gid_t) -1) && (rgid != current->gid) &&
		    (rgid != current->egid) && (rgid != current->sgid))
		if ((egid != (gid_t) -1) && (egid != current->gid) &&
		    (egid != current->egid) && (egid != current->sgid))
		if ((sgid != (gid_t) -1) && (sgid != current->gid) &&
		    (sgid != current->egid) && (sgid != current->sgid))
	if (egid != (gid_t) -1) {
		if (egid != current->egid)
			current->mm->dumpable = suid_dumpable;
		current->egid = egid;
	current->fsgid = current->egid;
	if (rgid != (gid_t) -1)
	if (sgid != (gid_t) -1)
		current->sgid = sgid;

	key_fsgid_changed(current);
	proc_id_connector(current, PROC_EVENT_GID);
asmlinkage long sys_getresgid(gid_t __user *rgid, gid_t __user *egid, gid_t __user *sgid)
{
	if (!(retval = put_user(current->gid, rgid)) &&
	    !(retval = put_user(current->egid, egid)))
		retval = put_user(current->sgid, sgid);
/*
 * "setfsuid()" sets the fsuid - the uid used for filesystem checks. This
 * is used for "access()" and for the NFS daemon (letting nfsd stay at
 * whatever uid it wants to). It normally shadows "euid", except when
 * explicitly set by setfsuid() or for access..
 */
asmlinkage long sys_setfsuid(uid_t uid)
{
	old_fsuid = current->fsuid;
	if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS))

	if (uid == current->uid || uid == current->euid ||
	    uid == current->suid || uid == current->fsuid ||
		if (uid != old_fsuid)
			current->mm->dumpable = suid_dumpable;
		current->fsuid = uid;

	key_fsuid_changed(current);
	proc_id_connector(current, PROC_EVENT_UID);

	security_task_post_setuid(old_fsuid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS);
/*
 * Samma på svenska.. ("the same, in Swedish..")
 */
asmlinkage long sys_setfsgid(gid_t gid)
{
	old_fsgid = current->fsgid;
	if (security_task_setgid(gid, (gid_t)-1, (gid_t)-1, LSM_SETID_FS))

	if (gid == current->gid || gid == current->egid ||
	    gid == current->sgid || gid == current->fsgid ||
		if (gid != old_fsgid)
			current->mm->dumpable = suid_dumpable;
		current->fsgid = gid;
	key_fsgid_changed(current);
	proc_id_connector(current, PROC_EVENT_GID);
asmlinkage long sys_times(struct tms __user * tbuf)
{
	/*
	 *	In the SMP world we might just be unlucky and have one of
	 *	the times increment as we use it. Since the value is an
	 *	atomically safe type this is just fine. Conceptually its
	 *	as if the syscall took an instant longer to occur.
	 */
	cputime_t utime, stime, cutime, cstime;

	if (thread_group_empty(current)) {
		/*
		 * Single thread case without the use of any locks.
		 *
		 * We may race with release_task if two threads are
		 * executing. However, release task first adds up the
		 * counters (__exit_signal) before removing the task
		 * from the process tasklist (__unhash_process).
		 * __exit_signal also acquires and releases the
		 * siglock which results in the proper memory ordering
		 * so that the list modifications are always visible
		 * after the counters have been updated.
		 *
		 * If the counters have been updated by the second thread
		 * but the thread has not yet been removed from the list
		 * then the other branch will be executing which will
		 * block on tasklist_lock until the exit handling of the
		 * other task is finished.
		 *
		 * This also implies that the sighand->siglock cannot
		 * be held by another processor. So we can also
		 * skip acquiring that lock.
		 */
		utime = cputime_add(current->signal->utime, current->utime);
		stime = cputime_add(current->signal->stime, current->stime);
		cutime = current->signal->cutime;
		cstime = current->signal->cstime;

		/* Process with multiple threads */
		struct task_struct *tsk = current;
		struct task_struct *t;

		read_lock(&tasklist_lock);
		utime = tsk->signal->utime;
		stime = tsk->signal->stime;
		utime = cputime_add(utime, t->utime);
		stime = cputime_add(stime, t->stime);
		/*
		 * While we have tasklist_lock read-locked, no dying thread
		 * can be updating current->signal->[us]time.  Instead,
		 * we got their counts included in the live thread loop.
		 * However, another thread can come in right now and
		 * do a wait call that updates current->signal->c[us]time.
		 * To make sure we always see that pair updated atomically,
		 * we take the siglock around fetching them.
		 */
		spin_lock_irq(&tsk->sighand->siglock);
		cutime = tsk->signal->cutime;
		cstime = tsk->signal->cstime;
		spin_unlock_irq(&tsk->sighand->siglock);
		read_unlock(&tasklist_lock);

	tmp.tms_utime = cputime_to_clock_t(utime);
	tmp.tms_stime = cputime_to_clock_t(stime);
	tmp.tms_cutime = cputime_to_clock_t(cutime);
	tmp.tms_cstime = cputime_to_clock_t(cstime);
	if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
	return (long) jiffies_64_to_clock_t(get_jiffies_64());
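/*
 * Usage sketch for the clock_t values produced above (illustrative
 * userspace fragment, not kernel code): the tms fields and the return
 * value are expressed in clock ticks, so callers scale them by
 * sysconf(_SC_CLK_TCK):
 *
 *	struct tms t;
 *	clock_t start = times(&t);
 *	double cpu_seconds = (double)t.tms_utime / sysconf(_SC_CLK_TCK);
 */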
/*
 * This needs some heavy checking ...
 * I just haven't the stomach for it. I also don't fully
 * understand sessions/pgrp etc. Let somebody who does explain it.
 *
 * OK, I think I have the protection semantics right.... this is really
 * only important on a multi-user system anyway, to make sure one user
 * can't send a signal to a process owned by another.  -TYT, 12/12/91
 *
 * Auch. Had to add the 'did_exec' flag to conform completely to POSIX.
 */
asmlinkage long sys_setpgid(pid_t pid, pid_t pgid)
{
	struct task_struct *p;
	struct task_struct *group_leader = current->group_leader;

	if (!pid)
		pid = group_leader->pid;

	/* From this point forward we keep holding onto the tasklist lock
	 * so that our parent does not change from under us. -DaveM
	 */
	write_lock_irq(&tasklist_lock);

	p = find_task_by_pid(pid);

	if (!thread_group_leader(p))

	if (p->real_parent == group_leader) {
		if (p->signal->session != group_leader->signal->session)
	if (p != group_leader)

	if (p->signal->leader)

		struct task_struct *p;

		do_each_task_pid(pgid, PIDTYPE_PGID, p) {
			if (p->signal->session == group_leader->signal->session)
		} while_each_task_pid(pgid, PIDTYPE_PGID, p);

	err = security_task_setpgid(p, pgid);

	if (process_group(p) != pgid) {
		detach_pid(p, PIDTYPE_PGID);
		p->signal->pgrp = pgid;
		attach_pid(p, PIDTYPE_PGID, pgid);

	/* All paths lead to here, thus we are safe. -DaveM */
	write_unlock_irq(&tasklist_lock);
asmlinkage long sys_getpgid(pid_t pid)
{
	if (!pid) {
		return process_group(current);
	} else {
		struct task_struct *p;

		read_lock(&tasklist_lock);
		p = find_task_by_pid(pid);
		retval = security_task_getpgid(p);
		retval = process_group(p);
		read_unlock(&tasklist_lock);
#ifdef __ARCH_WANT_SYS_GETPGRP

asmlinkage long sys_getpgrp(void)
{
	/* SMP - assuming writes are word atomic this is fine */
	return process_group(current);
}

#endif
asmlinkage long sys_getsid(pid_t pid)
{
	if (!pid) {
		return current->signal->session;
	} else {
		struct task_struct *p;

		read_lock(&tasklist_lock);
		p = find_task_by_pid(pid);
		retval = security_task_getsid(p);
		retval = p->signal->session;
		read_unlock(&tasklist_lock);
asmlinkage long sys_setsid(void)
{
	struct task_struct *group_leader = current->group_leader;

	write_lock_irq(&tasklist_lock);

	pid = find_pid(PIDTYPE_PGID, group_leader->pid);

	group_leader->signal->leader = 1;
	__set_special_pids(group_leader->pid, group_leader->pid);
	group_leader->signal->tty = NULL;
	group_leader->signal->tty_old_pgrp = 0;
	err = process_group(group_leader);

	write_unlock_irq(&tasklist_lock);
/*
 * Supplementary group IDs
 */

/* init to 2 - one for init_task, one to ensure it is never freed */
struct group_info init_groups = { .usage = ATOMIC_INIT(2) };

struct group_info *groups_alloc(int gidsetsize)
{
	struct group_info *group_info;

	nblocks = (gidsetsize + NGROUPS_PER_BLOCK - 1) / NGROUPS_PER_BLOCK;
	/* Make sure we always allocate at least one indirect block pointer */
	nblocks = nblocks ? : 1;
	group_info = kmalloc(sizeof(*group_info) + nblocks*sizeof(gid_t *), GFP_USER);

	group_info->ngroups = gidsetsize;
	group_info->nblocks = nblocks;
	atomic_set(&group_info->usage, 1);

	if (gidsetsize <= NGROUPS_SMALL) {
		group_info->blocks[0] = group_info->small_block;

	for (i = 0; i < nblocks; i++) {
		b = (void *)__get_free_page(GFP_USER);
		if (!b)
			goto out_undo_partial_alloc;
		group_info->blocks[i] = b;

out_undo_partial_alloc:
	free_page((unsigned long)group_info->blocks[i]);

EXPORT_SYMBOL(groups_alloc);
void groups_free(struct group_info *group_info)
{
	if (group_info->blocks[0] != group_info->small_block) {
		int i;
		for (i = 0; i < group_info->nblocks; i++)
			free_page((unsigned long)group_info->blocks[i]);
	}
	kfree(group_info);
}

EXPORT_SYMBOL(groups_free);
/* export the group_info to a user-space array */
static int groups_to_user(gid_t __user *grouplist,
			  struct group_info *group_info)
{
	int count = group_info->ngroups;

	for (i = 0; i < group_info->nblocks; i++) {
		int cp_count = min(NGROUPS_PER_BLOCK, count);
		int off = i * NGROUPS_PER_BLOCK;
		int len = cp_count * sizeof(*grouplist);

		if (copy_to_user(grouplist+off, group_info->blocks[i], len))
/* fill a group_info from a user-space array - it must be allocated already */
static int groups_from_user(struct group_info *group_info,
			    gid_t __user *grouplist)
{
	int count = group_info->ngroups;

	for (i = 0; i < group_info->nblocks; i++) {
		int cp_count = min(NGROUPS_PER_BLOCK, count);
		int off = i * NGROUPS_PER_BLOCK;
		int len = cp_count * sizeof(*grouplist);

		if (copy_from_user(group_info->blocks[i], grouplist+off, len))
/* a simple Shell sort */
static void groups_sort(struct group_info *group_info)
{
	int base, max, stride;
	int gidsetsize = group_info->ngroups;

	for (stride = 1; stride < gidsetsize; stride = 3 * stride + 1)

	max = gidsetsize - stride;
	for (base = 0; base < max; base++) {
		int right = left + stride;
		gid_t tmp = GROUP_AT(group_info, right);

		while (left >= 0 && GROUP_AT(group_info, left) > tmp) {
			GROUP_AT(group_info, right) =
				GROUP_AT(group_info, left);
		}
		GROUP_AT(group_info, right) = tmp;
/* a simple bsearch */
int groups_search(struct group_info *group_info, gid_t grp)
{
	right = group_info->ngroups;
	while (left < right) {
		int mid = (left+right)/2;
		int cmp = grp - GROUP_AT(group_info, mid);
/* validate and set current->group_info */
int set_current_groups(struct group_info *group_info)
{
	struct group_info *old_info;

	retval = security_task_setgroups(group_info);

	groups_sort(group_info);
	get_group_info(group_info);

	task_lock(current);
	old_info = current->group_info;
	current->group_info = group_info;
	task_unlock(current);

	put_group_info(old_info);

	return 0;
}

EXPORT_SYMBOL(set_current_groups);
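/*
 * Usage sketch (illustrative, not code from this file): in-kernel callers
 * such as nfsd build a group_info with groups_alloc(), fill it via
 * GROUP_AT(), and install it through set_current_groups(), which sorts the
 * list and takes its own reference.  The gid values below are made up and
 * the allocation is assumed to succeed:
 *
 *	struct group_info *gi = groups_alloc(2);
 *	GROUP_AT(gi, 0) = 100;
 *	GROUP_AT(gi, 1) = 101;
 *	set_current_groups(gi);
 *	put_group_info(gi);	- drop the reference groups_alloc() returned
 */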
asmlinkage long sys_getgroups(int gidsetsize, gid_t __user *grouplist)
{
	/*
	 *	SMP: Nobody else can change our grouplist. Thus we are
	 *	safe.
	 */

	/* no need to grab task_lock here; it cannot change */
	get_group_info(current->group_info);
	i = current->group_info->ngroups;
	if (i > gidsetsize) {
	if (groups_to_user(grouplist, current->group_info)) {
	put_group_info(current->group_info);
/*
 *	SMP: Our groups are copy-on-write. We can set them safely
 *	without another task interfering.
 */

asmlinkage long sys_setgroups(int gidsetsize, gid_t __user *grouplist)
{
	struct group_info *group_info;

	if (!capable(CAP_SETGID))
	if ((unsigned)gidsetsize > NGROUPS_MAX)

	group_info = groups_alloc(gidsetsize);
	retval = groups_from_user(group_info, grouplist);
		put_group_info(group_info);

	retval = set_current_groups(group_info);
	put_group_info(group_info);
/*
 * Check whether we're fsgid/egid or in the supplemental group..
 */
int in_group_p(gid_t grp)
{
	int retval = 1;
	if (grp != current->fsgid) {
		get_group_info(current->group_info);
		retval = groups_search(current->group_info, grp);
		put_group_info(current->group_info);
	}
	return retval;
}

EXPORT_SYMBOL(in_group_p);

int in_egroup_p(gid_t grp)
{
	int retval = 1;
	if (grp != current->egid) {
		get_group_info(current->group_info);
		retval = groups_search(current->group_info, grp);
		put_group_info(current->group_info);
	}
	return retval;
}

EXPORT_SYMBOL(in_egroup_p);
DECLARE_RWSEM(uts_sem);

EXPORT_SYMBOL(uts_sem);
asmlinkage long sys_newuname(struct new_utsname __user * name)
{
	down_read(&uts_sem);
	if (copy_to_user(name, &system_utsname, sizeof *name))
asmlinkage long sys_sethostname(char __user *name, int len)
{
	char tmp[__NEW_UTS_LEN];

	if (!capable(CAP_SYS_ADMIN))
	if (len < 0 || len > __NEW_UTS_LEN)
	down_write(&uts_sem);
	if (!copy_from_user(tmp, name, len)) {
		memcpy(system_utsname.nodename, tmp, len);
		system_utsname.nodename[len] = 0;
#ifdef __ARCH_WANT_SYS_GETHOSTNAME

asmlinkage long sys_gethostname(char __user *name, int len)
{
	down_read(&uts_sem);
	i = 1 + strlen(system_utsname.nodename);
	if (copy_to_user(name, system_utsname.nodename, i))
/*
 * Only setdomainname; getdomainname can be implemented by calling
 * uname().
 */
asmlinkage long sys_setdomainname(char __user *name, int len)
{
	char tmp[__NEW_UTS_LEN];

	if (!capable(CAP_SYS_ADMIN))
	if (len < 0 || len > __NEW_UTS_LEN)

	down_write(&uts_sem);
	if (!copy_from_user(tmp, name, len)) {
		memcpy(system_utsname.domainname, tmp, len);
		system_utsname.domainname[len] = 0;
asmlinkage long sys_getrlimit(unsigned int resource, struct rlimit __user *rlim)
{
	if (resource >= RLIM_NLIMITS)
		return -EINVAL;
	else {
		struct rlimit value;
		task_lock(current->group_leader);
		value = current->signal->rlim[resource];
		task_unlock(current->group_leader);
		return copy_to_user(rlim, &value, sizeof(*rlim)) ? -EFAULT : 0;
	}
}
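/*
 * Usage sketch for the rlimit calls (illustrative userspace fragment, not
 * kernel code): raising the soft limit up to the current hard limit needs
 * no privilege, which mirrors the CAP_SYS_RESOURCE check in sys_setrlimit()
 * further down:
 *
 *	struct rlimit rl;
 *	getrlimit(RLIMIT_NOFILE, &rl);
 *	rl.rlim_cur = rl.rlim_max;
 *	setrlimit(RLIMIT_NOFILE, &rl);
 */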
#ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT

/*
 *	Back compatibility for getrlimit. Needed for some apps.
 */
asmlinkage long sys_old_getrlimit(unsigned int resource, struct rlimit __user *rlim)
{
	if (resource >= RLIM_NLIMITS)

	task_lock(current->group_leader);
	x = current->signal->rlim[resource];
	task_unlock(current->group_leader);
	if (x.rlim_cur > 0x7FFFFFFF)
		x.rlim_cur = 0x7FFFFFFF;
	if (x.rlim_max > 0x7FFFFFFF)
		x.rlim_max = 0x7FFFFFFF;
	return copy_to_user(rlim, &x, sizeof(x)) ? -EFAULT : 0;
asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim)
{
	struct rlimit new_rlim, *old_rlim;

	if (resource >= RLIM_NLIMITS)
	if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
	if (new_rlim.rlim_cur > new_rlim.rlim_max)
	old_rlim = current->signal->rlim + resource;
	if ((new_rlim.rlim_max > old_rlim->rlim_max) &&
	    !capable(CAP_SYS_RESOURCE))
	if (resource == RLIMIT_NOFILE && new_rlim.rlim_max > NR_OPEN)

	retval = security_task_setrlimit(resource, &new_rlim);

	task_lock(current->group_leader);
	*old_rlim = new_rlim;
	task_unlock(current->group_leader);

	if (resource == RLIMIT_CPU && new_rlim.rlim_cur != RLIM_INFINITY &&
	    (cputime_eq(current->signal->it_prof_expires, cputime_zero) ||
	     new_rlim.rlim_cur <= cputime_to_secs(
		     current->signal->it_prof_expires))) {
		cputime_t cputime = secs_to_cputime(new_rlim.rlim_cur);
		read_lock(&tasklist_lock);
		spin_lock_irq(&current->sighand->siglock);
		set_process_cpu_timer(current, CPUCLOCK_PROF,
		spin_unlock_irq(&current->sighand->siglock);
		read_unlock(&tasklist_lock);
/*
 * It would make sense to put struct rusage in the task_struct,
 * except that would make the task_struct be *really big*.  After
 * task_struct gets moved into malloc'ed memory, it would
 * make sense to do this.  It will make moving the rest of the information
 * a lot simpler!  (Which we're not doing right now because we're not
 * measuring them yet).
 *
 * This expects to be called with tasklist_lock read-locked or better,
 * and the siglock not locked.  It may momentarily take the siglock.
 *
 * When sampling multiple threads for RUSAGE_SELF, under SMP we might have
 * races with threads incrementing their own counters.  But since word
 * reads are atomic, we either get new values or old values and we don't
 * care which for the sums.  We always take the siglock to protect reading
 * the c* fields from p->signal from races with exit.c updating those
 * fields when reaping, so a sample either gets all the additions of a
 * given child after it's reaped, or none so this sample is before reaping.
 */

static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
{
	struct task_struct *t;
	unsigned long flags;
	cputime_t utime, stime;

	memset((char *) r, 0, sizeof *r);

	if (unlikely(!p->signal))

	utime = stime = cputime_zero;

	switch (who) {
	case RUSAGE_CHILDREN:
		spin_lock_irqsave(&p->sighand->siglock, flags);
		utime = p->signal->cutime;
		stime = p->signal->cstime;
		r->ru_nvcsw = p->signal->cnvcsw;
		r->ru_nivcsw = p->signal->cnivcsw;
		r->ru_minflt = p->signal->cmin_flt;
		r->ru_majflt = p->signal->cmaj_flt;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);

		if (who == RUSAGE_CHILDREN)

		utime = cputime_add(utime, p->signal->utime);
		stime = cputime_add(stime, p->signal->stime);
		r->ru_nvcsw += p->signal->nvcsw;
		r->ru_nivcsw += p->signal->nivcsw;
		r->ru_minflt += p->signal->min_flt;
		r->ru_majflt += p->signal->maj_flt;

		utime = cputime_add(utime, t->utime);
		stime = cputime_add(stime, t->stime);
		r->ru_nvcsw += t->nvcsw;
		r->ru_nivcsw += t->nivcsw;
		r->ru_minflt += t->min_flt;
		r->ru_majflt += t->maj_flt;

	cputime_to_timeval(utime, &r->ru_utime);
	cputime_to_timeval(stime, &r->ru_stime);
int getrusage(struct task_struct *p, int who, struct rusage __user *ru)
{
	struct rusage r;
	read_lock(&tasklist_lock);
	k_getrusage(p, who, &r);
	read_unlock(&tasklist_lock);
	return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;
}
asmlinkage long sys_getrusage(int who, struct rusage __user *ru)
{
	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN)
		return -EINVAL;
	return getrusage(current, who, ru);
}
asmlinkage long sys_umask(int mask)
{
	mask = xchg(&current->fs->umask, mask & S_IRWXUGO);
	return mask;
}
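/*
 * Behaviour note with a worked example (illustrative userspace idiom): the
 * call returns the previous mask, so callers commonly save and restore it:
 *
 *	mode_t old = umask(022);	- new files default to 0644, dirs to 0755
 *	...
 *	umask(old);			- restore the caller's mask
 */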
asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
			  unsigned long arg4, unsigned long arg5)
{
	error = security_task_prctl(option, arg2, arg3, arg4, arg5);

	switch (option) {
		case PR_SET_PDEATHSIG:
			if (!valid_signal(arg2)) {
			current->pdeath_signal = arg2;
		case PR_GET_PDEATHSIG:
			error = put_user(current->pdeath_signal, (int __user *)arg2);
		case PR_GET_DUMPABLE:
			error = current->mm->dumpable;
		case PR_SET_DUMPABLE:
			if (arg2 < 0 || arg2 > 2) {
			current->mm->dumpable = arg2;
		case PR_SET_UNALIGN:
			error = SET_UNALIGN_CTL(current, arg2);
		case PR_GET_UNALIGN:
			error = GET_UNALIGN_CTL(current, arg2);
		case PR_SET_FPEMU:
			error = SET_FPEMU_CTL(current, arg2);
		case PR_GET_FPEMU:
			error = GET_FPEMU_CTL(current, arg2);
		case PR_SET_FPEXC:
			error = SET_FPEXC_CTL(current, arg2);
		case PR_GET_FPEXC:
			error = GET_FPEXC_CTL(current, arg2);
		case PR_GET_TIMING:
			error = PR_TIMING_STATISTICAL;
		case PR_SET_TIMING:
			if (arg2 == PR_TIMING_STATISTICAL)
		case PR_GET_KEEPCAPS:
			if (current->keep_capabilities)
		case PR_SET_KEEPCAPS:
			if (arg2 != 0 && arg2 != 1) {
			current->keep_capabilities = arg2;
		case PR_SET_NAME: {
			struct task_struct *me = current;
			unsigned char ncomm[sizeof(me->comm)];

			ncomm[sizeof(me->comm)-1] = 0;
			if (strncpy_from_user(ncomm, (char __user *)arg2,
					      sizeof(me->comm)-1) < 0)
			set_task_comm(me, ncomm);
		}
		case PR_GET_NAME: {
			struct task_struct *me = current;
			unsigned char tcomm[sizeof(me->comm)];

			get_task_comm(tcomm, me);
			if (copy_to_user((char __user *)arg2, tcomm, sizeof(tcomm)))