/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/utsname.h>
#include <linux/mman.h>
#include <linux/smp_lock.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/prctl.h>
#include <linux/init.h>
#include <linux/highuid.h>
#include <linux/kernel.h>
#include <linux/kexec.h>
#include <linux/workqueue.h>
#include <linux/device.h>
#include <linux/key.h>
#include <linux/times.h>
#include <linux/posix-timers.h>
#include <linux/security.h>
#include <linux/dcookies.h>
#include <linux/suspend.h>
#include <linux/tty.h>
#include <linux/signal.h>
#include <linux/cn_proc.h>

#include <linux/compat.h>
#include <linux/syscalls.h>
#include <linux/kprobes.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#ifndef SET_UNALIGN_CTL
# define SET_UNALIGN_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_UNALIGN_CTL
# define GET_UNALIGN_CTL(a,b)	(-EINVAL)
#endif
#ifndef SET_FPEMU_CTL
# define SET_FPEMU_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_FPEMU_CTL
# define GET_FPEMU_CTL(a,b)	(-EINVAL)
#endif
#ifndef SET_FPEXC_CTL
# define SET_FPEXC_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_FPEXC_CTL
# define GET_FPEXC_CTL(a,b)	(-EINVAL)
#endif

/*
 * this is where the system-wide overflow UID and GID are defined, for
 * architectures that now have 32-bit UID/GID but didn't in the past
 */

int overflowuid = DEFAULT_OVERFLOWUID;
int overflowgid = DEFAULT_OVERFLOWGID;

EXPORT_SYMBOL(overflowuid);
EXPORT_SYMBOL(overflowgid);

/*
 * the same as above, but for filesystems which can only store a 16-bit
 * UID and GID. as such, this is needed on all architectures
 */

int fs_overflowuid = DEFAULT_FS_OVERFLOWUID;
int fs_overflowgid = DEFAULT_FS_OVERFLOWGID;

EXPORT_SYMBOL(fs_overflowuid);
EXPORT_SYMBOL(fs_overflowgid);

/*
 * this indicates whether you can reboot with ctrl-alt-del: the default is yes
 */

int C_A_D = 1;

/*
 * Notifier list for kernel code which wants to be called
 * at shutdown. This is used to stop any idling DMA operations
 * and the like.
 */

static struct notifier_block *reboot_notifier_list;
static DEFINE_RWLOCK(notifier_lock);
/**
 * notifier_chain_register - Add notifier to a notifier chain
 * @list: Pointer to root list pointer
 * @n: New entry in notifier chain
 *
 * Adds a notifier to a notifier chain.
 *
 * Currently always returns zero.
 */
int notifier_chain_register(struct notifier_block **list, struct notifier_block *n)
	write_lock(&notifier_lock);
	if (n->priority > (*list)->priority)
	list = &((*list)->next);
	write_unlock(&notifier_lock);

EXPORT_SYMBOL(notifier_chain_register);

/**
 * notifier_chain_unregister - Remove notifier from a notifier chain
 * @nl: Pointer to root list pointer
 * @n: New entry in notifier chain
 *
 * Removes a notifier from a notifier chain.
 *
 * Returns zero on success, or %-ENOENT on failure.
 */
int notifier_chain_unregister(struct notifier_block **nl, struct notifier_block *n)
	write_lock(&notifier_lock);
	write_unlock(&notifier_lock);
	write_unlock(&notifier_lock);

EXPORT_SYMBOL(notifier_chain_unregister);

/**
 * notifier_call_chain - Call functions in a notifier chain
 * @n: Pointer to root pointer of notifier chain
 * @val: Value passed unmodified to notifier function
 * @v: Pointer passed unmodified to notifier function
 *
 * Calls each function in a notifier chain in turn.
 *
 * If the return value of the notifier can be and'd
 * with %NOTIFY_STOP_MASK, then notifier_call_chain
 * will return immediately, with the return value of
 * the notifier function which halted execution.
 * Otherwise, the return value is the return value
 * of the last notifier function called.
 */
int __kprobes notifier_call_chain(struct notifier_block **n, unsigned long val, void *v)
	struct notifier_block *nb = *n;

	ret = nb->notifier_call(nb, val, v);
	if (ret & NOTIFY_STOP_MASK)

EXPORT_SYMBOL(notifier_call_chain);
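/*
 * Illustrative sketch only (not part of the original file): a typical
 * client of the notifier chain API above.  The names example_event,
 * example_nb and some_chain are invented.  A callback returns NOTIFY_DONE
 * (or NOTIFY_OK) to let notifier_call_chain() keep walking the chain;
 * returning a value that matches NOTIFY_STOP_MASK (e.g. NOTIFY_STOP)
 * stops the walk and is handed back to the caller.
 */
#if 0
static int example_event(struct notifier_block *self, unsigned long val, void *data)
{
	return NOTIFY_DONE;
}

static struct notifier_block example_nb = {
	.notifier_call	= example_event,
	.priority	= 0,
};

/* somewhere in initialisation code: */
/*	notifier_chain_register(&some_chain, &example_nb);	*/
#endif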
/**
 * register_reboot_notifier - Register function to be called at reboot time
 * @nb: Info about notifier function to be called
 *
 * Registers a function with the list of functions
 * to be called at reboot time.
 *
 * Currently always returns zero, as notifier_chain_register
 * always returns zero.
 */
int register_reboot_notifier(struct notifier_block * nb)
{
	return notifier_chain_register(&reboot_notifier_list, nb);
}

EXPORT_SYMBOL(register_reboot_notifier);

/**
 * unregister_reboot_notifier - Unregister previously registered reboot notifier
 * @nb: Hook to be unregistered
 *
 * Unregisters a previously registered reboot notifier function.
 *
 * Returns zero on success, or %-ENOENT on failure.
 */
int unregister_reboot_notifier(struct notifier_block * nb)
{
	return notifier_chain_unregister(&reboot_notifier_list, nb);
}

EXPORT_SYMBOL(unregister_reboot_notifier);
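/*
 * Illustrative sketch only: how a driver might hook the reboot path with
 * the helpers above.  example_reboot_event and example_reboot_nb are
 * invented names; the event argument will be SYS_RESTART, SYS_HALT or
 * SYS_POWER_OFF, and for SYS_RESTART the data pointer may carry the
 * restart command string.
 */
#if 0
static int example_reboot_event(struct notifier_block *self, unsigned long event, void *cmd)
{
	/* quiesce the hardware here */
	return NOTIFY_DONE;
}

static struct notifier_block example_reboot_nb = {
	.notifier_call = example_reboot_event,
};

/* at init time:	register_reboot_notifier(&example_reboot_nb);	*/
/* at teardown time:	unregister_reboot_notifier(&example_reboot_nb);	*/
#endif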
static int set_one_prio(struct task_struct *p, int niceval, int error)
	if (p->uid != current->euid &&
	    p->euid != current->euid && !capable(CAP_SYS_NICE)) {
	if (niceval < task_nice(p) && !can_nice(p, niceval)) {
	no_nice = security_task_setnice(p, niceval);
	set_user_nice(p, niceval);

asmlinkage long sys_setpriority(int which, int who, int niceval)
	struct task_struct *g, *p;
	struct user_struct *user;

	if (which > 2 || which < 0)

	/* normalize: avoid signed division (rounding problems) */

	read_lock(&tasklist_lock);
	p = find_task_by_pid(who);
	error = set_one_prio(p, niceval, error);
	who = process_group(current);
	do_each_task_pid(who, PIDTYPE_PGID, p) {
		error = set_one_prio(p, niceval, error);
	} while_each_task_pid(who, PIDTYPE_PGID, p);
	user = current->user;
	if ((who != current->uid) && !(user = find_user(who)))
		goto out_unlock;	/* No processes for this user */
	error = set_one_prio(p, niceval, error);
	while_each_thread(g, p);
	if (who != current->uid)
		free_uid(user);		/* For find_user() */
	read_unlock(&tasklist_lock);

/*
 * Ugh. To avoid negative return values, "getpriority()" will
 * not return the normal nice-value, but a negated value that
 * has been offset by 20 (ie it returns 40..1 instead of -20..19)
 * to stay compatible.
 */
asmlinkage long sys_getpriority(int which, int who)
	struct task_struct *g, *p;
	struct user_struct *user;
	long niceval, retval = -ESRCH;

	if (which > 2 || which < 0)

	read_lock(&tasklist_lock);
	p = find_task_by_pid(who);
	niceval = 20 - task_nice(p);
	if (niceval > retval)
	who = process_group(current);
	do_each_task_pid(who, PIDTYPE_PGID, p) {
		niceval = 20 - task_nice(p);
		if (niceval > retval)
	} while_each_task_pid(who, PIDTYPE_PGID, p);
	user = current->user;
	if ((who != current->uid) && !(user = find_user(who)))
		goto out_unlock;	/* No processes for this user */
	niceval = 20 - task_nice(p);
	if (niceval > retval)
	while_each_thread(g, p);
	if (who != current->uid)
		free_uid(user);		/* for find_user() */
	read_unlock(&tasklist_lock);
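/*
 * Worked example of the offset described above (illustrative, not part of
 * the original source): a task at nice -5 makes sys_getpriority() return
 * 20 - (-5) = 25, and a task at nice 19 yields 20 - 19 = 1, so the raw
 * syscall range is 40..1.  Userspace wrappers are expected to undo the
 * offset, roughly like this:
 */
#if 0
	long ret = syscall(__NR_getpriority, PRIO_PROCESS, 0);
	int nice_value = 20 - ret;	/* back in the usual -20..19 range */
#endif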
/**
 * emergency_restart - reboot the system
 *
 * Without shutting down any hardware or taking any locks
 * reboot the system.  This is called when we know we are in
 * trouble so this is our best effort to reboot.  This is
 * safe to call in interrupt context.
 */
void emergency_restart(void)
{
	machine_emergency_restart();
}
EXPORT_SYMBOL_GPL(emergency_restart);

void kernel_restart_prepare(char *cmd)
	notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd);
	system_state = SYSTEM_RESTART;

/**
 * kernel_restart - reboot the system
 * @cmd: pointer to buffer containing command to execute for restart
 *
 * Shutdown everything and perform a clean reboot.
 * This is not safe to call in interrupt context.
 */
void kernel_restart(char *cmd)
	kernel_restart_prepare(cmd);
	printk(KERN_EMERG "Restarting system.\n");
	printk(KERN_EMERG "Restarting system with command '%s'.\n", cmd);
	machine_restart(cmd);
EXPORT_SYMBOL_GPL(kernel_restart);

/**
 * kernel_kexec - reboot the system
 *
 * Move into place and start executing a preloaded standalone
 * executable.  If nothing was preloaded return an error.
 */
void kernel_kexec(void)
	struct kimage *image;

	image = xchg(&kexec_image, 0);
	kernel_restart_prepare(NULL);
	printk(KERN_EMERG "Starting new kernel\n");
	machine_kexec(image);
EXPORT_SYMBOL_GPL(kernel_kexec);

/**
 * kernel_halt - halt the system
 *
 * Shutdown everything and perform a clean system halt.
 */
void kernel_halt_prepare(void)
	notifier_call_chain(&reboot_notifier_list, SYS_HALT, NULL);
	system_state = SYSTEM_HALT;

void kernel_halt(void)
	kernel_halt_prepare();
	printk(KERN_EMERG "System halted.\n");
EXPORT_SYMBOL_GPL(kernel_halt);

/**
 * kernel_power_off - power_off the system
 *
 * Shutdown everything and perform a clean system power_off.
 */
void kernel_power_off_prepare(void)
	notifier_call_chain(&reboot_notifier_list, SYS_POWER_OFF, NULL);
	system_state = SYSTEM_POWER_OFF;

void kernel_power_off(void)
	kernel_power_off_prepare();
	printk(KERN_EMERG "Power down.\n");
EXPORT_SYMBOL_GPL(kernel_power_off);
/*
 * Reboot system call: for obvious reasons only root may call it,
 * and even root needs to set up some magic numbers in the registers
 * so that some mistake won't make this reboot the whole machine.
 * You can also set the meaning of the ctrl-alt-del-key here.
 *
 * reboot doesn't sync: do that yourself before calling this.
 */
asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void __user * arg)
	/* We only trust the superuser with rebooting the system. */
	if (!capable(CAP_SYS_BOOT))

	/* For safety, we require "magic" arguments. */
	if (magic1 != LINUX_REBOOT_MAGIC1 ||
	    (magic2 != LINUX_REBOOT_MAGIC2 &&
	     magic2 != LINUX_REBOOT_MAGIC2A &&
	     magic2 != LINUX_REBOOT_MAGIC2B &&
	     magic2 != LINUX_REBOOT_MAGIC2C))

	case LINUX_REBOOT_CMD_RESTART:
		kernel_restart(NULL);

	case LINUX_REBOOT_CMD_CAD_ON:

	case LINUX_REBOOT_CMD_CAD_OFF:

	case LINUX_REBOOT_CMD_HALT:

	case LINUX_REBOOT_CMD_POWER_OFF:

	case LINUX_REBOOT_CMD_RESTART2:
		if (strncpy_from_user(&buffer[0], arg, sizeof(buffer) - 1) < 0) {
		buffer[sizeof(buffer) - 1] = '\0';
		kernel_restart(buffer);

	case LINUX_REBOOT_CMD_KEXEC:

#ifdef CONFIG_SOFTWARE_SUSPEND
	case LINUX_REBOOT_CMD_SW_SUSPEND:
		int ret = software_suspend();
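/*
 * Illustrative only: what a matching userspace request looks like.  The
 * magic constants come from <linux/reboot.h>; as the comment above notes,
 * the caller is expected to sync() first because this syscall will not.
 */
#if 0
	sync();
	syscall(__NR_reboot, LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2,
		LINUX_REBOOT_CMD_RESTART, NULL);
#endif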
static void deferred_cad(void *dummy)
{
	kernel_restart(NULL);
}

/*
 * This function gets called by ctrl-alt-del - ie the keyboard interrupt.
 * As it's called within an interrupt, it may NOT sync: the only choice
 * is whether to reboot at once, or just ignore the ctrl-alt-del.
 */
void ctrl_alt_del(void)
	static DECLARE_WORK(cad_work, deferred_cad, NULL);

	schedule_work(&cad_work);
	kill_proc(cad_pid, SIGINT, 1);

/*
 * Unprivileged users may change the real gid to the effective gid
 * or vice versa.  (BSD-style)
 *
 * If you set the real gid at all, or set the effective gid to a value not
 * equal to the real gid, then the saved gid is set to the new effective gid.
 *
 * This makes it possible for a setgid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit over a program.
 *
 * The general idea is that a program which uses just setregid() will be
 * 100% compatible with BSD.  A program which uses just setgid() will be
 * 100% compatible with POSIX with saved IDs.
 *
 * SMP: There are no races, the GIDs are checked only by filesystem
 *      operations (as far as semantic preservation is concerned).
 */
asmlinkage long sys_setregid(gid_t rgid, gid_t egid)
	int old_rgid = current->gid;
	int old_egid = current->egid;
	int new_rgid = old_rgid;
	int new_egid = old_egid;

	retval = security_task_setgid(rgid, egid, (gid_t)-1, LSM_SETID_RE);

	if (rgid != (gid_t) -1) {
		if ((old_rgid == rgid) ||
		    (current->egid == rgid) ||
	if (egid != (gid_t) -1) {
		if ((old_rgid == egid) ||
		    (current->egid == egid) ||
		    (current->sgid == egid) ||
	if (new_egid != old_egid)
		current->mm->dumpable = suid_dumpable;
	if (rgid != (gid_t) -1 ||
	    (egid != (gid_t) -1 && egid != old_rgid))
		current->sgid = new_egid;
	current->fsgid = new_egid;
	current->egid = new_egid;
	current->gid = new_rgid;
	key_fsgid_changed(current);
	proc_id_connector(current, PROC_EVENT_GID);
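/*
 * Illustrative userspace sketch of the semantics described above: a
 * setgid program can permanently drop its elevated group by setting both
 * the real and effective gid back to the real gid, which also rewrites
 * the saved gid.
 */
#if 0
	gid_t real_gid = getgid();

	if (setregid(real_gid, real_gid) < 0)	/* saved gid becomes real_gid too */
		/* handle the error */;
#endif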
/*
 * setgid() is implemented like SysV w/ SAVED_IDS
 *
 * SMP: Same implicit races as above.
 */
asmlinkage long sys_setgid(gid_t gid)
	int old_egid = current->egid;

	retval = security_task_setgid(gid, (gid_t)-1, (gid_t)-1, LSM_SETID_ID);

	if (capable(CAP_SETGID))
		current->mm->dumpable = suid_dumpable;
		current->gid = current->egid = current->sgid = current->fsgid = gid;
	else if ((gid == current->gid) || (gid == current->sgid))
		current->mm->dumpable = suid_dumpable;
		current->egid = current->fsgid = gid;

	key_fsgid_changed(current);
	proc_id_connector(current, PROC_EVENT_GID);

static int set_user(uid_t new_ruid, int dumpclear)
	struct user_struct *new_user;

	new_user = alloc_uid(new_ruid);

	if (atomic_read(&new_user->processes) >=
	    current->signal->rlim[RLIMIT_NPROC].rlim_cur &&
	    new_user != &root_user) {

	switch_uid(new_user);

	current->mm->dumpable = suid_dumpable;
	current->uid = new_ruid;

/*
 * Unprivileged users may change the real uid to the effective uid
 * or vice versa.  (BSD-style)
 *
 * If you set the real uid at all, or set the effective uid to a value not
 * equal to the real uid, then the saved uid is set to the new effective uid.
 *
 * This makes it possible for a setuid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit over a program.
 *
 * The general idea is that a program which uses just setreuid() will be
 * 100% compatible with BSD.  A program which uses just setuid() will be
 * 100% compatible with POSIX with saved IDs.
 */
asmlinkage long sys_setreuid(uid_t ruid, uid_t euid)
	int old_ruid, old_euid, old_suid, new_ruid, new_euid;

	retval = security_task_setuid(ruid, euid, (uid_t)-1, LSM_SETID_RE);

	new_ruid = old_ruid = current->uid;
	new_euid = old_euid = current->euid;
	old_suid = current->suid;

	if (ruid != (uid_t) -1) {
		if ((old_ruid != ruid) &&
		    (current->euid != ruid) &&
		    !capable(CAP_SETUID))

	if (euid != (uid_t) -1) {
		if ((old_ruid != euid) &&
		    (current->euid != euid) &&
		    (current->suid != euid) &&
		    !capable(CAP_SETUID))

	if (new_ruid != old_ruid && set_user(new_ruid, new_euid != old_euid) < 0)

	if (new_euid != old_euid)
		current->mm->dumpable = suid_dumpable;
	current->fsuid = current->euid = new_euid;
	if (ruid != (uid_t) -1 ||
	    (euid != (uid_t) -1 && euid != old_ruid))
		current->suid = current->euid;
	current->fsuid = current->euid;

	key_fsuid_changed(current);
	proc_id_connector(current, PROC_EVENT_UID);

	return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_RE);

/*
 * setuid() is implemented like SysV with SAVED_IDS
 *
 * Note that SAVED_ID's is deficient in that a setuid root program
 * like sendmail, for example, cannot set its uid to be a normal
 * user and then switch back, because if you're root, setuid() sets
 * the saved uid too.  If you don't like this, blame the bright people
 * in the POSIX committee and/or USG.  Note that the BSD-style setreuid()
 * will allow a root program to temporarily drop privileges and be able to
 * regain them by swapping the real and effective uid.
 */
asmlinkage long sys_setuid(uid_t uid)
	int old_euid = current->euid;
	int old_ruid, old_suid, new_ruid, new_suid;

	retval = security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_ID);

	old_ruid = new_ruid = current->uid;
	old_suid = current->suid;

	if (capable(CAP_SETUID)) {
		if (uid != old_ruid && set_user(uid, old_euid != uid) < 0)
	} else if ((uid != current->uid) && (uid != new_suid))

	current->mm->dumpable = suid_dumpable;
	current->fsuid = current->euid = uid;
	current->suid = new_suid;

	key_fsuid_changed(current);
	proc_id_connector(current, PROC_EVENT_UID);

	return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_ID);
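/*
 * Illustrative userspace sketch of the BSD-style trick mentioned above:
 * a setuid-root program can temporarily drop privileges with setreuid()
 * and regain them later by swapping the real and effective uid again,
 * something plain setuid() cannot do once the saved uid is overwritten.
 */
#if 0
	uid_t real = getuid();		/* the invoking user */
	uid_t effective = geteuid();	/* 0 for a setuid-root program */

	setreuid(effective, real);	/* drop: run with euid == real */
	/* ... do less-trusted work ... */
	setreuid(real, effective);	/* swap back: euid == 0 again */
#endif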
/*
 * This function implements a generic ability to update ruid, euid,
 * and suid.  This allows you to implement the 4.4 compatible seteuid().
 */
asmlinkage long sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
	int old_ruid = current->uid;
	int old_euid = current->euid;
	int old_suid = current->suid;

	retval = security_task_setuid(ruid, euid, suid, LSM_SETID_RES);

	if (!capable(CAP_SETUID)) {
		if ((ruid != (uid_t) -1) && (ruid != current->uid) &&
		    (ruid != current->euid) && (ruid != current->suid))
		if ((euid != (uid_t) -1) && (euid != current->uid) &&
		    (euid != current->euid) && (euid != current->suid))
		if ((suid != (uid_t) -1) && (suid != current->uid) &&
		    (suid != current->euid) && (suid != current->suid))
	if (ruid != (uid_t) -1) {
		if (ruid != current->uid && set_user(ruid, euid != current->euid) < 0)
	if (euid != (uid_t) -1) {
		if (euid != current->euid)
			current->mm->dumpable = suid_dumpable;
		current->euid = euid;
	current->fsuid = current->euid;
	if (suid != (uid_t) -1)
		current->suid = suid;

	key_fsuid_changed(current);
	proc_id_connector(current, PROC_EVENT_UID);

	return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_RES);

asmlinkage long sys_getresuid(uid_t __user *ruid, uid_t __user *euid, uid_t __user *suid)
	if (!(retval = put_user(current->uid, ruid)) &&
	    !(retval = put_user(current->euid, euid)))
		retval = put_user(current->suid, suid);

/*
 * Same as above, but for rgid, egid, sgid.
 */
asmlinkage long sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
	retval = security_task_setgid(rgid, egid, sgid, LSM_SETID_RES);

	if (!capable(CAP_SETGID)) {
		if ((rgid != (gid_t) -1) && (rgid != current->gid) &&
		    (rgid != current->egid) && (rgid != current->sgid))
		if ((egid != (gid_t) -1) && (egid != current->gid) &&
		    (egid != current->egid) && (egid != current->sgid))
		if ((sgid != (gid_t) -1) && (sgid != current->gid) &&
		    (sgid != current->egid) && (sgid != current->sgid))
	if (egid != (gid_t) -1) {
		if (egid != current->egid)
			current->mm->dumpable = suid_dumpable;
		current->egid = egid;
	current->fsgid = current->egid;
	if (rgid != (gid_t) -1)
	if (sgid != (gid_t) -1)
		current->sgid = sgid;

	key_fsgid_changed(current);
	proc_id_connector(current, PROC_EVENT_GID);

asmlinkage long sys_getresgid(gid_t __user *rgid, gid_t __user *egid, gid_t __user *sgid)
	if (!(retval = put_user(current->gid, rgid)) &&
	    !(retval = put_user(current->egid, egid)))
		retval = put_user(current->sgid, sgid);

/*
 * "setfsuid()" sets the fsuid - the uid used for filesystem checks. This
 * is used for "access()" and for the NFS daemon (letting nfsd stay at
 * whatever uid it wants to). It normally shadows "euid", except when
 * explicitly set by setfsuid() or for access..
 */
asmlinkage long sys_setfsuid(uid_t uid)
	old_fsuid = current->fsuid;
	if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS))

	if (uid == current->uid || uid == current->euid ||
	    uid == current->suid || uid == current->fsuid ||
	if (uid != old_fsuid)
		current->mm->dumpable = suid_dumpable;
	current->fsuid = uid;

	key_fsuid_changed(current);
	proc_id_connector(current, PROC_EVENT_UID);

	security_task_post_setuid(old_fsuid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS);
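/*
 * Illustrative sketch of the shadowing described above: a file-serving
 * thread (nfsd-style) can take on a client's uid for permission checks
 * only, without touching its real or effective uid, and put the old fsuid
 * back when the request is done.  request_uid is a made-up name.
 */
#if 0
	uid_t previous = setfsuid(request_uid);
	/* ... perform filesystem accesses checked against request_uid ... */
	setfsuid(previous);		/* setfsuid() returns the old fsuid */
#endif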
/*
 * Samma på svenska.. ("the same, in Swedish": setfsgid() mirrors setfsuid())
 */
asmlinkage long sys_setfsgid(gid_t gid)
	old_fsgid = current->fsgid;
	if (security_task_setgid(gid, (gid_t)-1, (gid_t)-1, LSM_SETID_FS))

	if (gid == current->gid || gid == current->egid ||
	    gid == current->sgid || gid == current->fsgid ||
	if (gid != old_fsgid)
		current->mm->dumpable = suid_dumpable;
	current->fsgid = gid;
	key_fsgid_changed(current);
	proc_id_connector(current, PROC_EVENT_GID);

asmlinkage long sys_times(struct tms __user * tbuf)
	/*
	 * In the SMP world we might just be unlucky and have one of
	 * the times increment as we use it. Since the value is an
	 * atomically safe type this is just fine. Conceptually its
	 * as if the syscall took an instant longer to occur.
	 */
	cputime_t utime, stime, cutime, cstime;

	if (thread_group_empty(current)) {
		/*
		 * Single thread case without the use of any locks.
		 *
		 * We may race with release_task if two threads are
		 * executing. However, release task first adds up the
		 * counters (__exit_signal) before removing the task
		 * from the process tasklist (__unhash_process).
		 * __exit_signal also acquires and releases the
		 * siglock which results in the proper memory ordering
		 * so that the list modifications are always visible
		 * after the counters have been updated.
		 *
		 * If the counters have been updated by the second thread
		 * but the thread has not yet been removed from the list
		 * then the other branch will be executing which will
		 * block on tasklist_lock until the exit handling of the
		 * other task is finished.
		 *
		 * This also implies that the sighand->siglock cannot
		 * be held by another processor. So we can also
		 * skip acquiring that lock.
		 */
		utime = cputime_add(current->signal->utime, current->utime);
		stime = cputime_add(current->signal->stime, current->stime);
		cutime = current->signal->cutime;
		cstime = current->signal->cstime;

		/* Process with multiple threads */
		struct task_struct *tsk = current;
		struct task_struct *t;

		read_lock(&tasklist_lock);
		utime = tsk->signal->utime;
		stime = tsk->signal->stime;
		utime = cputime_add(utime, t->utime);
		stime = cputime_add(stime, t->stime);

		/*
		 * While we have tasklist_lock read-locked, no dying thread
		 * can be updating current->signal->[us]time.  Instead,
		 * we got their counts included in the live thread loop.
		 * However, another thread can come in right now and
		 * do a wait call that updates current->signal->c[us]time.
		 * To make sure we always see that pair updated atomically,
		 * we take the siglock around fetching them.
		 */
		spin_lock_irq(&tsk->sighand->siglock);
		cutime = tsk->signal->cutime;
		cstime = tsk->signal->cstime;
		spin_unlock_irq(&tsk->sighand->siglock);
		read_unlock(&tasklist_lock);

	tmp.tms_utime = cputime_to_clock_t(utime);
	tmp.tms_stime = cputime_to_clock_t(stime);
	tmp.tms_cutime = cputime_to_clock_t(cutime);
	tmp.tms_cstime = cputime_to_clock_t(cstime);
	if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))

	return (long) jiffies_64_to_clock_t(get_jiffies_64());
/*
 * This needs some heavy checking ...
 * I just haven't the stomach for it. I also don't fully
 * understand sessions/pgrp etc. Let somebody who does explain it.
 *
 * OK, I think I have the protection semantics right.... this is really
 * only important on a multi-user system anyway, to make sure one user
 * can't send a signal to a process owned by another.  -TYT, 12/12/91
 *
 * Auch. Had to add the 'did_exec' flag to conform completely to POSIX.
 */
asmlinkage long sys_setpgid(pid_t pid, pid_t pgid)
	struct task_struct *p;

	/* From this point forward we keep holding onto the tasklist lock
	 * so that our parent does not change from under us. -DaveM
	 */
	write_lock_irq(&tasklist_lock);

	p = find_task_by_pid(pid);

	if (!thread_group_leader(p))

	if (p->parent == current || p->real_parent == current) {
		if (p->signal->session != current->signal->session)

	if (p->signal->leader)

		struct task_struct *p;

		do_each_task_pid(pgid, PIDTYPE_PGID, p) {
			if (p->signal->session == current->signal->session)
		} while_each_task_pid(pgid, PIDTYPE_PGID, p);

	err = security_task_setpgid(p, pgid);

	if (process_group(p) != pgid) {
		detach_pid(p, PIDTYPE_PGID);
		p->signal->pgrp = pgid;
		attach_pid(p, PIDTYPE_PGID, pgid);

	/* All paths lead to here, thus we are safe. -DaveM */
	write_unlock_irq(&tasklist_lock);

asmlinkage long sys_getpgid(pid_t pid)
	return process_group(current);

	struct task_struct *p;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);
	retval = security_task_getpgid(p);
	retval = process_group(p);
	read_unlock(&tasklist_lock);

#ifdef __ARCH_WANT_SYS_GETPGRP

asmlinkage long sys_getpgrp(void)
	/* SMP - assuming writes are word atomic this is fine */
	return process_group(current);

#endif

asmlinkage long sys_getsid(pid_t pid)
	return current->signal->session;

	struct task_struct *p;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);
	retval = security_task_getsid(p);
	retval = p->signal->session;
	read_unlock(&tasklist_lock);

asmlinkage long sys_setsid(void)
	if (!thread_group_leader(current))

	write_lock_irq(&tasklist_lock);

	pid = find_pid(PIDTYPE_PGID, current->pid);

	current->signal->leader = 1;
	__set_special_pids(current->pid, current->pid);
	current->signal->tty = NULL;
	current->signal->tty_old_pgrp = 0;
	err = process_group(current);

	write_unlock_irq(&tasklist_lock);
/*
 * Supplementary group IDs
 */

/* init to 2 - one for init_task, one to ensure it is never freed */
struct group_info init_groups = { .usage = ATOMIC_INIT(2) };

struct group_info *groups_alloc(int gidsetsize)
	struct group_info *group_info;

	nblocks = (gidsetsize + NGROUPS_PER_BLOCK - 1) / NGROUPS_PER_BLOCK;
	/* Make sure we always allocate at least one indirect block pointer */
	nblocks = nblocks ? : 1;
	group_info = kmalloc(sizeof(*group_info) + nblocks*sizeof(gid_t *), GFP_USER);

	group_info->ngroups = gidsetsize;
	group_info->nblocks = nblocks;
	atomic_set(&group_info->usage, 1);

	if (gidsetsize <= NGROUPS_SMALL) {
		group_info->blocks[0] = group_info->small_block;
	for (i = 0; i < nblocks; i++) {
		b = (void *)__get_free_page(GFP_USER);
			goto out_undo_partial_alloc;
		group_info->blocks[i] = b;

out_undo_partial_alloc:
	free_page((unsigned long)group_info->blocks[i]);

EXPORT_SYMBOL(groups_alloc);

void groups_free(struct group_info *group_info)
	if (group_info->blocks[0] != group_info->small_block) {
		for (i = 0; i < group_info->nblocks; i++)
			free_page((unsigned long)group_info->blocks[i]);

EXPORT_SYMBOL(groups_free);

/* export the group_info to a user-space array */
static int groups_to_user(gid_t __user *grouplist,
			  struct group_info *group_info)
	int count = group_info->ngroups;

	for (i = 0; i < group_info->nblocks; i++) {
		int cp_count = min(NGROUPS_PER_BLOCK, count);
		int off = i * NGROUPS_PER_BLOCK;
		int len = cp_count * sizeof(*grouplist);

		if (copy_to_user(grouplist+off, group_info->blocks[i], len))

/* fill a group_info from a user-space array - it must be allocated already */
static int groups_from_user(struct group_info *group_info,
			    gid_t __user *grouplist)
	int count = group_info->ngroups;

	for (i = 0; i < group_info->nblocks; i++) {
		int cp_count = min(NGROUPS_PER_BLOCK, count);
		int off = i * NGROUPS_PER_BLOCK;
		int len = cp_count * sizeof(*grouplist);

		if (copy_from_user(group_info->blocks[i], grouplist+off, len))

/* a simple Shell sort */
static void groups_sort(struct group_info *group_info)
	int base, max, stride;
	int gidsetsize = group_info->ngroups;

	for (stride = 1; stride < gidsetsize; stride = 3 * stride + 1)

	max = gidsetsize - stride;
	for (base = 0; base < max; base++) {
		int right = left + stride;
		gid_t tmp = GROUP_AT(group_info, right);

		while (left >= 0 && GROUP_AT(group_info, left) > tmp) {
			GROUP_AT(group_info, right) =
				GROUP_AT(group_info, left);

		GROUP_AT(group_info, right) = tmp;
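/*
 * Illustrative note on the sort above: the stride loop generates the gap
 * sequence 1, 4, 13, 40, ... (3*h + 1); groups_sort() then performs a
 * gapped insertion sort for successively smaller strides, which is what
 * makes this a Shell sort rather than a plain insertion sort.
 */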
/* a simple bsearch */
int groups_search(struct group_info *group_info, gid_t grp)
	right = group_info->ngroups;
	while (left < right) {
		int mid = (left+right)/2;
		int cmp = grp - GROUP_AT(group_info, mid);

/* validate and set current->group_info */
int set_current_groups(struct group_info *group_info)
	struct group_info *old_info;

	retval = security_task_setgroups(group_info);

	groups_sort(group_info);
	get_group_info(group_info);

	old_info = current->group_info;
	current->group_info = group_info;
	task_unlock(current);

	put_group_info(old_info);

EXPORT_SYMBOL(set_current_groups);

asmlinkage long sys_getgroups(int gidsetsize, gid_t __user *grouplist)
	/*
	 * SMP: Nobody else can change our grouplist. Thus we are safe.
	 */

	/* no need to grab task_lock here; it cannot change */
	get_group_info(current->group_info);
	i = current->group_info->ngroups;

	if (i > gidsetsize) {

	if (groups_to_user(grouplist, current->group_info)) {

	put_group_info(current->group_info);

/*
 * SMP: Our groups are copy-on-write. We can set them safely
 * without another task interfering.
 */

asmlinkage long sys_setgroups(int gidsetsize, gid_t __user *grouplist)
	struct group_info *group_info;

	if (!capable(CAP_SETGID))
	if ((unsigned)gidsetsize > NGROUPS_MAX)

	group_info = groups_alloc(gidsetsize);
	retval = groups_from_user(group_info, grouplist);
		put_group_info(group_info);

	retval = set_current_groups(group_info);
	put_group_info(group_info);

/*
 * Check whether we're fsgid/egid or in the supplemental group..
 */
int in_group_p(gid_t grp)
	if (grp != current->fsgid) {
		get_group_info(current->group_info);
		retval = groups_search(current->group_info, grp);
		put_group_info(current->group_info);

EXPORT_SYMBOL(in_group_p);

int in_egroup_p(gid_t grp)
	if (grp != current->egid) {
		get_group_info(current->group_info);
		retval = groups_search(current->group_info, grp);
		put_group_info(current->group_info);

EXPORT_SYMBOL(in_egroup_p);
DECLARE_RWSEM(uts_sem);

EXPORT_SYMBOL(uts_sem);

asmlinkage long sys_newuname(struct new_utsname __user * name)
	down_read(&uts_sem);
	if (copy_to_user(name, &system_utsname, sizeof *name))

asmlinkage long sys_sethostname(char __user *name, int len)
	char tmp[__NEW_UTS_LEN];

	if (!capable(CAP_SYS_ADMIN))
	if (len < 0 || len > __NEW_UTS_LEN)
	down_write(&uts_sem);
	if (!copy_from_user(tmp, name, len)) {
		memcpy(system_utsname.nodename, tmp, len);
		system_utsname.nodename[len] = 0;

#ifdef __ARCH_WANT_SYS_GETHOSTNAME

asmlinkage long sys_gethostname(char __user *name, int len)
	down_read(&uts_sem);
	i = 1 + strlen(system_utsname.nodename);
	if (copy_to_user(name, system_utsname.nodename, i))

#endif

/*
 * Only setdomainname; getdomainname can be implemented by calling
 * uname()
 */
asmlinkage long sys_setdomainname(char __user *name, int len)
	char tmp[__NEW_UTS_LEN];

	if (!capable(CAP_SYS_ADMIN))
	if (len < 0 || len > __NEW_UTS_LEN)

	down_write(&uts_sem);
	if (!copy_from_user(tmp, name, len)) {
		memcpy(system_utsname.domainname, tmp, len);
		system_utsname.domainname[len] = 0;

asmlinkage long sys_getrlimit(unsigned int resource, struct rlimit __user *rlim)
	if (resource >= RLIM_NLIMITS)

	struct rlimit value;

	task_lock(current->group_leader);
	value = current->signal->rlim[resource];
	task_unlock(current->group_leader);
	return copy_to_user(rlim, &value, sizeof(*rlim)) ? -EFAULT : 0;
#ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT

/*
 * Back compatibility for getrlimit. Needed for some apps.
 */
asmlinkage long sys_old_getrlimit(unsigned int resource, struct rlimit __user *rlim)
	if (resource >= RLIM_NLIMITS)

	task_lock(current->group_leader);
	x = current->signal->rlim[resource];
	task_unlock(current->group_leader);
	if (x.rlim_cur > 0x7FFFFFFF)
		x.rlim_cur = 0x7FFFFFFF;
	if (x.rlim_max > 0x7FFFFFFF)
		x.rlim_max = 0x7FFFFFFF;
	return copy_to_user(rlim, &x, sizeof(x)) ? -EFAULT : 0;

#endif

asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim)
	struct rlimit new_rlim, *old_rlim;

	if (resource >= RLIM_NLIMITS)
	if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
	if (new_rlim.rlim_cur > new_rlim.rlim_max)
	old_rlim = current->signal->rlim + resource;
	if ((new_rlim.rlim_max > old_rlim->rlim_max) &&
	    !capable(CAP_SYS_RESOURCE))
	if (resource == RLIMIT_NOFILE && new_rlim.rlim_max > NR_OPEN)

	retval = security_task_setrlimit(resource, &new_rlim);

	task_lock(current->group_leader);
	*old_rlim = new_rlim;
	task_unlock(current->group_leader);

	if (resource == RLIMIT_CPU && new_rlim.rlim_cur != RLIM_INFINITY &&
	    (cputime_eq(current->signal->it_prof_expires, cputime_zero) ||
	     new_rlim.rlim_cur <= cputime_to_secs(
		     current->signal->it_prof_expires))) {
		cputime_t cputime = secs_to_cputime(new_rlim.rlim_cur);
		read_lock(&tasklist_lock);
		spin_lock_irq(&current->sighand->siglock);
		set_process_cpu_timer(current, CPUCLOCK_PROF,
		spin_unlock_irq(&current->sighand->siglock);
		read_unlock(&tasklist_lock);
/*
 * It would make sense to put struct rusage in the task_struct,
 * except that would make the task_struct be *really big*.  After
 * task_struct gets moved into malloc'ed memory, it would
 * make sense to do this.  It will make moving the rest of the information
 * a lot simpler!  (Which we're not doing right now because we're not
 * measuring them yet).
 *
 * This expects to be called with tasklist_lock read-locked or better,
 * and the siglock not locked.  It may momentarily take the siglock.
 *
 * When sampling multiple threads for RUSAGE_SELF, under SMP we might have
 * races with threads incrementing their own counters.  But since word
 * reads are atomic, we either get new values or old values and we don't
 * care which for the sums.  We always take the siglock to protect reading
 * the c* fields from p->signal from races with exit.c updating those
 * fields when reaping, so a sample either gets all the additions of a
 * given child after it's reaped, or none so this sample is before reaping.
 */

static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
	struct task_struct *t;
	unsigned long flags;
	cputime_t utime, stime;

	memset((char *) r, 0, sizeof *r);

	if (unlikely(!p->signal))

	case RUSAGE_CHILDREN:
		spin_lock_irqsave(&p->sighand->siglock, flags);
		utime = p->signal->cutime;
		stime = p->signal->cstime;
		r->ru_nvcsw = p->signal->cnvcsw;
		r->ru_nivcsw = p->signal->cnivcsw;
		r->ru_minflt = p->signal->cmin_flt;
		r->ru_majflt = p->signal->cmaj_flt;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
		cputime_to_timeval(utime, &r->ru_utime);
		cputime_to_timeval(stime, &r->ru_stime);

		spin_lock_irqsave(&p->sighand->siglock, flags);
		utime = stime = cputime_zero;

		spin_lock_irqsave(&p->sighand->siglock, flags);
		utime = p->signal->cutime;
		stime = p->signal->cstime;
		r->ru_nvcsw = p->signal->cnvcsw;
		r->ru_nivcsw = p->signal->cnivcsw;
		r->ru_minflt = p->signal->cmin_flt;
		r->ru_majflt = p->signal->cmaj_flt;

		utime = cputime_add(utime, p->signal->utime);
		stime = cputime_add(stime, p->signal->stime);
		r->ru_nvcsw += p->signal->nvcsw;
		r->ru_nivcsw += p->signal->nivcsw;
		r->ru_minflt += p->signal->min_flt;
		r->ru_majflt += p->signal->maj_flt;

		utime = cputime_add(utime, t->utime);
		stime = cputime_add(stime, t->stime);
		r->ru_nvcsw += t->nvcsw;
		r->ru_nivcsw += t->nivcsw;
		r->ru_minflt += t->min_flt;
		r->ru_majflt += t->maj_flt;

		spin_unlock_irqrestore(&p->sighand->siglock, flags);
		cputime_to_timeval(utime, &r->ru_utime);
		cputime_to_timeval(stime, &r->ru_stime);

int getrusage(struct task_struct *p, int who, struct rusage __user *ru)
	read_lock(&tasklist_lock);
	k_getrusage(p, who, &r);
	read_unlock(&tasklist_lock);
	return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;
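/*
 * Illustrative userspace counterpart: sys_getrusage() below funnels into
 * k_getrusage() above with either RUSAGE_SELF or RUSAGE_CHILDREN.
 */
#if 0
	struct rusage ru;

	if (getrusage(RUSAGE_SELF, &ru) == 0)
		printf("minor faults: %ld, voluntary switches: %ld\n",
		       ru.ru_minflt, ru.ru_nvcsw);
#endif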
asmlinkage long sys_getrusage(int who, struct rusage __user *ru)
	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN)
	return getrusage(current, who, ru);

asmlinkage long sys_umask(int mask)
	mask = xchg(&current->fs->umask, mask & S_IRWXUGO);

asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
			  unsigned long arg4, unsigned long arg5)
	error = security_task_prctl(option, arg2, arg3, arg4, arg5);

	case PR_SET_PDEATHSIG:
		if (!valid_signal(arg2)) {
		current->pdeath_signal = arg2;
	case PR_GET_PDEATHSIG:
		error = put_user(current->pdeath_signal, (int __user *)arg2);
	case PR_GET_DUMPABLE:
		error = current->mm->dumpable;
	case PR_SET_DUMPABLE:
		if (arg2 < 0 || arg2 > 2) {
		current->mm->dumpable = arg2;
	case PR_SET_UNALIGN:
		error = SET_UNALIGN_CTL(current, arg2);
	case PR_GET_UNALIGN:
		error = GET_UNALIGN_CTL(current, arg2);
		error = SET_FPEMU_CTL(current, arg2);
		error = GET_FPEMU_CTL(current, arg2);
		error = SET_FPEXC_CTL(current, arg2);
		error = GET_FPEXC_CTL(current, arg2);
		error = PR_TIMING_STATISTICAL;
		if (arg2 == PR_TIMING_STATISTICAL)
	case PR_GET_KEEPCAPS:
		if (current->keep_capabilities)
	case PR_SET_KEEPCAPS:
		if (arg2 != 0 && arg2 != 1) {
		current->keep_capabilities = arg2;

		struct task_struct *me = current;
		unsigned char ncomm[sizeof(me->comm)];

		ncomm[sizeof(me->comm)-1] = 0;
		if (strncpy_from_user(ncomm, (char __user *)arg2,
				      sizeof(me->comm)-1) < 0)
		set_task_comm(me, ncomm);

		struct task_struct *me = current;
		unsigned char tcomm[sizeof(me->comm)];

		get_task_comm(tcomm, me);
		if (copy_to_user((char __user *)arg2, tcomm, sizeof(tcomm)))