4 * Copyright (C) 1991, 1992 Linus Torvalds
7 #include <linux/config.h>
8 #include <linux/module.h>
10 #include <linux/utsname.h>
11 #include <linux/mman.h>
12 #include <linux/smp_lock.h>
13 #include <linux/notifier.h>
14 #include <linux/reboot.h>
15 #include <linux/prctl.h>
16 #include <linux/init.h>
17 #include <linux/highuid.h>
19 #include <linux/workqueue.h>
20 #include <linux/device.h>
21 #include <linux/times.h>
22 #include <linux/security.h>
23 #include <linux/dcookies.h>
25 #include <asm/uaccess.h>
27 #include <asm/unistd.h>
29 #ifndef SET_UNALIGN_CTL
30 # define SET_UNALIGN_CTL(a,b) (-EINVAL)
32 #ifndef GET_UNALIGN_CTL
33 # define GET_UNALIGN_CTL(a,b) (-EINVAL)
36 # define SET_FPEMU_CTL(a,b) (-EINVAL)
39 # define GET_FPEMU_CTL(a,b) (-EINVAL)
42 # define SET_FPEXC_CTL(a,b) (-EINVAL)
45 # define GET_FPEXC_CTL(a,b) (-EINVAL)
49 * this is where the system-wide overflow UID and GID are defined, for
50 * architectures that now have 32-bit UID/GID but didn't in the past
53 int overflowuid
= DEFAULT_OVERFLOWUID
;
54 int overflowgid
= DEFAULT_OVERFLOWGID
;
57 * the same as above, but for filesystems which can only store a 16-bit
58 * UID and GID. as such, this is needed on all architectures
61 int fs_overflowuid
= DEFAULT_FS_OVERFLOWUID
;
62 int fs_overflowgid
= DEFAULT_FS_OVERFLOWUID
;
65 * this indicates whether you can reboot with ctrl-alt-del: the default is yes
71 extern int system_running
;
74 * Notifier list for kernel code which wants to be called
75 * at shutdown. This is used to stop any idling DMA operations
79 static struct notifier_block
*reboot_notifier_list
;
80 rwlock_t notifier_lock
= RW_LOCK_UNLOCKED
;
83 * notifier_chain_register - Add notifier to a notifier chain
84 * @list: Pointer to root list pointer
85 * @n: New entry in notifier chain
87 * Adds a notifier to a notifier chain.
89 * Currently always returns zero.
/*
 * notifier_chain_register() — extraction-garbled fragment.
 * Inserts @n into the chain rooted at @list, kept sorted by descending
 * ->priority, under the writer side of notifier_lock.
 * NOTE(review): the while-loop header, the break, the actual insertion
 * (n->next / *list = n) and the return are missing from this view —
 * confirm against the original file before editing.
 * NOTE(review): "¬ifier_lock" is HTML-entity mojibake for
 * "&notifier_lock".
 */
92 int notifier_chain_register(struct notifier_block
**list
, struct notifier_block
*n
)
/* take exclusive access to the chain */
94 write_lock(¬ifier_lock
);
/* advance while existing entries have higher priority than @n */
97 if(n
->priority
> (*list
)->priority
)
99 list
= &((*list
)->next
);
/* release the chain lock */
103 write_unlock(¬ifier_lock
);
108 * notifier_chain_unregister - Remove notifier from a notifier chain
109 * @nl: Pointer to root list pointer
110 * @n: New entry in notifier chain
112 * Removes a notifier from a notifier chain.
114 * Returns zero on success, or %-ENOENT on failure.
/*
 * notifier_chain_unregister() — extraction-garbled fragment.
 * Removes @n from the chain rooted at @nl under notifier_lock.
 * NOTE(review): the search loop, the unlink, and the return values
 * (0 on success / -ENOENT per the kerneldoc above) are missing from
 * this view; the two write_unlock() lines below are the tails of the
 * found and not-found exit paths. "¬ifier_lock" is mojibake for
 * "&notifier_lock".
 */
117 int notifier_chain_unregister(struct notifier_block
**nl
, struct notifier_block
*n
)
119 write_lock(¬ifier_lock
);
/* exit path: entry found and unlinked */
125 write_unlock(¬ifier_lock
);
/* exit path: entry not found */
130 write_unlock(¬ifier_lock
);
135 * notifier_call_chain - Call functions in a notifier chain
136 * @n: Pointer to root pointer of notifier chain
137 * @val: Value passed unmodified to notifier function
138 * @v: Pointer passed unmodified to notifier function
140 * Calls each function in a notifier chain in turn.
142 * If the return value of the notifier can be and'd
143 * with %NOTIFY_STOP_MASK, then notifier_call_chain
144 * will return immediately, with the return value of
145 * the notifier function which halted execution.
146 * Otherwise, the return value is the return value
147 * of the last notifier function called.
/*
 * notifier_call_chain() — extraction-garbled fragment.
 * Walks the chain at *@n, invoking each ->notifier_call(nb, val, v);
 * per the kerneldoc above, it stops early when a callback's return
 * value has NOTIFY_STOP_MASK set, otherwise returns the last result.
 * NOTE(review): the while(nb) loop header, the declaration of ret,
 * the early return, and nb = nb->next are missing from this view.
 */
150 int notifier_call_chain(struct notifier_block
**n
, unsigned long val
, void *v
)
/* start from the head of the chain */
153 struct notifier_block
*nb
= *n
;
/* invoke the current notifier */
157 ret
=nb
->notifier_call(nb
,val
,v
);
/* a STOP result halts traversal (early-return body elided here) */
158 if(ret
&NOTIFY_STOP_MASK
)
168 * register_reboot_notifier - Register function to be called at reboot time
169 * @nb: Info about notifier function to be called
171 * Registers a function with the list of functions
172 * to be called at reboot time.
174 * Currently always returns zero, as notifier_chain_register
175 * always returns zero.
178 int register_reboot_notifier(struct notifier_block
* nb
)
180 return notifier_chain_register(&reboot_notifier_list
, nb
);
184 * unregister_reboot_notifier - Unregister previously registered reboot notifier
185 * @nb: Hook to be unregistered
187 * Unregisters a previously registered reboot
190 * Returns zero on success, or %-ENOENT on failure.
193 int unregister_reboot_notifier(struct notifier_block
* nb
)
195 return notifier_chain_unregister(&reboot_notifier_list
, nb
);
198 asmlinkage
long sys_ni_syscall(void)
203 cond_syscall(sys_nfsservctl
)
204 cond_syscall(sys_quotactl
)
205 cond_syscall(sys_acct
)
206 cond_syscall(sys_lookup_dcookie
)
207 cond_syscall(sys_swapon
)
208 cond_syscall(sys_swapoff
)
209 cond_syscall(sys_init_module
)
210 cond_syscall(sys_delete_module
)
/*
 * set_one_prio() — extraction-garbled fragment.
 * Applies @niceval to task @p if the caller is permitted: owner match on
 * uid/euid, or CAP_SYS_NICE; raising priority additionally needs
 * CAP_SYS_NICE. @error is threaded through so multi-task callers
 * (sys_setpriority) accumulate a result.
 * NOTE(review): the error assignments (-EPERM/-EACCES paths) and the
 * return are missing from this view.
 */
212 static int set_one_prio(struct task_struct
*p
, int niceval
, int error
)
/* permission: must own the task or hold CAP_SYS_NICE */
214 if (p
->uid
!= current
->euid
&&
215 p
->uid
!= current
->uid
&& !capable(CAP_SYS_NICE
)) {
/* lowering nice value (raising priority) requires CAP_SYS_NICE */
222 if (niceval
< task_nice(p
) && !capable(CAP_SYS_NICE
))
225 set_user_nice(p
, niceval
);
230 asmlinkage
long sys_setpriority(int which
, int who
, int niceval
)
232 struct task_struct
*g
, *p
;
233 struct user_struct
*user
;
238 if (which
> 2 || which
< 0)
241 /* normalize: avoid signed division (rounding problems) */
248 read_lock(&tasklist_lock
);
253 p
= find_task_by_pid(who
);
255 error
= set_one_prio(p
, niceval
, error
);
260 for_each_task_pid(who
, PIDTYPE_PGID
, p
, l
, pid
)
261 error
= set_one_prio(p
, niceval
, error
);
265 user
= current
->user
;
267 user
= find_user(who
);
274 error
= set_one_prio(p
, niceval
, error
);
275 while_each_thread(g
, p
);
279 read_unlock(&tasklist_lock
);
285 * Ugh. To avoid negative return values, "getpriority()" will
286 * not return the normal nice-value, but a negated value that
287 * has been offset by 20 (ie it returns 40..1 instead of -20..19)
288 * to stay compatible.
290 asmlinkage
long sys_getpriority(int which
, int who
)
292 struct task_struct
*g
, *p
;
295 struct user_struct
*user
;
296 long niceval
, retval
= -ESRCH
;
298 if (which
> 2 || which
< 0)
301 read_lock(&tasklist_lock
);
306 p
= find_task_by_pid(who
);
308 niceval
= 20 - task_nice(p
);
309 if (niceval
> retval
)
316 for_each_task_pid(who
, PIDTYPE_PGID
, p
, l
, pid
) {
317 niceval
= 20 - task_nice(p
);
318 if (niceval
> retval
)
324 user
= current
->user
;
326 user
= find_user(who
);
333 niceval
= 20 - task_nice(p
);
334 if (niceval
> retval
)
337 while_each_thread(g
, p
);
341 read_unlock(&tasklist_lock
);
348 * Reboot system call: for obvious reasons only root may call it,
349 * and even root needs to set up some magic numbers in the registers
350 * so that some mistake won't make this reboot the whole machine.
351 * You can also set the meaning of the ctrl-alt-del-key here.
353 * reboot doesn't sync: do that yourself before calling this.
355 asmlinkage
long sys_reboot(int magic1
, int magic2
, unsigned int cmd
, void * arg
)
359 /* We only trust the superuser with rebooting the system. */
360 if (!capable(CAP_SYS_BOOT
))
363 /* For safety, we require "magic" arguments. */
364 if (magic1
!= LINUX_REBOOT_MAGIC1
||
365 (magic2
!= LINUX_REBOOT_MAGIC2
&& magic2
!= LINUX_REBOOT_MAGIC2A
&&
366 magic2
!= LINUX_REBOOT_MAGIC2B
))
371 case LINUX_REBOOT_CMD_RESTART
:
372 notifier_call_chain(&reboot_notifier_list
, SYS_RESTART
, NULL
);
375 printk(KERN_EMERG
"Restarting system.\n");
376 machine_restart(NULL
);
379 case LINUX_REBOOT_CMD_CAD_ON
:
383 case LINUX_REBOOT_CMD_CAD_OFF
:
387 case LINUX_REBOOT_CMD_HALT
:
388 notifier_call_chain(&reboot_notifier_list
, SYS_HALT
, NULL
);
391 printk(KERN_EMERG
"System halted.\n");
396 case LINUX_REBOOT_CMD_POWER_OFF
:
397 notifier_call_chain(&reboot_notifier_list
, SYS_POWER_OFF
, NULL
);
400 printk(KERN_EMERG
"Power down.\n");
405 case LINUX_REBOOT_CMD_RESTART2
:
406 if (strncpy_from_user(&buffer
[0], (char *)arg
, sizeof(buffer
) - 1) < 0) {
410 buffer
[sizeof(buffer
) - 1] = '\0';
412 notifier_call_chain(&reboot_notifier_list
, SYS_RESTART
, buffer
);
415 printk(KERN_EMERG
"Restarting system with command '%s'.\n", buffer
);
416 machine_restart(buffer
);
419 #ifdef CONFIG_SOFTWARE_SUSPEND
420 case LINUX_REBOOT_CMD_SW_SUSPEND
:
421 if (!software_suspend_enabled
) {
438 static void deferred_cad(void *dummy
)
440 notifier_call_chain(&reboot_notifier_list
, SYS_RESTART
, NULL
);
441 machine_restart(NULL
);
445 * This function gets called by ctrl-alt-del - ie the keyboard interrupt.
446 * As it's called within an interrupt, it may NOT sync: the only choice
447 * is whether to reboot at once, or just ignore the ctrl-alt-del.
/*
 * ctrl_alt_del() — extraction-garbled fragment.
 * Called from the keyboard interrupt: either defers a reboot to process
 * context via cad_work/deferred_cad(), or signals the C-A-D owner
 * process (cad_pid) with SIGINT.
 * NOTE(review): the conditional selecting between the two actions
 * (presumably testing the ctrl-alt-del enable flag mentioned in the
 * comment near the top of the file) is missing from this view.
 */
449 void ctrl_alt_del(void)
/* one-shot work item bound to deferred_cad() */
451 static DECLARE_WORK(cad_work
, deferred_cad
, NULL
);
/* reboot path: run deferred_cad() in process context */
454 schedule_work(&cad_work
);
/* ignore path: notify the registered C-A-D process instead */
456 kill_proc(cad_pid
, SIGINT
, 1);
461 * Unprivileged users may change the real gid to the effective gid
462 * or vice versa. (BSD-style)
464 * If you set the real gid at all, or set the effective gid to a value not
465 * equal to the real gid, then the saved gid is set to the new effective gid.
467 * This makes it possible for a setgid program to completely drop its
468 * privileges, which is often a useful assertion to make when you are doing
469 * a security audit over a program.
471 * The general idea is that a program which uses just setregid() will be
472 * 100% compatible with BSD. A program which uses just setgid() will be
473 * 100% compatible with POSIX with saved IDs.
475 * SMP: There are not races, the GIDs are checked only by filesystem
476 * operations (as far as semantic preservation is concerned).
478 asmlinkage
long sys_setregid(gid_t rgid
, gid_t egid
)
480 int old_rgid
= current
->gid
;
481 int old_egid
= current
->egid
;
482 int new_rgid
= old_rgid
;
483 int new_egid
= old_egid
;
486 retval
= security_ops
->task_setgid(rgid
, egid
, (gid_t
)-1, LSM_SETID_RE
);
490 if (rgid
!= (gid_t
) -1) {
491 if ((old_rgid
== rgid
) ||
492 (current
->egid
==rgid
) ||
498 if (egid
!= (gid_t
) -1) {
499 if ((old_rgid
== egid
) ||
500 (current
->egid
== egid
) ||
501 (current
->sgid
== egid
) ||
508 if (new_egid
!= old_egid
)
510 current
->mm
->dumpable
= 0;
513 if (rgid
!= (gid_t
) -1 ||
514 (egid
!= (gid_t
) -1 && egid
!= old_rgid
))
515 current
->sgid
= new_egid
;
516 current
->fsgid
= new_egid
;
517 current
->egid
= new_egid
;
518 current
->gid
= new_rgid
;
523 * setgid() is implemented like SysV w/ SAVED_IDS
525 * SMP: Same implicit races as above.
527 asmlinkage
long sys_setgid(gid_t gid
)
529 int old_egid
= current
->egid
;
532 retval
= security_ops
->task_setgid(gid
, (gid_t
)-1, (gid_t
)-1, LSM_SETID_ID
);
536 if (capable(CAP_SETGID
))
540 current
->mm
->dumpable
=0;
543 current
->gid
= current
->egid
= current
->sgid
= current
->fsgid
= gid
;
545 else if ((gid
== current
->gid
) || (gid
== current
->sgid
))
549 current
->mm
->dumpable
=0;
552 current
->egid
= current
->fsgid
= gid
;
/*
 * set_user() — extraction-garbled fragment.
 * Switches current's real uid and per-user accounting structure to
 * @new_ruid: looks up/creates the user_struct, migrates the process
 * count from the old one, optionally clears the core-dump flag, then
 * installs the new uid/user.
 * NOTE(review): the alloc_uid() failure check, the dumpclear
 * conditional around dumpable = 0, the release of the old user_struct,
 * and the return are missing from this view.
 */
559 static int set_user(uid_t new_ruid
, int dumpclear
)
561 struct user_struct
*new_user
, *old_user
;
563 /* What if a process setreuid()'s and this brings the
564 * new uid over his NPROC rlimit? We can check this now
565 * cheaply with the new uid cache, so if it matters
566 * we should be checking for it. -DaveM
568 new_user
= alloc_uid(new_ruid
);
571 old_user
= current
->user
;
/* migrate the per-user process count old -> new */
572 atomic_dec(&old_user
->processes
);
573 atomic_inc(&new_user
->processes
);
/* identity changed: forbid core dumps of the old image */
577 current
->mm
->dumpable
= 0;
580 current
->uid
= new_ruid
;
581 current
->user
= new_user
;
587 * Unprivileged users may change the real uid to the effective uid
588 * or vice versa. (BSD-style)
590 * If you set the real uid at all, or set the effective uid to a value not
591 * equal to the real uid, then the saved uid is set to the new effective uid.
593 * This makes it possible for a setuid program to completely drop its
594 * privileges, which is often a useful assertion to make when you are doing
595 * a security audit over a program.
597 * The general idea is that a program which uses just setreuid() will be
598 * 100% compatible with BSD. A program which uses just setuid() will be
599 * 100% compatible with POSIX with saved IDs.
601 asmlinkage
long sys_setreuid(uid_t ruid
, uid_t euid
)
603 int old_ruid
, old_euid
, old_suid
, new_ruid
, new_euid
;
606 retval
= security_ops
->task_setuid(ruid
, euid
, (uid_t
)-1, LSM_SETID_RE
);
610 new_ruid
= old_ruid
= current
->uid
;
611 new_euid
= old_euid
= current
->euid
;
612 old_suid
= current
->suid
;
614 if (ruid
!= (uid_t
) -1) {
616 if ((old_ruid
!= ruid
) &&
617 (current
->euid
!= ruid
) &&
618 !capable(CAP_SETUID
))
622 if (euid
!= (uid_t
) -1) {
624 if ((old_ruid
!= euid
) &&
625 (current
->euid
!= euid
) &&
626 (current
->suid
!= euid
) &&
627 !capable(CAP_SETUID
))
631 if (new_ruid
!= old_ruid
&& set_user(new_ruid
, new_euid
!= old_euid
) < 0)
634 if (new_euid
!= old_euid
)
636 current
->mm
->dumpable
=0;
639 current
->fsuid
= current
->euid
= new_euid
;
640 if (ruid
!= (uid_t
) -1 ||
641 (euid
!= (uid_t
) -1 && euid
!= old_ruid
))
642 current
->suid
= current
->euid
;
643 current
->fsuid
= current
->euid
;
645 return security_ops
->task_post_setuid(old_ruid
, old_euid
, old_suid
, LSM_SETID_RE
);
651 * setuid() is implemented like SysV with SAVED_IDS
653 * Note that SAVED_ID's is deficient in that a setuid root program
654 * like sendmail, for example, cannot set its uid to be a normal
655 * user and then switch back, because if you're root, setuid() sets
656 * the saved uid too. If you don't like this, blame the bright people
657 * in the POSIX committee and/or USG. Note that the BSD-style setreuid()
658 * will allow a root program to temporarily drop privileges and be able to
659 * regain them by swapping the real and effective uid.
661 asmlinkage
long sys_setuid(uid_t uid
)
663 int old_euid
= current
->euid
;
664 int old_ruid
, old_suid
, new_ruid
, new_suid
;
667 retval
= security_ops
->task_setuid(uid
, (uid_t
)-1, (uid_t
)-1, LSM_SETID_ID
);
671 old_ruid
= new_ruid
= current
->uid
;
672 old_suid
= current
->suid
;
675 if (capable(CAP_SETUID
)) {
676 if (uid
!= old_ruid
&& set_user(uid
, old_euid
!= uid
) < 0)
679 } else if ((uid
!= current
->uid
) && (uid
!= new_suid
))
684 current
->mm
->dumpable
= 0;
687 current
->fsuid
= current
->euid
= uid
;
688 current
->suid
= new_suid
;
690 return security_ops
->task_post_setuid(old_ruid
, old_euid
, old_suid
, LSM_SETID_ID
);
695 * This function implements a generic ability to update ruid, euid,
696 * and suid. This allows you to implement the 4.4 compatible seteuid().
698 asmlinkage
long sys_setresuid(uid_t ruid
, uid_t euid
, uid_t suid
)
700 int old_ruid
= current
->uid
;
701 int old_euid
= current
->euid
;
702 int old_suid
= current
->suid
;
705 retval
= security_ops
->task_setuid(ruid
, euid
, suid
, LSM_SETID_RES
);
709 if (!capable(CAP_SETUID
)) {
710 if ((ruid
!= (uid_t
) -1) && (ruid
!= current
->uid
) &&
711 (ruid
!= current
->euid
) && (ruid
!= current
->suid
))
713 if ((euid
!= (uid_t
) -1) && (euid
!= current
->uid
) &&
714 (euid
!= current
->euid
) && (euid
!= current
->suid
))
716 if ((suid
!= (uid_t
) -1) && (suid
!= current
->uid
) &&
717 (suid
!= current
->euid
) && (suid
!= current
->suid
))
720 if (ruid
!= (uid_t
) -1) {
721 if (ruid
!= current
->uid
&& set_user(ruid
, euid
!= current
->euid
) < 0)
724 if (euid
!= (uid_t
) -1) {
725 if (euid
!= current
->euid
)
727 current
->mm
->dumpable
= 0;
730 current
->euid
= euid
;
732 current
->fsuid
= current
->euid
;
733 if (suid
!= (uid_t
) -1)
734 current
->suid
= suid
;
736 return security_ops
->task_post_setuid(old_ruid
, old_euid
, old_suid
, LSM_SETID_RES
);
739 asmlinkage
long sys_getresuid(uid_t
*ruid
, uid_t
*euid
, uid_t
*suid
)
743 if (!(retval
= put_user(current
->uid
, ruid
)) &&
744 !(retval
= put_user(current
->euid
, euid
)))
745 retval
= put_user(current
->suid
, suid
);
751 * Same as above, but for rgid, egid, sgid.
753 asmlinkage
long sys_setresgid(gid_t rgid
, gid_t egid
, gid_t sgid
)
757 retval
= security_ops
->task_setgid(rgid
, egid
, sgid
, LSM_SETID_RES
);
761 if (!capable(CAP_SETGID
)) {
762 if ((rgid
!= (gid_t
) -1) && (rgid
!= current
->gid
) &&
763 (rgid
!= current
->egid
) && (rgid
!= current
->sgid
))
765 if ((egid
!= (gid_t
) -1) && (egid
!= current
->gid
) &&
766 (egid
!= current
->egid
) && (egid
!= current
->sgid
))
768 if ((sgid
!= (gid_t
) -1) && (sgid
!= current
->gid
) &&
769 (sgid
!= current
->egid
) && (sgid
!= current
->sgid
))
772 if (egid
!= (gid_t
) -1) {
773 if (egid
!= current
->egid
)
775 current
->mm
->dumpable
= 0;
778 current
->egid
= egid
;
780 current
->fsgid
= current
->egid
;
781 if (rgid
!= (gid_t
) -1)
783 if (sgid
!= (gid_t
) -1)
784 current
->sgid
= sgid
;
788 asmlinkage
long sys_getresgid(gid_t
*rgid
, gid_t
*egid
, gid_t
*sgid
)
792 if (!(retval
= put_user(current
->gid
, rgid
)) &&
793 !(retval
= put_user(current
->egid
, egid
)))
794 retval
= put_user(current
->sgid
, sgid
);
801 * "setfsuid()" sets the fsuid - the uid used for filesystem checks. This
802 * is used for "access()" and for the NFS daemon (letting nfsd stay at
803 * whatever uid it wants to). It normally shadows "euid", except when
804 * explicitly set by setfsuid() or for access..
806 asmlinkage
long sys_setfsuid(uid_t uid
)
811 retval
= security_ops
->task_setuid(uid
, (uid_t
)-1, (uid_t
)-1, LSM_SETID_FS
);
815 old_fsuid
= current
->fsuid
;
816 if (uid
== current
->uid
|| uid
== current
->euid
||
817 uid
== current
->suid
|| uid
== current
->fsuid
||
820 if (uid
!= old_fsuid
)
822 current
->mm
->dumpable
= 0;
825 current
->fsuid
= uid
;
828 retval
= security_ops
->task_post_setuid(old_fsuid
, (uid_t
)-1, (uid_t
)-1, LSM_SETID_FS
);
836 * Samma på svenska..
838 asmlinkage
long sys_setfsgid(gid_t gid
)
843 retval
= security_ops
->task_setgid(gid
, (gid_t
)-1, (gid_t
)-1, LSM_SETID_FS
);
847 old_fsgid
= current
->fsgid
;
848 if (gid
== current
->gid
|| gid
== current
->egid
||
849 gid
== current
->sgid
|| gid
== current
->fsgid
||
852 if (gid
!= old_fsgid
)
854 current
->mm
->dumpable
= 0;
857 current
->fsgid
= gid
;
862 asmlinkage
long sys_times(struct tms
* tbuf
)
865 * In the SMP world we might just be unlucky and have one of
866 * the times increment as we use it. Since the value is an
867 * atomically safe type this is just fine. Conceptually its
868 * as if the syscall took an instant longer to occur.
872 tmp
.tms_utime
= jiffies_to_clock_t(current
->utime
);
873 tmp
.tms_stime
= jiffies_to_clock_t(current
->stime
);
874 tmp
.tms_cutime
= jiffies_to_clock_t(current
->cutime
);
875 tmp
.tms_cstime
= jiffies_to_clock_t(current
->cstime
);
876 if (copy_to_user(tbuf
, &tmp
, sizeof(struct tms
)))
879 return jiffies_to_clock_t(jiffies
);
883 * This needs some heavy checking ...
884 * I just haven't the stomach for it. I also don't fully
885 * understand sessions/pgrp etc. Let somebody who does explain it.
887 * OK, I think I have the protection semantics right.... this is really
888 * only important on a multi-user system anyway, to make sure one user
889 * can't send a signal to a process owned by another. -TYT, 12/12/91
891 * Auch. Had to add the 'did_exec' flag to conform completely to POSIX.
895 asmlinkage
long sys_setpgid(pid_t pid
, pid_t pgid
)
897 struct task_struct
*p
;
907 /* From this point forward we keep holding onto the tasklist lock
908 * so that our parent does not change from under us. -DaveM
910 write_lock_irq(&tasklist_lock
);
913 p
= find_task_by_pid(pid
);
917 if (!thread_group_leader(p
))
920 if (p
->parent
== current
|| p
->real_parent
== current
) {
922 if (p
->session
!= current
->session
)
927 } else if (p
!= current
)
933 struct task_struct
*p
;
937 for_each_task_pid(pgid
, PIDTYPE_PGID
, p
, l
, pid
)
938 if (p
->session
== current
->session
)
944 if (p
->pgrp
!= pgid
) {
945 detach_pid(p
, PIDTYPE_PGID
);
947 attach_pid(p
, PIDTYPE_PGID
, pgid
);
951 /* All paths lead to here, thus we are safe. -DaveM */
952 write_unlock_irq(&tasklist_lock
);
956 asmlinkage
long sys_getpgid(pid_t pid
)
959 return current
->pgrp
;
962 struct task_struct
*p
;
964 read_lock(&tasklist_lock
);
965 p
= find_task_by_pid(pid
);
969 retval
= security_ops
->task_getpgid(p
);
973 read_unlock(&tasklist_lock
);
978 asmlinkage
long sys_getpgrp(void)
980 /* SMP - assuming writes are word atomic this is fine */
981 return current
->pgrp
;
984 asmlinkage
long sys_getsid(pid_t pid
)
987 return current
->session
;
990 struct task_struct
*p
;
992 read_lock(&tasklist_lock
);
993 p
= find_task_by_pid(pid
);
997 retval
= security_ops
->task_getsid(p
);
1001 read_unlock(&tasklist_lock
);
1006 asmlinkage
long sys_setsid(void)
1011 if (!thread_group_leader(current
))
1014 write_lock_irq(&tasklist_lock
);
1016 pid
= find_pid(PIDTYPE_PGID
, current
->pid
);
1020 current
->leader
= 1;
1021 if (current
->session
!= current
->pid
) {
1022 detach_pid(current
, PIDTYPE_SID
);
1023 current
->session
= current
->pid
;
1024 attach_pid(current
, PIDTYPE_SID
, current
->pid
);
1026 if (current
->pgrp
!= current
->pid
) {
1027 detach_pid(current
, PIDTYPE_PGID
);
1028 current
->pgrp
= current
->pid
;
1029 attach_pid(current
, PIDTYPE_PGID
, current
->pid
);
1031 current
->tty
= NULL
;
1032 current
->tty_old_pgrp
= 0;
1033 err
= current
->pgrp
;
1035 write_unlock_irq(&tasklist_lock
);
1040 * Supplementary group IDs
1042 asmlinkage
long sys_getgroups(int gidsetsize
, gid_t
*grouplist
)
1047 * SMP: Nobody else can change our grouplist. Thus we are
1053 i
= current
->ngroups
;
1057 if (copy_to_user(grouplist
, current
->groups
, sizeof(gid_t
)*i
))
1064 * SMP: Our groups are not shared. We can copy to/from them safely
1065 * without another task interfering.
1068 asmlinkage
long sys_setgroups(int gidsetsize
, gid_t
*grouplist
)
1070 gid_t groups
[NGROUPS
];
1073 if (!capable(CAP_SETGID
))
1075 if ((unsigned) gidsetsize
> NGROUPS
)
1077 if(copy_from_user(groups
, grouplist
, gidsetsize
* sizeof(gid_t
)))
1079 retval
= security_ops
->task_setgroups(gidsetsize
, groups
);
1082 memcpy(current
->groups
, groups
, gidsetsize
* sizeof(gid_t
));
1083 current
->ngroups
= gidsetsize
;
/*
 * supplemental_group_member() — extraction-garbled fragment.
 * Tests whether @grp appears in current's supplementary group list
 * (current->groups, current->ngroups entries).
 * NOTE(review): the scan loop and the return statements are missing
 * from this view.
 */
1087 static int supplemental_group_member(gid_t grp
)
/* number of supplementary groups to examine */
1089 int i
= current
->ngroups
;
/* base of the supplementary group array */
1092 gid_t
*groups
= current
->groups
;
1104 * Check whether we're fsgid/egid or in the supplemental group..
1106 int in_group_p(gid_t grp
)
1109 if (grp
!= current
->fsgid
)
1110 retval
= supplemental_group_member(grp
);
1114 int in_egroup_p(gid_t grp
)
1117 if (grp
!= current
->egid
)
1118 retval
= supplemental_group_member(grp
);
1122 DECLARE_RWSEM(uts_sem
);
1124 asmlinkage
long sys_newuname(struct new_utsname
* name
)
1128 down_read(&uts_sem
);
1129 if (copy_to_user(name
,&system_utsname
,sizeof *name
))
1135 asmlinkage
long sys_sethostname(char *name
, int len
)
1139 if (!capable(CAP_SYS_ADMIN
))
1141 if (len
< 0 || len
> __NEW_UTS_LEN
)
1143 down_write(&uts_sem
);
1145 if (!copy_from_user(system_utsname
.nodename
, name
, len
)) {
1146 system_utsname
.nodename
[len
] = 0;
1153 asmlinkage
long sys_gethostname(char *name
, int len
)
1159 down_read(&uts_sem
);
1160 i
= 1 + strlen(system_utsname
.nodename
);
1164 if (copy_to_user(name
, system_utsname
.nodename
, i
))
1171 * Only setdomainname; getdomainname can be implemented by calling
1174 asmlinkage
long sys_setdomainname(char *name
, int len
)
1178 if (!capable(CAP_SYS_ADMIN
))
1180 if (len
< 0 || len
> __NEW_UTS_LEN
)
1183 down_write(&uts_sem
);
1185 if (!copy_from_user(system_utsname
.domainname
, name
, len
)) {
1187 system_utsname
.domainname
[len
] = 0;
1193 asmlinkage
long sys_getrlimit(unsigned int resource
, struct rlimit
*rlim
)
1195 if (resource
>= RLIM_NLIMITS
)
1198 return copy_to_user(rlim
, current
->rlim
+ resource
, sizeof(*rlim
))
1202 #if !defined(__ia64__)
1205 * Back compatibility for getrlimit. Needed for some apps.
1208 asmlinkage
long sys_old_getrlimit(unsigned int resource
, struct rlimit
*rlim
)
1211 if (resource
>= RLIM_NLIMITS
)
1214 memcpy(&x
, current
->rlim
+ resource
, sizeof(*rlim
));
1215 if(x
.rlim_cur
> 0x7FFFFFFF)
1216 x
.rlim_cur
= 0x7FFFFFFF;
1217 if(x
.rlim_max
> 0x7FFFFFFF)
1218 x
.rlim_max
= 0x7FFFFFFF;
1219 return copy_to_user(rlim
, &x
, sizeof(x
))?-EFAULT
:0;
1224 asmlinkage
long sys_setrlimit(unsigned int resource
, struct rlimit
*rlim
)
1226 struct rlimit new_rlim
, *old_rlim
;
1229 if (resource
>= RLIM_NLIMITS
)
1231 if(copy_from_user(&new_rlim
, rlim
, sizeof(*rlim
)))
1233 old_rlim
= current
->rlim
+ resource
;
1234 if (((new_rlim
.rlim_cur
> old_rlim
->rlim_max
) ||
1235 (new_rlim
.rlim_max
> old_rlim
->rlim_max
)) &&
1236 !capable(CAP_SYS_RESOURCE
))
1238 if (resource
== RLIMIT_NOFILE
) {
1239 if (new_rlim
.rlim_cur
> NR_OPEN
|| new_rlim
.rlim_max
> NR_OPEN
)
1243 retval
= security_ops
->task_setrlimit(resource
, &new_rlim
);
1247 *old_rlim
= new_rlim
;
1252 * It would make sense to put struct rusage in the task_struct,
1253 * except that would make the task_struct be *really big*. After
1254 * task_struct gets moved into malloc'ed memory, it would
1255 * make sense to do this. It will make moving the rest of the information
1256 * a lot simpler! (Which we're not doing right now because we're not
1257 * measuring them yet).
1259 * This is SMP safe. Either we are called from sys_getrusage on ourselves
1260 * below (we know we aren't going to exit/disappear and only we change our
1261 * rusage counters), or we are called from wait4() on a process which is
1262 * either stopped or zombied. In the zombied case the task won't get
1263 * reaped till shortly after the call to getrusage(), in both cases the
1264 * task being examined is in a frozen state so the counters won't change.
1266 * FIXME! Get the fault counts properly!
1268 int getrusage(struct task_struct
*p
, int who
, struct rusage
*ru
)
1272 memset((char *) &r
, 0, sizeof(r
));
1275 jiffies_to_timeval(p
->utime
, &r
.ru_utime
);
1276 jiffies_to_timeval(p
->stime
, &r
.ru_stime
);
1277 r
.ru_minflt
= p
->min_flt
;
1278 r
.ru_majflt
= p
->maj_flt
;
1279 r
.ru_nswap
= p
->nswap
;
1281 case RUSAGE_CHILDREN
:
1282 jiffies_to_timeval(p
->cutime
, &r
.ru_utime
);
1283 jiffies_to_timeval(p
->cstime
, &r
.ru_stime
);
1284 r
.ru_minflt
= p
->cmin_flt
;
1285 r
.ru_majflt
= p
->cmaj_flt
;
1286 r
.ru_nswap
= p
->cnswap
;
1289 jiffies_to_timeval(p
->utime
+ p
->cutime
, &r
.ru_utime
);
1290 jiffies_to_timeval(p
->stime
+ p
->cstime
, &r
.ru_stime
);
1291 r
.ru_minflt
= p
->min_flt
+ p
->cmin_flt
;
1292 r
.ru_majflt
= p
->maj_flt
+ p
->cmaj_flt
;
1293 r
.ru_nswap
= p
->nswap
+ p
->cnswap
;
1296 return copy_to_user(ru
, &r
, sizeof(r
)) ? -EFAULT
: 0;
1299 asmlinkage
long sys_getrusage(int who
, struct rusage
*ru
)
1301 if (who
!= RUSAGE_SELF
&& who
!= RUSAGE_CHILDREN
)
1303 return getrusage(current
, who
, ru
);
1306 asmlinkage
long sys_umask(int mask
)
1308 mask
= xchg(¤t
->fs
->umask
, mask
& S_IRWXUGO
);
1312 asmlinkage
long sys_prctl(int option
, unsigned long arg2
, unsigned long arg3
,
1313 unsigned long arg4
, unsigned long arg5
)
1318 error
= security_ops
->task_prctl(option
, arg2
, arg3
, arg4
, arg5
);
1323 case PR_SET_PDEATHSIG
:
1325 if (sig
< 0 || sig
> _NSIG
) {
1329 current
->pdeath_signal
= sig
;
1331 case PR_GET_PDEATHSIG
:
1332 error
= put_user(current
->pdeath_signal
, (int *)arg2
);
1334 case PR_GET_DUMPABLE
:
1335 if (current
->mm
->dumpable
)
1338 case PR_SET_DUMPABLE
:
1339 if (arg2
!= 0 && arg2
!= 1) {
1343 current
->mm
->dumpable
= arg2
;
1346 case PR_SET_UNALIGN
:
1347 error
= SET_UNALIGN_CTL(current
, arg2
);
1349 case PR_GET_UNALIGN
:
1350 error
= GET_UNALIGN_CTL(current
, arg2
);
1353 error
= SET_FPEMU_CTL(current
, arg2
);
1356 error
= GET_FPEMU_CTL(current
, arg2
);
1359 error
= SET_FPEXC_CTL(current
, arg2
);
1362 error
= GET_FPEXC_CTL(current
, arg2
);
1366 case PR_GET_KEEPCAPS
:
1367 if (current
->keep_capabilities
)
1370 case PR_SET_KEEPCAPS
:
1371 if (arg2
!= 0 && arg2
!= 1) {
1375 current
->keep_capabilities
= arg2
;
1384 EXPORT_SYMBOL(notifier_chain_register
);
1385 EXPORT_SYMBOL(notifier_chain_unregister
);
1386 EXPORT_SYMBOL(notifier_call_chain
);
1387 EXPORT_SYMBOL(register_reboot_notifier
);
1388 EXPORT_SYMBOL(unregister_reboot_notifier
);
1389 EXPORT_SYMBOL(in_group_p
);
1390 EXPORT_SYMBOL(in_egroup_p
);