/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/module.h>
#include <linux/utsname.h>
#include <linux/mman.h>
#include <linux/smp_lock.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/prctl.h>
#include <linux/highuid.h>
#include <linux/kernel.h>
#include <linux/kexec.h>
#include <linux/workqueue.h>
#include <linux/capability.h>
#include <linux/device.h>
#include <linux/key.h>
#include <linux/times.h>
#include <linux/posix-timers.h>
#include <linux/security.h>
#include <linux/dcookies.h>
#include <linux/suspend.h>
#include <linux/tty.h>
#include <linux/signal.h>
#include <linux/cn_proc.h>
#include <linux/getcpu.h>

#include <linux/compat.h>
#include <linux/syscalls.h>
#include <linux/kprobes.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#ifndef SET_UNALIGN_CTL
# define SET_UNALIGN_CTL(a,b)   (-EINVAL)
#endif
#ifndef GET_UNALIGN_CTL
# define GET_UNALIGN_CTL(a,b)   (-EINVAL)
#endif
#ifndef SET_FPEMU_CTL
# define SET_FPEMU_CTL(a,b)     (-EINVAL)
#endif
#ifndef GET_FPEMU_CTL
# define GET_FPEMU_CTL(a,b)     (-EINVAL)
#endif
#ifndef SET_FPEXC_CTL
# define SET_FPEXC_CTL(a,b)     (-EINVAL)
#endif
#ifndef GET_FPEXC_CTL
# define GET_FPEXC_CTL(a,b)     (-EINVAL)
#endif
#ifndef GET_ENDIAN
# define GET_ENDIAN(a,b)        (-EINVAL)
#endif
#ifndef SET_ENDIAN
# define SET_ENDIAN(a,b)        (-EINVAL)
#endif
/*
 * This is where the system-wide overflow UID and GID are defined, for
 * architectures that now have 32-bit UID/GID but didn't in the past.
 */

int overflowuid = DEFAULT_OVERFLOWUID;
int overflowgid = DEFAULT_OVERFLOWGID;

EXPORT_SYMBOL(overflowuid);
EXPORT_SYMBOL(overflowgid);

/*
 * The same as above, but for filesystems which can only store a 16-bit
 * UID and GID. As such, this is needed on all architectures.
 */

int fs_overflowuid = DEFAULT_FS_OVERFLOWUID;
int fs_overflowgid = DEFAULT_FS_OVERFLOWGID;

EXPORT_SYMBOL(fs_overflowuid);
EXPORT_SYMBOL(fs_overflowgid);
/*
 * This indicates whether you can reboot with ctrl-alt-del: the default is yes.
 */

int C_A_D = 1;
struct pid *cad_pid;
EXPORT_SYMBOL(cad_pid);

/*
 * Notifier list for kernel code which wants to be called
 * at shutdown. This is used to stop any idling DMA operations
 * and the like.
 */

static BLOCKING_NOTIFIER_HEAD(reboot_notifier_list);
/*
 * Notifier chain core routines.  The exported routines below
 * are layered on top of these, with appropriate locking added.
 */

static int notifier_chain_register(struct notifier_block **nl,
                struct notifier_block *n)
{
        while ((*nl) != NULL) {
                if (n->priority > (*nl)->priority)
                        break;
                nl = &((*nl)->next);
        }
        n->next = *nl;
        rcu_assign_pointer(*nl, n);
        return 0;
}

static int notifier_chain_unregister(struct notifier_block **nl,
                struct notifier_block *n)
{
        while ((*nl) != NULL) {
                if ((*nl) == n) {
                        rcu_assign_pointer(*nl, n->next);
                        return 0;
                }
                nl = &((*nl)->next);
        }
        return -ENOENT;
}

static int __kprobes notifier_call_chain(struct notifier_block **nl,
                unsigned long val, void *v)
{
        int ret = NOTIFY_DONE;
        struct notifier_block *nb, *next_nb;

        nb = rcu_dereference(*nl);
        while (nb) {
                next_nb = rcu_dereference(nb->next);
                ret = nb->notifier_call(nb, val, v);
                if ((ret & NOTIFY_STOP_MASK) == NOTIFY_STOP_MASK)
                        break;
                nb = next_nb;
        }
        return ret;
}
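/*
 * Example (editorial sketch, not part of the original file): the chain kept
 * by these core routines is a singly linked list of struct notifier_block,
 * ordered by descending ->priority (higher priority runs first).  A
 * hypothetical client supplies a callback returning NOTIFY_DONE, NOTIFY_OK
 * or NOTIFY_STOP:
 *
 *      static int my_event_handler(struct notifier_block *nb,
 *                                  unsigned long event, void *data)
 *      {
 *              return NOTIFY_DONE;
 *      }
 *
 *      static struct notifier_block my_nb = {
 *              .notifier_call  = my_event_handler,
 *              .priority       = 0,
 *      };
 *
 * and passes &my_nb to one of the locked *_notifier_chain_register()
 * wrappers below; the core routines are never called directly.
 */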
/*
 * Atomic notifier chain routines.  Registration and unregistration
 * use a spinlock, and call_chain is synchronized by RCU (no locks).
 */

/**
 * atomic_notifier_chain_register - Add notifier to an atomic notifier chain
 * @nh: Pointer to head of the atomic notifier chain
 * @n: New entry in notifier chain
 *
 * Adds a notifier to an atomic notifier chain.
 *
 * Currently always returns zero.
 */
int atomic_notifier_chain_register(struct atomic_notifier_head *nh,
                struct notifier_block *n)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&nh->lock, flags);
        ret = notifier_chain_register(&nh->head, n);
        spin_unlock_irqrestore(&nh->lock, flags);
        return ret;
}

EXPORT_SYMBOL_GPL(atomic_notifier_chain_register);
/**
 * atomic_notifier_chain_unregister - Remove notifier from an atomic notifier chain
 * @nh: Pointer to head of the atomic notifier chain
 * @n: Entry to remove from notifier chain
 *
 * Removes a notifier from an atomic notifier chain.
 *
 * Returns zero on success or %-ENOENT on failure.
 */
int atomic_notifier_chain_unregister(struct atomic_notifier_head *nh,
                struct notifier_block *n)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&nh->lock, flags);
        ret = notifier_chain_unregister(&nh->head, n);
        spin_unlock_irqrestore(&nh->lock, flags);
        synchronize_rcu();
        return ret;
}

EXPORT_SYMBOL_GPL(atomic_notifier_chain_unregister);
/**
 * atomic_notifier_call_chain - Call functions in an atomic notifier chain
 * @nh: Pointer to head of the atomic notifier chain
 * @val: Value passed unmodified to notifier function
 * @v: Pointer passed unmodified to notifier function
 *
 * Calls each function in a notifier chain in turn.  The functions
 * run in an atomic context, so they must not block.
 * This routine uses RCU to synchronize with changes to the chain.
 *
 * If the return value of the notifier can be and'ed
 * with %NOTIFY_STOP_MASK then atomic_notifier_call_chain()
 * will return immediately, with the return value of
 * the notifier function which halted execution.
 * Otherwise the return value is the return value
 * of the last notifier function called.
 */
int __kprobes atomic_notifier_call_chain(struct atomic_notifier_head *nh,
                unsigned long val, void *v)
{
        int ret;

        rcu_read_lock();
        ret = notifier_call_chain(&nh->head, val, v);
        rcu_read_unlock();
        return ret;
}

EXPORT_SYMBOL_GPL(atomic_notifier_call_chain);
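/*
 * Example (editorial sketch, not part of the original file): an atomic
 * chain is declared statically and may be called from atomic context.
 * The chain and event names here are hypothetical:
 *
 *      static ATOMIC_NOTIFIER_HEAD(my_atomic_chain);
 *
 *      atomic_notifier_chain_register(&my_atomic_chain, &my_nb);
 *      atomic_notifier_call_chain(&my_atomic_chain, MY_EVENT, NULL);
 *
 * Handlers on such a chain must not sleep: callers hold only an RCU read
 * lock, and registration takes the head's spinlock with IRQs disabled.
 */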
/*
 * Blocking notifier chain routines.  All access to the chain is
 * synchronized by an rwsem.
 */

/**
 * blocking_notifier_chain_register - Add notifier to a blocking notifier chain
 * @nh: Pointer to head of the blocking notifier chain
 * @n: New entry in notifier chain
 *
 * Adds a notifier to a blocking notifier chain.
 * Must be called in process context.
 *
 * Currently always returns zero.
 */
int blocking_notifier_chain_register(struct blocking_notifier_head *nh,
                struct notifier_block *n)
{
        int ret;

        /*
         * This code gets used during boot-up, when task switching is
         * not yet working and interrupts must remain disabled.  At
         * such times we must not call down_write().
         */
        if (unlikely(system_state == SYSTEM_BOOTING))
                return notifier_chain_register(&nh->head, n);

        down_write(&nh->rwsem);
        ret = notifier_chain_register(&nh->head, n);
        up_write(&nh->rwsem);
        return ret;
}

EXPORT_SYMBOL_GPL(blocking_notifier_chain_register);
/**
 * blocking_notifier_chain_unregister - Remove notifier from a blocking notifier chain
 * @nh: Pointer to head of the blocking notifier chain
 * @n: Entry to remove from notifier chain
 *
 * Removes a notifier from a blocking notifier chain.
 * Must be called from process context.
 *
 * Returns zero on success or %-ENOENT on failure.
 */
int blocking_notifier_chain_unregister(struct blocking_notifier_head *nh,
                struct notifier_block *n)
{
        int ret;

        /*
         * This code gets used during boot-up, when task switching is
         * not yet working and interrupts must remain disabled.  At
         * such times we must not call down_write().
         */
        if (unlikely(system_state == SYSTEM_BOOTING))
                return notifier_chain_unregister(&nh->head, n);

        down_write(&nh->rwsem);
        ret = notifier_chain_unregister(&nh->head, n);
        up_write(&nh->rwsem);
        return ret;
}

EXPORT_SYMBOL_GPL(blocking_notifier_chain_unregister);
/**
 * blocking_notifier_call_chain - Call functions in a blocking notifier chain
 * @nh: Pointer to head of the blocking notifier chain
 * @val: Value passed unmodified to notifier function
 * @v: Pointer passed unmodified to notifier function
 *
 * Calls each function in a notifier chain in turn.  The functions
 * run in a process context, so they are allowed to block.
 *
 * If the return value of the notifier can be and'ed
 * with %NOTIFY_STOP_MASK then blocking_notifier_call_chain()
 * will return immediately, with the return value of
 * the notifier function which halted execution.
 * Otherwise the return value is the return value
 * of the last notifier function called.
 */
int blocking_notifier_call_chain(struct blocking_notifier_head *nh,
                unsigned long val, void *v)
{
        int ret = NOTIFY_DONE;

        /*
         * We check the head outside the lock, but if this access is
         * racy then it does not matter what the result of the test
         * is, we re-check the list after having taken the lock anyway:
         */
        if (rcu_dereference(nh->head)) {
                down_read(&nh->rwsem);
                ret = notifier_call_chain(&nh->head, val, v);
                up_read(&nh->rwsem);
        }
        return ret;
}

EXPORT_SYMBOL_GPL(blocking_notifier_call_chain);
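/*
 * Example (editorial sketch, not part of the original file): a blocking
 * chain suits handlers that may sleep.  Hypothetical usage:
 *
 *      static BLOCKING_NOTIFIER_HEAD(my_blocking_chain);
 *
 *      blocking_notifier_chain_register(&my_blocking_chain, &my_nb);
 *      ret = blocking_notifier_call_chain(&my_blocking_chain, MY_EVENT, &info);
 *
 * A return value with NOTIFY_STOP_MASK set means some handler stopped the
 * traversal, exactly as described above.
 */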
/*
 * Raw notifier chain routines.  There is no protection;
 * the caller must provide it.  Use at your own risk!
 */

/**
 * raw_notifier_chain_register - Add notifier to a raw notifier chain
 * @nh: Pointer to head of the raw notifier chain
 * @n: New entry in notifier chain
 *
 * Adds a notifier to a raw notifier chain.
 * All locking must be provided by the caller.
 *
 * Currently always returns zero.
 */
int raw_notifier_chain_register(struct raw_notifier_head *nh,
                struct notifier_block *n)
{
        return notifier_chain_register(&nh->head, n);
}

EXPORT_SYMBOL_GPL(raw_notifier_chain_register);
/**
 * raw_notifier_chain_unregister - Remove notifier from a raw notifier chain
 * @nh: Pointer to head of the raw notifier chain
 * @n: Entry to remove from notifier chain
 *
 * Removes a notifier from a raw notifier chain.
 * All locking must be provided by the caller.
 *
 * Returns zero on success or %-ENOENT on failure.
 */
int raw_notifier_chain_unregister(struct raw_notifier_head *nh,
                struct notifier_block *n)
{
        return notifier_chain_unregister(&nh->head, n);
}

EXPORT_SYMBOL_GPL(raw_notifier_chain_unregister);
/**
 * raw_notifier_call_chain - Call functions in a raw notifier chain
 * @nh: Pointer to head of the raw notifier chain
 * @val: Value passed unmodified to notifier function
 * @v: Pointer passed unmodified to notifier function
 *
 * Calls each function in a notifier chain in turn.  The functions
 * run in an undefined context.
 * All locking must be provided by the caller.
 *
 * If the return value of the notifier can be and'ed
 * with %NOTIFY_STOP_MASK then raw_notifier_call_chain()
 * will return immediately, with the return value of
 * the notifier function which halted execution.
 * Otherwise the return value is the return value
 * of the last notifier function called.
 */
int raw_notifier_call_chain(struct raw_notifier_head *nh,
                unsigned long val, void *v)
{
        return notifier_call_chain(&nh->head, val, v);
}

EXPORT_SYMBOL_GPL(raw_notifier_call_chain);
/*
 * SRCU notifier chain routines.  Registration and unregistration
 * use a mutex, and call_chain is synchronized by SRCU (no locks).
 */

/**
 * srcu_notifier_chain_register - Add notifier to an SRCU notifier chain
 * @nh: Pointer to head of the SRCU notifier chain
 * @n: New entry in notifier chain
 *
 * Adds a notifier to an SRCU notifier chain.
 * Must be called in process context.
 *
 * Currently always returns zero.
 */
int srcu_notifier_chain_register(struct srcu_notifier_head *nh,
                struct notifier_block *n)
{
        int ret;

        /*
         * This code gets used during boot-up, when task switching is
         * not yet working and interrupts must remain disabled.  At
         * such times we must not call mutex_lock().
         */
        if (unlikely(system_state == SYSTEM_BOOTING))
                return notifier_chain_register(&nh->head, n);

        mutex_lock(&nh->mutex);
        ret = notifier_chain_register(&nh->head, n);
        mutex_unlock(&nh->mutex);
        return ret;
}

EXPORT_SYMBOL_GPL(srcu_notifier_chain_register);
/**
 * srcu_notifier_chain_unregister - Remove notifier from an SRCU notifier chain
 * @nh: Pointer to head of the SRCU notifier chain
 * @n: Entry to remove from notifier chain
 *
 * Removes a notifier from an SRCU notifier chain.
 * Must be called from process context.
 *
 * Returns zero on success or %-ENOENT on failure.
 */
int srcu_notifier_chain_unregister(struct srcu_notifier_head *nh,
                struct notifier_block *n)
{
        int ret;

        /*
         * This code gets used during boot-up, when task switching is
         * not yet working and interrupts must remain disabled.  At
         * such times we must not call mutex_lock().
         */
        if (unlikely(system_state == SYSTEM_BOOTING))
                return notifier_chain_unregister(&nh->head, n);

        mutex_lock(&nh->mutex);
        ret = notifier_chain_unregister(&nh->head, n);
        mutex_unlock(&nh->mutex);
        synchronize_srcu(&nh->srcu);
        return ret;
}

EXPORT_SYMBOL_GPL(srcu_notifier_chain_unregister);
/**
 * srcu_notifier_call_chain - Call functions in an SRCU notifier chain
 * @nh: Pointer to head of the SRCU notifier chain
 * @val: Value passed unmodified to notifier function
 * @v: Pointer passed unmodified to notifier function
 *
 * Calls each function in a notifier chain in turn.  The functions
 * run in a process context, so they are allowed to block.
 *
 * If the return value of the notifier can be and'ed
 * with %NOTIFY_STOP_MASK then srcu_notifier_call_chain()
 * will return immediately, with the return value of
 * the notifier function which halted execution.
 * Otherwise the return value is the return value
 * of the last notifier function called.
 */
int srcu_notifier_call_chain(struct srcu_notifier_head *nh,
                unsigned long val, void *v)
{
        int ret;
        int idx;

        idx = srcu_read_lock(&nh->srcu);
        ret = notifier_call_chain(&nh->head, val, v);
        srcu_read_unlock(&nh->srcu, idx);
        return ret;
}

EXPORT_SYMBOL_GPL(srcu_notifier_call_chain);
/**
 * srcu_init_notifier_head - Initialize an SRCU notifier head
 * @nh: Pointer to head of the srcu notifier chain
 *
 * Unlike other sorts of notifier heads, SRCU notifier heads require
 * dynamic initialization.  Be sure to call this routine before
 * calling any of the other SRCU notifier routines for this head.
 *
 * If an SRCU notifier head is deallocated, it must first be cleaned
 * up by calling srcu_cleanup_notifier_head().  Otherwise the head's
 * per-cpu data (used by the SRCU mechanism) will leak.
 */
void srcu_init_notifier_head(struct srcu_notifier_head *nh)
{
        mutex_init(&nh->mutex);
        if (init_srcu_struct(&nh->srcu) < 0)
                BUG();
        nh->head = NULL;
}

EXPORT_SYMBOL_GPL(srcu_init_notifier_head);
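/*
 * Example (editorial sketch, not part of the original file): because there
 * is no static initializer that sets up the SRCU state, an SRCU chain
 * embedded in a dynamically allocated object is set up and torn down
 * explicitly, e.g.:
 *
 *      struct srcu_notifier_head *nh = kmalloc(sizeof(*nh), GFP_KERNEL);
 *
 *      srcu_init_notifier_head(nh);
 *      ...
 *      srcu_cleanup_notifier_head(nh);
 *      kfree(nh);
 */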
/**
 * register_reboot_notifier - Register function to be called at reboot time
 * @nb: Info about notifier function to be called
 *
 * Registers a function with the list of functions
 * to be called at reboot time.
 *
 * Currently always returns zero, as blocking_notifier_chain_register()
 * always returns zero.
 */
int register_reboot_notifier(struct notifier_block * nb)
{
        return blocking_notifier_chain_register(&reboot_notifier_list, nb);
}

EXPORT_SYMBOL(register_reboot_notifier);

/**
 * unregister_reboot_notifier - Unregister previously registered reboot notifier
 * @nb: Hook to be unregistered
 *
 * Unregisters a previously registered reboot
 * notifier function.
 *
 * Returns zero on success, or %-ENOENT on failure.
 */
int unregister_reboot_notifier(struct notifier_block * nb)
{
        return blocking_notifier_chain_unregister(&reboot_notifier_list, nb);
}

EXPORT_SYMBOL(unregister_reboot_notifier);
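/*
 * Example (editorial sketch, not part of the original file): a driver that
 * must quiesce hardware before reboot hangs a callback on the list above.
 * The names below are hypothetical:
 *
 *      static int my_reboot_event(struct notifier_block *nb,
 *                                 unsigned long code, void *cmd)
 *      {
 *              return NOTIFY_DONE;
 *      }
 *
 *      static struct notifier_block my_reboot_nb = {
 *              .notifier_call = my_reboot_event,
 *      };
 *
 *      register_reboot_notifier(&my_reboot_nb);
 *
 * The callback's code argument is SYS_RESTART, SYS_HALT or SYS_POWER_OFF,
 * matching kernel_restart_prepare()/kernel_shutdown_prepare() below.
 */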
static int set_one_prio(struct task_struct *p, int niceval, int error)
{
        int no_nice;

        if (p->uid != current->euid &&
            p->euid != current->euid && !capable(CAP_SYS_NICE)) {
                error = -EPERM;
                goto out;
        }
        if (niceval < task_nice(p) && !can_nice(p, niceval)) {
                error = -EACCES;
                goto out;
        }
        no_nice = security_task_setnice(p, niceval);
        if (no_nice) {
                error = no_nice;
                goto out;
        }
        if (error == -ESRCH)
                error = 0;
        set_user_nice(p, niceval);
out:
        return error;
}
asmlinkage long sys_setpriority(int which, int who, int niceval)
{
        struct task_struct *g, *p;
        struct user_struct *user;
        int error = -EINVAL;

        if (which > 2 || which < 0)
                goto out;

        /* normalize: avoid signed division (rounding problems) */
        error = -ESRCH;
        if (niceval < -20)
                niceval = -20;
        if (niceval > 19)
                niceval = 19;

        read_lock(&tasklist_lock);
        switch (which) {
        case PRIO_PROCESS:
                if (!who)
                        who = current->pid;
                p = find_task_by_pid(who);
                if (p)
                        error = set_one_prio(p, niceval, error);
                break;
        case PRIO_PGRP:
                if (!who)
                        who = process_group(current);
                do_each_task_pid(who, PIDTYPE_PGID, p) {
                        error = set_one_prio(p, niceval, error);
                } while_each_task_pid(who, PIDTYPE_PGID, p);
                break;
        case PRIO_USER:
                user = current->user;
                if (!who)
                        who = current->uid;
                else if ((who != current->uid) && !(user = find_user(who)))
                        goto out_unlock;        /* No processes for this user */

                do_each_thread(g, p)
                        if (p->uid == who)
                                error = set_one_prio(p, niceval, error);
                while_each_thread(g, p);
                if (who != current->uid)
                        free_uid(user);         /* For find_user() */
                break;
        }
out_unlock:
        read_unlock(&tasklist_lock);
out:
        return error;
}
/*
 * Ugh. To avoid negative return values, "getpriority()" will
 * not return the normal nice-value, but a negated value that
 * has been offset by 20 (ie it returns 40..1 instead of -20..19)
 * to stay compatible.
 */
asmlinkage long sys_getpriority(int which, int who)
{
        struct task_struct *g, *p;
        struct user_struct *user;
        long niceval, retval = -ESRCH;

        if (which > 2 || which < 0)
                return -EINVAL;

        read_lock(&tasklist_lock);
        switch (which) {
        case PRIO_PROCESS:
                if (!who)
                        who = current->pid;
                p = find_task_by_pid(who);
                if (p) {
                        niceval = 20 - task_nice(p);
                        if (niceval > retval)
                                retval = niceval;
                }
                break;
        case PRIO_PGRP:
                if (!who)
                        who = process_group(current);
                do_each_task_pid(who, PIDTYPE_PGID, p) {
                        niceval = 20 - task_nice(p);
                        if (niceval > retval)
                                retval = niceval;
                } while_each_task_pid(who, PIDTYPE_PGID, p);
                break;
        case PRIO_USER:
                user = current->user;
                if (!who)
                        who = current->uid;
                else if ((who != current->uid) && !(user = find_user(who)))
                        goto out_unlock;        /* No processes for this user */

                do_each_thread(g, p)
                        if (p->uid == who) {
                                niceval = 20 - task_nice(p);
                                if (niceval > retval)
                                        retval = niceval;
                        }
                while_each_thread(g, p);
                if (who != current->uid)
                        free_uid(user);         /* for find_user() */
                break;
        }
out_unlock:
        read_unlock(&tasklist_lock);

        return retval;
}
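/*
 * Example (editorial sketch, not part of the original file): a userspace
 * caller that wants the conventional -20..19 value undoes the offset
 * itself, which is what the glibc getpriority() wrapper does:
 *
 *      long res = syscall(SYS_getpriority, PRIO_PROCESS, 0);
 *      if (res >= 1 && res <= 40)
 *              printf("nice value: %ld\n", 20 - res);
 */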
/**
 * emergency_restart - reboot the system
 *
 * Without shutting down any hardware or taking any locks
 * reboot the system.  This is called when we know we are in
 * trouble so this is our best effort to reboot.  This is
 * safe to call in interrupt context.
 */
void emergency_restart(void)
{
        machine_emergency_restart();
}
EXPORT_SYMBOL_GPL(emergency_restart);
static void kernel_restart_prepare(char *cmd)
{
        blocking_notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd);
        system_state = SYSTEM_RESTART;
        device_shutdown();
}
/**
 * kernel_restart - reboot the system
 * @cmd: pointer to buffer containing command to execute for restart,
 *       or %NULL
 *
 * Shutdown everything and perform a clean reboot.
 * This is not safe to call in interrupt context.
 */
void kernel_restart(char *cmd)
{
        kernel_restart_prepare(cmd);
        if (!cmd)
                printk(KERN_EMERG "Restarting system.\n");
        else
                printk(KERN_EMERG "Restarting system with command '%s'.\n", cmd);
        machine_restart(cmd);
}
EXPORT_SYMBOL_GPL(kernel_restart);
/**
 * kernel_kexec - reboot the system
 *
 * Move into place and start executing a preloaded standalone
 * executable.  If nothing was preloaded return an error.
 */
static void kernel_kexec(void)
{
#ifdef CONFIG_KEXEC
        struct kimage *image;

        image = xchg(&kexec_image, NULL);
        if (!image)
                return;
        kernel_restart_prepare(NULL);
        printk(KERN_EMERG "Starting new kernel\n");
        machine_shutdown();
        machine_kexec(image);
#endif
}
void kernel_shutdown_prepare(enum system_states state)
{
        blocking_notifier_call_chain(&reboot_notifier_list,
                (state == SYSTEM_HALT) ? SYS_HALT : SYS_POWER_OFF, NULL);
        system_state = state;
        device_shutdown();
}
/**
 * kernel_halt - halt the system
 *
 * Shutdown everything and perform a clean system halt.
 */
void kernel_halt(void)
{
        kernel_shutdown_prepare(SYSTEM_HALT);
        printk(KERN_EMERG "System halted.\n");
        machine_halt();
}

EXPORT_SYMBOL_GPL(kernel_halt);
/**
 * kernel_power_off - power_off the system
 *
 * Shutdown everything and perform a clean system power_off.
 */
void kernel_power_off(void)
{
        kernel_shutdown_prepare(SYSTEM_POWER_OFF);
        printk(KERN_EMERG "Power down.\n");
        machine_power_off();
}

EXPORT_SYMBOL_GPL(kernel_power_off);
/*
 * Reboot system call: for obvious reasons only root may call it,
 * and even root needs to set up some magic numbers in the registers
 * so that some mistake won't make this reboot the whole machine.
 * You can also set the meaning of the ctrl-alt-del-key here.
 *
 * reboot doesn't sync: do that yourself before calling this.
 */
asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void __user * arg)
{
        char buffer[256];

        /* We only trust the superuser with rebooting the system. */
        if (!capable(CAP_SYS_BOOT))
                return -EPERM;

        /* For safety, we require "magic" arguments. */
        if (magic1 != LINUX_REBOOT_MAGIC1 ||
            (magic2 != LINUX_REBOOT_MAGIC2 &&
             magic2 != LINUX_REBOOT_MAGIC2A &&
             magic2 != LINUX_REBOOT_MAGIC2B &&
             magic2 != LINUX_REBOOT_MAGIC2C))
                return -EINVAL;

        /*
         * Instead of trying to make the power_off code look like
         * halt when pm_power_off is not set, do it the easy way.
         */
        if ((cmd == LINUX_REBOOT_CMD_POWER_OFF) && !pm_power_off)
                cmd = LINUX_REBOOT_CMD_HALT;

        lock_kernel();
        switch (cmd) {
        case LINUX_REBOOT_CMD_RESTART:
                kernel_restart(NULL);
                break;

        case LINUX_REBOOT_CMD_CAD_ON:
                C_A_D = 1;
                break;

        case LINUX_REBOOT_CMD_CAD_OFF:
                C_A_D = 0;
                break;

        case LINUX_REBOOT_CMD_HALT:
                kernel_halt();
                unlock_kernel();
                do_exit(0);
                break;

        case LINUX_REBOOT_CMD_POWER_OFF:
                kernel_power_off();
                unlock_kernel();
                do_exit(0);
                break;

        case LINUX_REBOOT_CMD_RESTART2:
                if (strncpy_from_user(&buffer[0], arg, sizeof(buffer) - 1) < 0) {
                        unlock_kernel();
                        return -EFAULT;
                }
                buffer[sizeof(buffer) - 1] = '\0';

                kernel_restart(buffer);
                break;

        case LINUX_REBOOT_CMD_KEXEC:
                kernel_kexec();
                unlock_kernel();
                return -EINVAL;

#ifdef CONFIG_SOFTWARE_SUSPEND
        case LINUX_REBOOT_CMD_SW_SUSPEND:
        {
                int ret = software_suspend();
                unlock_kernel();
                return ret;
        }
#endif

        default:
                unlock_kernel();
                return -EINVAL;
        }
        unlock_kernel();
        return 0;
}
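/*
 * Example (editorial sketch, not part of the original file): a minimal
 * privileged userspace caller of the syscall above.  Note the explicit
 * sync(), since sys_reboot() deliberately does not sync:
 *
 *      #include <unistd.h>
 *      #include <sys/syscall.h>
 *      #include <linux/reboot.h>
 *
 *      sync();
 *      syscall(SYS_reboot, LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2,
 *              LINUX_REBOOT_CMD_RESTART, NULL);
 */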
static void deferred_cad(struct work_struct *dummy)
{
        kernel_restart(NULL);
}

/*
 * This function gets called by ctrl-alt-del - ie the keyboard interrupt.
 * As it's called within an interrupt, it may NOT sync: the only choice
 * is whether to reboot at once, or just ignore the ctrl-alt-del.
 */
void ctrl_alt_del(void)
{
        static DECLARE_WORK(cad_work, deferred_cad);

        if (C_A_D)
                schedule_work(&cad_work);
        else
                kill_cad_pid(SIGINT, 1);
}
/*
 * Unprivileged users may change the real gid to the effective gid
 * or vice versa.  (BSD-style)
 *
 * If you set the real gid at all, or set the effective gid to a value not
 * equal to the real gid, then the saved gid is set to the new effective gid.
 *
 * This makes it possible for a setgid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit of a program.
 *
 * The general idea is that a program which uses just setregid() will be
 * 100% compatible with BSD.  A program which uses just setgid() will be
 * 100% compatible with POSIX with saved IDs.
 *
 * SMP: There are no races; the GIDs are checked only by filesystem
 *      operations (as far as semantic preservation is concerned).
 */
asmlinkage long sys_setregid(gid_t rgid, gid_t egid)
{
        int old_rgid = current->gid;
        int old_egid = current->egid;
        int new_rgid = old_rgid;
        int new_egid = old_egid;
        int retval;

        retval = security_task_setgid(rgid, egid, (gid_t)-1, LSM_SETID_RE);
        if (retval)
                return retval;

        if (rgid != (gid_t) -1) {
                if ((old_rgid == rgid) ||
                    (current->egid == rgid) ||
                    capable(CAP_SETGID))
                        new_rgid = rgid;
                else
                        return -EPERM;
        }
        if (egid != (gid_t) -1) {
                if ((old_rgid == egid) ||
                    (current->egid == egid) ||
                    (current->sgid == egid) ||
                    capable(CAP_SETGID))
                        new_egid = egid;
                else
                        return -EPERM;
        }
        if (new_egid != old_egid) {
                current->mm->dumpable = suid_dumpable;
                smp_wmb();
        }
        if (rgid != (gid_t) -1 ||
            (egid != (gid_t) -1 && egid != old_rgid))
                current->sgid = new_egid;
        current->fsgid = new_egid;
        current->egid = new_egid;
        current->gid = new_rgid;
        key_fsgid_changed(current);
        proc_id_connector(current, PROC_EVENT_GID);
        return 0;
}
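/*
 * Example (editorial sketch, not part of the original file): the
 * "completely drop its privileges" case described above, as a setgid
 * program would perform it:
 *
 *      gid_t rgid = getgid();
 *
 *      if (setregid(rgid, rgid) < 0)
 *              abort();
 *
 * Because the real gid is being set, the saved gid is also replaced by the
 * new effective gid, so no elevated gid remains recoverable.
 */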
/*
 * setgid() is implemented like SysV w/ SAVED_IDS.
 *
 * SMP: Same implicit races as above.
 */
asmlinkage long sys_setgid(gid_t gid)
{
        int old_egid = current->egid;
        int retval;

        retval = security_task_setgid(gid, (gid_t)-1, (gid_t)-1, LSM_SETID_ID);
        if (retval)
                return retval;

        if (capable(CAP_SETGID)) {
                if (old_egid != gid) {
                        current->mm->dumpable = suid_dumpable;
                        smp_wmb();
                }
                current->gid = current->egid = current->sgid = current->fsgid = gid;
        } else if ((gid == current->gid) || (gid == current->sgid)) {
                if (old_egid != gid) {
                        current->mm->dumpable = suid_dumpable;
                        smp_wmb();
                }
                current->egid = current->fsgid = gid;
        } else
                return -EPERM;

        key_fsgid_changed(current);
        proc_id_connector(current, PROC_EVENT_GID);
        return 0;
}
static int set_user(uid_t new_ruid, int dumpclear)
{
        struct user_struct *new_user;

        new_user = alloc_uid(new_ruid);
        if (!new_user)
                return -EAGAIN;

        if (atomic_read(&new_user->processes) >=
                        current->signal->rlim[RLIMIT_NPROC].rlim_cur &&
            new_user != &root_user) {
                free_uid(new_user);
                return -EAGAIN;
        }

        switch_uid(new_user);

        if (dumpclear) {
                current->mm->dumpable = suid_dumpable;
                smp_wmb();
        }
        current->uid = new_ruid;
        return 0;
}
/*
 * Unprivileged users may change the real uid to the effective uid
 * or vice versa.  (BSD-style)
 *
 * If you set the real uid at all, or set the effective uid to a value not
 * equal to the real uid, then the saved uid is set to the new effective uid.
 *
 * This makes it possible for a setuid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit of a program.
 *
 * The general idea is that a program which uses just setreuid() will be
 * 100% compatible with BSD.  A program which uses just setuid() will be
 * 100% compatible with POSIX with saved IDs.
 */
asmlinkage long sys_setreuid(uid_t ruid, uid_t euid)
{
        int old_ruid, old_euid, old_suid, new_ruid, new_euid;
        int retval;

        retval = security_task_setuid(ruid, euid, (uid_t)-1, LSM_SETID_RE);
        if (retval)
                return retval;

        new_ruid = old_ruid = current->uid;
        new_euid = old_euid = current->euid;
        old_suid = current->suid;

        if (ruid != (uid_t) -1) {
                new_ruid = ruid;
                if ((old_ruid != ruid) &&
                    (current->euid != ruid) &&
                    !capable(CAP_SETUID))
                        return -EPERM;
        }

        if (euid != (uid_t) -1) {
                new_euid = euid;
                if ((old_ruid != euid) &&
                    (current->euid != euid) &&
                    (current->suid != euid) &&
                    !capable(CAP_SETUID))
                        return -EPERM;
        }

        if (new_ruid != old_ruid && set_user(new_ruid, new_euid != old_euid) < 0)
                return -EAGAIN;

        if (new_euid != old_euid) {
                current->mm->dumpable = suid_dumpable;
                smp_wmb();
        }
        current->fsuid = current->euid = new_euid;
        if (ruid != (uid_t) -1 ||
            (euid != (uid_t) -1 && euid != old_ruid))
                current->suid = current->euid;
        current->fsuid = current->euid;

        key_fsuid_changed(current);
        proc_id_connector(current, PROC_EVENT_UID);

        return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_RE);
}
/*
 * setuid() is implemented like SysV with SAVED_IDS.
 *
 * Note that SAVED_IDS is deficient in that a setuid root program
 * like sendmail, for example, cannot set its uid to be a normal
 * user and then switch back, because if you're root, setuid() sets
 * the saved uid too.  If you don't like this, blame the bright people
 * in the POSIX committee and/or USG.  Note that the BSD-style setreuid()
 * will allow a root program to temporarily drop privileges and be able to
 * regain them by swapping the real and effective uid.
 */
asmlinkage long sys_setuid(uid_t uid)
{
        int old_euid = current->euid;
        int old_ruid, old_suid, new_suid;
        int retval;

        retval = security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_ID);
        if (retval)
                return retval;

        old_ruid = current->uid;
        old_suid = current->suid;
        new_suid = old_suid;

        if (capable(CAP_SETUID)) {
                if (uid != old_ruid && set_user(uid, old_euid != uid) < 0)
                        return -EAGAIN;
                new_suid = uid;
        } else if ((uid != current->uid) && (uid != new_suid))
                return -EPERM;

        if (old_euid != uid) {
                current->mm->dumpable = suid_dumpable;
                smp_wmb();
        }
        current->fsuid = current->euid = uid;
        current->suid = new_suid;

        key_fsuid_changed(current);
        proc_id_connector(current, PROC_EVENT_UID);

        return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_ID);
}
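/*
 * Example (editorial sketch, not part of the original file): the
 * temporary-drop pattern the comment above contrasts with setuid().  A
 * setuid-root program swaps its real and effective uids, then swaps back:
 *
 *      uid_t ruid = getuid(), euid = geteuid();
 *
 *      setreuid(euid, ruid);
 *      ... unprivileged work ...
 *      setreuid(ruid, euid);
 *
 * The first call runs the process with the invoking user's effective uid;
 * the second swap regains root for privileged operations.
 */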
/*
 * This function implements a generic ability to update ruid, euid,
 * and suid.  This allows you to implement the 4.4 compatible seteuid().
 */
asmlinkage long sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
{
        int old_ruid = current->uid;
        int old_euid = current->euid;
        int old_suid = current->suid;
        int retval;

        retval = security_task_setuid(ruid, euid, suid, LSM_SETID_RES);
        if (retval)
                return retval;

        if (!capable(CAP_SETUID)) {
                if ((ruid != (uid_t) -1) && (ruid != current->uid) &&
                    (ruid != current->euid) && (ruid != current->suid))
                        return -EPERM;
                if ((euid != (uid_t) -1) && (euid != current->uid) &&
                    (euid != current->euid) && (euid != current->suid))
                        return -EPERM;
                if ((suid != (uid_t) -1) && (suid != current->uid) &&
                    (suid != current->euid) && (suid != current->suid))
                        return -EPERM;
        }
        if (ruid != (uid_t) -1) {
                if (ruid != current->uid && set_user(ruid, euid != current->euid) < 0)
                        return -EAGAIN;
        }
        if (euid != (uid_t) -1) {
                if (euid != current->euid) {
                        current->mm->dumpable = suid_dumpable;
                        smp_wmb();
                }
                current->euid = euid;
        }
        current->fsuid = current->euid;
        if (suid != (uid_t) -1)
                current->suid = suid;

        key_fsuid_changed(current);
        proc_id_connector(current, PROC_EVENT_UID);

        return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_RES);
}
asmlinkage long sys_getresuid(uid_t __user *ruid, uid_t __user *euid, uid_t __user *suid)
{
        int retval;

        if (!(retval = put_user(current->uid, ruid)) &&
            !(retval = put_user(current->euid, euid)))
                retval = put_user(current->suid, suid);

        return retval;
}
/*
 * Same as above, but for rgid, egid, sgid.
 */
asmlinkage long sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
{
        int retval;

        retval = security_task_setgid(rgid, egid, sgid, LSM_SETID_RES);
        if (retval)
                return retval;

        if (!capable(CAP_SETGID)) {
                if ((rgid != (gid_t) -1) && (rgid != current->gid) &&
                    (rgid != current->egid) && (rgid != current->sgid))
                        return -EPERM;
                if ((egid != (gid_t) -1) && (egid != current->gid) &&
                    (egid != current->egid) && (egid != current->sgid))
                        return -EPERM;
                if ((sgid != (gid_t) -1) && (sgid != current->gid) &&
                    (sgid != current->egid) && (sgid != current->sgid))
                        return -EPERM;
        }
        if (egid != (gid_t) -1) {
                if (egid != current->egid) {
                        current->mm->dumpable = suid_dumpable;
                        smp_wmb();
                }
                current->egid = egid;
        }
        current->fsgid = current->egid;
        if (rgid != (gid_t) -1)
                current->gid = rgid;
        if (sgid != (gid_t) -1)
                current->sgid = sgid;

        key_fsgid_changed(current);
        proc_id_connector(current, PROC_EVENT_GID);
        return 0;
}
asmlinkage long sys_getresgid(gid_t __user *rgid, gid_t __user *egid, gid_t __user *sgid)
{
        int retval;

        if (!(retval = put_user(current->gid, rgid)) &&
            !(retval = put_user(current->egid, egid)))
                retval = put_user(current->sgid, sgid);

        return retval;
}
1255 * "setfsuid()" sets the fsuid - the uid used for filesystem checks. This
1256 * is used for "access()" and for the NFS daemon (letting nfsd stay at
1257 * whatever uid it wants to). It normally shadows "euid", except when
1258 * explicitly set by setfsuid() or for access..
1260 asmlinkage
long sys_setfsuid(uid_t uid
)
1264 old_fsuid
= current
->fsuid
;
1265 if (security_task_setuid(uid
, (uid_t
)-1, (uid_t
)-1, LSM_SETID_FS
))
1268 if (uid
== current
->uid
|| uid
== current
->euid
||
1269 uid
== current
->suid
|| uid
== current
->fsuid
||
1270 capable(CAP_SETUID
)) {
1271 if (uid
!= old_fsuid
) {
1272 current
->mm
->dumpable
= suid_dumpable
;
1275 current
->fsuid
= uid
;
1278 key_fsuid_changed(current
);
1279 proc_id_connector(current
, PROC_EVENT_UID
);
1281 security_task_post_setuid(old_fsuid
, (uid_t
)-1, (uid_t
)-1, LSM_SETID_FS
);
/*
 * Same as setfsuid(), but for the filesystem gid.
 */
asmlinkage long sys_setfsgid(gid_t gid)
{
        int old_fsgid;

        old_fsgid = current->fsgid;
        if (security_task_setgid(gid, (gid_t)-1, (gid_t)-1, LSM_SETID_FS))
                return old_fsgid;

        if (gid == current->gid || gid == current->egid ||
            gid == current->sgid || gid == current->fsgid ||
            capable(CAP_SETGID)) {
                if (gid != old_fsgid) {
                        current->mm->dumpable = suid_dumpable;
                        smp_wmb();
                }
                current->fsgid = gid;
                key_fsgid_changed(current);
                proc_id_connector(current, PROC_EVENT_GID);
        }
        return old_fsgid;
}
asmlinkage long sys_times(struct tms __user * tbuf)
{
        /*
         * In the SMP world we might just be unlucky and have one of
         * the times increment as we use it. Since the value is an
         * atomically safe type this is just fine. Conceptually it's
         * as if the syscall took an instant longer to occur.
         */
        if (tbuf) {
                struct tms tmp;
                struct task_struct *tsk = current;
                struct task_struct *t;
                cputime_t utime, stime, cutime, cstime;

                spin_lock_irq(&tsk->sighand->siglock);
                utime = tsk->signal->utime;
                stime = tsk->signal->stime;
                t = tsk;
                do {
                        utime = cputime_add(utime, t->utime);
                        stime = cputime_add(stime, t->stime);
                        t = next_thread(t);
                } while (t != tsk);

                cutime = tsk->signal->cutime;
                cstime = tsk->signal->cstime;
                spin_unlock_irq(&tsk->sighand->siglock);

                tmp.tms_utime = cputime_to_clock_t(utime);
                tmp.tms_stime = cputime_to_clock_t(stime);
                tmp.tms_cutime = cputime_to_clock_t(cutime);
                tmp.tms_cstime = cputime_to_clock_t(cstime);
                if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
                        return -EFAULT;
        }
        return (long) jiffies_64_to_clock_t(get_jiffies_64());
}
/*
 * This needs some heavy checking ...
 * I just haven't the stomach for it. I also don't fully
 * understand sessions/pgrp etc. Let somebody who does explain it.
 *
 * OK, I think I have the protection semantics right.... this is really
 * only important on a multi-user system anyway, to make sure one user
 * can't send a signal to a process owned by another.  -TYT, 12/12/91
 *
 * Auch. Had to add the 'did_exec' flag to conform completely to POSIX.
 */
asmlinkage long sys_setpgid(pid_t pid, pid_t pgid)
{
        struct task_struct *p;
        struct task_struct *group_leader = current->group_leader;
        int err = -EINVAL;

        if (!pid)
                pid = group_leader->pid;
        if (!pgid)
                pgid = pid;
        if (pgid < 0)
                return -EINVAL;

        /* From this point forward we keep holding onto the tasklist lock
         * so that our parent does not change from under us. -DaveM
         */
        write_lock_irq(&tasklist_lock);

        err = -ESRCH;
        p = find_task_by_pid(pid);
        if (!p)
                goto out;

        err = -EINVAL;
        if (!thread_group_leader(p))
                goto out;

        if (p->real_parent == group_leader) {
                err = -EPERM;
                if (process_session(p) != process_session(group_leader))
                        goto out;
                err = -EACCES;
                if (p->did_exec)
                        goto out;
        } else {
                err = -ESRCH;
                if (p != group_leader)
                        goto out;
        }

        err = -EPERM;
        if (p->signal->leader)
                goto out;

        if (pgid != pid) {
                struct task_struct *g =
                        find_task_by_pid_type(PIDTYPE_PGID, pgid);

                if (!g || process_session(g) != process_session(group_leader))
                        goto out;
        }

        err = security_task_setpgid(p, pgid);
        if (err)
                goto out;

        if (process_group(p) != pgid) {
                detach_pid(p, PIDTYPE_PGID);
                p->signal->pgrp = pgid;
                attach_pid(p, PIDTYPE_PGID, pgid);
        }

        err = 0;
out:
        /* All paths lead to here, thus we are safe. -DaveM */
        write_unlock_irq(&tasklist_lock);
        return err;
}
asmlinkage long sys_getpgid(pid_t pid)
{
        if (!pid)
                return process_group(current);
        else {
                int retval;
                struct task_struct *p;

                read_lock(&tasklist_lock);
                p = find_task_by_pid(pid);

                retval = -ESRCH;
                if (p) {
                        retval = security_task_getpgid(p);
                        if (!retval)
                                retval = process_group(p);
                }
                read_unlock(&tasklist_lock);
                return retval;
        }
}
#ifdef __ARCH_WANT_SYS_GETPGRP

asmlinkage long sys_getpgrp(void)
{
        /* SMP - assuming writes are word atomic this is fine */
        return process_group(current);
}

#endif
asmlinkage long sys_getsid(pid_t pid)
{
        if (!pid)
                return process_session(current);
        else {
                int retval;
                struct task_struct *p;

                read_lock(&tasklist_lock);
                p = find_task_by_pid(pid);

                retval = -ESRCH;
                if (p) {
                        retval = security_task_getsid(p);
                        if (!retval)
                                retval = process_session(p);
                }
                read_unlock(&tasklist_lock);
                return retval;
        }
}
asmlinkage long sys_setsid(void)
{
        struct task_struct *group_leader = current->group_leader;
        pid_t session;
        int err = -EPERM;

        write_lock_irq(&tasklist_lock);

        /* Fail if I am already a session leader */
        if (group_leader->signal->leader)
                goto out;

        session = group_leader->pid;
        /* Fail if a process group id already exists that equals the
         * proposed session id.
         *
         * Don't check if session id == 1 because kernel threads use this
         * session id and so the check will always fail and make it so
         * init cannot successfully call setsid.
         */
        if (session > 1 && find_task_by_pid_type(PIDTYPE_PGID, session))
                goto out;

        group_leader->signal->leader = 1;
        __set_special_pids(session, session);

        spin_lock(&group_leader->sighand->siglock);
        group_leader->signal->tty = NULL;
        group_leader->signal->tty_old_pgrp = 0;
        spin_unlock(&group_leader->sighand->siglock);

        err = process_group(group_leader);
out:
        write_unlock_irq(&tasklist_lock);
        return err;
}
/*
 * Supplementary group IDs
 */

/* init to 2 - one for init_task, one to ensure it is never freed */
struct group_info init_groups = { .usage = ATOMIC_INIT(2) };

struct group_info *groups_alloc(int gidsetsize)
{
        struct group_info *group_info;
        int nblocks;
        int i;

        nblocks = (gidsetsize + NGROUPS_PER_BLOCK - 1) / NGROUPS_PER_BLOCK;
        /* Make sure we always allocate at least one indirect block pointer */
        nblocks = nblocks ? : 1;
        group_info = kmalloc(sizeof(*group_info) + nblocks*sizeof(gid_t *), GFP_USER);
        if (!group_info)
                return NULL;
        group_info->ngroups = gidsetsize;
        group_info->nblocks = nblocks;
        atomic_set(&group_info->usage, 1);

        if (gidsetsize <= NGROUPS_SMALL)
                group_info->blocks[0] = group_info->small_block;
        else {
                for (i = 0; i < nblocks; i++) {
                        gid_t *b;

                        b = (void *)__get_free_page(GFP_USER);
                        if (!b)
                                goto out_undo_partial_alloc;
                        group_info->blocks[i] = b;
                }
        }
        return group_info;

out_undo_partial_alloc:
        while (--i >= 0)
                free_page((unsigned long)group_info->blocks[i]);
        kfree(group_info);
        return NULL;
}

EXPORT_SYMBOL(groups_alloc);
void groups_free(struct group_info *group_info)
{
        if (group_info->blocks[0] != group_info->small_block) {
                int i;

                for (i = 0; i < group_info->nblocks; i++)
                        free_page((unsigned long)group_info->blocks[i]);
        }
        kfree(group_info);
}

EXPORT_SYMBOL(groups_free);
/* export the group_info to a user-space array */
static int groups_to_user(gid_t __user *grouplist,
                          struct group_info *group_info)
{
        int i;
        int count = group_info->ngroups;

        for (i = 0; i < group_info->nblocks; i++) {
                int cp_count = min(NGROUPS_PER_BLOCK, count);
                int off = i * NGROUPS_PER_BLOCK;
                int len = cp_count * sizeof(*grouplist);

                if (copy_to_user(grouplist+off, group_info->blocks[i], len))
                        return -EFAULT;

                count -= cp_count;
        }
        return 0;
}
/* fill a group_info from a user-space array - it must be allocated already */
static int groups_from_user(struct group_info *group_info,
                            gid_t __user *grouplist)
{
        int i;
        int count = group_info->ngroups;

        for (i = 0; i < group_info->nblocks; i++) {
                int cp_count = min(NGROUPS_PER_BLOCK, count);
                int off = i * NGROUPS_PER_BLOCK;
                int len = cp_count * sizeof(*grouplist);

                if (copy_from_user(group_info->blocks[i], grouplist+off, len))
                        return -EFAULT;

                count -= cp_count;
        }
        return 0;
}
/* a simple Shell sort */
static void groups_sort(struct group_info *group_info)
{
        int base, max, stride;
        int gidsetsize = group_info->ngroups;

        for (stride = 1; stride < gidsetsize; stride = 3 * stride + 1)
                ; /* nothing */
        stride /= 3;

        while (stride) {
                max = gidsetsize - stride;
                for (base = 0; base < max; base++) {
                        int left = base;
                        int right = left + stride;
                        gid_t tmp = GROUP_AT(group_info, right);

                        while (left >= 0 && GROUP_AT(group_info, left) > tmp) {
                                GROUP_AT(group_info, right) =
                                        GROUP_AT(group_info, left);
                                right = left;
                                left -= stride;
                        }
                        GROUP_AT(group_info, right) = tmp;
                }
                stride /= 3;
        }
}
/* a simple bsearch */
int groups_search(struct group_info *group_info, gid_t grp)
{
        unsigned int left, right;

        if (!group_info)
                return 0;

        left = 0;
        right = group_info->ngroups;
        while (left < right) {
                unsigned int mid = (left+right)/2;
                int cmp = grp - GROUP_AT(group_info, mid);

                if (cmp > 0)
                        left = mid + 1;
                else if (cmp < 0)
                        right = mid;
                else
                        return 1;
        }
        return 0;
}
/* validate and set current->group_info */
int set_current_groups(struct group_info *group_info)
{
        int retval;
        struct group_info *old_info;

        retval = security_task_setgroups(group_info);
        if (retval)
                return retval;

        groups_sort(group_info);
        get_group_info(group_info);

        task_lock(current);
        old_info = current->group_info;
        current->group_info = group_info;
        task_unlock(current);

        put_group_info(old_info);

        return 0;
}

EXPORT_SYMBOL(set_current_groups);
asmlinkage long sys_getgroups(int gidsetsize, gid_t __user *grouplist)
{
        int i;

        /*
         * SMP: Nobody else can change our grouplist. Thus we are
         * safe.
         */

        if (gidsetsize < 0)
                return -EINVAL;

        /* no need to grab task_lock here; it cannot change */
        i = current->group_info->ngroups;
        if (gidsetsize) {
                if (i > gidsetsize) {
                        i = -EINVAL;
                        goto out;
                }
                if (groups_to_user(grouplist, current->group_info)) {
                        i = -EFAULT;
                        goto out;
                }
        }
out:
        return i;
}
/*
 * SMP: Our groups are copy-on-write. We can set them safely
 * without another task interfering.
 */
asmlinkage long sys_setgroups(int gidsetsize, gid_t __user *grouplist)
{
        struct group_info *group_info;
        int retval;

        if (!capable(CAP_SETGID))
                return -EPERM;
        if ((unsigned)gidsetsize > NGROUPS_MAX)
                return -EINVAL;

        group_info = groups_alloc(gidsetsize);
        if (!group_info)
                return -ENOMEM;
        retval = groups_from_user(group_info, grouplist);
        if (retval) {
                put_group_info(group_info);
                return retval;
        }

        retval = set_current_groups(group_info);
        put_group_info(group_info);

        return retval;
}
/*
 * Check whether we're fsgid/egid or in the supplemental group.
 */
int in_group_p(gid_t grp)
{
        int retval = 1;

        if (grp != current->fsgid)
                retval = groups_search(current->group_info, grp);
        return retval;
}

EXPORT_SYMBOL(in_group_p);

int in_egroup_p(gid_t grp)
{
        int retval = 1;

        if (grp != current->egid)
                retval = groups_search(current->group_info, grp);
        return retval;
}

EXPORT_SYMBOL(in_egroup_p);
DECLARE_RWSEM(uts_sem);

EXPORT_SYMBOL(uts_sem);

asmlinkage long sys_newuname(struct new_utsname __user * name)
{
        int errno = 0;

        down_read(&uts_sem);
        if (copy_to_user(name, utsname(), sizeof *name))
                errno = -EFAULT;
        up_read(&uts_sem);
        return errno;
}
asmlinkage long sys_sethostname(char __user *name, int len)
{
        int errno;
        char tmp[__NEW_UTS_LEN];

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
        if (len < 0 || len > __NEW_UTS_LEN)
                return -EINVAL;
        down_write(&uts_sem);
        errno = -EFAULT;
        if (!copy_from_user(tmp, name, len)) {
                memcpy(utsname()->nodename, tmp, len);
                utsname()->nodename[len] = 0;
                errno = 0;
        }
        up_write(&uts_sem);
        return errno;
}
#ifdef __ARCH_WANT_SYS_GETHOSTNAME

asmlinkage long sys_gethostname(char __user *name, int len)
{
        int i, errno;

        if (len < 0)
                return -EINVAL;
        down_read(&uts_sem);
        i = 1 + strlen(utsname()->nodename);
        if (i > len)
                i = len;
        errno = 0;
        if (copy_to_user(name, utsname()->nodename, i))
                errno = -EFAULT;
        up_read(&uts_sem);
        return errno;
}

#endif
/*
 * Only setdomainname; getdomainname can be implemented by calling
 * uname().
 */
asmlinkage long sys_setdomainname(char __user *name, int len)
{
        int errno;
        char tmp[__NEW_UTS_LEN];

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
        if (len < 0 || len > __NEW_UTS_LEN)
                return -EINVAL;

        down_write(&uts_sem);
        errno = -EFAULT;
        if (!copy_from_user(tmp, name, len)) {
                memcpy(utsname()->domainname, tmp, len);
                utsname()->domainname[len] = 0;
                errno = 0;
        }
        up_write(&uts_sem);
        return errno;
}
asmlinkage long sys_getrlimit(unsigned int resource, struct rlimit __user *rlim)
{
        if (resource >= RLIM_NLIMITS)
                return -EINVAL;
        else {
                struct rlimit value;

                task_lock(current->group_leader);
                value = current->signal->rlim[resource];
                task_unlock(current->group_leader);
                return copy_to_user(rlim, &value, sizeof(*rlim)) ? -EFAULT : 0;
        }
}
#ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT

/*
 * Back compatibility for getrlimit. Needed for some apps.
 */
asmlinkage long sys_old_getrlimit(unsigned int resource, struct rlimit __user *rlim)
{
        struct rlimit x;

        if (resource >= RLIM_NLIMITS)
                return -EINVAL;

        task_lock(current->group_leader);
        x = current->signal->rlim[resource];
        task_unlock(current->group_leader);
        if (x.rlim_cur > 0x7FFFFFFF)
                x.rlim_cur = 0x7FFFFFFF;
        if (x.rlim_max > 0x7FFFFFFF)
                x.rlim_max = 0x7FFFFFFF;
        return copy_to_user(rlim, &x, sizeof(x)) ? -EFAULT : 0;
}

#endif
asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim)
{
        struct rlimit new_rlim, *old_rlim;
        unsigned long it_prof_secs;
        int retval;

        if (resource >= RLIM_NLIMITS)
                return -EINVAL;
        if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
                return -EFAULT;
        if (new_rlim.rlim_cur > new_rlim.rlim_max)
                return -EINVAL;
        old_rlim = current->signal->rlim + resource;
        if ((new_rlim.rlim_max > old_rlim->rlim_max) &&
            !capable(CAP_SYS_RESOURCE))
                return -EPERM;
        if (resource == RLIMIT_NOFILE && new_rlim.rlim_max > NR_OPEN)
                return -EPERM;

        retval = security_task_setrlimit(resource, &new_rlim);
        if (retval)
                return retval;

        task_lock(current->group_leader);
        *old_rlim = new_rlim;
        task_unlock(current->group_leader);

        if (resource != RLIMIT_CPU)
                goto out;

        /*
         * RLIMIT_CPU handling.  Note that the kernel fails to return an error
         * code if it rejected the user's attempt to set RLIMIT_CPU.  This is a
         * very long-standing error, and fixing it now risks breakage of
         * applications, so we live with it.
         */
        if (new_rlim.rlim_cur == RLIM_INFINITY)
                goto out;

        it_prof_secs = cputime_to_secs(current->signal->it_prof_expires);
        if (it_prof_secs == 0 || new_rlim.rlim_cur <= it_prof_secs) {
                unsigned long rlim_cur = new_rlim.rlim_cur;
                cputime_t cputime;

                if (rlim_cur == 0) {
                        /*
                         * The caller is asking for an immediate RLIMIT_CPU
                         * expiry.  But we use the zero value to mean "it was
                         * never set".  So let's cheat and make it one second
                         * instead.
                         */
                        rlim_cur = 1;
                }
                cputime = secs_to_cputime(rlim_cur);
                read_lock(&tasklist_lock);
                spin_lock_irq(&current->sighand->siglock);
                set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL);
                spin_unlock_irq(&current->sighand->siglock);
                read_unlock(&tasklist_lock);
        }
out:
        return 0;
}
/*
 * It would make sense to put struct rusage in the task_struct,
 * except that would make the task_struct be *really big*.  After
 * task_struct gets moved into malloc'ed memory, it would
 * make sense to do this.  It will make moving the rest of the information
 * a lot simpler!  (Which we're not doing right now because we're not
 * measuring them yet.)
 *
 * When sampling multiple threads for RUSAGE_SELF, under SMP we might have
 * races with threads incrementing their own counters.  But since word
 * reads are atomic, we either get new values or old values and we don't
 * care which for the sums.  We always take the siglock to protect reading
 * the c* fields from p->signal from races with exit.c updating those
 * fields when reaping, so a sample either gets all the additions of a
 * given child after it's reaped, or none so this sample is before reaping.
 *
 * Locking:
 * We need to take the siglock for CHILDREN, SELF and BOTH
 * for the cases: current multithreaded, non-current single-threaded, and
 * non-current multithreaded.  Thread traversal is now safe with
 * the siglock held.
 * Strictly speaking, we do not need to take the siglock if we are current and
 * single threaded, as no one else can take our signal_struct away, no one
 * else can reap the children to update signal->c* counters, and no one else
 * can race with the signal-> fields.  If we do not take any lock, the
 * signal-> fields could be read out of order while another thread was just
 * exiting.  So we should place a read memory barrier when we avoid the lock.
 * On the writer side, a write memory barrier is implied in __exit_signal,
 * as __exit_signal releases the siglock spinlock after updating the signal->
 * fields.  But we don't do this yet to keep things simple.
 */
static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
{
        struct task_struct *t;
        unsigned long flags;
        cputime_t utime, stime;

        memset((char *) r, 0, sizeof *r);
        utime = stime = cputime_zero;

        rcu_read_lock();
        if (!lock_task_sighand(p, &flags)) {
                rcu_read_unlock();
                return;
        }

        switch (who) {
        case RUSAGE_BOTH:
        case RUSAGE_CHILDREN:
                utime = p->signal->cutime;
                stime = p->signal->cstime;
                r->ru_nvcsw = p->signal->cnvcsw;
                r->ru_nivcsw = p->signal->cnivcsw;
                r->ru_minflt = p->signal->cmin_flt;
                r->ru_majflt = p->signal->cmaj_flt;

                if (who == RUSAGE_CHILDREN)
                        break;
                /* fall through */

        case RUSAGE_SELF:
                utime = cputime_add(utime, p->signal->utime);
                stime = cputime_add(stime, p->signal->stime);
                r->ru_nvcsw += p->signal->nvcsw;
                r->ru_nivcsw += p->signal->nivcsw;
                r->ru_minflt += p->signal->min_flt;
                r->ru_majflt += p->signal->maj_flt;
                t = p;
                do {
                        utime = cputime_add(utime, t->utime);
                        stime = cputime_add(stime, t->stime);
                        r->ru_nvcsw += t->nvcsw;
                        r->ru_nivcsw += t->nivcsw;
                        r->ru_minflt += t->min_flt;
                        r->ru_majflt += t->maj_flt;
                        t = next_thread(t);
                } while (t != p);
                break;

        default:
                BUG();
        }

        unlock_task_sighand(p, &flags);
        rcu_read_unlock();

        cputime_to_timeval(utime, &r->ru_utime);
        cputime_to_timeval(stime, &r->ru_stime);
}
int getrusage(struct task_struct *p, int who, struct rusage __user *ru)
{
        struct rusage r;

        k_getrusage(p, who, &r);
        return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;
}

asmlinkage long sys_getrusage(int who, struct rusage __user *ru)
{
        if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN)
                return -EINVAL;
        return getrusage(current, who, ru);
}
asmlinkage long sys_umask(int mask)
{
        mask = xchg(&current->fs->umask, mask & S_IRWXUGO);
        return mask;
}
asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
                          unsigned long arg4, unsigned long arg5)
{
        long error;

        error = security_task_prctl(option, arg2, arg3, arg4, arg5);
        if (error)
                return error;

        switch (option) {
        case PR_SET_PDEATHSIG:
                if (!valid_signal(arg2)) {
                        error = -EINVAL;
                        break;
                }
                current->pdeath_signal = arg2;
                break;
        case PR_GET_PDEATHSIG:
                error = put_user(current->pdeath_signal, (int __user *)arg2);
                break;
        case PR_GET_DUMPABLE:
                error = current->mm->dumpable;
                break;
        case PR_SET_DUMPABLE:
                if (arg2 > 1) {
                        error = -EINVAL;
                        break;
                }
                current->mm->dumpable = arg2;
                break;

        case PR_SET_UNALIGN:
                error = SET_UNALIGN_CTL(current, arg2);
                break;
        case PR_GET_UNALIGN:
                error = GET_UNALIGN_CTL(current, arg2);
                break;
        case PR_SET_FPEMU:
                error = SET_FPEMU_CTL(current, arg2);
                break;
        case PR_GET_FPEMU:
                error = GET_FPEMU_CTL(current, arg2);
                break;
        case PR_SET_FPEXC:
                error = SET_FPEXC_CTL(current, arg2);
                break;
        case PR_GET_FPEXC:
                error = GET_FPEXC_CTL(current, arg2);
                break;
        case PR_GET_TIMING:
                error = PR_TIMING_STATISTICAL;
                break;
        case PR_SET_TIMING:
                if (arg2 == PR_TIMING_STATISTICAL)
                        error = 0;
                else
                        error = -EINVAL;
                break;

        case PR_GET_KEEPCAPS:
                if (current->keep_capabilities)
                        error = 1;
                break;
        case PR_SET_KEEPCAPS:
                if (arg2 != 0 && arg2 != 1) {
                        error = -EINVAL;
                        break;
                }
                current->keep_capabilities = arg2;
                break;
        case PR_SET_NAME: {
                struct task_struct *me = current;
                unsigned char ncomm[sizeof(me->comm)];

                ncomm[sizeof(me->comm)-1] = 0;
                if (strncpy_from_user(ncomm, (char __user *)arg2,
                                      sizeof(me->comm)-1) < 0)
                        return -EFAULT;
                set_task_comm(me, ncomm);
                return 0;
        }
        case PR_GET_NAME: {
                struct task_struct *me = current;
                unsigned char tcomm[sizeof(me->comm)];

                get_task_comm(tcomm, me);
                if (copy_to_user((char __user *)arg2, tcomm, sizeof(tcomm)))
                        return -EFAULT;
                return 0;
        }
        case PR_GET_ENDIAN:
                error = GET_ENDIAN(current, arg2);
                break;
        case PR_SET_ENDIAN:
                error = SET_ENDIAN(current, arg2);
                break;

        default:
                error = -EINVAL;
                break;
        }
        return error;
}
asmlinkage long sys_getcpu(unsigned __user *cpup, unsigned __user *nodep,
                           struct getcpu_cache __user *cache)
{
        int err = 0;
        int cpu = raw_smp_processor_id();

        if (cpup)
                err |= put_user(cpu, cpup);
        if (nodep)
                err |= put_user(cpu_to_node(cpu), nodep);
        if (cache) {
                /*
                 * The cache is not needed for this implementation,
                 * but make sure user programs pass something
                 * valid. vsyscall implementations can instead make
                 * good use of the cache. Only use t0 and t1 because
                 * these are available in both 32bit and 64bit ABI (no
                 * need for a compat_getcpu). 32bit has enough
                 * padding.
                 */
                unsigned long t0, t1;

                get_user(t0, &cache->blob[0]);
                get_user(t1, &cache->blob[1]);
                t0++;
                t1++;
                put_user(t0, &cache->blob[0]);
                put_user(t1, &cache->blob[1]);
        }
        return err ? -EFAULT : 0;
}