/*
 *  linux/kernel/sys.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/module.h>
#include <linux/utsname.h>
#include <linux/mman.h>
#include <linux/smp_lock.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/prctl.h>
#include <linux/highuid.h>
#include <linux/resource.h>
#include <linux/kernel.h>
#include <linux/kexec.h>
#include <linux/workqueue.h>
#include <linux/capability.h>
#include <linux/device.h>
#include <linux/key.h>
#include <linux/times.h>
#include <linux/posix-timers.h>
#include <linux/security.h>
#include <linux/dcookies.h>
#include <linux/suspend.h>
#include <linux/tty.h>
#include <linux/signal.h>
#include <linux/cn_proc.h>
#include <linux/getcpu.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/seccomp.h>
#include <linux/cpu.h>
#include <linux/compat.h>
#include <linux/syscalls.h>
#include <linux/kprobes.h>
#include <linux/user_namespace.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#ifndef SET_UNALIGN_CTL
# define SET_UNALIGN_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_UNALIGN_CTL
# define GET_UNALIGN_CTL(a,b)	(-EINVAL)
#endif
#ifndef SET_FPEMU_CTL
# define SET_FPEMU_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_FPEMU_CTL
# define GET_FPEMU_CTL(a,b)	(-EINVAL)
#endif
#ifndef SET_FPEXC_CTL
# define SET_FPEXC_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_FPEXC_CTL
# define GET_FPEXC_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_ENDIAN
# define GET_ENDIAN(a,b)	(-EINVAL)
#endif
#ifndef SET_ENDIAN
# define SET_ENDIAN(a,b)	(-EINVAL)
#endif
/*
 * this is where the system-wide overflow UID and GID are defined, for
 * architectures that now have 32-bit UID/GID but didn't in the past
 */

int overflowuid = DEFAULT_OVERFLOWUID;
int overflowgid = DEFAULT_OVERFLOWGID;

EXPORT_SYMBOL(overflowuid);
EXPORT_SYMBOL(overflowgid);
/*
 * the same as above, but for filesystems which can only store a 16-bit
 * UID and GID. as such, this is needed on all architectures
 */

int fs_overflowuid = DEFAULT_FS_OVERFLOWUID;
int fs_overflowgid = DEFAULT_FS_OVERFLOWGID;

EXPORT_SYMBOL(fs_overflowuid);
EXPORT_SYMBOL(fs_overflowgid);
/*
 * this indicates whether you can reboot with ctrl-alt-del: the default is yes
 */

int C_A_D = 1;
struct pid *cad_pid;
EXPORT_SYMBOL(cad_pid);
/*
 * If set, this is used for preparing the system to power off.
 */

void (*pm_power_off_prepare)(void);
EXPORT_SYMBOL(pm_power_off_prepare);
/*
 *	Notifier list for kernel code which wants to be called
 *	at shutdown. This is used to stop any idling DMA operations
 *	and the like.
 */

static BLOCKING_NOTIFIER_HEAD(reboot_notifier_list);
/*
 *	Notifier chain core routines.  The exported routines below
 *	are layered on top of these, with appropriate locking added.
 */

static int notifier_chain_register(struct notifier_block **nl,
		struct notifier_block *n)
{
	while ((*nl) != NULL) {
		if (n->priority > (*nl)->priority)
			break;
		nl = &((*nl)->next);
	}
	n->next = *nl;
	rcu_assign_pointer(*nl, n);
	return 0;
}

static int notifier_chain_unregister(struct notifier_block **nl,
		struct notifier_block *n)
{
	while ((*nl) != NULL) {
		if ((*nl) == n) {
			rcu_assign_pointer(*nl, n->next);
			return 0;
		}
		nl = &((*nl)->next);
	}
	return -ENOENT;
}
/**
 * notifier_call_chain - Informs the registered notifiers about an event.
 *	@nl:		Pointer to head of the blocking notifier chain
 *	@val:		Value passed unmodified to notifier function
 *	@v:		Pointer passed unmodified to notifier function
 *	@nr_to_call:	Number of notifier functions to be called. Don't care
 *			value of this parameter is -1.
 *	@nr_calls:	Records the number of notifications sent. Don't care
 *			value of this field is NULL.
 *	@returns:	notifier_call_chain returns the value returned by the
 *			last notifier function called.
 */

static int __kprobes notifier_call_chain(struct notifier_block **nl,
					unsigned long val, void *v,
					int nr_to_call, int *nr_calls)
{
	int ret = NOTIFY_DONE;
	struct notifier_block *nb, *next_nb;

	nb = rcu_dereference(*nl);

	while (nb && nr_to_call) {
		next_nb = rcu_dereference(nb->next);
		ret = nb->notifier_call(nb, val, v);

		if (nr_calls)
			(*nr_calls)++;

		if ((ret & NOTIFY_STOP_MASK) == NOTIFY_STOP_MASK)
			break;
		nb = next_nb;
		nr_to_call--;
	}
	return ret;
}
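/*
 * Illustrative sketch (not part of this file's interface): a notifier
 * callback that consumes an event.  Returning a value with
 * NOTIFY_STOP_MASK set makes notifier_call_chain() stop walking the
 * chain and hand that value back to the caller; NOTIFY_DONE lets the
 * walk continue.  The callback name is hypothetical:
 *
 *	static int example_notifier_call(struct notifier_block *nb,
 *					 unsigned long val, void *v)
 *	{
 *		if (val == SYS_HALT)
 *			return NOTIFY_STOP;	// later entries never run
 *		return NOTIFY_DONE;		// keep walking the chain
 *	}
 */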
/*
 *	Atomic notifier chain routines.  Registration and unregistration
 *	use a spinlock, and call_chain is synchronized by RCU (no locks).
 */

/**
 *	atomic_notifier_chain_register - Add notifier to an atomic notifier chain
 *	@nh: Pointer to head of the atomic notifier chain
 *	@n: New entry in notifier chain
 *
 *	Adds a notifier to an atomic notifier chain.
 *
 *	Currently always returns zero.
 */

int atomic_notifier_chain_register(struct atomic_notifier_head *nh,
		struct notifier_block *n)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&nh->lock, flags);
	ret = notifier_chain_register(&nh->head, n);
	spin_unlock_irqrestore(&nh->lock, flags);
	return ret;
}

EXPORT_SYMBOL_GPL(atomic_notifier_chain_register);

/**
 *	atomic_notifier_chain_unregister - Remove notifier from an atomic notifier chain
 *	@nh: Pointer to head of the atomic notifier chain
 *	@n: Entry to remove from notifier chain
 *
 *	Removes a notifier from an atomic notifier chain.
 *
 *	Returns zero on success or %-ENOENT on failure.
 */
int atomic_notifier_chain_unregister(struct atomic_notifier_head *nh,
		struct notifier_block *n)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&nh->lock, flags);
	ret = notifier_chain_unregister(&nh->head, n);
	spin_unlock_irqrestore(&nh->lock, flags);
	synchronize_rcu();
	return ret;
}

EXPORT_SYMBOL_GPL(atomic_notifier_chain_unregister);

/**
 *	__atomic_notifier_call_chain - Call functions in an atomic notifier chain
 *	@nh: Pointer to head of the atomic notifier chain
 *	@val: Value passed unmodified to notifier function
 *	@v: Pointer passed unmodified to notifier function
 *	@nr_to_call: See the comment for notifier_call_chain.
 *	@nr_calls: See the comment for notifier_call_chain.
 *
 *	Calls each function in a notifier chain in turn.  The functions
 *	run in an atomic context, so they must not block.
 *	This routine uses RCU to synchronize with changes to the chain.
 *
 *	If the return value of the notifier can be and'ed
 *	with %NOTIFY_STOP_MASK then atomic_notifier_call_chain()
 *	will return immediately, with the return value of
 *	the notifier function which halted execution.
 *	Otherwise the return value is the return value
 *	of the last notifier function called.
 */

int __kprobes __atomic_notifier_call_chain(struct atomic_notifier_head *nh,
					unsigned long val, void *v,
					int nr_to_call, int *nr_calls)
{
	int ret;

	rcu_read_lock();
	ret = notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls);
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(__atomic_notifier_call_chain);

int __kprobes atomic_notifier_call_chain(struct atomic_notifier_head *nh,
		unsigned long val, void *v)
{
	return __atomic_notifier_call_chain(nh, val, v, -1, NULL);
}
EXPORT_SYMBOL_GPL(atomic_notifier_call_chain);
/*
 *	Blocking notifier chain routines.  All access to the chain is
 *	synchronized by an rwsem.
 */

/**
 *	blocking_notifier_chain_register - Add notifier to a blocking notifier chain
 *	@nh: Pointer to head of the blocking notifier chain
 *	@n: New entry in notifier chain
 *
 *	Adds a notifier to a blocking notifier chain.
 *	Must be called in process context.
 *
 *	Currently always returns zero.
 */

int blocking_notifier_chain_register(struct blocking_notifier_head *nh,
		struct notifier_block *n)
{
	int ret;

	/*
	 * This code gets used during boot-up, when task switching is
	 * not yet working and interrupts must remain disabled.  At
	 * such times we must not call down_write().
	 */
	if (unlikely(system_state == SYSTEM_BOOTING))
		return notifier_chain_register(&nh->head, n);

	down_write(&nh->rwsem);
	ret = notifier_chain_register(&nh->head, n);
	up_write(&nh->rwsem);
	return ret;
}

EXPORT_SYMBOL_GPL(blocking_notifier_chain_register);

/**
 *	blocking_notifier_chain_unregister - Remove notifier from a blocking notifier chain
 *	@nh: Pointer to head of the blocking notifier chain
 *	@n: Entry to remove from notifier chain
 *
 *	Removes a notifier from a blocking notifier chain.
 *	Must be called from process context.
 *
 *	Returns zero on success or %-ENOENT on failure.
 */
int blocking_notifier_chain_unregister(struct blocking_notifier_head *nh,
		struct notifier_block *n)
{
	int ret;

	/*
	 * This code gets used during boot-up, when task switching is
	 * not yet working and interrupts must remain disabled.  At
	 * such times we must not call down_write().
	 */
	if (unlikely(system_state == SYSTEM_BOOTING))
		return notifier_chain_unregister(&nh->head, n);

	down_write(&nh->rwsem);
	ret = notifier_chain_unregister(&nh->head, n);
	up_write(&nh->rwsem);
	return ret;
}

EXPORT_SYMBOL_GPL(blocking_notifier_chain_unregister);

/**
 *	__blocking_notifier_call_chain - Call functions in a blocking notifier chain
 *	@nh: Pointer to head of the blocking notifier chain
 *	@val: Value passed unmodified to notifier function
 *	@v: Pointer passed unmodified to notifier function
 *	@nr_to_call: See comment for notifier_call_chain.
 *	@nr_calls: See comment for notifier_call_chain.
 *
 *	Calls each function in a notifier chain in turn.  The functions
 *	run in a process context, so they are allowed to block.
 *
 *	If the return value of the notifier can be and'ed
 *	with %NOTIFY_STOP_MASK then blocking_notifier_call_chain()
 *	will return immediately, with the return value of
 *	the notifier function which halted execution.
 *	Otherwise the return value is the return value
 *	of the last notifier function called.
 */

int __blocking_notifier_call_chain(struct blocking_notifier_head *nh,
				   unsigned long val, void *v,
				   int nr_to_call, int *nr_calls)
{
	int ret = NOTIFY_DONE;

	/*
	 * We check the head outside the lock, but if this access is
	 * racy then it does not matter what the result of the test
	 * is, we re-check the list after having taken the lock anyway:
	 */
	if (rcu_dereference(nh->head)) {
		down_read(&nh->rwsem);
		ret = notifier_call_chain(&nh->head, val, v, nr_to_call,
					nr_calls);
		up_read(&nh->rwsem);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(__blocking_notifier_call_chain);

int blocking_notifier_call_chain(struct blocking_notifier_head *nh,
		unsigned long val, void *v)
{
	return __blocking_notifier_call_chain(nh, val, v, -1, NULL);
}
EXPORT_SYMBOL_GPL(blocking_notifier_call_chain);
/*
 *	Raw notifier chain routines.  There is no protection;
 *	the caller must provide it.  Use at your own risk!
 */

/**
 *	raw_notifier_chain_register - Add notifier to a raw notifier chain
 *	@nh: Pointer to head of the raw notifier chain
 *	@n: New entry in notifier chain
 *
 *	Adds a notifier to a raw notifier chain.
 *	All locking must be provided by the caller.
 *
 *	Currently always returns zero.
 */

int raw_notifier_chain_register(struct raw_notifier_head *nh,
		struct notifier_block *n)
{
	return notifier_chain_register(&nh->head, n);
}

EXPORT_SYMBOL_GPL(raw_notifier_chain_register);

/**
 *	raw_notifier_chain_unregister - Remove notifier from a raw notifier chain
 *	@nh: Pointer to head of the raw notifier chain
 *	@n: Entry to remove from notifier chain
 *
 *	Removes a notifier from a raw notifier chain.
 *	All locking must be provided by the caller.
 *
 *	Returns zero on success or %-ENOENT on failure.
 */
int raw_notifier_chain_unregister(struct raw_notifier_head *nh,
		struct notifier_block *n)
{
	return notifier_chain_unregister(&nh->head, n);
}

EXPORT_SYMBOL_GPL(raw_notifier_chain_unregister);

/**
 *	__raw_notifier_call_chain - Call functions in a raw notifier chain
 *	@nh: Pointer to head of the raw notifier chain
 *	@val: Value passed unmodified to notifier function
 *	@v: Pointer passed unmodified to notifier function
 *	@nr_to_call: See comment for notifier_call_chain.
 *	@nr_calls: See comment for notifier_call_chain
 *
 *	Calls each function in a notifier chain in turn.  The functions
 *	run in an undefined context.
 *	All locking must be provided by the caller.
 *
 *	If the return value of the notifier can be and'ed
 *	with %NOTIFY_STOP_MASK then raw_notifier_call_chain()
 *	will return immediately, with the return value of
 *	the notifier function which halted execution.
 *	Otherwise the return value is the return value
 *	of the last notifier function called.
 */

int __raw_notifier_call_chain(struct raw_notifier_head *nh,
			      unsigned long val, void *v,
			      int nr_to_call, int *nr_calls)
{
	return notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls);
}

EXPORT_SYMBOL_GPL(__raw_notifier_call_chain);

int raw_notifier_call_chain(struct raw_notifier_head *nh,
		unsigned long val, void *v)
{
	return __raw_notifier_call_chain(nh, val, v, -1, NULL);
}

EXPORT_SYMBOL_GPL(raw_notifier_call_chain);
/*
 *	SRCU notifier chain routines.  Registration and unregistration
 *	use a mutex, and call_chain is synchronized by SRCU (no locks).
 */

/**
 *	srcu_notifier_chain_register - Add notifier to an SRCU notifier chain
 *	@nh: Pointer to head of the SRCU notifier chain
 *	@n: New entry in notifier chain
 *
 *	Adds a notifier to an SRCU notifier chain.
 *	Must be called in process context.
 *
 *	Currently always returns zero.
 */

int srcu_notifier_chain_register(struct srcu_notifier_head *nh,
		struct notifier_block *n)
{
	int ret;

	/*
	 * This code gets used during boot-up, when task switching is
	 * not yet working and interrupts must remain disabled.  At
	 * such times we must not call mutex_lock().
	 */
	if (unlikely(system_state == SYSTEM_BOOTING))
		return notifier_chain_register(&nh->head, n);

	mutex_lock(&nh->mutex);
	ret = notifier_chain_register(&nh->head, n);
	mutex_unlock(&nh->mutex);
	return ret;
}

EXPORT_SYMBOL_GPL(srcu_notifier_chain_register);

/**
 *	srcu_notifier_chain_unregister - Remove notifier from an SRCU notifier chain
 *	@nh: Pointer to head of the SRCU notifier chain
 *	@n: Entry to remove from notifier chain
 *
 *	Removes a notifier from an SRCU notifier chain.
 *	Must be called from process context.
 *
 *	Returns zero on success or %-ENOENT on failure.
 */
int srcu_notifier_chain_unregister(struct srcu_notifier_head *nh,
		struct notifier_block *n)
{
	int ret;

	/*
	 * This code gets used during boot-up, when task switching is
	 * not yet working and interrupts must remain disabled.  At
	 * such times we must not call mutex_lock().
	 */
	if (unlikely(system_state == SYSTEM_BOOTING))
		return notifier_chain_unregister(&nh->head, n);

	mutex_lock(&nh->mutex);
	ret = notifier_chain_unregister(&nh->head, n);
	mutex_unlock(&nh->mutex);
	synchronize_srcu(&nh->srcu);
	return ret;
}

EXPORT_SYMBOL_GPL(srcu_notifier_chain_unregister);

/**
 *	__srcu_notifier_call_chain - Call functions in an SRCU notifier chain
 *	@nh: Pointer to head of the SRCU notifier chain
 *	@val: Value passed unmodified to notifier function
 *	@v: Pointer passed unmodified to notifier function
 *	@nr_to_call: See comment for notifier_call_chain.
 *	@nr_calls: See comment for notifier_call_chain
 *
 *	Calls each function in a notifier chain in turn.  The functions
 *	run in a process context, so they are allowed to block.
 *
 *	If the return value of the notifier can be and'ed
 *	with %NOTIFY_STOP_MASK then srcu_notifier_call_chain()
 *	will return immediately, with the return value of
 *	the notifier function which halted execution.
 *	Otherwise the return value is the return value
 *	of the last notifier function called.
 */

int __srcu_notifier_call_chain(struct srcu_notifier_head *nh,
			       unsigned long val, void *v,
			       int nr_to_call, int *nr_calls)
{
	int ret;
	int idx;

	idx = srcu_read_lock(&nh->srcu);
	ret = notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls);
	srcu_read_unlock(&nh->srcu, idx);
	return ret;
}
EXPORT_SYMBOL_GPL(__srcu_notifier_call_chain);

int srcu_notifier_call_chain(struct srcu_notifier_head *nh,
		unsigned long val, void *v)
{
	return __srcu_notifier_call_chain(nh, val, v, -1, NULL);
}
EXPORT_SYMBOL_GPL(srcu_notifier_call_chain);

/**
 *	srcu_init_notifier_head - Initialize an SRCU notifier head
 *	@nh: Pointer to head of the srcu notifier chain
 *
 *	Unlike other sorts of notifier heads, SRCU notifier heads require
 *	dynamic initialization.  Be sure to call this routine before
 *	calling any of the other SRCU notifier routines for this head.
 *
 *	If an SRCU notifier head is deallocated, it must first be cleaned
 *	up by calling srcu_cleanup_notifier_head().  Otherwise the head's
 *	per-cpu data (used by the SRCU mechanism) will leak.
 */

void srcu_init_notifier_head(struct srcu_notifier_head *nh)
{
	mutex_init(&nh->mutex);
	if (init_srcu_struct(&nh->srcu) < 0)
		BUG();
	nh->head = NULL;
}

EXPORT_SYMBOL_GPL(srcu_init_notifier_head);
/**
 *	register_reboot_notifier - Register function to be called at reboot time
 *	@nb: Info about notifier function to be called
 *
 *	Registers a function with the list of functions
 *	to be called at reboot time.
 *
 *	Currently always returns zero, as blocking_notifier_chain_register()
 *	always returns zero.
 */

int register_reboot_notifier(struct notifier_block * nb)
{
	return blocking_notifier_chain_register(&reboot_notifier_list, nb);
}

EXPORT_SYMBOL(register_reboot_notifier);

/**
 *	unregister_reboot_notifier - Unregister previously registered reboot notifier
 *	@nb: Hook to be unregistered
 *
 *	Unregisters a previously registered reboot
 *	notifier function.
 *
 *	Returns zero on success, or %-ENOENT on failure.
 */

int unregister_reboot_notifier(struct notifier_block * nb)
{
	return blocking_notifier_chain_unregister(&reboot_notifier_list, nb);
}

EXPORT_SYMBOL(unregister_reboot_notifier);
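/*
 * Illustrative sketch (assumed client code, not defined in this file):
 * a driver that must quiesce hardware before reboot embeds a
 * notifier_block and hangs it on reboot_notifier_list through the
 * wrappers above.  Higher .priority entries run first:
 *
 *	static int example_shutdown_event(struct notifier_block *nb,
 *					  unsigned long action, void *data)
 *	{
 *		// action is SYS_RESTART, SYS_HALT or SYS_POWER_OFF
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block example_reboot_nb = {
 *		.notifier_call	= example_shutdown_event,
 *		.priority	= 0,
 *	};
 *
 *	register_reboot_notifier(&example_reboot_nb);	// at module init
 *	unregister_reboot_notifier(&example_reboot_nb);	// at module exit
 */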
static int set_one_prio(struct task_struct *p, int niceval, int error)
{
	int no_nice;

	if (p->uid != current->euid &&
		p->euid != current->euid && !capable(CAP_SYS_NICE)) {
		error = -EPERM;
		goto out;
	}
	if (niceval < task_nice(p) && !can_nice(p, niceval)) {
		error = -EACCES;
		goto out;
	}
	no_nice = security_task_setnice(p, niceval);
	if (no_nice) {
		error = no_nice;
		goto out;
	}
	if (error == -ESRCH)
		error = 0;
	set_user_nice(p, niceval);
out:
	return error;
}
asmlinkage long sys_setpriority(int which, int who, int niceval)
{
	struct task_struct *g, *p;
	struct user_struct *user;
	int error = -EINVAL;
	struct pid *pgrp;

	if (which > PRIO_USER || which < PRIO_PROCESS)
		goto out;

	/* normalize: avoid signed division (rounding problems) */
	error = -ESRCH;
	if (niceval < -20)
		niceval = -20;
	if (niceval > 19)
		niceval = 19;

	read_lock(&tasklist_lock);
	switch (which) {
	case PRIO_PROCESS:
		if (who)
			p = find_task_by_pid(who);
		else
			p = current;
		if (p)
			error = set_one_prio(p, niceval, error);
		break;
	case PRIO_PGRP:
		if (who)
			pgrp = find_pid(who);
		else
			pgrp = task_pgrp(current);
		do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
			error = set_one_prio(p, niceval, error);
		} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
		break;
	case PRIO_USER:
		user = current->user;
		if (!who)
			who = current->uid;
		else if ((who != current->uid) && !(user = find_user(who)))
			goto out_unlock;	/* No processes for this user */

		do_each_thread(g, p)
			if (p->uid == who)
				error = set_one_prio(p, niceval, error);
		while_each_thread(g, p);
		if (who != current->uid)
			free_uid(user);		/* For find_user() */
		break;
	}
out_unlock:
	read_unlock(&tasklist_lock);
out:
	return error;
}
/*
 * Ugh. To avoid negative return values, "getpriority()" will
 * not return the normal nice-value, but a negated value that
 * has been offset by 20 (ie it returns 40..1 instead of -20..19)
 * to stay compatible.
 */
asmlinkage long sys_getpriority(int which, int who)
{
	struct task_struct *g, *p;
	struct user_struct *user;
	long niceval, retval = -ESRCH;
	struct pid *pgrp;

	if (which > PRIO_USER || which < PRIO_PROCESS)
		return -EINVAL;

	read_lock(&tasklist_lock);
	switch (which) {
	case PRIO_PROCESS:
		if (who)
			p = find_task_by_pid(who);
		else
			p = current;
		if (p) {
			niceval = 20 - task_nice(p);
			if (niceval > retval)
				retval = niceval;
		}
		break;
	case PRIO_PGRP:
		if (who)
			pgrp = find_pid(who);
		else
			pgrp = task_pgrp(current);
		do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
			niceval = 20 - task_nice(p);
			if (niceval > retval)
				retval = niceval;
		} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
		break;
	case PRIO_USER:
		user = current->user;
		if (!who)
			who = current->uid;
		else if ((who != current->uid) && !(user = find_user(who)))
			goto out_unlock;	/* No processes for this user */

		do_each_thread(g, p)
			if (p->uid == who) {
				niceval = 20 - task_nice(p);
				if (niceval > retval)
					retval = niceval;
			}
		while_each_thread(g, p);
		if (who != current->uid)
			free_uid(user);		/* for find_user() */
		break;
	}
out_unlock:
	read_unlock(&tasklist_lock);

	return retval;
}
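/*
 * Illustrative userspace sketch (assumes glibc): because of the offset
 * above, raw callers of this syscall must translate the result; the C
 * library's getpriority() undoes the 20 offset, so -1 is a real error
 * only when errno was set:
 *
 *	#include <sys/resource.h>
 *	#include <errno.h>
 *
 *	errno = 0;
 *	int nice_val = getpriority(PRIO_PROCESS, 0);
 *	if (nice_val == -1 && errno != 0)
 *		perror("getpriority");
 *	// the raw syscall would have returned 20 - nice_val
 */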
/**
 *	emergency_restart - reboot the system
 *
 *	Without shutting down any hardware or taking any locks
 *	reboot the system.  This is called when we know we are in
 *	trouble so this is our best effort to reboot.  This is
 *	safe to call in interrupt context.
 */
void emergency_restart(void)
{
	machine_emergency_restart();
}
EXPORT_SYMBOL_GPL(emergency_restart);

static void kernel_restart_prepare(char *cmd)
{
	blocking_notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd);
	system_state = SYSTEM_RESTART;
	device_shutdown();
}

/**
 *	kernel_restart - reboot the system
 *	@cmd: pointer to buffer containing command to execute for restart
 *		or %NULL
 *
 *	Shutdown everything and perform a clean reboot.
 *	This is not safe to call in interrupt context.
 */
void kernel_restart(char *cmd)
{
	kernel_restart_prepare(cmd);
	if (!cmd)
		printk(KERN_EMERG "Restarting system.\n");
	else
		printk(KERN_EMERG "Restarting system with command '%s'.\n", cmd);
	machine_restart(cmd);
}
EXPORT_SYMBOL_GPL(kernel_restart);
/**
 *	kernel_kexec - reboot the system
 *
 *	Move into place and start executing a preloaded standalone
 *	executable.  If nothing was preloaded return an error.
 */
static void kernel_kexec(void)
{
#ifdef CONFIG_KEXEC
	struct kimage *image;
	image = xchg(&kexec_image, NULL);
	if (!image)
		return;
	kernel_restart_prepare(NULL);
	printk(KERN_EMERG "Starting new kernel\n");
	machine_shutdown();
	machine_kexec(image);
#endif
}
void kernel_shutdown_prepare(enum system_states state)
{
	blocking_notifier_call_chain(&reboot_notifier_list,
		(state == SYSTEM_HALT)?SYS_HALT:SYS_POWER_OFF, NULL);
	system_state = state;
	device_shutdown();
}

/**
 *	kernel_halt - halt the system
 *
 *	Shutdown everything and perform a clean system halt.
 */
void kernel_halt(void)
{
	kernel_shutdown_prepare(SYSTEM_HALT);
	printk(KERN_EMERG "System halted.\n");
	machine_halt();
}

EXPORT_SYMBOL_GPL(kernel_halt);

/**
 *	kernel_power_off - power_off the system
 *
 *	Shutdown everything and perform a clean system power_off.
 */
void kernel_power_off(void)
{
	kernel_shutdown_prepare(SYSTEM_POWER_OFF);
	if (pm_power_off_prepare)
		pm_power_off_prepare();
	disable_nonboot_cpus();
	printk(KERN_EMERG "Power down.\n");
	machine_power_off();
}
EXPORT_SYMBOL_GPL(kernel_power_off);
/*
 * Reboot system call: for obvious reasons only root may call it,
 * and even root needs to set up some magic numbers in the registers
 * so that some mistake won't make this reboot the whole machine.
 * You can also set the meaning of the ctrl-alt-del-key here.
 *
 * reboot doesn't sync: do that yourself before calling this.
 */
asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void __user * arg)
{
	char buffer[256];

	/* We only trust the superuser with rebooting the system. */
	if (!capable(CAP_SYS_BOOT))
		return -EPERM;

	/* For safety, we require "magic" arguments. */
	if (magic1 != LINUX_REBOOT_MAGIC1 ||
	    (magic2 != LINUX_REBOOT_MAGIC2 &&
	     magic2 != LINUX_REBOOT_MAGIC2A &&
	     magic2 != LINUX_REBOOT_MAGIC2B &&
	     magic2 != LINUX_REBOOT_MAGIC2C))
		return -EINVAL;

	/* Instead of trying to make the power_off code look like
	 * halt when pm_power_off is not set do it the easy way.
	 */
	if ((cmd == LINUX_REBOOT_CMD_POWER_OFF) && !pm_power_off)
		cmd = LINUX_REBOOT_CMD_HALT;

	lock_kernel();
	switch (cmd) {
	case LINUX_REBOOT_CMD_RESTART:
		kernel_restart(NULL);
		break;

	case LINUX_REBOOT_CMD_CAD_ON:
		C_A_D = 1;
		break;

	case LINUX_REBOOT_CMD_CAD_OFF:
		C_A_D = 0;
		break;

	case LINUX_REBOOT_CMD_HALT:
		kernel_halt();
		unlock_kernel();
		do_exit(0);
		break;

	case LINUX_REBOOT_CMD_POWER_OFF:
		kernel_power_off();
		unlock_kernel();
		do_exit(0);
		break;

	case LINUX_REBOOT_CMD_RESTART2:
		if (strncpy_from_user(&buffer[0], arg, sizeof(buffer) - 1) < 0) {
			unlock_kernel();
			return -EFAULT;
		}
		buffer[sizeof(buffer) - 1] = '\0';

		kernel_restart(buffer);
		break;

	case LINUX_REBOOT_CMD_KEXEC:
		kernel_kexec();
		unlock_kernel();
		return -EINVAL;

#ifdef CONFIG_HIBERNATION
	case LINUX_REBOOT_CMD_SW_SUSPEND:
		{
			int ret = hibernate();
			unlock_kernel();
			return ret;
		}
#endif

	default:
		unlock_kernel();
		return -EINVAL;
	}
	unlock_kernel();
	return 0;
}
static void deferred_cad(struct work_struct *dummy)
{
	kernel_restart(NULL);
}

/*
 * This function gets called by ctrl-alt-del - ie the keyboard interrupt.
 * As it's called within an interrupt, it may NOT sync: the only choice
 * is whether to reboot at once, or just ignore the ctrl-alt-del.
 */
void ctrl_alt_del(void)
{
	static DECLARE_WORK(cad_work, deferred_cad);

	if (C_A_D)
		schedule_work(&cad_work);
	else
		kill_cad_pid(SIGINT, 1);
}
/*
 * Unprivileged users may change the real gid to the effective gid
 * or vice versa.  (BSD-style)
 *
 * If you set the real gid at all, or set the effective gid to a value not
 * equal to the real gid, then the saved gid is set to the new effective gid.
 *
 * This makes it possible for a setgid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit over a program.
 *
 * The general idea is that a program which uses just setregid() will be
 * 100% compatible with BSD.  A program which uses just setgid() will be
 * 100% compatible with POSIX with saved IDs.
 *
 * SMP: There are no races, the GIDs are checked only by filesystem
 *      operations (as far as semantic preservation is concerned).
 */
asmlinkage long sys_setregid(gid_t rgid, gid_t egid)
{
	int old_rgid = current->gid;
	int old_egid = current->egid;
	int new_rgid = old_rgid;
	int new_egid = old_egid;
	int retval;

	retval = security_task_setgid(rgid, egid, (gid_t)-1, LSM_SETID_RE);
	if (retval)
		return retval;

	if (rgid != (gid_t) -1) {
		if ((old_rgid == rgid) ||
		    (current->egid==rgid) ||
		    capable(CAP_SETGID))
			new_rgid = rgid;
		else
			return -EPERM;
	}
	if (egid != (gid_t) -1) {
		if ((old_rgid == egid) ||
		    (current->egid == egid) ||
		    (current->sgid == egid) ||
		    capable(CAP_SETGID))
			new_egid = egid;
		else
			return -EPERM;
	}
	if (new_egid != old_egid) {
		set_dumpable(current->mm, suid_dumpable);
		smp_wmb();
	}
	if (rgid != (gid_t) -1 ||
	    (egid != (gid_t) -1 && egid != old_rgid))
		current->sgid = new_egid;
	current->fsgid = new_egid;
	current->egid = new_egid;
	current->gid = new_rgid;
	key_fsgid_changed(current);
	proc_id_connector(current, PROC_EVENT_GID);
	return 0;
}
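/*
 * Illustrative userspace sketch: a setgid program permanently dropping
 * its group privilege under the rules above.  Because the real gid is
 * being set, the saved gid is forced to the new effective gid, so
 * there is no way back:
 *
 *	#include <unistd.h>
 *
 *	gid_t real = getgid();
 *	if (setregid(real, real) != 0)	// rgid set => sgid := new egid
 *		_exit(1);		// all three gids now == real
 */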
/*
 * setgid() is implemented like SysV w/ SAVED_IDS
 *
 * SMP: Same implicit races as above.
 */
asmlinkage long sys_setgid(gid_t gid)
{
	int old_egid = current->egid;
	int retval;

	retval = security_task_setgid(gid, (gid_t)-1, (gid_t)-1, LSM_SETID_ID);
	if (retval)
		return retval;

	if (capable(CAP_SETGID)) {
		if (old_egid != gid) {
			set_dumpable(current->mm, suid_dumpable);
			smp_wmb();
		}
		current->gid = current->egid = current->sgid = current->fsgid = gid;
	} else if ((gid == current->gid) || (gid == current->sgid)) {
		if (old_egid != gid) {
			set_dumpable(current->mm, suid_dumpable);
			smp_wmb();
		}
		current->egid = current->fsgid = gid;
	} else
		return -EPERM;

	key_fsgid_changed(current);
	proc_id_connector(current, PROC_EVENT_GID);
	return 0;
}
static int set_user(uid_t new_ruid, int dumpclear)
{
	struct user_struct *new_user;

	new_user = alloc_uid(current->nsproxy->user_ns, new_ruid);
	if (!new_user)
		return -EAGAIN;

	if (atomic_read(&new_user->processes) >=
				current->signal->rlim[RLIMIT_NPROC].rlim_cur &&
			new_user != current->nsproxy->user_ns->root_user) {
		free_uid(new_user);
		return -EAGAIN;
	}

	switch_uid(new_user);

	if (dumpclear) {
		set_dumpable(current->mm, suid_dumpable);
		smp_wmb();
	}
	current->uid = new_ruid;
	return 0;
}
/*
 * Unprivileged users may change the real uid to the effective uid
 * or vice versa.  (BSD-style)
 *
 * If you set the real uid at all, or set the effective uid to a value not
 * equal to the real uid, then the saved uid is set to the new effective uid.
 *
 * This makes it possible for a setuid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit over a program.
 *
 * The general idea is that a program which uses just setreuid() will be
 * 100% compatible with BSD.  A program which uses just setuid() will be
 * 100% compatible with POSIX with saved IDs.
 */
asmlinkage long sys_setreuid(uid_t ruid, uid_t euid)
{
	int old_ruid, old_euid, old_suid, new_ruid, new_euid;
	int retval;

	retval = security_task_setuid(ruid, euid, (uid_t)-1, LSM_SETID_RE);
	if (retval)
		return retval;

	new_ruid = old_ruid = current->uid;
	new_euid = old_euid = current->euid;
	old_suid = current->suid;

	if (ruid != (uid_t) -1) {
		new_ruid = ruid;
		if ((old_ruid != ruid) &&
		    (current->euid != ruid) &&
		    !capable(CAP_SETUID))
			return -EPERM;
	}

	if (euid != (uid_t) -1) {
		new_euid = euid;
		if ((old_ruid != euid) &&
		    (current->euid != euid) &&
		    (current->suid != euid) &&
		    !capable(CAP_SETUID))
			return -EPERM;
	}

	if (new_ruid != old_ruid && set_user(new_ruid, new_euid != old_euid) < 0)
		return -EAGAIN;

	if (new_euid != old_euid) {
		set_dumpable(current->mm, suid_dumpable);
		smp_wmb();
	}
	current->fsuid = current->euid = new_euid;
	if (ruid != (uid_t) -1 ||
	    (euid != (uid_t) -1 && euid != old_ruid))
		current->suid = current->euid;
	current->fsuid = current->euid;

	key_fsuid_changed(current);
	proc_id_connector(current, PROC_EVENT_UID);

	return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_RE);
}
/*
 * setuid() is implemented like SysV with SAVED_IDS
 *
 * Note that SAVED_IDS is deficient in that a setuid root program
 * like sendmail, for example, cannot set its uid to be a normal
 * user and then switch back, because if you're root, setuid() sets
 * the saved uid too.  If you don't like this, blame the bright people
 * in the POSIX committee and/or USG.  Note that the BSD-style setreuid()
 * will allow a root program to temporarily drop privileges and be able to
 * regain them by swapping the real and effective uid.
 */
asmlinkage long sys_setuid(uid_t uid)
{
	int old_euid = current->euid;
	int old_ruid, old_suid, new_suid;
	int retval;

	retval = security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_ID);
	if (retval)
		return retval;

	old_ruid = current->uid;
	old_suid = current->suid;
	new_suid = old_suid;

	if (capable(CAP_SETUID)) {
		if (uid != old_ruid && set_user(uid, old_euid != uid) < 0)
			return -EAGAIN;
		new_suid = uid;
	} else if ((uid != current->uid) && (uid != new_suid))
		return -EPERM;

	if (old_euid != uid) {
		set_dumpable(current->mm, suid_dumpable);
		smp_wmb();
	}
	current->fsuid = current->euid = uid;
	current->suid = new_suid;

	key_fsuid_changed(current);
	proc_id_connector(current, PROC_EVENT_UID);

	return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_ID);
}
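/*
 * Illustrative userspace sketch of the escape hatch described above:
 * a root process can shed privilege temporarily by moving the
 * effective uid aside and later restoring it from the real uid, which
 * setuid() alone cannot do.  The uid value is an assumption:
 *
 *	#include <unistd.h>
 *
 *	uid_t unpriv = 1000;		// assumed unprivileged uid
 *	setreuid(-1, unpriv);		// drop: euid = unpriv, ruid stays 0
 *	// ... do untrusted work ...
 *	setreuid(-1, 0);		// regain: allowed since ruid == 0
 */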
/*
 * This function implements a generic ability to update ruid, euid,
 * and suid.  This allows you to implement the 4.4 compatible seteuid().
 */
asmlinkage long sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
{
	int old_ruid = current->uid;
	int old_euid = current->euid;
	int old_suid = current->suid;
	int retval;

	retval = security_task_setuid(ruid, euid, suid, LSM_SETID_RES);
	if (retval)
		return retval;

	if (!capable(CAP_SETUID)) {
		if ((ruid != (uid_t) -1) && (ruid != current->uid) &&
		    (ruid != current->euid) && (ruid != current->suid))
			return -EPERM;
		if ((euid != (uid_t) -1) && (euid != current->uid) &&
		    (euid != current->euid) && (euid != current->suid))
			return -EPERM;
		if ((suid != (uid_t) -1) && (suid != current->uid) &&
		    (suid != current->euid) && (suid != current->suid))
			return -EPERM;
	}
	if (ruid != (uid_t) -1) {
		if (ruid != current->uid && set_user(ruid, euid != current->euid) < 0)
			return -EAGAIN;
	}
	if (euid != (uid_t) -1) {
		if (euid != current->euid) {
			set_dumpable(current->mm, suid_dumpable);
			smp_wmb();
		}
		current->euid = euid;
	}
	current->fsuid = current->euid;
	if (suid != (uid_t) -1)
		current->suid = suid;

	key_fsuid_changed(current);
	proc_id_connector(current, PROC_EVENT_UID);

	return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_RES);
}

asmlinkage long sys_getresuid(uid_t __user *ruid, uid_t __user *euid, uid_t __user *suid)
{
	int retval;

	if (!(retval = put_user(current->uid, ruid)) &&
	    !(retval = put_user(current->euid, euid)))
		retval = put_user(current->suid, suid);

	return retval;
}
/*
 * Same as above, but for rgid, egid, sgid.
 */
asmlinkage long sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
{
	int retval;

	retval = security_task_setgid(rgid, egid, sgid, LSM_SETID_RES);
	if (retval)
		return retval;

	if (!capable(CAP_SETGID)) {
		if ((rgid != (gid_t) -1) && (rgid != current->gid) &&
		    (rgid != current->egid) && (rgid != current->sgid))
			return -EPERM;
		if ((egid != (gid_t) -1) && (egid != current->gid) &&
		    (egid != current->egid) && (egid != current->sgid))
			return -EPERM;
		if ((sgid != (gid_t) -1) && (sgid != current->gid) &&
		    (sgid != current->egid) && (sgid != current->sgid))
			return -EPERM;
	}
	if (egid != (gid_t) -1) {
		if (egid != current->egid) {
			set_dumpable(current->mm, suid_dumpable);
			smp_wmb();
		}
		current->egid = egid;
	}
	current->fsgid = current->egid;
	if (rgid != (gid_t) -1)
		current->gid = rgid;
	if (sgid != (gid_t) -1)
		current->sgid = sgid;

	key_fsgid_changed(current);
	proc_id_connector(current, PROC_EVENT_GID);
	return 0;
}

asmlinkage long sys_getresgid(gid_t __user *rgid, gid_t __user *egid, gid_t __user *sgid)
{
	int retval;

	if (!(retval = put_user(current->gid, rgid)) &&
	    !(retval = put_user(current->egid, egid)))
		retval = put_user(current->sgid, sgid);

	return retval;
}
/*
 * "setfsuid()" sets the fsuid - the uid used for filesystem checks. This
 * is used for "access()" and for the NFS daemon (letting nfsd stay at
 * whatever uid it wants to). It normally shadows "euid", except when
 * explicitly set by setfsuid() or for access..
 */
asmlinkage long sys_setfsuid(uid_t uid)
{
	int old_fsuid;

	old_fsuid = current->fsuid;
	if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS))
		return old_fsuid;

	if (uid == current->uid || uid == current->euid ||
	    uid == current->suid || uid == current->fsuid ||
	    capable(CAP_SETUID)) {
		if (uid != old_fsuid) {
			set_dumpable(current->mm, suid_dumpable);
			smp_wmb();
		}
		current->fsuid = uid;
	}

	key_fsuid_changed(current);
	proc_id_connector(current, PROC_EVENT_UID);

	security_task_post_setuid(old_fsuid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS);

	return old_fsuid;
}

/*
 * Samma på svenska.. ("same thing, in Swedish": the fsgid counterpart
 * of setfsuid above)
 */
asmlinkage long sys_setfsgid(gid_t gid)
{
	int old_fsgid;

	old_fsgid = current->fsgid;
	if (security_task_setgid(gid, (gid_t)-1, (gid_t)-1, LSM_SETID_FS))
		return old_fsgid;

	if (gid == current->gid || gid == current->egid ||
	    gid == current->sgid || gid == current->fsgid ||
	    capable(CAP_SETGID)) {
		if (gid != old_fsgid) {
			set_dumpable(current->mm, suid_dumpable);
			smp_wmb();
		}
		current->fsgid = gid;
		key_fsgid_changed(current);
		proc_id_connector(current, PROC_EVENT_GID);
	}
	return old_fsgid;
}
asmlinkage long sys_times(struct tms __user * tbuf)
{
	/*
	 *	In the SMP world we might just be unlucky and have one of
	 *	the times increment as we use it. Since the value is an
	 *	atomically safe type this is just fine. Conceptually its
	 *	as if the syscall took an instant longer to occur.
	 */
	if (tbuf) {
		struct tms tmp;
		struct task_struct *tsk = current;
		struct task_struct *t;
		cputime_t utime, stime, cutime, cstime;

		spin_lock_irq(&tsk->sighand->siglock);
		utime = tsk->signal->utime;
		stime = tsk->signal->stime;
		t = tsk;
		do {
			utime = cputime_add(utime, t->utime);
			stime = cputime_add(stime, t->stime);
			t = next_thread(t);
		} while (t != tsk);

		cutime = tsk->signal->cutime;
		cstime = tsk->signal->cstime;
		spin_unlock_irq(&tsk->sighand->siglock);

		tmp.tms_utime = cputime_to_clock_t(utime);
		tmp.tms_stime = cputime_to_clock_t(stime);
		tmp.tms_cutime = cputime_to_clock_t(cutime);
		tmp.tms_cstime = cputime_to_clock_t(cstime);
		if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
			return -EFAULT;
	}
	return (long) jiffies_64_to_clock_t(get_jiffies_64());
}
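/*
 * Illustrative userspace sketch: the return value above is clock ticks
 * measured from an arbitrary point in the past (boot, here), so it is
 * only meaningful as a difference, scaled by sysconf(_SC_CLK_TCK):
 *
 *	#include <sys/times.h>
 *	#include <unistd.h>
 *
 *	struct tms t;
 *	clock_t before = times(&t);
 *	// ... work ...
 *	clock_t after = times(&t);
 *	double secs = (double)(after - before) / sysconf(_SC_CLK_TCK);
 */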
/*
 * This needs some heavy checking ...
 * I just haven't the stomach for it. I also don't fully
 * understand sessions/pgrp etc. Let somebody who does explain it.
 *
 * OK, I think I have the protection semantics right.... this is really
 * only important on a multi-user system anyway, to make sure one user
 * can't send a signal to a process owned by another.  -TYT, 12/12/91
 *
 * Auch. Had to add the 'did_exec' flag to conform completely to POSIX.
 * LBT 04.03.94
 */

asmlinkage long sys_setpgid(pid_t pid, pid_t pgid)
{
	struct task_struct *p;
	struct task_struct *group_leader = current->group_leader;
	int err = -EINVAL;

	if (!pid)
		pid = group_leader->pid;
	if (!pgid)
		pgid = pid;
	if (pgid < 0)
		return -EINVAL;

	/* From this point forward we keep holding onto the tasklist lock
	 * so that our parent does not change from under us. -DaveM
	 */
	write_lock_irq(&tasklist_lock);

	err = -ESRCH;
	p = find_task_by_pid(pid);
	if (!p)
		goto out;

	err = -EINVAL;
	if (!thread_group_leader(p))
		goto out;

	if (p->real_parent->tgid == group_leader->tgid) {
		err = -EPERM;
		if (task_session(p) != task_session(group_leader))
			goto out;
		err = -EACCES;
		if (p->did_exec)
			goto out;
	} else {
		err = -ESRCH;
		if (p != group_leader)
			goto out;
	}

	err = -EPERM;
	if (p->signal->leader)
		goto out;

	if (pgid != pid) {
		struct task_struct *g =
			find_task_by_pid_type(PIDTYPE_PGID, pgid);

		if (!g || task_session(g) != task_session(group_leader))
			goto out;
	}

	err = security_task_setpgid(p, pgid);
	if (err)
		goto out;

	if (process_group(p) != pgid) {
		detach_pid(p, PIDTYPE_PGID);
		p->signal->pgrp = pgid;
		attach_pid(p, PIDTYPE_PGID, find_pid(pgid));
	}

	err = 0;
out:
	/* All paths lead to here, thus we are safe. -DaveM */
	write_unlock_irq(&tasklist_lock);
	return err;
}
asmlinkage long sys_getpgid(pid_t pid)
{
	if (!pid)
		return process_group(current);
	else {
		int retval;
		struct task_struct *p;

		read_lock(&tasklist_lock);
		p = find_task_by_pid(pid);

		retval = -ESRCH;
		if (p) {
			retval = security_task_getpgid(p);
			if (!retval)
				retval = process_group(p);
		}
		read_unlock(&tasklist_lock);
		return retval;
	}
}

#ifdef __ARCH_WANT_SYS_GETPGRP

asmlinkage long sys_getpgrp(void)
{
	/* SMP - assuming writes are word atomic this is fine */
	return process_group(current);
}

#endif

asmlinkage long sys_getsid(pid_t pid)
{
	if (!pid)
		return process_session(current);
	else {
		int retval;
		struct task_struct *p;

		read_lock(&tasklist_lock);
		p = find_task_by_pid(pid);

		retval = -ESRCH;
		if (p) {
			retval = security_task_getsid(p);
			if (!retval)
				retval = process_session(p);
		}
		read_unlock(&tasklist_lock);
		return retval;
	}
}
asmlinkage long sys_setsid(void)
{
	struct task_struct *group_leader = current->group_leader;
	pid_t session;
	int err = -EPERM;

	write_lock_irq(&tasklist_lock);

	/* Fail if I am already a session leader */
	if (group_leader->signal->leader)
		goto out;

	session = group_leader->pid;
	/* Fail if a process group id already exists that equals the
	 * proposed session id.
	 *
	 * Don't check if session id == 1 because kernel threads use this
	 * session id and so the check will always fail and make it so
	 * init cannot successfully call setsid.
	 */
	if (session > 1 && find_task_by_pid_type(PIDTYPE_PGID, session))
		goto out;

	group_leader->signal->leader = 1;
	__set_special_pids(session, session);

	spin_lock(&group_leader->sighand->siglock);
	group_leader->signal->tty = NULL;
	spin_unlock(&group_leader->sighand->siglock);

	err = process_group(group_leader);
out:
	write_unlock_irq(&tasklist_lock);
	return err;
}
/*
 * Supplementary group IDs
 */

/* init to 2 - one for init_task, one to ensure it is never freed */
struct group_info init_groups = { .usage = ATOMIC_INIT(2) };

struct group_info *groups_alloc(int gidsetsize)
{
	struct group_info *group_info;
	int nblocks;
	int i;

	nblocks = (gidsetsize + NGROUPS_PER_BLOCK - 1) / NGROUPS_PER_BLOCK;
	/* Make sure we always allocate at least one indirect block pointer */
	nblocks = nblocks ? : 1;
	group_info = kmalloc(sizeof(*group_info) + nblocks*sizeof(gid_t *), GFP_USER);
	if (!group_info)
		return NULL;
	group_info->ngroups = gidsetsize;
	group_info->nblocks = nblocks;
	atomic_set(&group_info->usage, 1);

	if (gidsetsize <= NGROUPS_SMALL)
		group_info->blocks[0] = group_info->small_block;
	else {
		for (i = 0; i < nblocks; i++) {
			gid_t *b;
			b = (void *)__get_free_page(GFP_USER);
			if (!b)
				goto out_undo_partial_alloc;
			group_info->blocks[i] = b;
		}
	}
	return group_info;

out_undo_partial_alloc:
	while (--i >= 0)
		free_page((unsigned long)group_info->blocks[i]);
	kfree(group_info);
	return NULL;
}

EXPORT_SYMBOL(groups_alloc);

void groups_free(struct group_info *group_info)
{
	if (group_info->blocks[0] != group_info->small_block) {
		int i;
		for (i = 0; i < group_info->nblocks; i++)
			free_page((unsigned long)group_info->blocks[i]);
	}
	kfree(group_info);
}

EXPORT_SYMBOL(groups_free);

/* export the group_info to a user-space array */
static int groups_to_user(gid_t __user *grouplist,
    struct group_info *group_info)
{
	int i;
	int count = group_info->ngroups;

	for (i = 0; i < group_info->nblocks; i++) {
		int cp_count = min(NGROUPS_PER_BLOCK, count);
		int off = i * NGROUPS_PER_BLOCK;
		int len = cp_count * sizeof(*grouplist);

		if (copy_to_user(grouplist+off, group_info->blocks[i], len))
			return -EFAULT;

		count -= cp_count;
	}
	return 0;
}

/* fill a group_info from a user-space array - it must be allocated already */
static int groups_from_user(struct group_info *group_info,
    gid_t __user *grouplist)
{
	int i;
	int count = group_info->ngroups;

	for (i = 0; i < group_info->nblocks; i++) {
		int cp_count = min(NGROUPS_PER_BLOCK, count);
		int off = i * NGROUPS_PER_BLOCK;
		int len = cp_count * sizeof(*grouplist);

		if (copy_from_user(group_info->blocks[i], grouplist+off, len))
			return -EFAULT;

		count -= cp_count;
	}
	return 0;
}

/* a simple Shell sort */
static void groups_sort(struct group_info *group_info)
{
	int base, max, stride;
	int gidsetsize = group_info->ngroups;

	for (stride = 1; stride < gidsetsize; stride = 3 * stride + 1)
		; /* nothing */
	stride /= 3;

	while (stride) {
		max = gidsetsize - stride;
		for (base = 0; base < max; base++) {
			int left = base;
			int right = left + stride;
			gid_t tmp = GROUP_AT(group_info, right);

			while (left >= 0 && GROUP_AT(group_info, left) > tmp) {
				GROUP_AT(group_info, right) =
				    GROUP_AT(group_info, left);
				right = left;
				left -= stride;
			}
			GROUP_AT(group_info, right) = tmp;
		}
		stride /= 3;
	}
}

/* a simple bsearch */
int groups_search(struct group_info *group_info, gid_t grp)
{
	unsigned int left, right;

	if (!group_info)
		return 0;

	left = 0;
	right = group_info->ngroups;
	while (left < right) {
		unsigned int mid = (left+right)/2;
		int cmp = grp - GROUP_AT(group_info, mid);
		if (cmp > 0)
			left = mid + 1;
		else if (cmp < 0)
			right = mid;
		else
			return 1;
	}
	return 0;
}

/* validate and set current->group_info */
int set_current_groups(struct group_info *group_info)
{
	int retval;
	struct group_info *old_info;

	retval = security_task_setgroups(group_info);
	if (retval)
		return retval;

	groups_sort(group_info);
	get_group_info(group_info);

	task_lock(current);
	old_info = current->group_info;
	current->group_info = group_info;
	task_unlock(current);

	put_group_info(old_info);

	return 0;
}

EXPORT_SYMBOL(set_current_groups);

asmlinkage long sys_getgroups(int gidsetsize, gid_t __user *grouplist)
{
	int i = 0;

	/*
	 *	SMP: Nobody else can change our grouplist. Thus we are
	 *	safe.
	 */

	if (gidsetsize < 0)
		return -EINVAL;

	/* no need to grab task_lock here; it cannot change */
	i = current->group_info->ngroups;
	if (gidsetsize) {
		if (i > gidsetsize) {
			i = -EINVAL;
			goto out;
		}
		if (groups_to_user(grouplist, current->group_info)) {
			i = -EFAULT;
			goto out;
		}
	}
out:
	return i;
}

/*
 *	SMP: Our groups are copy-on-write. We can set them safely
 *	without another task interfering.
 */

asmlinkage long sys_setgroups(int gidsetsize, gid_t __user *grouplist)
{
	struct group_info *group_info;
	int retval;

	if (!capable(CAP_SETGID))
		return -EPERM;
	if ((unsigned)gidsetsize > NGROUPS_MAX)
		return -EINVAL;

	group_info = groups_alloc(gidsetsize);
	if (!group_info)
		return -ENOMEM;
	retval = groups_from_user(group_info, grouplist);
	if (retval) {
		put_group_info(group_info);
		return retval;
	}

	retval = set_current_groups(group_info);
	put_group_info(group_info);

	return retval;
}

/*
 * Check whether we're fsgid/egid or in the supplemental group..
 */
int in_group_p(gid_t grp)
{
	int retval = 1;
	if (grp != current->fsgid)
		retval = groups_search(current->group_info, grp);
	return retval;
}

EXPORT_SYMBOL(in_group_p);

int in_egroup_p(gid_t grp)
{
	int retval = 1;
	if (grp != current->egid)
		retval = groups_search(current->group_info, grp);
	return retval;
}

EXPORT_SYMBOL(in_egroup_p);
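/*
 * Illustrative sketch of how these helpers are typically used (the
 * caller and variables assumed here are not defined in this file):
 * a permission check tests a candidate gid against the fsgid plus the
 * sorted supplementary group list:
 *
 *	if (!in_group_p(inode_gid) && !capable(CAP_FOWNER))
 *		return -EPERM;
 */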
DECLARE_RWSEM(uts_sem);

EXPORT_SYMBOL(uts_sem);

asmlinkage long sys_newuname(struct new_utsname __user * name)
{
	int errno = 0;

	down_read(&uts_sem);
	if (copy_to_user(name, utsname(), sizeof *name))
		errno = -EFAULT;
	up_read(&uts_sem);
	return errno;
}

asmlinkage long sys_sethostname(char __user *name, int len)
{
	int errno;
	char tmp[__NEW_UTS_LEN];

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (len < 0 || len > __NEW_UTS_LEN)
		return -EINVAL;
	down_write(&uts_sem);
	errno = -EFAULT;
	if (!copy_from_user(tmp, name, len)) {
		memcpy(utsname()->nodename, tmp, len);
		utsname()->nodename[len] = 0;
		errno = 0;
	}
	up_write(&uts_sem);
	return errno;
}

#ifdef __ARCH_WANT_SYS_GETHOSTNAME

asmlinkage long sys_gethostname(char __user *name, int len)
{
	int i, errno;

	if (len < 0)
		return -EINVAL;
	down_read(&uts_sem);
	i = 1 + strlen(utsname()->nodename);
	if (i > len)
		i = len;
	errno = 0;
	if (copy_to_user(name, utsname()->nodename, i))
		errno = -EFAULT;
	up_read(&uts_sem);
	return errno;
}

#endif

/*
 * Only setdomainname; getdomainname can be implemented by calling
 * uname()
 */
asmlinkage long sys_setdomainname(char __user *name, int len)
{
	int errno;
	char tmp[__NEW_UTS_LEN];

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (len < 0 || len > __NEW_UTS_LEN)
		return -EINVAL;

	down_write(&uts_sem);
	errno = -EFAULT;
	if (!copy_from_user(tmp, name, len)) {
		memcpy(utsname()->domainname, tmp, len);
		utsname()->domainname[len] = 0;
		errno = 0;
	}
	up_write(&uts_sem);
	return errno;
}
asmlinkage long sys_getrlimit(unsigned int resource, struct rlimit __user *rlim)
{
	if (resource >= RLIM_NLIMITS)
		return -EINVAL;
	else {
		struct rlimit value;
		task_lock(current->group_leader);
		value = current->signal->rlim[resource];
		task_unlock(current->group_leader);
		return copy_to_user(rlim, &value, sizeof(*rlim)) ? -EFAULT : 0;
	}
}

#ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT

/*
 *	Back compatibility for getrlimit. Needed for some apps.
 */

asmlinkage long sys_old_getrlimit(unsigned int resource, struct rlimit __user *rlim)
{
	struct rlimit x;
	if (resource >= RLIM_NLIMITS)
		return -EINVAL;

	task_lock(current->group_leader);
	x = current->signal->rlim[resource];
	task_unlock(current->group_leader);
	if (x.rlim_cur > 0x7FFFFFFF)
		x.rlim_cur = 0x7FFFFFFF;
	if (x.rlim_max > 0x7FFFFFFF)
		x.rlim_max = 0x7FFFFFFF;
	return copy_to_user(rlim, &x, sizeof(x))?-EFAULT:0;
}

#endif
asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim)
{
	struct rlimit new_rlim, *old_rlim;
	unsigned long it_prof_secs;
	int retval;

	if (resource >= RLIM_NLIMITS)
		return -EINVAL;
	if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
		return -EFAULT;
	if (new_rlim.rlim_cur > new_rlim.rlim_max)
		return -EINVAL;
	old_rlim = current->signal->rlim + resource;
	if ((new_rlim.rlim_max > old_rlim->rlim_max) &&
	    !capable(CAP_SYS_RESOURCE))
		return -EPERM;
	if (resource == RLIMIT_NOFILE && new_rlim.rlim_max > NR_OPEN)
		return -EPERM;

	retval = security_task_setrlimit(resource, &new_rlim);
	if (retval)
		return retval;

	if (resource == RLIMIT_CPU && new_rlim.rlim_cur == 0) {
		/*
		 * The caller is asking for an immediate RLIMIT_CPU
		 * expiry.  But we use the zero value to mean "it was
		 * never set".  So let's cheat and make it one second
		 * instead
		 */
		new_rlim.rlim_cur = 1;
	}

	task_lock(current->group_leader);
	*old_rlim = new_rlim;
	task_unlock(current->group_leader);

	if (resource != RLIMIT_CPU)
		goto out;

	/*
	 * RLIMIT_CPU handling.   Note that the kernel fails to return an error
	 * code if it rejected the user's attempt to set RLIMIT_CPU.  This is a
	 * very long-standing error, and fixing it now risks breakage of
	 * applications, so we live with it
	 */
	if (new_rlim.rlim_cur == RLIM_INFINITY)
		goto out;

	it_prof_secs = cputime_to_secs(current->signal->it_prof_expires);
	if (it_prof_secs == 0 || new_rlim.rlim_cur <= it_prof_secs) {
		unsigned long rlim_cur = new_rlim.rlim_cur;
		cputime_t cputime;

		cputime = secs_to_cputime(rlim_cur);
		read_lock(&tasklist_lock);
		spin_lock_irq(&current->sighand->siglock);
		set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL);
		spin_unlock_irq(&current->sighand->siglock);
		read_unlock(&tasklist_lock);
	}
out:
	return 0;
}
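/*
 * Illustrative userspace sketch: note that, per the comment above, a
 * rejected RLIMIT_CPU update can still report success, so the return
 * value alone is not trustworthy for that one resource:
 *
 *	#include <sys/resource.h>
 *	#include <stdio.h>
 *
 *	struct rlimit rl = { .rlim_cur = 10, .rlim_max = 20 };	// seconds
 *	if (setrlimit(RLIMIT_CPU, &rl) != 0)
 *		perror("setrlimit");
 */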
/*
 * It would make sense to put struct rusage in the task_struct,
 * except that would make the task_struct be *really big*.  After
 * task_struct gets moved into malloc'ed memory, it would
 * make sense to do this.  It will make moving the rest of the information
 * a lot simpler!  (Which we're not doing right now because we're not
 * measuring them yet).
 *
 * When sampling multiple threads for RUSAGE_SELF, under SMP we might have
 * races with threads incrementing their own counters.  But since word
 * reads are atomic, we either get new values or old values and we don't
 * care which for the sums.  We always take the siglock to protect reading
 * the c* fields from p->signal from races with exit.c updating those
 * fields when reaping, so a sample either gets all the additions of a
 * given child after it's reaped, or none so this sample is before reaping.
 *
 * Locking:
 * We need to take the siglock for CHILDREN, SELF and BOTH
 * for the cases current multithreaded, non-current single threaded
 * non-current multithreaded.  Thread traversal is now safe with
 * the siglock held.
 * Strictly speaking, we do not need to take the siglock if we are current and
 * single threaded, as no one else can take our signal_struct away, no one
 * else can reap the children to update signal->c* counters, and no one else
 * can race with the signal-> fields. If we do not take any lock, the
 * signal-> fields could be read out of order while another thread was just
 * exiting. So we should place a read memory barrier when we avoid the lock.
 * On the writer side, write memory barrier is implied in __exit_signal
 * as __exit_signal releases the siglock spinlock after updating the signal->
 * fields. But we don't do this yet to keep things simple.
 */

static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
{
	struct task_struct *t;
	unsigned long flags;
	cputime_t utime, stime;

	memset((char *) r, 0, sizeof *r);
	utime = stime = cputime_zero;

	rcu_read_lock();
	if (!lock_task_sighand(p, &flags)) {
		rcu_read_unlock();
		return;
	}

	switch (who) {
		case RUSAGE_BOTH:
		case RUSAGE_CHILDREN:
			utime = p->signal->cutime;
			stime = p->signal->cstime;
			r->ru_nvcsw = p->signal->cnvcsw;
			r->ru_nivcsw = p->signal->cnivcsw;
			r->ru_minflt = p->signal->cmin_flt;
			r->ru_majflt = p->signal->cmaj_flt;
			r->ru_inblock = p->signal->cinblock;
			r->ru_oublock = p->signal->coublock;

			if (who == RUSAGE_CHILDREN)
				break;

		case RUSAGE_SELF:
			utime = cputime_add(utime, p->signal->utime);
			stime = cputime_add(stime, p->signal->stime);
			r->ru_nvcsw += p->signal->nvcsw;
			r->ru_nivcsw += p->signal->nivcsw;
			r->ru_minflt += p->signal->min_flt;
			r->ru_majflt += p->signal->maj_flt;
			r->ru_inblock += p->signal->inblock;
			r->ru_oublock += p->signal->oublock;
			t = p;
			do {
				utime = cputime_add(utime, t->utime);
				stime = cputime_add(stime, t->stime);
				r->ru_nvcsw += t->nvcsw;
				r->ru_nivcsw += t->nivcsw;
				r->ru_minflt += t->min_flt;
				r->ru_majflt += t->maj_flt;
				r->ru_inblock += task_io_get_inblock(t);
				r->ru_oublock += task_io_get_oublock(t);
				t = next_thread(t);
			} while (t != p);
			break;

		default:
			BUG();
	}

	unlock_task_sighand(p, &flags);
	rcu_read_unlock();

	cputime_to_timeval(utime, &r->ru_utime);
	cputime_to_timeval(stime, &r->ru_stime);
}

int getrusage(struct task_struct *p, int who, struct rusage __user *ru)
{
	struct rusage r;
	k_getrusage(p, who, &r);
	return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;
}

asmlinkage long sys_getrusage(int who, struct rusage __user *ru)
{
	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN)
		return -EINVAL;
	return getrusage(current, who, ru);
}
asmlinkage long sys_umask(int mask)
{
	mask = xchg(&current->fs->umask, mask & S_IRWXUGO);
	return mask;
}
asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
			  unsigned long arg4, unsigned long arg5)
{
	long error;

	error = security_task_prctl(option, arg2, arg3, arg4, arg5);
	if (error)
		return error;

	switch (option) {
		case PR_SET_PDEATHSIG:
			if (!valid_signal(arg2)) {
				error = -EINVAL;
				break;
			}
			current->pdeath_signal = arg2;
			break;
		case PR_GET_PDEATHSIG:
			error = put_user(current->pdeath_signal, (int __user *)arg2);
			break;
		case PR_GET_DUMPABLE:
			error = get_dumpable(current->mm);
			break;
		case PR_SET_DUMPABLE:
			if (arg2 < 0 || arg2 > 1) {
				error = -EINVAL;
				break;
			}
			set_dumpable(current->mm, arg2);
			break;

		case PR_SET_UNALIGN:
			error = SET_UNALIGN_CTL(current, arg2);
			break;
		case PR_GET_UNALIGN:
			error = GET_UNALIGN_CTL(current, arg2);
			break;
		case PR_SET_FPEMU:
			error = SET_FPEMU_CTL(current, arg2);
			break;
		case PR_GET_FPEMU:
			error = GET_FPEMU_CTL(current, arg2);
			break;
		case PR_SET_FPEXC:
			error = SET_FPEXC_CTL(current, arg2);
			break;
		case PR_GET_FPEXC:
			error = GET_FPEXC_CTL(current, arg2);
			break;
		case PR_GET_TIMING:
			error = PR_TIMING_STATISTICAL;
			break;
		case PR_SET_TIMING:
			if (arg2 == PR_TIMING_STATISTICAL)
				error = 0;
			else
				error = -EINVAL;
			break;

		case PR_GET_KEEPCAPS:
			if (current->keep_capabilities)
				error = 1;
			break;
		case PR_SET_KEEPCAPS:
			if (arg2 != 0 && arg2 != 1) {
				error = -EINVAL;
				break;
			}
			current->keep_capabilities = arg2;
			break;
		case PR_SET_NAME: {
			struct task_struct *me = current;
			unsigned char ncomm[sizeof(me->comm)];

			ncomm[sizeof(me->comm)-1] = 0;
			if (strncpy_from_user(ncomm, (char __user *)arg2,
						sizeof(me->comm)-1) < 0)
				return -EFAULT;
			set_task_comm(me, ncomm);
			return 0;
		}
		case PR_GET_NAME: {
			struct task_struct *me = current;
			unsigned char tcomm[sizeof(me->comm)];

			get_task_comm(tcomm, me);
			if (copy_to_user((char __user *)arg2, tcomm, sizeof(tcomm)))
				return -EFAULT;
			return 0;
		}
		case PR_GET_ENDIAN:
			error = GET_ENDIAN(current, arg2);
			break;
		case PR_SET_ENDIAN:
			error = SET_ENDIAN(current, arg2);
			break;

		case PR_GET_SECCOMP:
			error = prctl_get_seccomp();
			break;
		case PR_SET_SECCOMP:
			error = prctl_set_seccomp(arg2);
			break;

		default:
			error = -EINVAL;
			break;
	}
	return error;
}
asmlinkage long sys_getcpu(unsigned __user *cpup, unsigned __user *nodep,
			   struct getcpu_cache __user *cache)
{
	int err = 0;
	int cpu = raw_smp_processor_id();
	if (cpup)
		err |= put_user(cpu, cpup);
	if (nodep)
		err |= put_user(cpu_to_node(cpu), nodep);
	if (cache) {
		/*
		 * The cache is not needed for this implementation,
		 * but make sure user programs pass something
		 * valid. vsyscall implementations can instead make
		 * good use of the cache. Only use t0 and t1 because
		 * these are available in both 32bit and 64bit ABI (no
		 * need for a compat_getcpu). 32bit has enough
		 * padding
		 */
		unsigned long t0, t1;
		get_user(t0, &cache->blob[0]);
		get_user(t1, &cache->blob[1]);
		t0++;
		t1++;
		put_user(t0, &cache->blob[0]);
		put_user(t1, &cache->blob[1]);
	}
	return err ? -EFAULT : 0;
}
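/*
 * Illustrative userspace sketch (the raw syscall() form below is an
 * assumption about the caller; newer glibc wraps this as
 * sched_getcpu()).  A NULL cache pointer is accepted here:
 *
 *	#include <stdio.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *
 *	unsigned cpu, node;
 *	if (syscall(SYS_getcpu, &cpu, &node, NULL) == 0)
 *		// only a hint: the task may migrate immediately after
 *		printf("cpu %u node %u\n", cpu, node);
 */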
char poweroff_cmd[POWEROFF_CMD_PATH_LEN] = "/sbin/poweroff";

static void argv_cleanup(char **argv, char **envp)
{
	argv_free(argv);
}

/**
 * orderly_poweroff - Trigger an orderly system poweroff
 * @force: force poweroff if command execution fails
 *
 * This may be called from any context to trigger a system shutdown.
 * If the orderly shutdown fails, it will force an immediate shutdown.
 */
int orderly_poweroff(bool force)
{
	int argc;
	char **argv = argv_split(GFP_ATOMIC, poweroff_cmd, &argc);
	static char *envp[] = {
		"HOME=/",
		"PATH=/sbin:/bin:/usr/sbin:/usr/bin",
		NULL
	};
	int ret = -ENOMEM;
	struct subprocess_info *info;

	if (argv == NULL) {
		printk(KERN_WARNING "%s failed to allocate memory for \"%s\"\n",
		       __func__, poweroff_cmd);
		goto out;
	}

	info = call_usermodehelper_setup(argv[0], argv, envp);
	if (info == NULL) {
		argv_free(argv);
		goto out;
	}

	call_usermodehelper_setcleanup(info, argv_cleanup);

	ret = call_usermodehelper_exec(info, UMH_NO_WAIT);

out:
	if (ret && force) {
		printk(KERN_WARNING "Failed to start orderly shutdown: "
		       "forcing the issue\n");

		/* I guess this should try to kick off some daemon to
		   sync and poweroff asap.  Or not even bother syncing
		   if we're doing an emergency shutdown? */
		emergency_sync();
		kernel_power_off();
	}

	return ret;
}
EXPORT_SYMBOL_GPL(orderly_poweroff);
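/*
 * Illustrative sketch (assumed caller, not defined here): a driver
 * reacting to, say, a critical thermal event asks userspace to shut
 * the machine down cleanly, and with force=true falls back to an
 * immediate power off if the usermode helper cannot be started:
 *
 *	orderly_poweroff(true);
 */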