/*
 *  linux/kernel/sys.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/utsname.h>
#include <linux/mman.h>
#include <linux/smp_lock.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/prctl.h>
#include <linux/highuid.h>
#include <linux/fs.h>
#include <linux/resource.h>
#include <linux/kernel.h>
#include <linux/kexec.h>
#include <linux/workqueue.h>
#include <linux/capability.h>
#include <linux/device.h>
#include <linux/key.h>
#include <linux/times.h>
#include <linux/posix-timers.h>
#include <linux/security.h>
#include <linux/dcookies.h>
#include <linux/suspend.h>
#include <linux/tty.h>
#include <linux/signal.h>
#include <linux/cn_proc.h>
#include <linux/getcpu.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/seccomp.h>
#include <linux/cpu.h>
#include <linux/compat.h>
#include <linux/syscalls.h>
#include <linux/kprobes.h>
#include <linux/user_namespace.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/unistd.h>
#ifndef SET_UNALIGN_CTL
# define SET_UNALIGN_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_UNALIGN_CTL
# define GET_UNALIGN_CTL(a,b)	(-EINVAL)
#endif
#ifndef SET_FPEMU_CTL
# define SET_FPEMU_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_FPEMU_CTL
# define GET_FPEMU_CTL(a,b)	(-EINVAL)
#endif
#ifndef SET_FPEXC_CTL
# define SET_FPEXC_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_FPEXC_CTL
# define GET_FPEXC_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_ENDIAN
# define GET_ENDIAN(a,b)	(-EINVAL)
#endif
#ifndef SET_ENDIAN
# define SET_ENDIAN(a,b)	(-EINVAL)
#endif
/*
 * this is where the system-wide overflow UID and GID are defined, for
 * architectures that now have 32-bit UID/GID but didn't in the past
 */

int overflowuid = DEFAULT_OVERFLOWUID;
int overflowgid = DEFAULT_OVERFLOWGID;

#ifdef CONFIG_UID16
EXPORT_SYMBOL(overflowuid);
EXPORT_SYMBOL(overflowgid);
#endif
/*
 * the same as above, but for filesystems which can only store a 16-bit
 * UID and GID. as such, this is needed on all architectures
 */

int fs_overflowuid = DEFAULT_FS_OVERFLOWUID;
int fs_overflowgid = DEFAULT_FS_OVERFLOWGID;

EXPORT_SYMBOL(fs_overflowuid);
EXPORT_SYMBOL(fs_overflowgid);
/*
 * this indicates whether you can reboot with ctrl-alt-del: the default is yes
 */

int C_A_D = 1;
struct pid *cad_pid;
EXPORT_SYMBOL(cad_pid);

/*
 *	Notifier list for kernel code which wants to be called
 *	at shutdown. This is used to stop any idling DMA operations
 *	and the like.
 */

static BLOCKING_NOTIFIER_HEAD(reboot_notifier_list);
/*
 *	Notifier chain core routines.  The exported routines below
 *	are layered on top of these, with appropriate locking added.
 */

static int notifier_chain_register(struct notifier_block **nl,
		struct notifier_block *n)
{
	while ((*nl) != NULL) {
		if (n->priority > (*nl)->priority)
			break;
		nl = &((*nl)->next);
	}
	n->next = *nl;
	rcu_assign_pointer(*nl, n);
	return 0;
}

static int notifier_chain_unregister(struct notifier_block **nl,
		struct notifier_block *n)
{
	while ((*nl) != NULL) {
		if ((*nl) == n) {
			rcu_assign_pointer(*nl, n->next);
			return 0;
		}
		nl = &((*nl)->next);
	}
	return -ENOENT;
}
/**
 * notifier_call_chain - Informs the registered notifiers about an event.
 *	@nl:		Pointer to head of the blocking notifier chain
 *	@val:		Value passed unmodified to notifier function
 *	@v:		Pointer passed unmodified to notifier function
 *	@nr_to_call:	Number of notifier functions to be called. Don't care
 *			value of this parameter is -1.
 *	@nr_calls:	Records the number of notifications sent. Don't care
 *			value of this field is NULL.
 *	@returns:	notifier_call_chain returns the value returned by the
 *			last notifier function called.
 */

static int __kprobes notifier_call_chain(struct notifier_block **nl,
					unsigned long val, void *v,
					int nr_to_call, int *nr_calls)
{
	int ret = NOTIFY_DONE;
	struct notifier_block *nb, *next_nb;

	nb = rcu_dereference(*nl);

	while (nb && nr_to_call) {
		next_nb = rcu_dereference(nb->next);
		ret = nb->notifier_call(nb, val, v);

		if (nr_calls)
			(*nr_calls)++;

		if ((ret & NOTIFY_STOP_MASK) == NOTIFY_STOP_MASK)
			break;
		nb = next_nb;
		nr_to_call--;
	}
	return ret;
}
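/*
 * Illustrative sketch (not part of the original file): the shape of a
 * callback as notifier_call_chain() invokes it above. Returning NOTIFY_STOP
 * ends traversal early, NOTIFY_OK lets it continue, and a higher .priority
 * places the entry earlier in the chain. The names example_notify and
 * example_nb are hypothetical.
 *
 *	static int example_notify(struct notifier_block *nb,
 *				  unsigned long action, void *data)
 *	{
 *		if (action == SYS_RESTART)
 *			return NOTIFY_STOP;
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block example_nb = {
 *		.notifier_call	= example_notify,
 *		.priority	= 0,
 *	};
 */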
/*
 *	Atomic notifier chain routines.  Registration and unregistration
 *	use a spinlock, and call_chain is synchronized by RCU (no locks).
 */

/**
 *	atomic_notifier_chain_register - Add notifier to an atomic notifier chain
 *	@nh: Pointer to head of the atomic notifier chain
 *	@n: New entry in notifier chain
 *
 *	Adds a notifier to an atomic notifier chain.
 *
 *	Currently always returns zero.
 */

int atomic_notifier_chain_register(struct atomic_notifier_head *nh,
		struct notifier_block *n)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&nh->lock, flags);
	ret = notifier_chain_register(&nh->head, n);
	spin_unlock_irqrestore(&nh->lock, flags);
	return ret;
}

EXPORT_SYMBOL_GPL(atomic_notifier_chain_register);

/**
 *	atomic_notifier_chain_unregister - Remove notifier from an atomic notifier chain
 *	@nh: Pointer to head of the atomic notifier chain
 *	@n: Entry to remove from notifier chain
 *
 *	Removes a notifier from an atomic notifier chain.
 *
 *	Returns zero on success or %-ENOENT on failure.
 */
int atomic_notifier_chain_unregister(struct atomic_notifier_head *nh,
		struct notifier_block *n)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&nh->lock, flags);
	ret = notifier_chain_unregister(&nh->head, n);
	spin_unlock_irqrestore(&nh->lock, flags);
	synchronize_rcu();
	return ret;
}

EXPORT_SYMBOL_GPL(atomic_notifier_chain_unregister);
/**
 *	__atomic_notifier_call_chain - Call functions in an atomic notifier chain
 *	@nh: Pointer to head of the atomic notifier chain
 *	@val: Value passed unmodified to notifier function
 *	@v: Pointer passed unmodified to notifier function
 *	@nr_to_call: See the comment for notifier_call_chain.
 *	@nr_calls: See the comment for notifier_call_chain.
 *
 *	Calls each function in a notifier chain in turn.  The functions
 *	run in an atomic context, so they must not block.
 *	This routine uses RCU to synchronize with changes to the chain.
 *
 *	If the return value of the notifier can be and'ed
 *	with %NOTIFY_STOP_MASK then atomic_notifier_call_chain()
 *	will return immediately, with the return value of
 *	the notifier function which halted execution.
 *	Otherwise the return value is the return value
 *	of the last notifier function called.
 */

int __kprobes __atomic_notifier_call_chain(struct atomic_notifier_head *nh,
					unsigned long val, void *v,
					int nr_to_call, int *nr_calls)
{
	int ret;

	rcu_read_lock();
	ret = notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls);
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(__atomic_notifier_call_chain);

int __kprobes atomic_notifier_call_chain(struct atomic_notifier_head *nh,
		unsigned long val, void *v)
{
	return __atomic_notifier_call_chain(nh, val, v, -1, NULL);
}

EXPORT_SYMBOL_GPL(atomic_notifier_call_chain);
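/*
 * Illustrative sketch (not part of the original file): typical use of the
 * atomic chain API above; example_chain, example_nb and EXAMPLE_EVENT are
 * hypothetical. Because the chain is walked under rcu_read_lock(), the
 * registered callbacks must not block.
 *
 *	static ATOMIC_NOTIFIER_HEAD(example_chain);
 *
 *	atomic_notifier_chain_register(&example_chain, &example_nb);
 *	atomic_notifier_call_chain(&example_chain, EXAMPLE_EVENT, NULL);
 *	atomic_notifier_chain_unregister(&example_chain, &example_nb);
 */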
/*
 *	Blocking notifier chain routines.  All access to the chain is
 *	synchronized by an rwsem.
 */

/**
 *	blocking_notifier_chain_register - Add notifier to a blocking notifier chain
 *	@nh: Pointer to head of the blocking notifier chain
 *	@n: New entry in notifier chain
 *
 *	Adds a notifier to a blocking notifier chain.
 *	Must be called in process context.
 *
 *	Currently always returns zero.
 */

int blocking_notifier_chain_register(struct blocking_notifier_head *nh,
		struct notifier_block *n)
{
	int ret;

	/*
	 * This code gets used during boot-up, when task switching is
	 * not yet working and interrupts must remain disabled.  At
	 * such times we must not call down_write().
	 */
	if (unlikely(system_state == SYSTEM_BOOTING))
		return notifier_chain_register(&nh->head, n);

	down_write(&nh->rwsem);
	ret = notifier_chain_register(&nh->head, n);
	up_write(&nh->rwsem);
	return ret;
}

EXPORT_SYMBOL_GPL(blocking_notifier_chain_register);

/**
 *	blocking_notifier_chain_unregister - Remove notifier from a blocking notifier chain
 *	@nh: Pointer to head of the blocking notifier chain
 *	@n: Entry to remove from notifier chain
 *
 *	Removes a notifier from a blocking notifier chain.
 *	Must be called from process context.
 *
 *	Returns zero on success or %-ENOENT on failure.
 */
int blocking_notifier_chain_unregister(struct blocking_notifier_head *nh,
		struct notifier_block *n)
{
	int ret;

	/*
	 * This code gets used during boot-up, when task switching is
	 * not yet working and interrupts must remain disabled.  At
	 * such times we must not call down_write().
	 */
	if (unlikely(system_state == SYSTEM_BOOTING))
		return notifier_chain_unregister(&nh->head, n);

	down_write(&nh->rwsem);
	ret = notifier_chain_unregister(&nh->head, n);
	up_write(&nh->rwsem);
	return ret;
}

EXPORT_SYMBOL_GPL(blocking_notifier_chain_unregister);
/**
 *	__blocking_notifier_call_chain - Call functions in a blocking notifier chain
 *	@nh: Pointer to head of the blocking notifier chain
 *	@val: Value passed unmodified to notifier function
 *	@v: Pointer passed unmodified to notifier function
 *	@nr_to_call: See comment for notifier_call_chain.
 *	@nr_calls: See comment for notifier_call_chain.
 *
 *	Calls each function in a notifier chain in turn.  The functions
 *	run in a process context, so they are allowed to block.
 *
 *	If the return value of the notifier can be and'ed
 *	with %NOTIFY_STOP_MASK then blocking_notifier_call_chain()
 *	will return immediately, with the return value of
 *	the notifier function which halted execution.
 *	Otherwise the return value is the return value
 *	of the last notifier function called.
 */

int __blocking_notifier_call_chain(struct blocking_notifier_head *nh,
				   unsigned long val, void *v,
				   int nr_to_call, int *nr_calls)
{
	int ret = NOTIFY_DONE;

	/*
	 * We check the head outside the lock, but if this access is
	 * racy then it does not matter what the result of the test
	 * is, we re-check the list after having taken the lock anyway:
	 */
	if (rcu_dereference(nh->head)) {
		down_read(&nh->rwsem);
		ret = notifier_call_chain(&nh->head, val, v, nr_to_call,
					nr_calls);
		up_read(&nh->rwsem);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(__blocking_notifier_call_chain);

int blocking_notifier_call_chain(struct blocking_notifier_head *nh,
		unsigned long val, void *v)
{
	return __blocking_notifier_call_chain(nh, val, v, -1, NULL);
}
EXPORT_SYMBOL_GPL(blocking_notifier_call_chain);
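/*
 * Illustrative sketch (not part of the original file): a blocking chain is
 * declared with BLOCKING_NOTIFIER_HEAD() and, unlike the atomic variant
 * above, may only be walked from process context since the callees are
 * allowed to sleep; example_chain and EXAMPLE_EVENT are hypothetical.
 *
 *	static BLOCKING_NOTIFIER_HEAD(example_chain);
 *
 *	blocking_notifier_call_chain(&example_chain, EXAMPLE_EVENT, NULL);
 */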
/*
 *	Raw notifier chain routines.  There is no protection;
 *	the caller must provide it.  Use at your own risk!
 */

/**
 *	raw_notifier_chain_register - Add notifier to a raw notifier chain
 *	@nh: Pointer to head of the raw notifier chain
 *	@n: New entry in notifier chain
 *
 *	Adds a notifier to a raw notifier chain.
 *	All locking must be provided by the caller.
 *
 *	Currently always returns zero.
 */

int raw_notifier_chain_register(struct raw_notifier_head *nh,
		struct notifier_block *n)
{
	return notifier_chain_register(&nh->head, n);
}

EXPORT_SYMBOL_GPL(raw_notifier_chain_register);

/**
 *	raw_notifier_chain_unregister - Remove notifier from a raw notifier chain
 *	@nh: Pointer to head of the raw notifier chain
 *	@n: Entry to remove from notifier chain
 *
 *	Removes a notifier from a raw notifier chain.
 *	All locking must be provided by the caller.
 *
 *	Returns zero on success or %-ENOENT on failure.
 */
int raw_notifier_chain_unregister(struct raw_notifier_head *nh,
		struct notifier_block *n)
{
	return notifier_chain_unregister(&nh->head, n);
}

EXPORT_SYMBOL_GPL(raw_notifier_chain_unregister);
/**
 *	__raw_notifier_call_chain - Call functions in a raw notifier chain
 *	@nh: Pointer to head of the raw notifier chain
 *	@val: Value passed unmodified to notifier function
 *	@v: Pointer passed unmodified to notifier function
 *	@nr_to_call: See comment for notifier_call_chain.
 *	@nr_calls: See comment for notifier_call_chain
 *
 *	Calls each function in a notifier chain in turn.  The functions
 *	run in an undefined context.
 *	All locking must be provided by the caller.
 *
 *	If the return value of the notifier can be and'ed
 *	with %NOTIFY_STOP_MASK then raw_notifier_call_chain()
 *	will return immediately, with the return value of
 *	the notifier function which halted execution.
 *	Otherwise the return value is the return value
 *	of the last notifier function called.
 */

int __raw_notifier_call_chain(struct raw_notifier_head *nh,
			      unsigned long val, void *v,
			      int nr_to_call, int *nr_calls)
{
	return notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls);
}

EXPORT_SYMBOL_GPL(__raw_notifier_call_chain);

int raw_notifier_call_chain(struct raw_notifier_head *nh,
		unsigned long val, void *v)
{
	return __raw_notifier_call_chain(nh, val, v, -1, NULL);
}

EXPORT_SYMBOL_GPL(raw_notifier_call_chain);
/*
 *	SRCU notifier chain routines.  Registration and unregistration
 *	use a mutex, and call_chain is synchronized by SRCU (no locks).
 */

/**
 *	srcu_notifier_chain_register - Add notifier to an SRCU notifier chain
 *	@nh: Pointer to head of the SRCU notifier chain
 *	@n: New entry in notifier chain
 *
 *	Adds a notifier to an SRCU notifier chain.
 *	Must be called in process context.
 *
 *	Currently always returns zero.
 */

int srcu_notifier_chain_register(struct srcu_notifier_head *nh,
		struct notifier_block *n)
{
	int ret;

	/*
	 * This code gets used during boot-up, when task switching is
	 * not yet working and interrupts must remain disabled.  At
	 * such times we must not call mutex_lock().
	 */
	if (unlikely(system_state == SYSTEM_BOOTING))
		return notifier_chain_register(&nh->head, n);

	mutex_lock(&nh->mutex);
	ret = notifier_chain_register(&nh->head, n);
	mutex_unlock(&nh->mutex);
	return ret;
}

EXPORT_SYMBOL_GPL(srcu_notifier_chain_register);

/**
 *	srcu_notifier_chain_unregister - Remove notifier from an SRCU notifier chain
 *	@nh: Pointer to head of the SRCU notifier chain
 *	@n: Entry to remove from notifier chain
 *
 *	Removes a notifier from an SRCU notifier chain.
 *	Must be called from process context.
 *
 *	Returns zero on success or %-ENOENT on failure.
 */
int srcu_notifier_chain_unregister(struct srcu_notifier_head *nh,
		struct notifier_block *n)
{
	int ret;

	/*
	 * This code gets used during boot-up, when task switching is
	 * not yet working and interrupts must remain disabled.  At
	 * such times we must not call mutex_lock().
	 */
	if (unlikely(system_state == SYSTEM_BOOTING))
		return notifier_chain_unregister(&nh->head, n);

	mutex_lock(&nh->mutex);
	ret = notifier_chain_unregister(&nh->head, n);
	mutex_unlock(&nh->mutex);
	synchronize_srcu(&nh->srcu);
	return ret;
}

EXPORT_SYMBOL_GPL(srcu_notifier_chain_unregister);
/**
 *	__srcu_notifier_call_chain - Call functions in an SRCU notifier chain
 *	@nh: Pointer to head of the SRCU notifier chain
 *	@val: Value passed unmodified to notifier function
 *	@v: Pointer passed unmodified to notifier function
 *	@nr_to_call: See comment for notifier_call_chain.
 *	@nr_calls: See comment for notifier_call_chain
 *
 *	Calls each function in a notifier chain in turn.  The functions
 *	run in a process context, so they are allowed to block.
 *
 *	If the return value of the notifier can be and'ed
 *	with %NOTIFY_STOP_MASK then srcu_notifier_call_chain()
 *	will return immediately, with the return value of
 *	the notifier function which halted execution.
 *	Otherwise the return value is the return value
 *	of the last notifier function called.
 */

int __srcu_notifier_call_chain(struct srcu_notifier_head *nh,
			       unsigned long val, void *v,
			       int nr_to_call, int *nr_calls)
{
	int ret;
	int idx;

	idx = srcu_read_lock(&nh->srcu);
	ret = notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls);
	srcu_read_unlock(&nh->srcu, idx);
	return ret;
}
EXPORT_SYMBOL_GPL(__srcu_notifier_call_chain);

int srcu_notifier_call_chain(struct srcu_notifier_head *nh,
		unsigned long val, void *v)
{
	return __srcu_notifier_call_chain(nh, val, v, -1, NULL);
}
EXPORT_SYMBOL_GPL(srcu_notifier_call_chain);
/**
 *	srcu_init_notifier_head - Initialize an SRCU notifier head
 *	@nh: Pointer to head of the srcu notifier chain
 *
 *	Unlike other sorts of notifier heads, SRCU notifier heads require
 *	dynamic initialization.  Be sure to call this routine before
 *	calling any of the other SRCU notifier routines for this head.
 *
 *	If an SRCU notifier head is deallocated, it must first be cleaned
 *	up by calling srcu_cleanup_notifier_head().  Otherwise the head's
 *	per-cpu data (used by the SRCU mechanism) will leak.
 */

void srcu_init_notifier_head(struct srcu_notifier_head *nh)
{
	mutex_init(&nh->mutex);
	if (init_srcu_struct(&nh->srcu) < 0)
		BUG();
	nh->head = NULL;
}

EXPORT_SYMBOL_GPL(srcu_init_notifier_head);
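/*
 * Illustrative sketch (not part of the original file): per the kernel-doc
 * above, an SRCU head must be initialized at run time before any other SRCU
 * notifier routine touches it; example_chain and example_init are
 * hypothetical.
 *
 *	static struct srcu_notifier_head example_chain;
 *
 *	static int __init example_init(void)
 *	{
 *		srcu_init_notifier_head(&example_chain);
 *		return 0;
 *	}
 */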
/**
 *	register_reboot_notifier - Register function to be called at reboot time
 *	@nb: Info about notifier function to be called
 *
 *	Registers a function with the list of functions
 *	to be called at reboot time.
 *
 *	Currently always returns zero, as blocking_notifier_chain_register()
 *	always returns zero.
 */

int register_reboot_notifier(struct notifier_block * nb)
{
	return blocking_notifier_chain_register(&reboot_notifier_list, nb);
}

EXPORT_SYMBOL(register_reboot_notifier);

/**
 *	unregister_reboot_notifier - Unregister previously registered reboot notifier
 *	@nb: Hook to be unregistered
 *
 *	Unregisters a previously registered reboot
 *	notifier function.
 *
 *	Returns zero on success, or %-ENOENT on failure.
 */

int unregister_reboot_notifier(struct notifier_block * nb)
{
	return blocking_notifier_chain_unregister(&reboot_notifier_list, nb);
}

EXPORT_SYMBOL(unregister_reboot_notifier);
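/*
 * Illustrative sketch (not part of the original file): a driver quiescing
 * hardware at shutdown might hook the reboot list as follows. The code
 * argument reaching the callback is SYS_RESTART, SYS_HALT or SYS_POWER_OFF;
 * the names example_reboot and example_reboot_nb are hypothetical.
 *
 *	static int example_reboot(struct notifier_block *nb,
 *				  unsigned long code, void *cmd)
 *	{
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block example_reboot_nb = {
 *		.notifier_call = example_reboot,
 *	};
 *
 *	register_reboot_notifier(&example_reboot_nb);
 */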
static int set_one_prio(struct task_struct *p, int niceval, int error)
{
	int no_nice;

	if (p->uid != current->euid &&
		p->euid != current->euid && !capable(CAP_SYS_NICE)) {
		error = -EPERM;
		goto out;
	}
	if (niceval < task_nice(p) && !can_nice(p, niceval)) {
		error = -EACCES;
		goto out;
	}
	no_nice = security_task_setnice(p, niceval);
	if (no_nice) {
		error = no_nice;
		goto out;
	}
	if (error == -ESRCH)
		error = 0;
	set_user_nice(p, niceval);
out:
	return error;
}
asmlinkage long sys_setpriority(int which, int who, int niceval)
{
	struct task_struct *g, *p;
	struct user_struct *user;
	int error = -EINVAL;
	struct pid *pgrp;

	if (which > PRIO_USER || which < PRIO_PROCESS)
		goto out;

	/* normalize: avoid signed division (rounding problems) */
	error = -ESRCH;
	if (niceval < -20)
		niceval = -20;
	if (niceval > 19)
		niceval = 19;

	read_lock(&tasklist_lock);
	switch (which) {
		case PRIO_PROCESS:
			if (!who)
				who = current->pid;
			p = find_task_by_pid(who);
			if (p)
				error = set_one_prio(p, niceval, error);
			break;
		case PRIO_PGRP:
			if (who)
				pgrp = find_pid(who);
			else
				pgrp = task_pgrp(current);
			do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
				error = set_one_prio(p, niceval, error);
			} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
			break;
		case PRIO_USER:
			user = current->user;
			if (!who)
				who = current->uid;
			else
				if ((who != current->uid) && !(user = find_user(who)))
					goto out_unlock;	/* No processes for this user */

			do_each_thread(g, p)
				if (p->uid == who)
					error = set_one_prio(p, niceval, error);
			while_each_thread(g, p);
			if (who != current->uid)
				free_uid(user);		/* For find_user() */
			break;
	}
out_unlock:
	read_unlock(&tasklist_lock);
out:
	return error;
}
/*
 * Ugh. To avoid negative return values, "getpriority()" will
 * not return the normal nice-value, but a negated value that
 * has been offset by 20 (ie it returns 40..1 instead of -20..19)
 * to stay compatible.
 */
asmlinkage long sys_getpriority(int which, int who)
{
	struct task_struct *g, *p;
	struct user_struct *user;
	long niceval, retval = -ESRCH;
	struct pid *pgrp;

	if (which > PRIO_USER || which < PRIO_PROCESS)
		return -EINVAL;

	read_lock(&tasklist_lock);
	switch (which) {
		case PRIO_PROCESS:
			if (!who)
				who = current->pid;
			p = find_task_by_pid(who);
			if (p) {
				niceval = 20 - task_nice(p);
				if (niceval > retval)
					retval = niceval;
			}
			break;
		case PRIO_PGRP:
			if (who)
				pgrp = find_pid(who);
			else
				pgrp = task_pgrp(current);
			do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
				niceval = 20 - task_nice(p);
				if (niceval > retval)
					retval = niceval;
			} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
			break;
		case PRIO_USER:
			user = current->user;
			if (!who)
				who = current->uid;
			else
				if ((who != current->uid) && !(user = find_user(who)))
					goto out_unlock;	/* No processes for this user */

			do_each_thread(g, p)
				if (p->uid == who) {
					niceval = 20 - task_nice(p);
					if (niceval > retval)
						retval = niceval;
				}
			while_each_thread(g, p);
			if (who != current->uid)
				free_uid(user);		/* for find_user() */
			break;
	}
out_unlock:
	read_unlock(&tasklist_lock);

	return retval;
}
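/*
 * Worked example of the offset described above (not part of the original
 * file): a task at nice -20 makes this syscall return 20 - (-20) = 40, and
 * one at nice 19 returns 20 - 19 = 1, so every success is positive and
 * distinguishable from an error; the C library is expected to subtract the
 * result from 20 again to recover the real nice value.
 */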
/**
 *	emergency_restart - reboot the system
 *
 *	Without shutting down any hardware or taking any locks
 *	reboot the system.  This is called when we know we are in
 *	trouble so this is our best effort to reboot.  This is
 *	safe to call in interrupt context.
 */
void emergency_restart(void)
{
	machine_emergency_restart();
}
EXPORT_SYMBOL_GPL(emergency_restart);
static void kernel_restart_prepare(char *cmd)
{
	blocking_notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd);
	system_state = SYSTEM_RESTART;
	device_shutdown();
}

/**
 *	kernel_restart - reboot the system
 *	@cmd: pointer to buffer containing command to execute for restart
 *		or %NULL
 *
 *	Shutdown everything and perform a clean reboot.
 *	This is not safe to call in interrupt context.
 */
void kernel_restart(char *cmd)
{
	kernel_restart_prepare(cmd);
	if (!cmd)
		printk(KERN_EMERG "Restarting system.\n");
	else
		printk(KERN_EMERG "Restarting system with command '%s'.\n", cmd);
	machine_restart(cmd);
}
EXPORT_SYMBOL_GPL(kernel_restart);
/**
 *	kernel_kexec - reboot the system
 *
 *	Move into place and start executing a preloaded standalone
 *	executable.  If nothing was preloaded return an error.
 */
static void kernel_kexec(void)
{
#ifdef CONFIG_KEXEC
	struct kimage *image;
	image = xchg(&kexec_image, NULL);
	if (!image)
		return;
	kernel_restart_prepare(NULL);
	printk(KERN_EMERG "Starting new kernel\n");
	machine_shutdown();
	machine_kexec(image);
#endif
}
void kernel_shutdown_prepare(enum system_states state)
{
	blocking_notifier_call_chain(&reboot_notifier_list,
		(state == SYSTEM_HALT)?SYS_HALT:SYS_POWER_OFF, NULL);
	system_state = state;
	device_shutdown();
}
/**
 *	kernel_halt - halt the system
 *
 *	Shutdown everything and perform a clean system halt.
 */
void kernel_halt(void)
{
	kernel_shutdown_prepare(SYSTEM_HALT);
	printk(KERN_EMERG "System halted.\n");
	machine_halt();
}

EXPORT_SYMBOL_GPL(kernel_halt);

/**
 *	kernel_power_off - power_off the system
 *
 *	Shutdown everything and perform a clean system power_off.
 */
void kernel_power_off(void)
{
	kernel_shutdown_prepare(SYSTEM_POWER_OFF);
	printk(KERN_EMERG "Power down.\n");
	machine_power_off();
}
EXPORT_SYMBOL_GPL(kernel_power_off);
/*
 * Reboot system call: for obvious reasons only root may call it,
 * and even root needs to set up some magic numbers in the registers
 * so that some mistake won't make this reboot the whole machine.
 * You can also set the meaning of the ctrl-alt-del-key here.
 *
 * reboot doesn't sync: do that yourself before calling this.
 */
asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void __user * arg)
{
	char buffer[256];

	/* We only trust the superuser with rebooting the system. */
	if (!capable(CAP_SYS_BOOT))
		return -EPERM;

	/* For safety, we require "magic" arguments. */
	if (magic1 != LINUX_REBOOT_MAGIC1 ||
	    (magic2 != LINUX_REBOOT_MAGIC2 &&
			magic2 != LINUX_REBOOT_MAGIC2A &&
			magic2 != LINUX_REBOOT_MAGIC2B &&
			magic2 != LINUX_REBOOT_MAGIC2C))
		return -EINVAL;

	/* Instead of trying to make the power_off code look like
	 * halt when pm_power_off is not set do it the easy way.
	 */
	if ((cmd == LINUX_REBOOT_CMD_POWER_OFF) && !pm_power_off)
		cmd = LINUX_REBOOT_CMD_HALT;

	lock_kernel();
	switch (cmd) {
	case LINUX_REBOOT_CMD_RESTART:
		kernel_restart(NULL);
		break;

	case LINUX_REBOOT_CMD_CAD_ON:
		C_A_D = 1;
		break;

	case LINUX_REBOOT_CMD_CAD_OFF:
		C_A_D = 0;
		break;

	case LINUX_REBOOT_CMD_HALT:
		kernel_halt();
		unlock_kernel();
		do_exit(0);
		break;

	case LINUX_REBOOT_CMD_POWER_OFF:
		kernel_power_off();
		unlock_kernel();
		do_exit(0);
		break;

	case LINUX_REBOOT_CMD_RESTART2:
		if (strncpy_from_user(&buffer[0], arg, sizeof(buffer) - 1) < 0) {
			unlock_kernel();
			return -EFAULT;
		}
		buffer[sizeof(buffer) - 1] = '\0';

		kernel_restart(buffer);
		break;

	case LINUX_REBOOT_CMD_KEXEC:
		kernel_kexec();
		unlock_kernel();
		return -EINVAL;

#ifdef CONFIG_SOFTWARE_SUSPEND
	case LINUX_REBOOT_CMD_SW_SUSPEND:
		{
			int ret = hibernate();
			unlock_kernel();
			return ret;
		}
#endif

	default:
		unlock_kernel();
		return -EINVAL;
	}
	unlock_kernel();
	return 0;
}
static void deferred_cad(struct work_struct *dummy)
{
	kernel_restart(NULL);
}

/*
 * This function gets called by ctrl-alt-del - ie the keyboard interrupt.
 * As it's called within an interrupt, it may NOT sync: the only choice
 * is whether to reboot at once, or just ignore the ctrl-alt-del.
 */
void ctrl_alt_del(void)
{
	static DECLARE_WORK(cad_work, deferred_cad);

	if (C_A_D)
		schedule_work(&cad_work);
	else
		kill_cad_pid(SIGINT, 1);
}
/*
 * Unprivileged users may change the real gid to the effective gid
 * or vice versa.  (BSD-style)
 *
 * If you set the real gid at all, or set the effective gid to a value not
 * equal to the real gid, then the saved gid is set to the new effective gid.
 *
 * This makes it possible for a setgid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit over a program.
 *
 * The general idea is that a program which uses just setregid() will be
 * 100% compatible with BSD.  A program which uses just setgid() will be
 * 100% compatible with POSIX with saved IDs.
 *
 * SMP: There are no races, the GIDs are checked only by filesystem
 *      operations (as far as semantic preservation is concerned).
 */
asmlinkage long sys_setregid(gid_t rgid, gid_t egid)
{
	int old_rgid = current->gid;
	int old_egid = current->egid;
	int new_rgid = old_rgid;
	int new_egid = old_egid;
	int retval;

	retval = security_task_setgid(rgid, egid, (gid_t)-1, LSM_SETID_RE);
	if (retval)
		return retval;

	if (rgid != (gid_t) -1) {
		if ((old_rgid == rgid) ||
		    (current->egid==rgid) ||
		    capable(CAP_SETGID))
			new_rgid = rgid;
		else
			return -EPERM;
	}
	if (egid != (gid_t) -1) {
		if ((old_rgid == egid) ||
		    (current->egid == egid) ||
		    (current->sgid == egid) ||
		    capable(CAP_SETGID))
			new_egid = egid;
		else
			return -EPERM;
	}
	if (new_egid != old_egid) {
		current->mm->dumpable = suid_dumpable;
		smp_wmb();
	}
	if (rgid != (gid_t) -1 ||
	    (egid != (gid_t) -1 && egid != old_rgid))
		current->sgid = new_egid;
	current->fsgid = new_egid;
	current->egid = new_egid;
	current->gid = new_rgid;
	key_fsgid_changed(current);
	proc_id_connector(current, PROC_EVENT_GID);
	return 0;
}
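/*
 * Illustrative sketch (not part of the original file): how a setgid program
 * uses the semantics above to drop its group privilege permanently from
 * userspace. Because the real gid is written, the saved gid is overwritten
 * as well, so the privilege cannot be regained:
 *
 *	gid_t rgid = getgid();
 *	if (setregid(rgid, rgid) < 0)
 *		abort();
 */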
/*
 * setgid() is implemented like SysV w/ SAVED_IDS
 *
 * SMP: Same implicit races as above.
 */
asmlinkage long sys_setgid(gid_t gid)
{
	int old_egid = current->egid;
	int retval;

	retval = security_task_setgid(gid, (gid_t)-1, (gid_t)-1, LSM_SETID_ID);
	if (retval)
		return retval;

	if (capable(CAP_SETGID)) {
		if (old_egid != gid) {
			current->mm->dumpable = suid_dumpable;
			smp_wmb();
		}
		current->gid = current->egid = current->sgid = current->fsgid = gid;
	} else if ((gid == current->gid) || (gid == current->sgid)) {
		if (old_egid != gid) {
			current->mm->dumpable = suid_dumpable;
			smp_wmb();
		}
		current->egid = current->fsgid = gid;
	}
	else
		return -EPERM;

	key_fsgid_changed(current);
	proc_id_connector(current, PROC_EVENT_GID);
	return 0;
}
static int set_user(uid_t new_ruid, int dumpclear)
{
	struct user_struct *new_user;

	new_user = alloc_uid(current->nsproxy->user_ns, new_ruid);
	if (!new_user)
		return -EAGAIN;

	if (atomic_read(&new_user->processes) >=
				current->signal->rlim[RLIMIT_NPROC].rlim_cur &&
			new_user != current->nsproxy->user_ns->root_user) {
		free_uid(new_user);
		return -EAGAIN;
	}

	switch_uid(new_user);

	if (dumpclear) {
		current->mm->dumpable = suid_dumpable;
		smp_wmb();
	}
	current->uid = new_ruid;
	return 0;
}
/*
 * Unprivileged users may change the real uid to the effective uid
 * or vice versa.  (BSD-style)
 *
 * If you set the real uid at all, or set the effective uid to a value not
 * equal to the real uid, then the saved uid is set to the new effective uid.
 *
 * This makes it possible for a setuid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit over a program.
 *
 * The general idea is that a program which uses just setreuid() will be
 * 100% compatible with BSD.  A program which uses just setuid() will be
 * 100% compatible with POSIX with saved IDs.
 */
asmlinkage long sys_setreuid(uid_t ruid, uid_t euid)
{
	int old_ruid, old_euid, old_suid, new_ruid, new_euid;
	int retval;

	retval = security_task_setuid(ruid, euid, (uid_t)-1, LSM_SETID_RE);
	if (retval)
		return retval;

	new_ruid = old_ruid = current->uid;
	new_euid = old_euid = current->euid;
	old_suid = current->suid;

	if (ruid != (uid_t) -1) {
		new_ruid = ruid;
		if ((old_ruid != ruid) &&
		    (current->euid != ruid) &&
		    !capable(CAP_SETUID))
			return -EPERM;
	}

	if (euid != (uid_t) -1) {
		new_euid = euid;
		if ((old_ruid != euid) &&
		    (current->euid != euid) &&
		    (current->suid != euid) &&
		    !capable(CAP_SETUID))
			return -EPERM;
	}

	if (new_ruid != old_ruid && set_user(new_ruid, new_euid != old_euid) < 0)
		return -EAGAIN;

	if (new_euid != old_euid) {
		current->mm->dumpable = suid_dumpable;
		smp_wmb();
	}
	current->fsuid = current->euid = new_euid;
	if (ruid != (uid_t) -1 ||
	    (euid != (uid_t) -1 && euid != old_ruid))
		current->suid = current->euid;
	current->fsuid = current->euid;

	key_fsuid_changed(current);
	proc_id_connector(current, PROC_EVENT_UID);

	return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_RE);
}
/*
 * setuid() is implemented like SysV with SAVED_IDS
 *
 * Note that SAVED_ID's is deficient in that a setuid root program
 * like sendmail, for example, cannot set its uid to be a normal
 * user and then switch back, because if you're root, setuid() sets
 * the saved uid too.  If you don't like this, blame the bright people
 * in the POSIX committee and/or USG.  Note that the BSD-style setreuid()
 * will allow a root program to temporarily drop privileges and be able to
 * regain them by swapping the real and effective uid.
 */
asmlinkage long sys_setuid(uid_t uid)
{
	int old_euid = current->euid;
	int old_ruid, old_suid, new_suid;
	int retval;

	retval = security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_ID);
	if (retval)
		return retval;

	old_ruid = current->uid;
	old_suid = current->suid;
	new_suid = old_suid;

	if (capable(CAP_SETUID)) {
		if (uid != old_ruid && set_user(uid, old_euid != uid) < 0)
			return -EAGAIN;
		new_suid = uid;
	} else if ((uid != current->uid) && (uid != new_suid))
		return -EPERM;

	if (old_euid != uid) {
		current->mm->dumpable = suid_dumpable;
		smp_wmb();
	}
	current->fsuid = current->euid = uid;
	current->suid = new_suid;

	key_fsuid_changed(current);
	proc_id_connector(current, PROC_EVENT_UID);

	return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_ID);
}
/*
 * This function implements a generic ability to update ruid, euid,
 * and suid.  This allows you to implement the 4.4 compatible seteuid().
 */
asmlinkage long sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
{
	int old_ruid = current->uid;
	int old_euid = current->euid;
	int old_suid = current->suid;
	int retval;

	retval = security_task_setuid(ruid, euid, suid, LSM_SETID_RES);
	if (retval)
		return retval;

	if (!capable(CAP_SETUID)) {
		if ((ruid != (uid_t) -1) && (ruid != current->uid) &&
		    (ruid != current->euid) && (ruid != current->suid))
			return -EPERM;
		if ((euid != (uid_t) -1) && (euid != current->uid) &&
		    (euid != current->euid) && (euid != current->suid))
			return -EPERM;
		if ((suid != (uid_t) -1) && (suid != current->uid) &&
		    (suid != current->euid) && (suid != current->suid))
			return -EPERM;
	}
	if (ruid != (uid_t) -1) {
		if (ruid != current->uid && set_user(ruid, euid != current->euid) < 0)
			return -EAGAIN;
	}
	if (euid != (uid_t) -1) {
		if (euid != current->euid) {
			current->mm->dumpable = suid_dumpable;
			smp_wmb();
		}
		current->euid = euid;
	}
	current->fsuid = current->euid;
	if (suid != (uid_t) -1)
		current->suid = suid;

	key_fsuid_changed(current);
	proc_id_connector(current, PROC_EVENT_UID);

	return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_RES);
}
asmlinkage long sys_getresuid(uid_t __user *ruid, uid_t __user *euid, uid_t __user *suid)
{
	int retval;

	if (!(retval = put_user(current->uid, ruid)) &&
	    !(retval = put_user(current->euid, euid)))
		retval = put_user(current->suid, suid);

	return retval;
}
/*
 * Same as above, but for rgid, egid, sgid.
 */
asmlinkage long sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
{
	int retval;

	retval = security_task_setgid(rgid, egid, sgid, LSM_SETID_RES);
	if (retval)
		return retval;

	if (!capable(CAP_SETGID)) {
		if ((rgid != (gid_t) -1) && (rgid != current->gid) &&
		    (rgid != current->egid) && (rgid != current->sgid))
			return -EPERM;
		if ((egid != (gid_t) -1) && (egid != current->gid) &&
		    (egid != current->egid) && (egid != current->sgid))
			return -EPERM;
		if ((sgid != (gid_t) -1) && (sgid != current->gid) &&
		    (sgid != current->egid) && (sgid != current->sgid))
			return -EPERM;
	}
	if (egid != (gid_t) -1) {
		if (egid != current->egid) {
			current->mm->dumpable = suid_dumpable;
			smp_wmb();
		}
		current->egid = egid;
	}
	current->fsgid = current->egid;
	if (rgid != (gid_t) -1)
		current->gid = rgid;
	if (sgid != (gid_t) -1)
		current->sgid = sgid;

	key_fsgid_changed(current);
	proc_id_connector(current, PROC_EVENT_GID);
	return 0;
}
asmlinkage long sys_getresgid(gid_t __user *rgid, gid_t __user *egid, gid_t __user *sgid)
{
	int retval;

	if (!(retval = put_user(current->gid, rgid)) &&
	    !(retval = put_user(current->egid, egid)))
		retval = put_user(current->sgid, sgid);

	return retval;
}
1327 * "setfsuid()" sets the fsuid - the uid used for filesystem checks. This
1328 * is used for "access()" and for the NFS daemon (letting nfsd stay at
1329 * whatever uid it wants to). It normally shadows "euid", except when
1330 * explicitly set by setfsuid() or for access..
1332 asmlinkage
long sys_setfsuid(uid_t uid
)
1336 old_fsuid
= current
->fsuid
;
1337 if (security_task_setuid(uid
, (uid_t
)-1, (uid_t
)-1, LSM_SETID_FS
))
1340 if (uid
== current
->uid
|| uid
== current
->euid
||
1341 uid
== current
->suid
|| uid
== current
->fsuid
||
1342 capable(CAP_SETUID
)) {
1343 if (uid
!= old_fsuid
) {
1344 current
->mm
->dumpable
= suid_dumpable
;
1347 current
->fsuid
= uid
;
1350 key_fsuid_changed(current
);
1351 proc_id_connector(current
, PROC_EVENT_UID
);
1353 security_task_post_setuid(old_fsuid
, (uid_t
)-1, (uid_t
)-1, LSM_SETID_FS
);
/*
 * Same as setfsuid() above, but for the filesystem gid.
 */
asmlinkage long sys_setfsgid(gid_t gid)
{
	int old_fsgid;

	old_fsgid = current->fsgid;
	if (security_task_setgid(gid, (gid_t)-1, (gid_t)-1, LSM_SETID_FS))
		return old_fsgid;

	if (gid == current->gid || gid == current->egid ||
	    gid == current->sgid || gid == current->fsgid ||
	    capable(CAP_SETGID)) {
		if (gid != old_fsgid) {
			current->mm->dumpable = suid_dumpable;
			smp_wmb();
		}
		current->fsgid = gid;
		key_fsgid_changed(current);
		proc_id_connector(current, PROC_EVENT_GID);
	}
	return old_fsgid;
}
asmlinkage long sys_times(struct tms __user * tbuf)
{
	/*
	 *	In the SMP world we might just be unlucky and have one of
	 *	the times increment as we use it. Since the value is an
	 *	atomically safe type this is just fine. Conceptually it's
	 *	as if the syscall took an instant longer to occur.
	 */
	if (tbuf) {
		struct tms tmp;
		struct task_struct *tsk = current;
		struct task_struct *t;
		cputime_t utime, stime, cutime, cstime;

		spin_lock_irq(&tsk->sighand->siglock);
		utime = tsk->signal->utime;
		stime = tsk->signal->stime;
		t = tsk;
		do {
			utime = cputime_add(utime, t->utime);
			stime = cputime_add(stime, t->stime);
			t = next_thread(t);
		} while (t != tsk);

		cutime = tsk->signal->cutime;
		cstime = tsk->signal->cstime;
		spin_unlock_irq(&tsk->sighand->siglock);

		tmp.tms_utime = cputime_to_clock_t(utime);
		tmp.tms_stime = cputime_to_clock_t(stime);
		tmp.tms_cutime = cputime_to_clock_t(cutime);
		tmp.tms_cstime = cputime_to_clock_t(cstime);
		if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
			return -EFAULT;
	}
	return (long) jiffies_64_to_clock_t(get_jiffies_64());
}
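/*
 * Illustrative sketch (not part of the original file): a userspace caller
 * converts the clock ticks reported here into seconds via sysconf():
 *
 *	struct tms t;
 *	clock_t elapsed = times(&t);
 *	long hz = sysconf(_SC_CLK_TCK);
 *	double user_secs = (double)t.tms_utime / hz;
 */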
/*
 * This needs some heavy checking ...
 * I just haven't the stomach for it. I also don't fully
 * understand sessions/pgrp etc. Let somebody who does explain it.
 *
 * OK, I think I have the protection semantics right.... this is really
 * only important on a multi-user system anyway, to make sure one user
 * can't send a signal to a process owned by another.  -TYT, 12/12/91
 *
 * Ouch. Had to add the 'did_exec' flag to conform completely to POSIX.
 * LBT 04.03.94
 */

asmlinkage long sys_setpgid(pid_t pid, pid_t pgid)
{
	struct task_struct *p;
	struct task_struct *group_leader = current->group_leader;
	int err = -EINVAL;

	if (!pid)
		pid = group_leader->pid;
	if (!pgid)
		pgid = pid;
	if (pgid < 0)
		return -EINVAL;

	/* From this point forward we keep holding onto the tasklist lock
	 * so that our parent does not change from under us. -DaveM
	 */
	write_lock_irq(&tasklist_lock);

	err = -ESRCH;
	p = find_task_by_pid(pid);
	if (!p)
		goto out;

	err = -EINVAL;
	if (!thread_group_leader(p))
		goto out;

	if (p->real_parent == group_leader) {
		err = -EPERM;
		if (task_session(p) != task_session(group_leader))
			goto out;
		err = -EACCES;
		if (p->did_exec)
			goto out;
	} else {
		err = -ESRCH;
		if (p != group_leader)
			goto out;
	}

	err = -EPERM;
	if (p->signal->leader)
		goto out;

	if (pgid != pid) {
		struct task_struct *g =
			find_task_by_pid_type(PIDTYPE_PGID, pgid);

		if (!g || task_session(g) != task_session(group_leader))
			goto out;
	}

	err = security_task_setpgid(p, pgid);
	if (err)
		goto out;

	if (process_group(p) != pgid) {
		detach_pid(p, PIDTYPE_PGID);
		p->signal->pgrp = pgid;
		attach_pid(p, PIDTYPE_PGID, find_pid(pgid));
	}

	err = 0;
out:
	/* All paths lead to here, thus we are safe. -DaveM */
	write_unlock_irq(&tasklist_lock);
	return err;
}
asmlinkage long sys_getpgid(pid_t pid)
{
	if (!pid)
		return process_group(current);
	else {
		int retval;
		struct task_struct *p;

		read_lock(&tasklist_lock);
		p = find_task_by_pid(pid);

		retval = -ESRCH;
		if (p) {
			retval = security_task_getpgid(p);
			if (!retval)
				retval = process_group(p);
		}
		read_unlock(&tasklist_lock);
		return retval;
	}
}
#ifdef __ARCH_WANT_SYS_GETPGRP

asmlinkage long sys_getpgrp(void)
{
	/* SMP - assuming writes are word atomic this is fine */
	return process_group(current);
}

#endif
asmlinkage long sys_getsid(pid_t pid)
{
	if (!pid)
		return process_session(current);
	else {
		int retval;
		struct task_struct *p;

		read_lock(&tasklist_lock);
		p = find_task_by_pid(pid);

		retval = -ESRCH;
		if (p) {
			retval = security_task_getsid(p);
			if (!retval)
				retval = process_session(p);
		}
		read_unlock(&tasklist_lock);
		return retval;
	}
}
asmlinkage long sys_setsid(void)
{
	struct task_struct *group_leader = current->group_leader;
	pid_t session;
	int err = -EPERM;

	write_lock_irq(&tasklist_lock);

	/* Fail if I am already a session leader */
	if (group_leader->signal->leader)
		goto out;

	session = group_leader->pid;
	/* Fail if a process group id already exists that equals the
	 * proposed session id.
	 *
	 * Don't check if session id == 1 because kernel threads use this
	 * session id and so the check will always fail and make it so
	 * init cannot successfully call setsid.
	 */
	if (session > 1 && find_task_by_pid_type(PIDTYPE_PGID, session))
		goto out;

	group_leader->signal->leader = 1;
	__set_special_pids(session, session);

	spin_lock(&group_leader->sighand->siglock);
	group_leader->signal->tty = NULL;
	spin_unlock(&group_leader->sighand->siglock);

	err = process_group(group_leader);
out:
	write_unlock_irq(&tasklist_lock);
	return err;
}
/*
 * Supplementary group IDs
 */

/* init to 2 - one for init_task, one to ensure it is never freed */
struct group_info init_groups = { .usage = ATOMIC_INIT(2) };

struct group_info *groups_alloc(int gidsetsize)
{
	struct group_info *group_info;
	int nblocks;
	int i;

	nblocks = (gidsetsize + NGROUPS_PER_BLOCK - 1) / NGROUPS_PER_BLOCK;
	/* Make sure we always allocate at least one indirect block pointer */
	nblocks = nblocks ? : 1;
	group_info = kmalloc(sizeof(*group_info) + nblocks*sizeof(gid_t *), GFP_USER);
	if (!group_info)
		return NULL;
	group_info->ngroups = gidsetsize;
	group_info->nblocks = nblocks;
	atomic_set(&group_info->usage, 1);

	if (gidsetsize <= NGROUPS_SMALL)
		group_info->blocks[0] = group_info->small_block;
	else {
		for (i = 0; i < nblocks; i++) {
			gid_t *b;
			b = (void *)__get_free_page(GFP_USER);
			if (!b)
				goto out_undo_partial_alloc;
			group_info->blocks[i] = b;
		}
	}
	return group_info;

out_undo_partial_alloc:
	while (--i >= 0) {
		free_page((unsigned long)group_info->blocks[i]);
	}
	kfree(group_info);
	return NULL;
}

EXPORT_SYMBOL(groups_alloc);
void groups_free(struct group_info *group_info)
{
	if (group_info->blocks[0] != group_info->small_block) {
		int i;
		for (i = 0; i < group_info->nblocks; i++)
			free_page((unsigned long)group_info->blocks[i]);
	}
	kfree(group_info);
}

EXPORT_SYMBOL(groups_free);
/* export the group_info to a user-space array */
static int groups_to_user(gid_t __user *grouplist,
    struct group_info *group_info)
{
	int i;
	int count = group_info->ngroups;

	for (i = 0; i < group_info->nblocks; i++) {
		int cp_count = min(NGROUPS_PER_BLOCK, count);
		int off = i * NGROUPS_PER_BLOCK;
		int len = cp_count * sizeof(*grouplist);

		if (copy_to_user(grouplist+off, group_info->blocks[i], len))
			return -EFAULT;

		count -= cp_count;
	}
	return 0;
}
/* fill a group_info from a user-space array - it must be allocated already */
static int groups_from_user(struct group_info *group_info,
    gid_t __user *grouplist)
{
	int i;
	int count = group_info->ngroups;

	for (i = 0; i < group_info->nblocks; i++) {
		int cp_count = min(NGROUPS_PER_BLOCK, count);
		int off = i * NGROUPS_PER_BLOCK;
		int len = cp_count * sizeof(*grouplist);

		if (copy_from_user(group_info->blocks[i], grouplist+off, len))
			return -EFAULT;

		count -= cp_count;
	}
	return 0;
}
/* a simple Shell sort */
static void groups_sort(struct group_info *group_info)
{
	int base, max, stride;
	int gidsetsize = group_info->ngroups;

	for (stride = 1; stride < gidsetsize; stride = 3 * stride + 1)
		; /* nothing */
	stride /= 3;

	while (stride) {
		max = gidsetsize - stride;
		for (base = 0; base < max; base++) {
			int left = base;
			int right = left + stride;
			gid_t tmp = GROUP_AT(group_info, right);

			while (left >= 0 && GROUP_AT(group_info, left) > tmp) {
				GROUP_AT(group_info, right) =
				    GROUP_AT(group_info, left);
				right = left;
				left -= stride;
			}
			GROUP_AT(group_info, right) = tmp;
		}
		stride /= 3;
	}
}
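/*
 * Note on the gap sequence above (not part of the original file): the first
 * loop generates Knuth's increments 1, 4, 13, 40, ... via stride = 3*stride+1
 * and then backs off with stride /= 3. For gidsetsize == 10 it overshoots to
 * 13, so the passes run with stride 4 and finally stride 1, the last pass
 * being a plain insertion sort over a nearly sorted array.
 */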
/* a simple bsearch */
int groups_search(struct group_info *group_info, gid_t grp)
{
	unsigned int left, right;

	if (!group_info)
		return 0;

	left = 0;
	right = group_info->ngroups;
	while (left < right) {
		unsigned int mid = (left+right)/2;
		int cmp = grp - GROUP_AT(group_info, mid);
		if (cmp > 0)
			left = mid + 1;
		else if (cmp < 0)
			right = mid;
		else
			return 1;
	}
	return 0;
}
/* validate and set current->group_info */
int set_current_groups(struct group_info *group_info)
{
	int retval;
	struct group_info *old_info;

	retval = security_task_setgroups(group_info);
	if (retval)
		return retval;

	groups_sort(group_info);
	get_group_info(group_info);

	task_lock(current);
	old_info = current->group_info;
	current->group_info = group_info;
	task_unlock(current);

	put_group_info(old_info);

	return 0;
}

EXPORT_SYMBOL(set_current_groups);
asmlinkage long sys_getgroups(int gidsetsize, gid_t __user *grouplist)
{
	int i = 0;

	/*
	 *	SMP: Nobody else can change our grouplist. Thus we are
	 *	safe.
	 */

	if (gidsetsize < 0)
		return -EINVAL;

	/* no need to grab task_lock here; it cannot change */
	i = current->group_info->ngroups;
	if (gidsetsize) {
		if (i > gidsetsize) {
			i = -EINVAL;
			goto out;
		}
		if (groups_to_user(grouplist, current->group_info)) {
			i = -EFAULT;
			goto out;
		}
	}
out:
	return i;
}
/*
 *	SMP: Our groups are copy-on-write. We can set them safely
 *	without another task interfering.
 */

asmlinkage long sys_setgroups(int gidsetsize, gid_t __user *grouplist)
{
	struct group_info *group_info;
	int retval;

	if (!capable(CAP_SETGID))
		return -EPERM;
	if ((unsigned)gidsetsize > NGROUPS_MAX)
		return -EINVAL;

	group_info = groups_alloc(gidsetsize);
	if (!group_info)
		return -ENOMEM;
	retval = groups_from_user(group_info, grouplist);
	if (retval) {
		put_group_info(group_info);
		return retval;
	}

	retval = set_current_groups(group_info);
	put_group_info(group_info);

	return retval;
}
/*
 * Check whether we're fsgid/egid or in the supplemental group..
 */
int in_group_p(gid_t grp)
{
	int retval = 1;
	if (grp != current->fsgid)
		retval = groups_search(current->group_info, grp);
	return retval;
}

EXPORT_SYMBOL(in_group_p);

int in_egroup_p(gid_t grp)
{
	int retval = 1;
	if (grp != current->egid)
		retval = groups_search(current->group_info, grp);
	return retval;
}

EXPORT_SYMBOL(in_egroup_p);
DECLARE_RWSEM(uts_sem);

EXPORT_SYMBOL(uts_sem);

asmlinkage long sys_newuname(struct new_utsname __user * name)
{
	int errno = 0;

	down_read(&uts_sem);
	if (copy_to_user(name, utsname(), sizeof *name))
		errno = -EFAULT;
	up_read(&uts_sem);
	return errno;
}
asmlinkage long sys_sethostname(char __user *name, int len)
{
	int errno;
	char tmp[__NEW_UTS_LEN];

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (len < 0 || len > __NEW_UTS_LEN)
		return -EINVAL;
	down_write(&uts_sem);
	errno = -EFAULT;
	if (!copy_from_user(tmp, name, len)) {
		memcpy(utsname()->nodename, tmp, len);
		utsname()->nodename[len] = 0;
		errno = 0;
	}
	up_write(&uts_sem);
	return errno;
}
#ifdef __ARCH_WANT_SYS_GETHOSTNAME

asmlinkage long sys_gethostname(char __user *name, int len)
{
	int i, errno;

	if (len < 0)
		return -EINVAL;
	down_read(&uts_sem);
	i = 1 + strlen(utsname()->nodename);
	if (i > len)
		i = len;
	errno = 0;
	if (copy_to_user(name, utsname()->nodename, i))
		errno = -EFAULT;
	up_read(&uts_sem);
	return errno;
}

#endif
/*
 * Only setdomainname; getdomainname can be implemented by calling
 * uname()
 */
asmlinkage long sys_setdomainname(char __user *name, int len)
{
	int errno;
	char tmp[__NEW_UTS_LEN];

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (len < 0 || len > __NEW_UTS_LEN)
		return -EINVAL;

	down_write(&uts_sem);
	errno = -EFAULT;
	if (!copy_from_user(tmp, name, len)) {
		memcpy(utsname()->domainname, tmp, len);
		utsname()->domainname[len] = 0;
		errno = 0;
	}
	up_write(&uts_sem);
	return errno;
}
asmlinkage long sys_getrlimit(unsigned int resource, struct rlimit __user *rlim)
{
	if (resource >= RLIM_NLIMITS)
		return -EINVAL;
	else {
		struct rlimit value;
		task_lock(current->group_leader);
		value = current->signal->rlim[resource];
		task_unlock(current->group_leader);
		return copy_to_user(rlim, &value, sizeof(*rlim)) ? -EFAULT : 0;
	}
}
#ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT

/*
 *	Back compatibility for getrlimit. Needed for some apps.
 */

asmlinkage long sys_old_getrlimit(unsigned int resource, struct rlimit __user *rlim)
{
	struct rlimit x;
	if (resource >= RLIM_NLIMITS)
		return -EINVAL;

	task_lock(current->group_leader);
	x = current->signal->rlim[resource];
	task_unlock(current->group_leader);
	if (x.rlim_cur > 0x7FFFFFFF)
		x.rlim_cur = 0x7FFFFFFF;
	if (x.rlim_max > 0x7FFFFFFF)
		x.rlim_max = 0x7FFFFFFF;
	return copy_to_user(rlim, &x, sizeof(x))?-EFAULT:0;
}

#endif
asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim)
{
	struct rlimit new_rlim, *old_rlim;
	unsigned long it_prof_secs;
	int retval;

	if (resource >= RLIM_NLIMITS)
		return -EINVAL;
	if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
		return -EFAULT;
	if (new_rlim.rlim_cur > new_rlim.rlim_max)
		return -EINVAL;
	old_rlim = current->signal->rlim + resource;
	if ((new_rlim.rlim_max > old_rlim->rlim_max) &&
	    !capable(CAP_SYS_RESOURCE))
		return -EPERM;
	if (resource == RLIMIT_NOFILE && new_rlim.rlim_max > NR_OPEN)
		return -EPERM;

	retval = security_task_setrlimit(resource, &new_rlim);
	if (retval)
		return retval;

	if (resource == RLIMIT_CPU && new_rlim.rlim_cur == 0) {
		/*
		 * The caller is asking for an immediate RLIMIT_CPU
		 * expiry.  But we use the zero value to mean "it was
		 * never set".  So let's cheat and make it one second
		 * instead
		 */
		new_rlim.rlim_cur = 1;
	}

	task_lock(current->group_leader);
	*old_rlim = new_rlim;
	task_unlock(current->group_leader);

	if (resource != RLIMIT_CPU)
		goto out;

	/*
	 * RLIMIT_CPU handling.   Note that the kernel fails to return an error
	 * code if it rejected the user's attempt to set RLIMIT_CPU.  This is a
	 * very long-standing error, and fixing it now risks breakage of
	 * applications, so we live with it
	 */
	if (new_rlim.rlim_cur == RLIM_INFINITY)
		goto out;

	it_prof_secs = cputime_to_secs(current->signal->it_prof_expires);
	if (it_prof_secs == 0 || new_rlim.rlim_cur <= it_prof_secs) {
		unsigned long rlim_cur = new_rlim.rlim_cur;
		cputime_t cputime;

		cputime = secs_to_cputime(rlim_cur);
		read_lock(&tasklist_lock);
		spin_lock_irq(&current->sighand->siglock);
		set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL);
		spin_unlock_irq(&current->sighand->siglock);
		read_unlock(&tasklist_lock);
	}
out:
	return 0;
}
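/*
 * Illustrative sketch (not part of the original file): shrinking the soft
 * CPU limit from userspace, subject to the RLIMIT_CPU caveats above. The
 * units are seconds, and the soft value must not exceed the hard limit:
 *
 *	struct rlimit rl;
 *	if (getrlimit(RLIMIT_CPU, &rl) == 0) {
 *		rl.rlim_cur = 10;
 *		if (setrlimit(RLIMIT_CPU, &rl) < 0)
 *			perror("setrlimit");
 *	}
 */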
/*
 * It would make sense to put struct rusage in the task_struct,
 * except that would make the task_struct be *really big*.  After
 * task_struct gets moved into malloc'ed memory, it would
 * make sense to do this.  It will make moving the rest of the information
 * a lot simpler!  (Which we're not doing right now because we're not
 * measuring them yet).
 *
 * When sampling multiple threads for RUSAGE_SELF, under SMP we might have
 * races with threads incrementing their own counters.  But since word
 * reads are atomic, we either get new values or old values and we don't
 * care which for the sums.  We always take the siglock to protect reading
 * the c* fields from p->signal from races with exit.c updating those
 * fields when reaping, so a sample either gets all the additions of a
 * given child after it's reaped, or none so this sample is before reaping.
 *
 * Locking:
 * We need to take the siglock for CHILDREN, SELF and BOTH
 * for the cases current multithreaded, non-current single threaded
 * non-current multithreaded.  Thread traversal is now safe with
 * the siglock held.
 * Strictly speaking, we do not need to take the siglock if we are current and
 * single threaded, as no one else can take our signal_struct away, no one
 * else can reap the children to update signal->c* counters, and no one else
 * can race with the signal-> fields. If we do not take any lock, the
 * signal-> fields could be read out of order while another thread was just
 * exiting. So we should place a read memory barrier when we avoid the lock.
 * On the writer side, write memory barrier is implied in __exit_signal
 * as __exit_signal releases the siglock spinlock after updating the signal->
 * fields. But we don't do this yet to keep things simple.
 */
static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
{
	struct task_struct *t;
	unsigned long flags;
	cputime_t utime, stime;

	memset((char *) r, 0, sizeof *r);
	utime = stime = cputime_zero;

	rcu_read_lock();
	if (!lock_task_sighand(p, &flags)) {
		rcu_read_unlock();
		return;
	}

	switch (who) {
		case RUSAGE_BOTH:
		case RUSAGE_CHILDREN:
			utime = p->signal->cutime;
			stime = p->signal->cstime;
			r->ru_nvcsw = p->signal->cnvcsw;
			r->ru_nivcsw = p->signal->cnivcsw;
			r->ru_minflt = p->signal->cmin_flt;
			r->ru_majflt = p->signal->cmaj_flt;
			r->ru_inblock = p->signal->cinblock;
			r->ru_oublock = p->signal->coublock;

			if (who == RUSAGE_CHILDREN)
				break;

		case RUSAGE_SELF:
			utime = cputime_add(utime, p->signal->utime);
			stime = cputime_add(stime, p->signal->stime);
			r->ru_nvcsw += p->signal->nvcsw;
			r->ru_nivcsw += p->signal->nivcsw;
			r->ru_minflt += p->signal->min_flt;
			r->ru_majflt += p->signal->maj_flt;
			r->ru_inblock += p->signal->inblock;
			r->ru_oublock += p->signal->oublock;
			t = p;
			do {
				utime = cputime_add(utime, t->utime);
				stime = cputime_add(stime, t->stime);
				r->ru_nvcsw += t->nvcsw;
				r->ru_nivcsw += t->nivcsw;
				r->ru_minflt += t->min_flt;
				r->ru_majflt += t->maj_flt;
				r->ru_inblock += task_io_get_inblock(t);
				r->ru_oublock += task_io_get_oublock(t);
				t = next_thread(t);
			} while (t != p);
			break;

		default:
			BUG();
	}

	unlock_task_sighand(p, &flags);
	rcu_read_unlock();

	cputime_to_timeval(utime, &r->ru_utime);
	cputime_to_timeval(stime, &r->ru_stime);
}
int getrusage(struct task_struct *p, int who, struct rusage __user *ru)
{
	struct rusage r;
	k_getrusage(p, who, &r);
	return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;
}

asmlinkage long sys_getrusage(int who, struct rusage __user *ru)
{
	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN)
		return -EINVAL;
	return getrusage(current, who, ru);
}
asmlinkage long sys_umask(int mask)
{
	mask = xchg(&current->fs->umask, mask & S_IRWXUGO);
	return mask;
}
asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
			  unsigned long arg4, unsigned long arg5)
{
	long error;

	error = security_task_prctl(option, arg2, arg3, arg4, arg5);
	if (error)
		return error;

	switch (option) {
		case PR_SET_PDEATHSIG:
			if (!valid_signal(arg2)) {
				error = -EINVAL;
				break;
			}
			current->pdeath_signal = arg2;
			break;
		case PR_GET_PDEATHSIG:
			error = put_user(current->pdeath_signal, (int __user *)arg2);
			break;
		case PR_GET_DUMPABLE:
			error = current->mm->dumpable;
			break;
		case PR_SET_DUMPABLE:
			if (arg2 < 0 || arg2 > 1) {
				error = -EINVAL;
				break;
			}
			current->mm->dumpable = arg2;
			break;

		case PR_SET_UNALIGN:
			error = SET_UNALIGN_CTL(current, arg2);
			break;
		case PR_GET_UNALIGN:
			error = GET_UNALIGN_CTL(current, arg2);
			break;
		case PR_SET_FPEMU:
			error = SET_FPEMU_CTL(current, arg2);
			break;
		case PR_GET_FPEMU:
			error = GET_FPEMU_CTL(current, arg2);
			break;
		case PR_SET_FPEXC:
			error = SET_FPEXC_CTL(current, arg2);
			break;
		case PR_GET_FPEXC:
			error = GET_FPEXC_CTL(current, arg2);
			break;
		case PR_GET_TIMING:
			error = PR_TIMING_STATISTICAL;
			break;
		case PR_SET_TIMING:
			if (arg2 == PR_TIMING_STATISTICAL)
				error = 0;
			else
				error = -EINVAL;
			break;

		case PR_GET_KEEPCAPS:
			if (current->keep_capabilities)
				error = 1;
			break;
		case PR_SET_KEEPCAPS:
			if (arg2 != 0 && arg2 != 1) {
				error = -EINVAL;
				break;
			}
			current->keep_capabilities = arg2;
			break;
		case PR_SET_NAME: {
			struct task_struct *me = current;
			unsigned char ncomm[sizeof(me->comm)];

			ncomm[sizeof(me->comm)-1] = 0;
			if (strncpy_from_user(ncomm, (char __user *)arg2,
						sizeof(me->comm)-1) < 0)
				return -EFAULT;
			set_task_comm(me, ncomm);
			return 0;
		}
		case PR_GET_NAME: {
			struct task_struct *me = current;
			unsigned char tcomm[sizeof(me->comm)];

			get_task_comm(tcomm, me);
			if (copy_to_user((char __user *)arg2, tcomm, sizeof(tcomm)))
				return -EFAULT;
			return 0;
		}
		case PR_GET_ENDIAN:
			error = GET_ENDIAN(current, arg2);
			break;
		case PR_SET_ENDIAN:
			error = SET_ENDIAN(current, arg2);
			break;

		case PR_GET_SECCOMP:
			error = prctl_get_seccomp();
			break;
		case PR_SET_SECCOMP:
			error = prctl_set_seccomp(arg2);
			break;

		default:
			error = -EINVAL;
			break;
	}
	return error;
}
asmlinkage long sys_getcpu(unsigned __user *cpup, unsigned __user *nodep,
			   struct getcpu_cache __user *cache)
{
	int err = 0;
	int cpu = raw_smp_processor_id();
	if (cpup)
		err |= put_user(cpu, cpup);
	if (nodep)
		err |= put_user(cpu_to_node(cpu), nodep);
	if (cache) {
		/*
		 * The cache is not needed for this implementation,
		 * but make sure user programs pass something
		 * valid. vsyscall implementations can instead make
		 * good use of the cache. Only use t0 and t1 because
		 * these are available in both 32bit and 64bit ABI (no
		 * need for a compat_getcpu). 32bit has enough
		 * padding
		 */
		unsigned long t0, t1;
		get_user(t0, &cache->blob[0]);
		get_user(t1, &cache->blob[1]);
		t0++;
		t1++;
		put_user(t0, &cache->blob[0]);
		put_user(t1, &cache->blob[1]);
	}
	return err ? -EFAULT : 0;
}