/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */
13 #include <linux/slab.h>
14 #include <linux/module.h>
15 #include <linux/init.h>
16 #include <linux/sched.h>
18 #include <linux/tty.h>
19 #include <linux/binfmts.h>
20 #include <linux/security.h>
21 #include <linux/syscalls.h>
22 #include <linux/ptrace.h>
23 #include <linux/signal.h>
24 #include <linux/signalfd.h>
25 #include <linux/tracehook.h>
26 #include <linux/capability.h>
27 #include <linux/freezer.h>
28 #include <linux/pid_namespace.h>
29 #include <linux/nsproxy.h>
31 #include <asm/param.h>
32 #include <asm/uaccess.h>
33 #include <asm/unistd.h>
34 #include <asm/siginfo.h>
35 #include "audit.h" /* audit_signal_info() */
/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;
43 static void __user
*sig_handler(struct task_struct
*t
, int sig
)
45 return t
->sighand
->action
[sig
- 1].sa
.sa_handler
;
48 static int sig_handler_ignored(void __user
*handler
, int sig
)
50 /* Is it explicitly or implicitly ignored? */
51 return handler
== SIG_IGN
||
52 (handler
== SIG_DFL
&& sig_kernel_ignore(sig
));
55 static int sig_ignored(struct task_struct
*t
, int sig
)
60 * Blocked signals are never ignored, since the
61 * signal handler may change by the time it is
64 if (sigismember(&t
->blocked
, sig
) || sigismember(&t
->real_blocked
, sig
))
67 handler
= sig_handler(t
, sig
);
68 if (!sig_handler_ignored(handler
, sig
))
72 * Tracers may want to know about even ignored signals.
74 return !tracehook_consider_ignored_signal(t
, sig
, handler
);
78 * Re-calculate pending state from the set of locally pending
79 * signals, globally pending signals, and blocked signals.
81 static inline int has_pending_signals(sigset_t
*signal
, sigset_t
*blocked
)
86 switch (_NSIG_WORDS
) {
88 for (i
= _NSIG_WORDS
, ready
= 0; --i
>= 0 ;)
89 ready
|= signal
->sig
[i
] &~ blocked
->sig
[i
];
92 case 4: ready
= signal
->sig
[3] &~ blocked
->sig
[3];
93 ready
|= signal
->sig
[2] &~ blocked
->sig
[2];
94 ready
|= signal
->sig
[1] &~ blocked
->sig
[1];
95 ready
|= signal
->sig
[0] &~ blocked
->sig
[0];
98 case 2: ready
= signal
->sig
[1] &~ blocked
->sig
[1];
99 ready
|= signal
->sig
[0] &~ blocked
->sig
[0];
102 case 1: ready
= signal
->sig
[0] &~ blocked
->sig
[0];
107 #define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
109 static int recalc_sigpending_tsk(struct task_struct
*t
)
111 if (t
->signal
->group_stop_count
> 0 ||
112 PENDING(&t
->pending
, &t
->blocked
) ||
113 PENDING(&t
->signal
->shared_pending
, &t
->blocked
)) {
114 set_tsk_thread_flag(t
, TIF_SIGPENDING
);
118 * We must never clear the flag in another thread, or in current
119 * when it's possible the current syscall is returning -ERESTART*.
120 * So we don't clear it here, and only callers who know they should do.
/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current, the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}
135 void recalc_sigpending(void)
137 if (unlikely(tracehook_force_sigpending()))
138 set_thread_flag(TIF_SIGPENDING
);
139 else if (!recalc_sigpending_tsk(current
) && !freezing(current
))
140 clear_thread_flag(TIF_SIGPENDING
);
144 /* Given the mask, find the first available signal that should be serviced. */
146 int next_signal(struct sigpending
*pending
, sigset_t
*mask
)
148 unsigned long i
, *s
, *m
, x
;
151 s
= pending
->signal
.sig
;
153 switch (_NSIG_WORDS
) {
155 for (i
= 0; i
< _NSIG_WORDS
; ++i
, ++s
, ++m
)
156 if ((x
= *s
&~ *m
) != 0) {
157 sig
= ffz(~x
) + i
*_NSIG_BPW
+ 1;
162 case 2: if ((x
= s
[0] &~ m
[0]) != 0)
164 else if ((x
= s
[1] &~ m
[1]) != 0)
171 case 1: if ((x
= *s
&~ *m
) != 0)
179 static struct sigqueue
*__sigqueue_alloc(struct task_struct
*t
, gfp_t flags
,
182 struct sigqueue
*q
= NULL
;
183 struct user_struct
*user
;
186 * In order to avoid problems with "switch_user()", we want to make
187 * sure that the compiler doesn't re-load "t->user"
191 atomic_inc(&user
->sigpending
);
192 if (override_rlimit
||
193 atomic_read(&user
->sigpending
) <=
194 t
->signal
->rlim
[RLIMIT_SIGPENDING
].rlim_cur
)
195 q
= kmem_cache_alloc(sigqueue_cachep
, flags
);
196 if (unlikely(q
== NULL
)) {
197 atomic_dec(&user
->sigpending
);
199 INIT_LIST_HEAD(&q
->list
);
201 q
->user
= get_uid(user
);
206 static void __sigqueue_free(struct sigqueue
*q
)
208 if (q
->flags
& SIGQUEUE_PREALLOC
)
210 atomic_dec(&q
->user
->sigpending
);
212 kmem_cache_free(sigqueue_cachep
, q
);
215 void flush_sigqueue(struct sigpending
*queue
)
219 sigemptyset(&queue
->signal
);
220 while (!list_empty(&queue
->list
)) {
221 q
= list_entry(queue
->list
.next
, struct sigqueue
, list
);
222 list_del_init(&q
->list
);
228 * Flush all pending signals for a task.
230 void flush_signals(struct task_struct
*t
)
234 spin_lock_irqsave(&t
->sighand
->siglock
, flags
);
235 clear_tsk_thread_flag(t
, TIF_SIGPENDING
);
236 flush_sigqueue(&t
->pending
);
237 flush_sigqueue(&t
->signal
->shared_pending
);
238 spin_unlock_irqrestore(&t
->sighand
->siglock
, flags
);
241 static void __flush_itimer_signals(struct sigpending
*pending
)
243 sigset_t signal
, retain
;
244 struct sigqueue
*q
, *n
;
246 signal
= pending
->signal
;
247 sigemptyset(&retain
);
249 list_for_each_entry_safe(q
, n
, &pending
->list
, list
) {
250 int sig
= q
->info
.si_signo
;
252 if (likely(q
->info
.si_code
!= SI_TIMER
)) {
253 sigaddset(&retain
, sig
);
255 sigdelset(&signal
, sig
);
256 list_del_init(&q
->list
);
261 sigorsets(&pending
->signal
, &signal
, &retain
);
264 void flush_itimer_signals(void)
266 struct task_struct
*tsk
= current
;
269 spin_lock_irqsave(&tsk
->sighand
->siglock
, flags
);
270 __flush_itimer_signals(&tsk
->pending
);
271 __flush_itimer_signals(&tsk
->signal
->shared_pending
);
272 spin_unlock_irqrestore(&tsk
->sighand
->siglock
, flags
);
275 void ignore_signals(struct task_struct
*t
)
279 for (i
= 0; i
< _NSIG
; ++i
)
280 t
->sighand
->action
[i
].sa
.sa_handler
= SIG_IGN
;
286 * Flush all handlers for a task.
290 flush_signal_handlers(struct task_struct
*t
, int force_default
)
293 struct k_sigaction
*ka
= &t
->sighand
->action
[0];
294 for (i
= _NSIG
; i
!= 0 ; i
--) {
295 if (force_default
|| ka
->sa
.sa_handler
!= SIG_IGN
)
296 ka
->sa
.sa_handler
= SIG_DFL
;
298 sigemptyset(&ka
->sa
.sa_mask
);
303 int unhandled_signal(struct task_struct
*tsk
, int sig
)
305 void __user
*handler
= tsk
->sighand
->action
[sig
-1].sa
.sa_handler
;
306 if (is_global_init(tsk
))
308 if (handler
!= SIG_IGN
&& handler
!= SIG_DFL
)
310 return !tracehook_consider_fatal_signal(tsk
, sig
, handler
);
314 /* Notify the system that a driver wants to block all signals for this
315 * process, and wants to be notified if any signals at all were to be
316 * sent/acted upon. If the notifier routine returns non-zero, then the
317 * signal will be acted upon after all. If the notifier routine returns 0,
318 * then then signal will be blocked. Only one block per process is
319 * allowed. priv is a pointer to private data that the notifier routine
320 * can use to determine if the signal should be blocked or not. */
323 block_all_signals(int (*notifier
)(void *priv
), void *priv
, sigset_t
*mask
)
327 spin_lock_irqsave(¤t
->sighand
->siglock
, flags
);
328 current
->notifier_mask
= mask
;
329 current
->notifier_data
= priv
;
330 current
->notifier
= notifier
;
331 spin_unlock_irqrestore(¤t
->sighand
->siglock
, flags
);
334 /* Notify the system that blocking has ended. */
337 unblock_all_signals(void)
341 spin_lock_irqsave(¤t
->sighand
->siglock
, flags
);
342 current
->notifier
= NULL
;
343 current
->notifier_data
= NULL
;
345 spin_unlock_irqrestore(¤t
->sighand
->siglock
, flags
);
348 static void collect_signal(int sig
, struct sigpending
*list
, siginfo_t
*info
)
350 struct sigqueue
*q
, *first
= NULL
;
353 * Collect the siginfo appropriate to this signal. Check if
354 * there is another siginfo for the same signal.
356 list_for_each_entry(q
, &list
->list
, list
) {
357 if (q
->info
.si_signo
== sig
) {
364 sigdelset(&list
->signal
, sig
);
368 list_del_init(&first
->list
);
369 copy_siginfo(info
, &first
->info
);
370 __sigqueue_free(first
);
372 /* Ok, it wasn't in the queue. This must be
373 a fast-pathed signal or we must have been
374 out of queue space. So zero out the info.
376 info
->si_signo
= sig
;
384 static int __dequeue_signal(struct sigpending
*pending
, sigset_t
*mask
,
387 int sig
= next_signal(pending
, mask
);
390 if (current
->notifier
) {
391 if (sigismember(current
->notifier_mask
, sig
)) {
392 if (!(current
->notifier
)(current
->notifier_data
)) {
393 clear_thread_flag(TIF_SIGPENDING
);
399 collect_signal(sig
, pending
, info
);
406 * Dequeue a signal and return the element to the caller, which is
407 * expected to free it.
409 * All callers have to hold the siglock.
411 int dequeue_signal(struct task_struct
*tsk
, sigset_t
*mask
, siginfo_t
*info
)
415 /* We only dequeue private signals from ourselves, we don't let
416 * signalfd steal them
418 signr
= __dequeue_signal(&tsk
->pending
, mask
, info
);
420 signr
= __dequeue_signal(&tsk
->signal
->shared_pending
,
425 * itimers are process shared and we restart periodic
426 * itimers in the signal delivery path to prevent DoS
427 * attacks in the high resolution timer case. This is
428 * compliant with the old way of self restarting
429 * itimers, as the SIGALRM is a legacy signal and only
430 * queued once. Changing the restart behaviour to
431 * restart the timer in the signal dequeue path is
432 * reducing the timer noise on heavy loaded !highres
435 if (unlikely(signr
== SIGALRM
)) {
436 struct hrtimer
*tmr
= &tsk
->signal
->real_timer
;
438 if (!hrtimer_is_queued(tmr
) &&
439 tsk
->signal
->it_real_incr
.tv64
!= 0) {
440 hrtimer_forward(tmr
, tmr
->base
->get_time(),
441 tsk
->signal
->it_real_incr
);
442 hrtimer_restart(tmr
);
451 if (unlikely(sig_kernel_stop(signr
))) {
453 * Set a marker that we have dequeued a stop signal. Our
454 * caller might release the siglock and then the pending
455 * stop signal it is about to process is no longer in the
456 * pending bitmasks, but must still be cleared by a SIGCONT
457 * (and overruled by a SIGKILL). So those cases clear this
458 * shared flag after we've set it. Note that this flag may
459 * remain set after the signal we return is ignored or
460 * handled. That doesn't matter because its only purpose
461 * is to alert stop-signal processing code when another
462 * processor has come along and cleared the flag.
464 tsk
->signal
->flags
|= SIGNAL_STOP_DEQUEUED
;
466 if ((info
->si_code
& __SI_MASK
) == __SI_TIMER
&& info
->si_sys_private
) {
468 * Release the siglock to ensure proper locking order
469 * of timer locks outside of siglocks. Note, we leave
470 * irqs disabled here, since the posix-timers code is
471 * about to disable them again anyway.
473 spin_unlock(&tsk
->sighand
->siglock
);
474 do_schedule_next_timer(info
);
475 spin_lock(&tsk
->sighand
->siglock
);
481 * Tell a process that it has a new active signal..
483 * NOTE! we rely on the previous spin_lock to
484 * lock interrupts for us! We can only be called with
485 * "siglock" held, and the local interrupt must
486 * have been disabled when that got acquired!
488 * No need to set need_resched since signal event passing
489 * goes through ->blocked
491 void signal_wake_up(struct task_struct
*t
, int resume
)
495 set_tsk_thread_flag(t
, TIF_SIGPENDING
);
498 * For SIGKILL, we want to wake it up in the stopped/traced/killable
499 * case. We don't check t->state here because there is a race with it
500 * executing another processor and just now entering stopped state.
501 * By using wake_up_state, we ensure the process will wake up and
502 * handle its death signal.
504 mask
= TASK_INTERRUPTIBLE
;
506 mask
|= TASK_WAKEKILL
;
507 if (!wake_up_state(t
, mask
))
512 * Remove signals in mask from the pending set and queue.
513 * Returns 1 if any signals were found.
515 * All callers must be holding the siglock.
517 * This version takes a sigset mask and looks at all signals,
518 * not just those in the first mask word.
520 static int rm_from_queue_full(sigset_t
*mask
, struct sigpending
*s
)
522 struct sigqueue
*q
, *n
;
525 sigandsets(&m
, mask
, &s
->signal
);
526 if (sigisemptyset(&m
))
529 signandsets(&s
->signal
, &s
->signal
, mask
);
530 list_for_each_entry_safe(q
, n
, &s
->list
, list
) {
531 if (sigismember(mask
, q
->info
.si_signo
)) {
532 list_del_init(&q
->list
);
539 * Remove signals in mask from the pending set and queue.
540 * Returns 1 if any signals were found.
542 * All callers must be holding the siglock.
544 static int rm_from_queue(unsigned long mask
, struct sigpending
*s
)
546 struct sigqueue
*q
, *n
;
548 if (!sigtestsetmask(&s
->signal
, mask
))
551 sigdelsetmask(&s
->signal
, mask
);
552 list_for_each_entry_safe(q
, n
, &s
->list
, list
) {
553 if (q
->info
.si_signo
< SIGRTMIN
&&
554 (mask
& sigmask(q
->info
.si_signo
))) {
555 list_del_init(&q
->list
);
563 * Bad permissions for sending the signal
565 static int check_kill_permission(int sig
, struct siginfo
*info
,
566 struct task_struct
*t
)
571 if (!valid_signal(sig
))
574 if (info
!= SEND_SIG_NOINFO
&& (is_si_special(info
) || SI_FROMKERNEL(info
)))
577 error
= audit_signal_info(sig
, t
); /* Let audit system see the signal */
581 if ((current
->euid
^ t
->suid
) && (current
->euid
^ t
->uid
) &&
582 (current
->uid
^ t
->suid
) && (current
->uid
^ t
->uid
) &&
583 !capable(CAP_KILL
)) {
586 sid
= task_session(t
);
588 * We don't return the error if sid == NULL. The
589 * task was unhashed, the caller must notice this.
591 if (!sid
|| sid
== task_session(current
))
598 return security_task_kill(t
, info
, sig
, 0);
602 * Handle magic process-wide effects of stop/continue signals. Unlike
603 * the signal actions, these happen immediately at signal-generation
604 * time regardless of blocking, ignoring, or handling. This does the
605 * actual continuing for SIGCONT, but not the actual stopping for stop
606 * signals. The process stop is done as a signal action for SIG_DFL.
608 * Returns true if the signal should be actually delivered, otherwise
609 * it should be dropped.
611 static int prepare_signal(int sig
, struct task_struct
*p
)
613 struct signal_struct
*signal
= p
->signal
;
614 struct task_struct
*t
;
616 if (unlikely(signal
->flags
& SIGNAL_GROUP_EXIT
)) {
618 * The process is in the middle of dying, nothing to do.
620 } else if (sig_kernel_stop(sig
)) {
622 * This is a stop signal. Remove SIGCONT from all queues.
624 rm_from_queue(sigmask(SIGCONT
), &signal
->shared_pending
);
627 rm_from_queue(sigmask(SIGCONT
), &t
->pending
);
628 } while_each_thread(p
, t
);
629 } else if (sig
== SIGCONT
) {
632 * Remove all stop signals from all queues,
633 * and wake all threads.
635 rm_from_queue(SIG_KERNEL_STOP_MASK
, &signal
->shared_pending
);
639 rm_from_queue(SIG_KERNEL_STOP_MASK
, &t
->pending
);
641 * If there is a handler for SIGCONT, we must make
642 * sure that no thread returns to user mode before
643 * we post the signal, in case it was the only
644 * thread eligible to run the signal handler--then
645 * it must not do anything between resuming and
646 * running the handler. With the TIF_SIGPENDING
647 * flag set, the thread will pause and acquire the
648 * siglock that we hold now and until we've queued
649 * the pending signal.
651 * Wake up the stopped thread _after_ setting
654 state
= __TASK_STOPPED
;
655 if (sig_user_defined(t
, SIGCONT
) && !sigismember(&t
->blocked
, SIGCONT
)) {
656 set_tsk_thread_flag(t
, TIF_SIGPENDING
);
657 state
|= TASK_INTERRUPTIBLE
;
659 wake_up_state(t
, state
);
660 } while_each_thread(p
, t
);
663 * Notify the parent with CLD_CONTINUED if we were stopped.
665 * If we were in the middle of a group stop, we pretend it
666 * was already finished, and then continued. Since SIGCHLD
667 * doesn't queue we report only CLD_STOPPED, as if the next
668 * CLD_CONTINUED was dropped.
671 if (signal
->flags
& SIGNAL_STOP_STOPPED
)
672 why
|= SIGNAL_CLD_CONTINUED
;
673 else if (signal
->group_stop_count
)
674 why
|= SIGNAL_CLD_STOPPED
;
678 * The first thread which returns from finish_stop()
679 * will take ->siglock, notice SIGNAL_CLD_MASK, and
680 * notify its parent. See get_signal_to_deliver().
682 signal
->flags
= why
| SIGNAL_STOP_CONTINUED
;
683 signal
->group_stop_count
= 0;
684 signal
->group_exit_code
= 0;
687 * We are not stopped, but there could be a stop
688 * signal in the middle of being processed after
689 * being removed from the queue. Clear that too.
691 signal
->flags
&= ~SIGNAL_STOP_DEQUEUED
;
695 return !sig_ignored(p
, sig
);
699 * Test if P wants to take SIG. After we've checked all threads with this,
700 * it's equivalent to finding no threads not blocking SIG. Any threads not
701 * blocking SIG were ruled out because they are not running and already
702 * have pending signals. Such threads will dequeue from the shared queue
703 * as soon as they're available, so putting the signal on the shared queue
704 * will be equivalent to sending it to one such thread.
706 static inline int wants_signal(int sig
, struct task_struct
*p
)
708 if (sigismember(&p
->blocked
, sig
))
710 if (p
->flags
& PF_EXITING
)
714 if (task_is_stopped_or_traced(p
))
716 return task_curr(p
) || !signal_pending(p
);
719 static void complete_signal(int sig
, struct task_struct
*p
, int group
)
721 struct signal_struct
*signal
= p
->signal
;
722 struct task_struct
*t
;
725 * Now find a thread we can wake up to take the signal off the queue.
727 * If the main thread wants the signal, it gets first crack.
728 * Probably the least surprising to the average bear.
730 if (wants_signal(sig
, p
))
732 else if (!group
|| thread_group_empty(p
))
734 * There is just one thread and it does not need to be woken.
735 * It will dequeue unblocked signals before it runs again.
740 * Otherwise try to find a suitable thread.
742 t
= signal
->curr_target
;
743 while (!wants_signal(sig
, t
)) {
745 if (t
== signal
->curr_target
)
747 * No thread needs to be woken.
748 * Any eligible threads will see
749 * the signal in the queue soon.
753 signal
->curr_target
= t
;
757 * Found a killable thread. If the signal will be fatal,
758 * then start taking the whole group down immediately.
760 if (sig_fatal(p
, sig
) &&
761 !(signal
->flags
& (SIGNAL_UNKILLABLE
| SIGNAL_GROUP_EXIT
)) &&
762 !sigismember(&t
->real_blocked
, sig
) &&
764 !tracehook_consider_fatal_signal(t
, sig
, SIG_DFL
))) {
766 * This signal will be fatal to the whole group.
768 if (!sig_kernel_coredump(sig
)) {
770 * Start a group exit and wake everybody up.
771 * This way we don't have other threads
772 * running and doing things after a slower
773 * thread has the fatal signal pending.
775 signal
->flags
= SIGNAL_GROUP_EXIT
;
776 signal
->group_exit_code
= sig
;
777 signal
->group_stop_count
= 0;
780 sigaddset(&t
->pending
.signal
, SIGKILL
);
781 signal_wake_up(t
, 1);
782 } while_each_thread(p
, t
);
788 * The signal is already in the shared-pending queue.
789 * Tell the chosen thread to wake up and dequeue it.
791 signal_wake_up(t
, sig
== SIGKILL
);
795 static inline int legacy_queue(struct sigpending
*signals
, int sig
)
797 return (sig
< SIGRTMIN
) && sigismember(&signals
->signal
, sig
);
800 static int send_signal(int sig
, struct siginfo
*info
, struct task_struct
*t
,
803 struct sigpending
*pending
;
806 assert_spin_locked(&t
->sighand
->siglock
);
807 if (!prepare_signal(sig
, t
))
810 pending
= group
? &t
->signal
->shared_pending
: &t
->pending
;
812 * Short-circuit ignored signals and support queuing
813 * exactly one non-rt signal, so that we can get more
814 * detailed information about the cause of the signal.
816 if (legacy_queue(pending
, sig
))
819 * fast-pathed signals for kernel-internal things like SIGSTOP
822 if (info
== SEND_SIG_FORCED
)
825 /* Real-time signals must be queued if sent by sigqueue, or
826 some other real-time mechanism. It is implementation
827 defined whether kill() does so. We attempt to do so, on
828 the principle of least surprise, but since kill is not
829 allowed to fail with EAGAIN when low on memory we just
830 make sure at least one signal gets delivered and don't
831 pass on the info struct. */
833 q
= __sigqueue_alloc(t
, GFP_ATOMIC
, (sig
< SIGRTMIN
&&
834 (is_si_special(info
) ||
835 info
->si_code
>= 0)));
837 list_add_tail(&q
->list
, &pending
->list
);
838 switch ((unsigned long) info
) {
839 case (unsigned long) SEND_SIG_NOINFO
:
840 q
->info
.si_signo
= sig
;
841 q
->info
.si_errno
= 0;
842 q
->info
.si_code
= SI_USER
;
843 q
->info
.si_pid
= task_pid_vnr(current
);
844 q
->info
.si_uid
= current
->uid
;
846 case (unsigned long) SEND_SIG_PRIV
:
847 q
->info
.si_signo
= sig
;
848 q
->info
.si_errno
= 0;
849 q
->info
.si_code
= SI_KERNEL
;
854 copy_siginfo(&q
->info
, info
);
857 } else if (!is_si_special(info
)) {
858 if (sig
>= SIGRTMIN
&& info
->si_code
!= SI_USER
)
860 * Queue overflow, abort. We may abort if the signal was rt
861 * and sent by user using something other than kill().
867 signalfd_notify(t
, sig
);
868 sigaddset(&pending
->signal
, sig
);
869 complete_signal(sig
, t
, group
);
873 int print_fatal_signals
;
875 static void print_fatal_signal(struct pt_regs
*regs
, int signr
)
877 printk("%s/%d: potentially unexpected fatal signal %d.\n",
878 current
->comm
, task_pid_nr(current
), signr
);
880 #if defined(__i386__) && !defined(__arch_um__)
881 printk("code at %08lx: ", regs
->ip
);
884 for (i
= 0; i
< 16; i
++) {
887 if (get_user(insn
, (unsigned char *)(regs
->ip
+ i
)))
889 printk("%02x ", insn
);
897 static int __init
setup_print_fatal_signals(char *str
)
899 get_option (&str
, &print_fatal_signals
);
904 __setup("print-fatal-signals=", setup_print_fatal_signals
);
/* Send @sig to the whole thread group of @p; siglock must be held. */
int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, 1);
}
/* Send @sig to the single task @t only; siglock must be held. */
static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	return send_signal(sig, info, t, 0);
}
919 * Force a signal that the process can't ignore: if necessary
920 * we unblock the signal and change any SIG_IGN to SIG_DFL.
922 * Note: If we unblock the signal, we always reset it to SIG_DFL,
923 * since we do not want to have a signal handler that was blocked
924 * be invoked when user space had explicitly blocked it.
926 * We don't want to have recursive SIGSEGV's etc, for example,
927 * that is why we also clear SIGNAL_UNKILLABLE.
930 force_sig_info(int sig
, struct siginfo
*info
, struct task_struct
*t
)
932 unsigned long int flags
;
933 int ret
, blocked
, ignored
;
934 struct k_sigaction
*action
;
936 spin_lock_irqsave(&t
->sighand
->siglock
, flags
);
937 action
= &t
->sighand
->action
[sig
-1];
938 ignored
= action
->sa
.sa_handler
== SIG_IGN
;
939 blocked
= sigismember(&t
->blocked
, sig
);
940 if (blocked
|| ignored
) {
941 action
->sa
.sa_handler
= SIG_DFL
;
943 sigdelset(&t
->blocked
, sig
);
944 recalc_sigpending_and_wake(t
);
947 if (action
->sa
.sa_handler
== SIG_DFL
)
948 t
->signal
->flags
&= ~SIGNAL_UNKILLABLE
;
949 ret
= specific_send_sig_info(sig
, info
, t
);
950 spin_unlock_irqrestore(&t
->sighand
->siglock
, flags
);
956 force_sig_specific(int sig
, struct task_struct
*t
)
958 force_sig_info(sig
, SEND_SIG_FORCED
, t
);
962 * Nuke all other threads in the group.
964 void zap_other_threads(struct task_struct
*p
)
966 struct task_struct
*t
;
968 p
->signal
->group_stop_count
= 0;
970 for (t
= next_thread(p
); t
!= p
; t
= next_thread(t
)) {
972 * Don't bother with already dead threads
977 /* SIGKILL will be handled before any pending SIGSTOP */
978 sigaddset(&t
->pending
.signal
, SIGKILL
);
979 signal_wake_up(t
, 1);
983 int __fatal_signal_pending(struct task_struct
*tsk
)
985 return sigismember(&tsk
->pending
.signal
, SIGKILL
);
987 EXPORT_SYMBOL(__fatal_signal_pending
);
989 struct sighand_struct
*lock_task_sighand(struct task_struct
*tsk
, unsigned long *flags
)
991 struct sighand_struct
*sighand
;
995 sighand
= rcu_dereference(tsk
->sighand
);
996 if (unlikely(sighand
== NULL
))
999 spin_lock_irqsave(&sighand
->siglock
, *flags
);
1000 if (likely(sighand
== tsk
->sighand
))
1002 spin_unlock_irqrestore(&sighand
->siglock
, *flags
);
1009 int group_send_sig_info(int sig
, struct siginfo
*info
, struct task_struct
*p
)
1011 unsigned long flags
;
1014 ret
= check_kill_permission(sig
, info
, p
);
1018 if (lock_task_sighand(p
, &flags
)) {
1019 ret
= __group_send_sig_info(sig
, info
, p
);
1020 unlock_task_sighand(p
, &flags
);
1028 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
1029 * control characters do (^C, ^Z etc)
1032 int __kill_pgrp_info(int sig
, struct siginfo
*info
, struct pid
*pgrp
)
1034 struct task_struct
*p
= NULL
;
1035 int retval
, success
;
1039 do_each_pid_task(pgrp
, PIDTYPE_PGID
, p
) {
1040 int err
= group_send_sig_info(sig
, info
, p
);
1043 } while_each_pid_task(pgrp
, PIDTYPE_PGID
, p
);
1044 return success
? 0 : retval
;
1047 int kill_pid_info(int sig
, struct siginfo
*info
, struct pid
*pid
)
1050 struct task_struct
*p
;
1054 p
= pid_task(pid
, PIDTYPE_PID
);
1056 error
= group_send_sig_info(sig
, info
, p
);
1057 if (unlikely(error
== -ESRCH
))
1059 * The task was unhashed in between, try again.
1060 * If it is dead, pid_task() will return NULL,
1061 * if we race with de_thread() it will find the
1072 kill_proc_info(int sig
, struct siginfo
*info
, pid_t pid
)
1076 error
= kill_pid_info(sig
, info
, find_vpid(pid
));
1081 /* like kill_pid_info(), but doesn't use uid/euid of "current" */
1082 int kill_pid_info_as_uid(int sig
, struct siginfo
*info
, struct pid
*pid
,
1083 uid_t uid
, uid_t euid
, u32 secid
)
1086 struct task_struct
*p
;
1088 if (!valid_signal(sig
))
1091 read_lock(&tasklist_lock
);
1092 p
= pid_task(pid
, PIDTYPE_PID
);
1097 if ((info
== SEND_SIG_NOINFO
|| (!is_si_special(info
) && SI_FROMUSER(info
)))
1098 && (euid
!= p
->suid
) && (euid
!= p
->uid
)
1099 && (uid
!= p
->suid
) && (uid
!= p
->uid
)) {
1103 ret
= security_task_kill(p
, info
, sig
, secid
);
1106 if (sig
&& p
->sighand
) {
1107 unsigned long flags
;
1108 spin_lock_irqsave(&p
->sighand
->siglock
, flags
);
1109 ret
= __group_send_sig_info(sig
, info
, p
);
1110 spin_unlock_irqrestore(&p
->sighand
->siglock
, flags
);
1113 read_unlock(&tasklist_lock
);
1116 EXPORT_SYMBOL_GPL(kill_pid_info_as_uid
);
1119 * kill_something_info() interprets pid in interesting ways just like kill(2).
1121 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1122 * is probably wrong. Should make it like BSD or SYSV.
1125 static int kill_something_info(int sig
, struct siginfo
*info
, pid_t pid
)
1131 ret
= kill_pid_info(sig
, info
, find_vpid(pid
));
1136 read_lock(&tasklist_lock
);
1138 ret
= __kill_pgrp_info(sig
, info
,
1139 pid
? find_vpid(-pid
) : task_pgrp(current
));
1141 int retval
= 0, count
= 0;
1142 struct task_struct
* p
;
1144 for_each_process(p
) {
1145 if (task_pid_vnr(p
) > 1 &&
1146 !same_thread_group(p
, current
)) {
1147 int err
= group_send_sig_info(sig
, info
, p
);
1153 ret
= count
? retval
: -ESRCH
;
1155 read_unlock(&tasklist_lock
);
1161 * These are for backward compatibility with the rest of the kernel source.
1165 * The caller must ensure the task can't exit.
1168 send_sig_info(int sig
, struct siginfo
*info
, struct task_struct
*p
)
1171 unsigned long flags
;
1174 * Make sure legacy kernel users don't send in bad values
1175 * (normal paths check this in check_kill_permission).
1177 if (!valid_signal(sig
))
1180 spin_lock_irqsave(&p
->sighand
->siglock
, flags
);
1181 ret
= specific_send_sig_info(sig
, info
, p
);
1182 spin_unlock_irqrestore(&p
->sighand
->siglock
, flags
);
1186 #define __si_special(priv) \
1187 ((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1190 send_sig(int sig
, struct task_struct
*p
, int priv
)
1192 return send_sig_info(sig
, __si_special(priv
), p
);
1196 force_sig(int sig
, struct task_struct
*p
)
1198 force_sig_info(sig
, SEND_SIG_PRIV
, p
);
1202 * When things go south during signal handling, we
1203 * will force a SIGSEGV. And if the signal that caused
1204 * the problem was already a SIGSEGV, we'll want to
1205 * make sure we don't even try to deliver the signal..
1208 force_sigsegv(int sig
, struct task_struct
*p
)
1210 if (sig
== SIGSEGV
) {
1211 unsigned long flags
;
1212 spin_lock_irqsave(&p
->sighand
->siglock
, flags
);
1213 p
->sighand
->action
[sig
- 1].sa
.sa_handler
= SIG_DFL
;
1214 spin_unlock_irqrestore(&p
->sighand
->siglock
, flags
);
1216 force_sig(SIGSEGV
, p
);
1220 int kill_pgrp(struct pid
*pid
, int sig
, int priv
)
1224 read_lock(&tasklist_lock
);
1225 ret
= __kill_pgrp_info(sig
, __si_special(priv
), pid
);
1226 read_unlock(&tasklist_lock
);
1230 EXPORT_SYMBOL(kill_pgrp
);
/* Send @sig to the process identified by @pid. */
int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);
1239 * These functions support sending signals using preallocated sigqueue
1240 * structures. This is needed "because realtime applications cannot
1241 * afford to lose notifications of asynchronous events, like timer
1242 * expirations or I/O completions". In the case of Posix Timers
1243 * we allocate the sigqueue structure from the timer_create. If this
1244 * allocation fails we are able to report the failure to the application
1245 * with an EAGAIN error.
1248 struct sigqueue
*sigqueue_alloc(void)
1252 if ((q
= __sigqueue_alloc(current
, GFP_KERNEL
, 0)))
1253 q
->flags
|= SIGQUEUE_PREALLOC
;
1257 void sigqueue_free(struct sigqueue
*q
)
1259 unsigned long flags
;
1260 spinlock_t
*lock
= ¤t
->sighand
->siglock
;
1262 BUG_ON(!(q
->flags
& SIGQUEUE_PREALLOC
));
1264 * We must hold ->siglock while testing q->list
1265 * to serialize with collect_signal() or with
1266 * __exit_signal()->flush_sigqueue().
1268 spin_lock_irqsave(lock
, flags
);
1269 q
->flags
&= ~SIGQUEUE_PREALLOC
;
1271 * If it is queued it will be freed when dequeued,
1272 * like the "regular" sigqueue.
1274 if (!list_empty(&q
->list
))
1276 spin_unlock_irqrestore(lock
, flags
);
1282 int send_sigqueue(struct sigqueue
*q
, struct task_struct
*t
, int group
)
1284 int sig
= q
->info
.si_signo
;
1285 struct sigpending
*pending
;
1286 unsigned long flags
;
1289 BUG_ON(!(q
->flags
& SIGQUEUE_PREALLOC
));
1292 if (!likely(lock_task_sighand(t
, &flags
)))
1295 ret
= 1; /* the signal is ignored */
1296 if (!prepare_signal(sig
, t
))
1300 if (unlikely(!list_empty(&q
->list
))) {
1302 * If an SI_TIMER entry is already queue just increment
1303 * the overrun count.
1305 BUG_ON(q
->info
.si_code
!= SI_TIMER
);
1306 q
->info
.si_overrun
++;
1309 q
->info
.si_overrun
= 0;
1311 signalfd_notify(t
, sig
);
1312 pending
= group
? &t
->signal
->shared_pending
: &t
->pending
;
1313 list_add_tail(&q
->list
, &pending
->list
);
1314 sigaddset(&pending
->signal
, sig
);
1315 complete_signal(sig
, t
, group
);
1317 unlock_task_sighand(t
, &flags
);
1323 * Wake up any threads in the parent blocked in wait* syscalls.
1325 static inline void __wake_up_parent(struct task_struct
*p
,
1326 struct task_struct
*parent
)
1328 wake_up_interruptible_sync(&parent
->signal
->wait_chldexit
);
1332 * Let a parent know about the death of a child.
1333 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
1335 * Returns -1 if our parent ignored us and so we've switched to
1336 * self-reaping, or else @sig.
1338 int do_notify_parent(struct task_struct
*tsk
, int sig
)
1340 struct siginfo info
;
1341 unsigned long flags
;
1342 struct sighand_struct
*psig
;
1347 /* do_notify_parent_cldstop should have been called instead. */
1348 BUG_ON(task_is_stopped_or_traced(tsk
));
1350 BUG_ON(!tsk
->ptrace
&&
1351 (tsk
->group_leader
!= tsk
|| !thread_group_empty(tsk
)));
1353 info
.si_signo
= sig
;
1356 * we are under tasklist_lock here so our parent is tied to
1357 * us and cannot exit and release its namespace.
1359 * the only it can is to switch its nsproxy with sys_unshare,
1360 * bu uncharing pid namespaces is not allowed, so we'll always
1361 * see relevant namespace
1363 * write_lock() currently calls preempt_disable() which is the
1364 * same as rcu_read_lock(), but according to Oleg, this is not
1365 * correct to rely on this
1368 info
.si_pid
= task_pid_nr_ns(tsk
, tsk
->parent
->nsproxy
->pid_ns
);
1371 info
.si_uid
= tsk
->uid
;
1373 info
.si_utime
= cputime_to_clock_t(cputime_add(tsk
->utime
,
1374 tsk
->signal
->utime
));
1375 info
.si_stime
= cputime_to_clock_t(cputime_add(tsk
->stime
,
1376 tsk
->signal
->stime
));
1378 info
.si_status
= tsk
->exit_code
& 0x7f;
1379 if (tsk
->exit_code
& 0x80)
1380 info
.si_code
= CLD_DUMPED
;
1381 else if (tsk
->exit_code
& 0x7f)
1382 info
.si_code
= CLD_KILLED
;
1384 info
.si_code
= CLD_EXITED
;
1385 info
.si_status
= tsk
->exit_code
>> 8;
1388 psig
= tsk
->parent
->sighand
;
1389 spin_lock_irqsave(&psig
->siglock
, flags
);
1390 if (!tsk
->ptrace
&& sig
== SIGCHLD
&&
1391 (psig
->action
[SIGCHLD
-1].sa
.sa_handler
== SIG_IGN
||
1392 (psig
->action
[SIGCHLD
-1].sa
.sa_flags
& SA_NOCLDWAIT
))) {
1394 * We are exiting and our parent doesn't care. POSIX.1
1395 * defines special semantics for setting SIGCHLD to SIG_IGN
1396 * or setting the SA_NOCLDWAIT flag: we should be reaped
1397 * automatically and not left for our parent's wait4 call.
1398 * Rather than having the parent do it as a magic kind of
1399 * signal handler, we just set this to tell do_exit that we
1400 * can be cleaned up without becoming a zombie. Note that
1401 * we still call __wake_up_parent in this case, because a
1402 * blocked sys_wait4 might now return -ECHILD.
1404 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
1405 * is implementation-defined: we do (if you don't want
1406 * it, just use SIG_IGN instead).
1408 ret
= tsk
->exit_signal
= -1;
1409 if (psig
->action
[SIGCHLD
-1].sa
.sa_handler
== SIG_IGN
)
1412 if (valid_signal(sig
) && sig
> 0)
1413 __group_send_sig_info(sig
, &info
, tsk
->parent
);
1414 __wake_up_parent(tsk
, tsk
->parent
);
1415 spin_unlock_irqrestore(&psig
->siglock
, flags
);
1420 static void do_notify_parent_cldstop(struct task_struct
*tsk
, int why
)
1422 struct siginfo info
;
1423 unsigned long flags
;
1424 struct task_struct
*parent
;
1425 struct sighand_struct
*sighand
;
1427 if (tsk
->ptrace
& PT_PTRACED
)
1428 parent
= tsk
->parent
;
1430 tsk
= tsk
->group_leader
;
1431 parent
= tsk
->real_parent
;
1434 info
.si_signo
= SIGCHLD
;
1437 * see comment in do_notify_parent() abot the following 3 lines
1440 info
.si_pid
= task_pid_nr_ns(tsk
, tsk
->parent
->nsproxy
->pid_ns
);
1443 info
.si_uid
= tsk
->uid
;
1445 info
.si_utime
= cputime_to_clock_t(tsk
->utime
);
1446 info
.si_stime
= cputime_to_clock_t(tsk
->stime
);
1451 info
.si_status
= SIGCONT
;
1454 info
.si_status
= tsk
->signal
->group_exit_code
& 0x7f;
1457 info
.si_status
= tsk
->exit_code
& 0x7f;
1463 sighand
= parent
->sighand
;
1464 spin_lock_irqsave(&sighand
->siglock
, flags
);
1465 if (sighand
->action
[SIGCHLD
-1].sa
.sa_handler
!= SIG_IGN
&&
1466 !(sighand
->action
[SIGCHLD
-1].sa
.sa_flags
& SA_NOCLDSTOP
))
1467 __group_send_sig_info(SIGCHLD
, &info
, parent
);
1469 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
1471 __wake_up_parent(tsk
, parent
);
1472 spin_unlock_irqrestore(&sighand
->siglock
, flags
);
1475 static inline int may_ptrace_stop(void)
1477 if (!likely(current
->ptrace
& PT_PTRACED
))
1480 * Are we in the middle of do_coredump?
1481 * If so and our tracer is also part of the coredump stopping
1482 * is a deadlock situation, and pointless because our tracer
1483 * is dead so don't allow us to stop.
1484 * If SIGKILL was already sent before the caller unlocked
1485 * ->siglock we must see ->core_state != NULL. Otherwise it
1486 * is safe to enter schedule().
1488 if (unlikely(current
->mm
->core_state
) &&
1489 unlikely(current
->mm
== current
->parent
->mm
))
1496 * Return nonzero if there is a SIGKILL that should be waking us up.
1497 * Called with the siglock held.
1499 static int sigkill_pending(struct task_struct
*tsk
)
1501 return sigismember(&tsk
->pending
.signal
, SIGKILL
) ||
1502 sigismember(&tsk
->signal
->shared_pending
.signal
, SIGKILL
);
1506 * This must be called with current->sighand->siglock held.
1508 * This should be the path for all ptrace stops.
1509 * We always set current->last_siginfo while stopped here.
1510 * That makes it a way to test a stopped process for
1511 * being ptrace-stopped vs being job-control-stopped.
1513 * If we actually decide not to stop at all because the tracer
1514 * is gone, we keep current->exit_code unless clear_code.
1516 static void ptrace_stop(int exit_code
, int clear_code
, siginfo_t
*info
)
1518 if (arch_ptrace_stop_needed(exit_code
, info
)) {
1520 * The arch code has something special to do before a
1521 * ptrace stop. This is allowed to block, e.g. for faults
1522 * on user stack pages. We can't keep the siglock while
1523 * calling arch_ptrace_stop, so we must release it now.
1524 * To preserve proper semantics, we must do this before
1525 * any signal bookkeeping like checking group_stop_count.
1526 * Meanwhile, a SIGKILL could come in before we retake the
1527 * siglock. That must prevent us from sleeping in TASK_TRACED.
1528 * So after regaining the lock, we must check for SIGKILL.
1530 spin_unlock_irq(¤t
->sighand
->siglock
);
1531 arch_ptrace_stop(exit_code
, info
);
1532 spin_lock_irq(¤t
->sighand
->siglock
);
1533 if (sigkill_pending(current
))
1538 * If there is a group stop in progress,
1539 * we must participate in the bookkeeping.
1541 if (current
->signal
->group_stop_count
> 0)
1542 --current
->signal
->group_stop_count
;
1544 current
->last_siginfo
= info
;
1545 current
->exit_code
= exit_code
;
1547 /* Let the debugger run. */
1548 __set_current_state(TASK_TRACED
);
1549 spin_unlock_irq(¤t
->sighand
->siglock
);
1550 read_lock(&tasklist_lock
);
1551 if (may_ptrace_stop()) {
1552 do_notify_parent_cldstop(current
, CLD_TRAPPED
);
1554 * Don't want to allow preemption here, because
1555 * sys_ptrace() needs this task to be inactive.
1557 * XXX: implement read_unlock_no_resched().
1560 read_unlock(&tasklist_lock
);
1561 preempt_enable_no_resched();
1565 * By the time we got the lock, our tracer went away.
1566 * Don't drop the lock yet, another tracer may come.
1568 __set_current_state(TASK_RUNNING
);
1570 current
->exit_code
= 0;
1571 read_unlock(&tasklist_lock
);
1575 * While in TASK_TRACED, we were considered "frozen enough".
1576 * Now that we woke up, it's crucial if we're supposed to be
1577 * frozen that we freeze now before running anything substantial.
1582 * We are back. Now reacquire the siglock before touching
1583 * last_siginfo, so that we are sure to have synchronized with
1584 * any signal-sending on another CPU that wants to examine it.
1586 spin_lock_irq(¤t
->sighand
->siglock
);
1587 current
->last_siginfo
= NULL
;
1590 * Queued signals ignored us while we were stopped for tracing.
1591 * So check for any that we should take before resuming user mode.
1592 * This sets TIF_SIGPENDING, but never clears it.
1594 recalc_sigpending_tsk(current
);
1597 void ptrace_notify(int exit_code
)
1601 BUG_ON((exit_code
& (0x7f | ~0xffff)) != SIGTRAP
);
1603 memset(&info
, 0, sizeof info
);
1604 info
.si_signo
= SIGTRAP
;
1605 info
.si_code
= exit_code
;
1606 info
.si_pid
= task_pid_vnr(current
);
1607 info
.si_uid
= current
->uid
;
1609 /* Let the debugger run. */
1610 spin_lock_irq(¤t
->sighand
->siglock
);
1611 ptrace_stop(exit_code
, 1, &info
);
1612 spin_unlock_irq(¤t
->sighand
->siglock
);
1616 finish_stop(int stop_count
)
1619 * If there are no other threads in the group, or if there is
1620 * a group stop in progress and we are the last to stop,
1621 * report to the parent. When ptraced, every thread reports itself.
1623 if (tracehook_notify_jctl(stop_count
== 0, CLD_STOPPED
)) {
1624 read_lock(&tasklist_lock
);
1625 do_notify_parent_cldstop(current
, CLD_STOPPED
);
1626 read_unlock(&tasklist_lock
);
1631 } while (try_to_freeze());
1633 * Now we don't run again until continued.
1635 current
->exit_code
= 0;
1639 * This performs the stopping for SIGSTOP and other stop signals.
1640 * We have to stop all threads in the thread group.
1641 * Returns nonzero if we've actually stopped and released the siglock.
1642 * Returns zero if we didn't stop and still hold the siglock.
1644 static int do_signal_stop(int signr
)
1646 struct signal_struct
*sig
= current
->signal
;
1649 if (sig
->group_stop_count
> 0) {
1651 * There is a group stop in progress. We don't need to
1652 * start another one.
1654 stop_count
= --sig
->group_stop_count
;
1656 struct task_struct
*t
;
1658 if (!likely(sig
->flags
& SIGNAL_STOP_DEQUEUED
) ||
1659 unlikely(signal_group_exit(sig
)))
1662 * There is no group stop already in progress.
1663 * We must initiate one now.
1665 sig
->group_exit_code
= signr
;
1668 for (t
= next_thread(current
); t
!= current
; t
= next_thread(t
))
1670 * Setting state to TASK_STOPPED for a group
1671 * stop is always done with the siglock held,
1672 * so this check has no races.
1674 if (!(t
->flags
& PF_EXITING
) &&
1675 !task_is_stopped_or_traced(t
)) {
1677 signal_wake_up(t
, 0);
1679 sig
->group_stop_count
= stop_count
;
1682 if (stop_count
== 0)
1683 sig
->flags
= SIGNAL_STOP_STOPPED
;
1684 current
->exit_code
= sig
->group_exit_code
;
1685 __set_current_state(TASK_STOPPED
);
1687 spin_unlock_irq(¤t
->sighand
->siglock
);
1688 finish_stop(stop_count
);
1692 static int ptrace_signal(int signr
, siginfo_t
*info
,
1693 struct pt_regs
*regs
, void *cookie
)
1695 if (!(current
->ptrace
& PT_PTRACED
))
1698 ptrace_signal_deliver(regs
, cookie
);
1700 /* Let the debugger run. */
1701 ptrace_stop(signr
, 0, info
);
1703 /* We're back. Did the debugger cancel the sig? */
1704 signr
= current
->exit_code
;
1708 current
->exit_code
= 0;
1710 /* Update the siginfo structure if the signal has
1711 changed. If the debugger wanted something
1712 specific in the siginfo structure then it should
1713 have updated *info via PTRACE_SETSIGINFO. */
1714 if (signr
!= info
->si_signo
) {
1715 info
->si_signo
= signr
;
1717 info
->si_code
= SI_USER
;
1718 info
->si_pid
= task_pid_vnr(current
->parent
);
1719 info
->si_uid
= current
->parent
->uid
;
1722 /* If the (new) signal is now blocked, requeue it. */
1723 if (sigismember(¤t
->blocked
, signr
)) {
1724 specific_send_sig_info(signr
, info
, current
);
1731 int get_signal_to_deliver(siginfo_t
*info
, struct k_sigaction
*return_ka
,
1732 struct pt_regs
*regs
, void *cookie
)
1734 struct sighand_struct
*sighand
= current
->sighand
;
1735 struct signal_struct
*signal
= current
->signal
;
1740 * We'll jump back here after any time we were stopped in TASK_STOPPED.
1741 * While in TASK_STOPPED, we were considered "frozen enough".
1742 * Now that we woke up, it's crucial if we're supposed to be
1743 * frozen that we freeze now before running anything substantial.
1747 spin_lock_irq(&sighand
->siglock
);
1749 * Every stopped thread goes here after wakeup. Check to see if
1750 * we should notify the parent, prepare_signal(SIGCONT) encodes
1751 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
1753 if (unlikely(signal
->flags
& SIGNAL_CLD_MASK
)) {
1754 int why
= (signal
->flags
& SIGNAL_STOP_CONTINUED
)
1755 ? CLD_CONTINUED
: CLD_STOPPED
;
1756 signal
->flags
&= ~SIGNAL_CLD_MASK
;
1757 spin_unlock_irq(&sighand
->siglock
);
1759 if (unlikely(!tracehook_notify_jctl(1, why
)))
1762 read_lock(&tasklist_lock
);
1763 do_notify_parent_cldstop(current
->group_leader
, why
);
1764 read_unlock(&tasklist_lock
);
1769 struct k_sigaction
*ka
;
1771 if (unlikely(signal
->group_stop_count
> 0) &&
1776 * Tracing can induce an artifical signal and choose sigaction.
1777 * The return value in @signr determines the default action,
1778 * but @info->si_signo is the signal number we will report.
1780 signr
= tracehook_get_signal(current
, regs
, info
, return_ka
);
1781 if (unlikely(signr
< 0))
1783 if (unlikely(signr
!= 0))
1786 signr
= dequeue_signal(current
, ¤t
->blocked
,
1790 break; /* will return 0 */
1792 if (signr
!= SIGKILL
) {
1793 signr
= ptrace_signal(signr
, info
,
1799 ka
= &sighand
->action
[signr
-1];
1802 if (ka
->sa
.sa_handler
== SIG_IGN
) /* Do nothing. */
1804 if (ka
->sa
.sa_handler
!= SIG_DFL
) {
1805 /* Run the handler. */
1808 if (ka
->sa
.sa_flags
& SA_ONESHOT
)
1809 ka
->sa
.sa_handler
= SIG_DFL
;
1811 break; /* will return non-zero "signr" value */
1815 * Now we are doing the default action for this signal.
1817 if (sig_kernel_ignore(signr
)) /* Default is nothing. */
1821 * Global init gets no signals it doesn't want.
1823 if (unlikely(signal
->flags
& SIGNAL_UNKILLABLE
) &&
1824 !signal_group_exit(signal
))
1827 if (sig_kernel_stop(signr
)) {
1829 * The default action is to stop all threads in
1830 * the thread group. The job control signals
1831 * do nothing in an orphaned pgrp, but SIGSTOP
1832 * always works. Note that siglock needs to be
1833 * dropped during the call to is_orphaned_pgrp()
1834 * because of lock ordering with tasklist_lock.
1835 * This allows an intervening SIGCONT to be posted.
1836 * We need to check for that and bail out if necessary.
1838 if (signr
!= SIGSTOP
) {
1839 spin_unlock_irq(&sighand
->siglock
);
1841 /* signals can be posted during this window */
1843 if (is_current_pgrp_orphaned())
1846 spin_lock_irq(&sighand
->siglock
);
1849 if (likely(do_signal_stop(info
->si_signo
))) {
1850 /* It released the siglock. */
1855 * We didn't actually stop, due to a race
1856 * with SIGCONT or something like that.
1861 spin_unlock_irq(&sighand
->siglock
);
1864 * Anything else is fatal, maybe with a core dump.
1866 current
->flags
|= PF_SIGNALED
;
1868 if (sig_kernel_coredump(signr
)) {
1869 if (print_fatal_signals
)
1870 print_fatal_signal(regs
, info
->si_signo
);
1872 * If it was able to dump core, this kills all
1873 * other threads in the group and synchronizes with
1874 * their demise. If we lost the race with another
1875 * thread getting here, it set group_exit_code
1876 * first and our do_group_exit call below will use
1877 * that value and ignore the one we pass it.
1879 do_coredump(info
->si_signo
, info
->si_signo
, regs
);
1883 * Death signals, no core dump.
1885 do_group_exit(info
->si_signo
);
1888 spin_unlock_irq(&sighand
->siglock
);
1892 void exit_signals(struct task_struct
*tsk
)
1895 struct task_struct
*t
;
1897 if (thread_group_empty(tsk
) || signal_group_exit(tsk
->signal
)) {
1898 tsk
->flags
|= PF_EXITING
;
1902 spin_lock_irq(&tsk
->sighand
->siglock
);
1904 * From now this task is not visible for group-wide signals,
1905 * see wants_signal(), do_signal_stop().
1907 tsk
->flags
|= PF_EXITING
;
1908 if (!signal_pending(tsk
))
1911 /* It could be that __group_complete_signal() choose us to
1912 * notify about group-wide signal. Another thread should be
1913 * woken now to take the signal since we will not.
1915 for (t
= tsk
; (t
= next_thread(t
)) != tsk
; )
1916 if (!signal_pending(t
) && !(t
->flags
& PF_EXITING
))
1917 recalc_sigpending_and_wake(t
);
1919 if (unlikely(tsk
->signal
->group_stop_count
) &&
1920 !--tsk
->signal
->group_stop_count
) {
1921 tsk
->signal
->flags
= SIGNAL_STOP_STOPPED
;
1925 spin_unlock_irq(&tsk
->sighand
->siglock
);
1927 if (unlikely(group_stop
) && tracehook_notify_jctl(1, CLD_STOPPED
)) {
1928 read_lock(&tasklist_lock
);
1929 do_notify_parent_cldstop(tsk
, CLD_STOPPED
);
1930 read_unlock(&tasklist_lock
);
1934 EXPORT_SYMBOL(recalc_sigpending
);
1935 EXPORT_SYMBOL_GPL(dequeue_signal
);
1936 EXPORT_SYMBOL(flush_signals
);
1937 EXPORT_SYMBOL(force_sig
);
1938 EXPORT_SYMBOL(send_sig
);
1939 EXPORT_SYMBOL(send_sig_info
);
1940 EXPORT_SYMBOL(sigprocmask
);
1941 EXPORT_SYMBOL(block_all_signals
);
1942 EXPORT_SYMBOL(unblock_all_signals
);
1946 * System call entry points.
1949 SYSCALL_DEFINE0(restart_syscall
)
1951 struct restart_block
*restart
= ¤t_thread_info()->restart_block
;
1952 return restart
->fn(restart
);
1955 long do_no_restart_syscall(struct restart_block
*param
)
1961 * We don't need to get the kernel lock - this is all local to this
1962 * particular thread.. (and that's good, because this is _heavily_
1963 * used by various programs)
1967 * This is also useful for kernel threads that want to temporarily
1968 * (or permanently) block certain signals.
1970 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
1971 * interface happily blocks "unblockable" signals like SIGKILL
1974 int sigprocmask(int how
, sigset_t
*set
, sigset_t
*oldset
)
1978 spin_lock_irq(¤t
->sighand
->siglock
);
1980 *oldset
= current
->blocked
;
1985 sigorsets(¤t
->blocked
, ¤t
->blocked
, set
);
1988 signandsets(¤t
->blocked
, ¤t
->blocked
, set
);
1991 current
->blocked
= *set
;
1996 recalc_sigpending();
1997 spin_unlock_irq(¤t
->sighand
->siglock
);
2002 SYSCALL_DEFINE4(rt_sigprocmask
, int, how
, sigset_t __user
*, set
,
2003 sigset_t __user
*, oset
, size_t, sigsetsize
)
2005 int error
= -EINVAL
;
2006 sigset_t old_set
, new_set
;
2008 /* XXX: Don't preclude handling different sized sigset_t's. */
2009 if (sigsetsize
!= sizeof(sigset_t
))
2014 if (copy_from_user(&new_set
, set
, sizeof(*set
)))
2016 sigdelsetmask(&new_set
, sigmask(SIGKILL
)|sigmask(SIGSTOP
));
2018 error
= sigprocmask(how
, &new_set
, &old_set
);
2024 spin_lock_irq(¤t
->sighand
->siglock
);
2025 old_set
= current
->blocked
;
2026 spin_unlock_irq(¤t
->sighand
->siglock
);
2030 if (copy_to_user(oset
, &old_set
, sizeof(*oset
)))
2038 long do_sigpending(void __user
*set
, unsigned long sigsetsize
)
2040 long error
= -EINVAL
;
2043 if (sigsetsize
> sizeof(sigset_t
))
2046 spin_lock_irq(¤t
->sighand
->siglock
);
2047 sigorsets(&pending
, ¤t
->pending
.signal
,
2048 ¤t
->signal
->shared_pending
.signal
);
2049 spin_unlock_irq(¤t
->sighand
->siglock
);
2051 /* Outside the lock because only this thread touches it. */
2052 sigandsets(&pending
, ¤t
->blocked
, &pending
);
2055 if (!copy_to_user(set
, &pending
, sigsetsize
))
2062 SYSCALL_DEFINE2(rt_sigpending
, sigset_t __user
*, set
, size_t, sigsetsize
)
2064 return do_sigpending(set
, sigsetsize
);
2067 #ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2069 int copy_siginfo_to_user(siginfo_t __user
*to
, siginfo_t
*from
)
2073 if (!access_ok (VERIFY_WRITE
, to
, sizeof(siginfo_t
)))
2075 if (from
->si_code
< 0)
2076 return __copy_to_user(to
, from
, sizeof(siginfo_t
))
2079 * If you change siginfo_t structure, please be sure
2080 * this code is fixed accordingly.
2081 * Please remember to update the signalfd_copyinfo() function
2082 * inside fs/signalfd.c too, in case siginfo_t changes.
2083 * It should never copy any pad contained in the structure
2084 * to avoid security leaks, but must copy the generic
2085 * 3 ints plus the relevant union member.
2087 err
= __put_user(from
->si_signo
, &to
->si_signo
);
2088 err
|= __put_user(from
->si_errno
, &to
->si_errno
);
2089 err
|= __put_user((short)from
->si_code
, &to
->si_code
);
2090 switch (from
->si_code
& __SI_MASK
) {
2092 err
|= __put_user(from
->si_pid
, &to
->si_pid
);
2093 err
|= __put_user(from
->si_uid
, &to
->si_uid
);
2096 err
|= __put_user(from
->si_tid
, &to
->si_tid
);
2097 err
|= __put_user(from
->si_overrun
, &to
->si_overrun
);
2098 err
|= __put_user(from
->si_ptr
, &to
->si_ptr
);
2101 err
|= __put_user(from
->si_band
, &to
->si_band
);
2102 err
|= __put_user(from
->si_fd
, &to
->si_fd
);
2105 err
|= __put_user(from
->si_addr
, &to
->si_addr
);
2106 #ifdef __ARCH_SI_TRAPNO
2107 err
|= __put_user(from
->si_trapno
, &to
->si_trapno
);
2111 err
|= __put_user(from
->si_pid
, &to
->si_pid
);
2112 err
|= __put_user(from
->si_uid
, &to
->si_uid
);
2113 err
|= __put_user(from
->si_status
, &to
->si_status
);
2114 err
|= __put_user(from
->si_utime
, &to
->si_utime
);
2115 err
|= __put_user(from
->si_stime
, &to
->si_stime
);
2117 case __SI_RT
: /* This is not generated by the kernel as of now. */
2118 case __SI_MESGQ
: /* But this is */
2119 err
|= __put_user(from
->si_pid
, &to
->si_pid
);
2120 err
|= __put_user(from
->si_uid
, &to
->si_uid
);
2121 err
|= __put_user(from
->si_ptr
, &to
->si_ptr
);
2123 default: /* this is just in case for now ... */
2124 err
|= __put_user(from
->si_pid
, &to
->si_pid
);
2125 err
|= __put_user(from
->si_uid
, &to
->si_uid
);
2133 SYSCALL_DEFINE4(rt_sigtimedwait
, const sigset_t __user
*, uthese
,
2134 siginfo_t __user
*, uinfo
, const struct timespec __user
*, uts
,
2143 /* XXX: Don't preclude handling different sized sigset_t's. */
2144 if (sigsetsize
!= sizeof(sigset_t
))
2147 if (copy_from_user(&these
, uthese
, sizeof(these
)))
2151 * Invert the set of allowed signals to get those we
2154 sigdelsetmask(&these
, sigmask(SIGKILL
)|sigmask(SIGSTOP
));
2158 if (copy_from_user(&ts
, uts
, sizeof(ts
)))
2160 if (ts
.tv_nsec
>= 1000000000L || ts
.tv_nsec
< 0
2165 spin_lock_irq(¤t
->sighand
->siglock
);
2166 sig
= dequeue_signal(current
, &these
, &info
);
2168 timeout
= MAX_SCHEDULE_TIMEOUT
;
2170 timeout
= (timespec_to_jiffies(&ts
)
2171 + (ts
.tv_sec
|| ts
.tv_nsec
));
2174 /* None ready -- temporarily unblock those we're
2175 * interested while we are sleeping in so that we'll
2176 * be awakened when they arrive. */
2177 current
->real_blocked
= current
->blocked
;
2178 sigandsets(¤t
->blocked
, ¤t
->blocked
, &these
);
2179 recalc_sigpending();
2180 spin_unlock_irq(¤t
->sighand
->siglock
);
2182 timeout
= schedule_timeout_interruptible(timeout
);
2184 spin_lock_irq(¤t
->sighand
->siglock
);
2185 sig
= dequeue_signal(current
, &these
, &info
);
2186 current
->blocked
= current
->real_blocked
;
2187 siginitset(¤t
->real_blocked
, 0);
2188 recalc_sigpending();
2191 spin_unlock_irq(¤t
->sighand
->siglock
);
2196 if (copy_siginfo_to_user(uinfo
, &info
))
2208 SYSCALL_DEFINE2(kill
, pid_t
, pid
, int, sig
)
2210 struct siginfo info
;
2212 info
.si_signo
= sig
;
2214 info
.si_code
= SI_USER
;
2215 info
.si_pid
= task_tgid_vnr(current
);
2216 info
.si_uid
= current
->uid
;
2218 return kill_something_info(sig
, &info
, pid
);
2221 static int do_tkill(pid_t tgid
, pid_t pid
, int sig
)
2224 struct siginfo info
;
2225 struct task_struct
*p
;
2226 unsigned long flags
;
2229 info
.si_signo
= sig
;
2231 info
.si_code
= SI_TKILL
;
2232 info
.si_pid
= task_tgid_vnr(current
);
2233 info
.si_uid
= current
->uid
;
2236 p
= find_task_by_vpid(pid
);
2237 if (p
&& (tgid
<= 0 || task_tgid_vnr(p
) == tgid
)) {
2238 error
= check_kill_permission(sig
, &info
, p
);
2240 * The null signal is a permissions and process existence
2241 * probe. No signal is actually delivered.
2243 * If lock_task_sighand() fails we pretend the task dies
2244 * after receiving the signal. The window is tiny, and the
2245 * signal is private anyway.
2247 if (!error
&& sig
&& lock_task_sighand(p
, &flags
)) {
2248 error
= specific_send_sig_info(sig
, &info
, p
);
2249 unlock_task_sighand(p
, &flags
);
2258 * sys_tgkill - send signal to one specific thread
2259 * @tgid: the thread group ID of the thread
2260 * @pid: the PID of the thread
2261 * @sig: signal to be sent
2263 * This syscall also checks the @tgid and returns -ESRCH even if the PID
2264 * exists but it's not belonging to the target process anymore. This
2265 * method solves the problem of threads exiting and PIDs getting reused.
2267 SYSCALL_DEFINE3(tgkill
, pid_t
, tgid
, pid_t
, pid
, int, sig
)
2269 /* This is only valid for single tasks */
2270 if (pid
<= 0 || tgid
<= 0)
2273 return do_tkill(tgid
, pid
, sig
);
2277 * Send a signal to only one task, even if it's a CLONE_THREAD task.
2279 SYSCALL_DEFINE2(tkill
, pid_t
, pid
, int, sig
)
2281 /* This is only valid for single tasks */
2285 return do_tkill(0, pid
, sig
);
2288 SYSCALL_DEFINE3(rt_sigqueueinfo
, pid_t
, pid
, int, sig
,
2289 siginfo_t __user
*, uinfo
)
2293 if (copy_from_user(&info
, uinfo
, sizeof(siginfo_t
)))
2296 /* Not even root can pretend to send signals from the kernel.
2297 * Nor can they impersonate a kill()/tgkill(), which adds source info.
2299 if (info
.si_code
>= 0 || info
.si_code
== SI_TKILL
) {
2300 /* We used to allow any < 0 si_code */
2301 WARN_ON_ONCE(info
.si_code
< 0);
2304 info
.si_signo
= sig
;
2306 /* POSIX.1b doesn't mention process groups. */
2307 return kill_proc_info(sig
, &info
, pid
);
2310 int do_sigaction(int sig
, struct k_sigaction
*act
, struct k_sigaction
*oact
)
2312 struct task_struct
*t
= current
;
2313 struct k_sigaction
*k
;
2316 if (!valid_signal(sig
) || sig
< 1 || (act
&& sig_kernel_only(sig
)))
2319 k
= &t
->sighand
->action
[sig
-1];
2321 spin_lock_irq(¤t
->sighand
->siglock
);
2326 sigdelsetmask(&act
->sa
.sa_mask
,
2327 sigmask(SIGKILL
) | sigmask(SIGSTOP
));
2331 * "Setting a signal action to SIG_IGN for a signal that is
2332 * pending shall cause the pending signal to be discarded,
2333 * whether or not it is blocked."
2335 * "Setting a signal action to SIG_DFL for a signal that is
2336 * pending and whose default action is to ignore the signal
2337 * (for example, SIGCHLD), shall cause the pending signal to
2338 * be discarded, whether or not it is blocked"
2340 if (sig_handler_ignored(sig_handler(t
, sig
), sig
)) {
2342 sigaddset(&mask
, sig
);
2343 rm_from_queue_full(&mask
, &t
->signal
->shared_pending
);
2345 rm_from_queue_full(&mask
, &t
->pending
);
2347 } while (t
!= current
);
2351 spin_unlock_irq(¤t
->sighand
->siglock
);
2356 do_sigaltstack (const stack_t __user
*uss
, stack_t __user
*uoss
, unsigned long sp
)
2361 oss
.ss_sp
= (void __user
*) current
->sas_ss_sp
;
2362 oss
.ss_size
= current
->sas_ss_size
;
2363 oss
.ss_flags
= sas_ss_flags(sp
);
2371 if (!access_ok(VERIFY_READ
, uss
, sizeof(*uss
))
2372 || __get_user(ss_sp
, &uss
->ss_sp
)
2373 || __get_user(ss_flags
, &uss
->ss_flags
)
2374 || __get_user(ss_size
, &uss
->ss_size
))
2378 if (on_sig_stack(sp
))
2384 * Note - this code used to test ss_flags incorrectly
2385 * old code may have been written using ss_flags==0
2386 * to mean ss_flags==SS_ONSTACK (as this was the only
2387 * way that worked) - this fix preserves that older
2390 if (ss_flags
!= SS_DISABLE
&& ss_flags
!= SS_ONSTACK
&& ss_flags
!= 0)
2393 if (ss_flags
== SS_DISABLE
) {
2398 if (ss_size
< MINSIGSTKSZ
)
2402 current
->sas_ss_sp
= (unsigned long) ss_sp
;
2403 current
->sas_ss_size
= ss_size
;
2409 if (!access_ok(VERIFY_WRITE
, uoss
, sizeof(*uoss
)))
2411 error
= __put_user(oss
.ss_sp
, &uoss
->ss_sp
) |
2412 __put_user(oss
.ss_size
, &uoss
->ss_size
) |
2413 __put_user(oss
.ss_flags
, &uoss
->ss_flags
);
2420 #ifdef __ARCH_WANT_SYS_SIGPENDING
2422 SYSCALL_DEFINE1(sigpending
, old_sigset_t __user
*, set
)
2424 return do_sigpending(set
, sizeof(*set
));
2429 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
2430 /* Some platforms have their own version with special arguments others
2431 support only sys_rt_sigprocmask. */
2433 SYSCALL_DEFINE3(sigprocmask
, int, how
, old_sigset_t __user
*, set
,
2434 old_sigset_t __user
*, oset
)
2437 old_sigset_t old_set
, new_set
;
2441 if (copy_from_user(&new_set
, set
, sizeof(*set
)))
2443 new_set
&= ~(sigmask(SIGKILL
) | sigmask(SIGSTOP
));
2445 spin_lock_irq(¤t
->sighand
->siglock
);
2446 old_set
= current
->blocked
.sig
[0];
2454 sigaddsetmask(¤t
->blocked
, new_set
);
2457 sigdelsetmask(¤t
->blocked
, new_set
);
2460 current
->blocked
.sig
[0] = new_set
;
2464 recalc_sigpending();
2465 spin_unlock_irq(¤t
->sighand
->siglock
);
2471 old_set
= current
->blocked
.sig
[0];
2474 if (copy_to_user(oset
, &old_set
, sizeof(*oset
)))
2481 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
2483 #ifdef __ARCH_WANT_SYS_RT_SIGACTION
2484 SYSCALL_DEFINE4(rt_sigaction
, int, sig
,
2485 const struct sigaction __user
*, act
,
2486 struct sigaction __user
*, oact
,
2489 struct k_sigaction new_sa
, old_sa
;
2492 /* XXX: Don't preclude handling different sized sigset_t's. */
2493 if (sigsetsize
!= sizeof(sigset_t
))
2497 if (copy_from_user(&new_sa
.sa
, act
, sizeof(new_sa
.sa
)))
2501 ret
= do_sigaction(sig
, act
? &new_sa
: NULL
, oact
? &old_sa
: NULL
);
2504 if (copy_to_user(oact
, &old_sa
.sa
, sizeof(old_sa
.sa
)))
2510 #endif /* __ARCH_WANT_SYS_RT_SIGACTION */
2512 #ifdef __ARCH_WANT_SYS_SGETMASK
2515 * For backwards compatibility. Functionality superseded by sigprocmask.
2517 SYSCALL_DEFINE0(sgetmask
)
2520 return current
->blocked
.sig
[0];
2523 SYSCALL_DEFINE1(ssetmask
, int, newmask
)
2527 spin_lock_irq(¤t
->sighand
->siglock
);
2528 old
= current
->blocked
.sig
[0];
2530 siginitset(¤t
->blocked
, newmask
& ~(sigmask(SIGKILL
)|
2532 recalc_sigpending();
2533 spin_unlock_irq(¤t
->sighand
->siglock
);
2537 #endif /* __ARCH_WANT_SGETMASK */
2539 #ifdef __ARCH_WANT_SYS_SIGNAL
2541 * For backwards compatibility. Functionality superseded by sigaction.
2543 SYSCALL_DEFINE2(signal
, int, sig
, __sighandler_t
, handler
)
2545 struct k_sigaction new_sa
, old_sa
;
2548 new_sa
.sa
.sa_handler
= handler
;
2549 new_sa
.sa
.sa_flags
= SA_ONESHOT
| SA_NOMASK
;
2550 sigemptyset(&new_sa
.sa
.sa_mask
);
2552 ret
= do_sigaction(sig
, &new_sa
, &old_sa
);
2554 return ret
? ret
: (unsigned long)old_sa
.sa
.sa_handler
;
2556 #endif /* __ARCH_WANT_SYS_SIGNAL */
2558 #ifdef __ARCH_WANT_SYS_PAUSE
2560 SYSCALL_DEFINE0(pause
)
2562 current
->state
= TASK_INTERRUPTIBLE
;
2564 return -ERESTARTNOHAND
;
2569 #ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
2570 SYSCALL_DEFINE2(rt_sigsuspend
, sigset_t __user
*, unewset
, size_t, sigsetsize
)
2574 /* XXX: Don't preclude handling different sized sigset_t's. */
2575 if (sigsetsize
!= sizeof(sigset_t
))
2578 if (copy_from_user(&newset
, unewset
, sizeof(newset
)))
2580 sigdelsetmask(&newset
, sigmask(SIGKILL
)|sigmask(SIGSTOP
));
2582 spin_lock_irq(¤t
->sighand
->siglock
);
2583 current
->saved_sigmask
= current
->blocked
;
2584 current
->blocked
= newset
;
2585 recalc_sigpending();
2586 spin_unlock_irq(¤t
->sighand
->siglock
);
2588 current
->state
= TASK_INTERRUPTIBLE
;
2590 set_restore_sigmask();
2591 return -ERESTARTNOHAND
;
2593 #endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
2595 __attribute__((weak
)) const char *arch_vma_name(struct vm_area_struct
*vma
)
2600 void __init
signals_init(void)
2602 sigqueue_cachep
= KMEM_CACHE(sigqueue
, SLAB_PANIC
);