/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>

#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include "audit.h"	/* audit_signal_info() */

/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;
static int sig_ignored(struct task_struct *t, int sig)
{
	void __user *handler;

	/*
	 * Tracers always want to know about signals..
	 */
	if (t->ptrace & PT_PTRACED)
		return 0;

	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig))
		return 0;

	/* Is it explicitly or implicitly ignored? */
	handler = t->sighand->action[sig-1].sa.sa_handler;
	return   handler == SIG_IGN ||
		(handler == SIG_DFL && sig_kernel_ignore(sig));
}
/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
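/*
 * Illustrative sketch (not part of the original file): how the test
 * above behaves for a one-word sigset.  Suppose SIGINT and SIGTERM are
 * pending and SIGINT is blocked:
 *
 *	signal->sig[0]  = sigmask(SIGINT) | sigmask(SIGTERM);
 *	blocked->sig[0] = sigmask(SIGINT);
 *
 * Then ready == sigmask(SIGTERM), which is non-zero, so
 * has_pending_signals() returns 1 and the caller sets TIF_SIGPENDING.
 */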
static int recalc_sigpending_tsk(struct task_struct *t)
{
	if (t->signal->group_stop_count > 0 ||
	    (freezing(t)) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return 1;
	}
	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here, and only callers who know they should do.
	 */
	return 0;
}

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current, the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current))
		clear_thread_flag(TIF_SIGPENDING);
}
/* Given the mask, find the first available signal that should be serviced. */

int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;
	switch (_NSIG_WORDS) {
	default:
		for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
			if ((x = *s &~ *m) != 0) {
				sig = ffz(~x) + i*_NSIG_BPW + 1;
				break;
			}
		break;

	case 2: if ((x = s[0] &~ m[0]) != 0)
			sig = 1;
		else if ((x = s[1] &~ m[1]) != 0)
			sig = _NSIG_BPW + 1;
		else
			break;
		sig += ffz(~x);
		break;

	case 1: if ((x = *s &~ *m) != 0)
			sig = ffz(~x) + 1;
		break;
	}

	return sig;
}
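/*
 * Illustrative sketch (not part of the original file): with the same
 * one-word example as above, next_signal() scans pending &~ blocked:
 *
 *	x = (sigmask(SIGINT) | sigmask(SIGTERM)) &~ sigmask(SIGINT);
 *	sig = ffz(~x) + 1;	// ffz(~x) is the lowest set bit index of x
 *
 * ffz(~x) is 14 (the bit for SIGTERM, since sigmask(n) == 1 << (n-1)),
 * so next_signal() returns 15, i.e. SIGTERM, the lowest-numbered
 * deliverable signal.
 */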
static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
					 int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;

	/*
	 * In order to avoid problems with "switch_user()", we want to make
	 * sure that the compiler doesn't re-load "t->user"
	 */
	user = t->user;
	barrier();
	atomic_inc(&user->sigpending);
	if (override_rlimit ||
	    atomic_read(&user->sigpending) <=
			t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	if (unlikely(q == NULL)) {
		atomic_dec(&user->sigpending);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = get_uid(user);
	}
	return(q);
}

static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}

void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}
/*
 * Flush all pending signals for a task.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t,TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}

void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}

/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}
/* Notify the system that a driver wants to block all signals for this
 * process, and wants to be notified if any signals at all were to be
 * sent/acted upon.  If the notifier routine returns non-zero, then the
 * signal will be acted upon after all.  If the notifier routine returns 0,
 * then the signal will be blocked.  Only one block per process is
 * allowed.  priv is a pointer to private data that the notifier routine
 * can use to determine if the signal should be blocked or not.  */

void
block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier_mask = mask;
	current->notifier_data = priv;
	current->notifier = notifier;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
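/*
 * Illustrative sketch (not part of the original file) of how a driver
 * might use this notifier interface; the names here are made up for
 * the example:
 *
 *	static int my_notifier(void *priv)
 *	{
 *		struct my_dev *dev = priv;
 *		return !dev->locked;	// non-zero: deliver the signal
 *	}
 *
 *	sigfillset(&mask);
 *	block_all_signals(my_notifier, dev, &mask);
 *	... critical section ...
 *	unblock_all_signals();
 */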
/* Notify the system that blocking has ended. */

void
unblock_all_signals(void)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier = NULL;
	current->notifier_data = NULL;
	current->notifier_mask = NULL;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
static int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
	struct sigqueue *q, *first = NULL;
	int still_pending = 0;

	if (unlikely(!sigismember(&list->signal, sig)))
		return 0;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	*/
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first) {
				still_pending = 1;
				break;
			}
			first = q;
		}
	}
	if (first) {
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);
		__sigqueue_free(first);
		if (!still_pending)
			sigdelset(&list->signal, sig);
	} else {

		/* Ok, it wasn't in the queue.  This must be
		   a fast-pathed signal or we must have been
		   out of queue space.  So zero out the info.
		 */
		sigdelset(&list->signal, sig);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = 0;
		info->si_pid = 0;
		info->si_uid = 0;
	}
	return 1;
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			siginfo_t *info)
{
	int sig = next_signal(pending, mask);

	if (sig) {
		if (current->notifier) {
			if (sigismember(current->notifier_mask, sig)) {
				if (!(current->notifier)(current->notifier_data)) {
					clear_thread_flag(TIF_SIGPENDING);
					return 0;
				}
			}
		}

		if (!collect_signal(sig, pending, info))
			sig = 0;
	}

	return sig;
}
/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	int signr = __dequeue_signal(&tsk->pending, mask, info);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info);
		/*
		 * itimer signal ?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavy loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr.tv64 != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
	}
	if (likely(tsk == current))
		recalc_sigpending();
	if (signr && unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT))
			tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
	}
	if (signr &&
	     ((info->si_code & __SI_MASK) == __SI_TIMER) &&
	     info->si_sys_private){
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		do_schedule_next_timer(info);
		spin_lock(&tsk->sighand->siglock);
	}
	return signr;
}
/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up(struct task_struct *t, int resume)
{
	unsigned int mask;

	set_tsk_thread_flag(t, TIF_SIGPENDING);

	/*
	 * For SIGKILL, we want to wake it up in the stopped/traced case.
	 * We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	mask = TASK_INTERRUPTIBLE;
	if (resume)
		mask |= TASK_STOPPED | TASK_TRACED;
	if (!wake_up_state(t, mask))
		kick_process(t);
}
/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 *
 * This version takes a sigset mask and looks at all signals,
 * not just those in the first mask word.
 */
static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return 0;

	signandsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}

/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int rm_from_queue(unsigned long mask, struct sigpending *s)
{
	struct sigqueue *q, *n;

	if (!sigtestsetmask(&s->signal, mask))
		return 0;

	sigdelsetmask(&s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (q->info.si_signo < SIGRTMIN &&
		    (mask & sigmask(q->info.si_signo))) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}
/*
 * Bad permissions for sending the signal
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	int error = -EINVAL;
	if (!valid_signal(sig))
		return error;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	error = -EPERM;
	if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
	    && ((sig != SIGCONT) ||
		(process_session(current) != process_session(t)))
	    && (current->euid ^ t->suid) && (current->euid ^ t->uid)
	    && (current->uid ^ t->suid) && (current->uid ^ t->uid)
	    && !capable(CAP_KILL))
		return error;

	return security_task_kill(t, info, sig, 0);
}

/* forward decl */
static void do_notify_parent_cldstop(struct task_struct *tsk, int why);
/*
 * Handle magic process-wide effects of stop/continue signals.
 * Unlike the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals.  The process stop is done as a signal action for SIG_DFL.
 */
static void handle_stop_signal(int sig, struct task_struct *p)
{
	struct task_struct *t;

	if (p->signal->flags & SIGNAL_GROUP_EXIT)
		/*
		 * The process is in the middle of dying already.
		 */
		return;

	if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		rm_from_queue(sigmask(SIGCONT), &p->signal->shared_pending);
		t = p;
		do {
			rm_from_queue(sigmask(SIGCONT), &t->pending);
			t = next_thread(t);
		} while (t != p);
	} else if (sig == SIGCONT) {
		/*
		 * Remove all stop signals from all queues,
		 * and wake all threads.
		 */
		if (unlikely(p->signal->group_stop_count > 0)) {
			/*
			 * There was a group stop in progress.  We'll
			 * pretend it finished before we got here.  We are
			 * obliged to report it to the parent: if the
			 * SIGSTOP happened "after" this SIGCONT, then it
			 * would have cleared this pending SIGCONT.  If it
			 * happened "before" this SIGCONT, then the parent
			 * got the SIGCHLD about the stop finishing before
			 * the continue happened.  We do the notification
			 * now, and it's as if the stop had finished and
			 * the SIGCHLD was pending on entry to this kill.
			 */
			p->signal->group_stop_count = 0;
			p->signal->flags = SIGNAL_STOP_CONTINUED;
			spin_unlock(&p->sighand->siglock);
			do_notify_parent_cldstop(p, CLD_STOPPED);
			spin_lock(&p->sighand->siglock);
		}
		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
		t = p;
		do {
			unsigned int state;
			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);

			/*
			 * If there is a handler for SIGCONT, we must make
			 * sure that no thread returns to user mode before
			 * we post the signal, in case it was the only
			 * thread eligible to run the signal handler--then
			 * it must not do anything between resuming and
			 * running the handler.  With the TIF_SIGPENDING
			 * flag set, the thread will pause and acquire the
			 * siglock that we hold now and until we've queued
			 * the pending signal.
			 *
			 * Wake up the stopped thread _after_ setting
			 * TIF_SIGPENDING
			 */
			state = TASK_STOPPED;
			if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
				set_tsk_thread_flag(t, TIF_SIGPENDING);
				state |= TASK_INTERRUPTIBLE;
			}
			wake_up_state(t, state);

			t = next_thread(t);
		} while (t != p);

		if (p->signal->flags & SIGNAL_STOP_STOPPED) {
			/*
			 * We were in fact stopped, and are now continued.
			 * Notify the parent with CLD_CONTINUED.
			 */
			p->signal->flags = SIGNAL_STOP_CONTINUED;
			p->signal->group_exit_code = 0;
			spin_unlock(&p->sighand->siglock);
			do_notify_parent_cldstop(p, CLD_CONTINUED);
			spin_lock(&p->sighand->siglock);
		} else {
			/*
			 * We are not stopped, but there could be a stop
			 * signal in the middle of being processed after
			 * being removed from the queue.  Clear that too.
			 */
			p->signal->flags = 0;
		}
	} else if (sig == SIGKILL) {
		/*
		 * Make sure that any pending stop signal already dequeued
		 * is undone by the wakeup for SIGKILL.
		 */
		p->signal->flags = 0;
	}
}
static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			struct sigpending *signals)
{
	struct sigqueue * q = NULL;
	int ret = 0;

	/*
	 * Deliver the signal to listening signalfds. This must be called
	 * with the sighand lock held.
	 */
	signalfd_notify(t, sig);

	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if (info == SEND_SIG_FORCED)
		goto out_set;

	/* Real-time signals must be queued if sent by sigqueue, or
	   some other real-time mechanism.  It is implementation
	   defined whether kill() does so.  We attempt to do so, on
	   the principle of least surprise, but since kill is not
	   allowed to fail with EAGAIN when low on memory we just
	   make sure at least one signal gets delivered and don't
	   pass on the info struct.  */

	q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
					     (is_si_special(info) ||
					      info->si_code >= 0)));
	if (q) {
		list_add_tail(&q->list, &signals->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = current->pid;
			q->info.si_uid = current->uid;
			break;
		case (unsigned long) SEND_SIG_PRIV:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			break;
		}
	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER)
		/*
		 * Queue overflow, abort.  We may abort if the signal was rt
		 * and sent by user using something other than kill().
		 */
			return -EAGAIN;
	}

out_set:
	sigaddset(&signals->signal, sig);
	return ret;
}

#define LEGACY_QUEUE(sigptr, sig) \
	(((sig) < SIGRTMIN) && sigismember(&(sigptr)->signal, (sig)))
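/*
 * Illustrative note (not part of the original file): LEGACY_QUEUE()
 * implements the classic System V semantics that a non-realtime signal
 * is pending at most once.  E.g. if SIGCHLD is already a member of
 * t->pending.signal, a second SIGCHLD is simply dropped, whereas a
 * second SIGRTMIN+1 would be queued again with its own siginfo.
 */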
static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	int ret = 0;

	BUG_ON(!irqs_disabled());
	assert_spin_locked(&t->sighand->siglock);

	/* Short-circuit ignored signals.  */
	if (sig_ignored(t, sig))
		goto out;

	/* Support queueing exactly one non-rt signal, so that we
	   can get more detailed information about the cause of
	   the signal. */
	if (LEGACY_QUEUE(&t->pending, sig))
		goto out;

	ret = send_signal(sig, info, t, &t->pending);
	if (!ret && !sigismember(&t->blocked, sig))
		signal_wake_up(t, sig == SIGKILL);
out:
	return ret;
}
/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example.
 */
int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}

void
force_sig_specific(int sig, struct task_struct *t)
{
	force_sig_info(sig, SEND_SIG_FORCED, t);
}
/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline int wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return 0;
	if (p->flags & PF_EXITING)
		return 0;
	if (sig == SIGKILL)
		return 1;
	if (p->state & (TASK_STOPPED | TASK_TRACED))
		return 0;
	return task_curr(p) || !signal_pending(p);
}
static void
__group_complete_signal(int sig, struct task_struct *p)
{
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if (thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = p->signal->curr_target;
		if (t == NULL)
			/* restart balancing at this thread */
			t = p->signal->curr_target = p;

		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == p->signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		p->signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) && !(p->signal->flags & SIGNAL_GROUP_EXIT) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			p->signal->flags = SIGNAL_GROUP_EXIT;
			p->signal->group_exit_code = sig;
			p->signal->group_stop_count = 0;
			t = p;
			do {
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
				t = next_thread(t);
			} while (t != p);
			return;
		}

		/*
		 * There will be a core dump.  We make all threads other
		 * than the chosen one go into a group stop so that nothing
		 * happens until it gets scheduled, takes the signal off
		 * the shared queue, and does the core dump.  This is a
		 * little more complicated than strictly necessary, but it
		 * keeps the signal state that winds up in the core dump
		 * unchanged from the death state, e.g. which thread had
		 * the core-dump signal unblocked.
		 */
		rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
		p->signal->group_stop_count = 0;
		p->signal->group_exit_task = t;
		t = p;
		do {
			p->signal->group_stop_count++;
			signal_wake_up(t, 0);
			t = next_thread(t);
		} while (t != p);
		wake_up_process(p->signal->group_exit_task);
		return;
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}
int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret = 0;

	assert_spin_locked(&p->sighand->siglock);
	handle_stop_signal(sig, p);

	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig))
		return ret;

	if (LEGACY_QUEUE(&p->signal->shared_pending, sig))
		/* This is a non-RT signal and we already have one queued.  */
		return ret;

	/*
	 * Put this signal on the shared-pending queue, or fail with EAGAIN.
	 * We always use the shared queue for process-wide signals,
	 * to avoid several races.
	 */
	ret = send_signal(sig, info, p, &p->signal->shared_pending);
	if (unlikely(ret))
		return ret;

	__group_complete_signal(sig, p);
	return 0;
}
/*
 * Nuke all other threads in the group.
 */
void zap_other_threads(struct task_struct *p)
{
	struct task_struct *t;

	p->signal->flags = SIGNAL_GROUP_EXIT;
	p->signal->group_stop_count = 0;

	if (thread_group_empty(p))
		return;

	for (t = next_thread(p); t != p; t = next_thread(t)) {
		/*
		 * Don't bother with already dead threads
		 */
		if (t->exit_state)
			continue;

		/* SIGKILL will be handled before any pending SIGSTOP */
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}
}
/*
 * Must be called under rcu_read_lock() or with tasklist_lock read-held.
 */
struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
{
	struct sighand_struct *sighand;

	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == tsk->sighand))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}

	return sighand;
}

int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	unsigned long flags;
	int ret;

	ret = check_kill_permission(sig, info, p);

	if (!ret && sig) {
		ret = -ESRCH;
		if (lock_task_sighand(p, &flags)) {
			ret = __group_send_sig_info(sig, info, p);
			unlock_task_sighand(p, &flags);
		}
	}

	return ret;
}
/*
 * kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 */

int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}

int kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
	int retval;

	read_lock(&tasklist_lock);
	retval = __kill_pgrp_info(sig, info, pgrp);
	read_unlock(&tasklist_lock);

	return retval;
}

int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
{
	int error;
	struct task_struct *p;

	rcu_read_lock();
	if (unlikely(sig_needs_tasklist(sig)))
		read_lock(&tasklist_lock);

	p = pid_task(pid, PIDTYPE_PID);
	error = -ESRCH;
	if (p)
		error = group_send_sig_info(sig, info, p);

	if (unlikely(sig_needs_tasklist(sig)))
		read_unlock(&tasklist_lock);
	rcu_read_unlock();

	return error;
}

static int
kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;
	rcu_read_lock();
	error = kill_pid_info(sig, info, find_pid(pid));
	rcu_read_unlock();
	return error;
}
1060 int kill_pid_info_as_uid(int sig
, struct siginfo
*info
, struct pid
*pid
,
1061 uid_t uid
, uid_t euid
, u32 secid
)
1064 struct task_struct
*p
;
1066 if (!valid_signal(sig
))
1069 read_lock(&tasklist_lock
);
1070 p
= pid_task(pid
, PIDTYPE_PID
);
1075 if ((info
== SEND_SIG_NOINFO
|| (!is_si_special(info
) && SI_FROMUSER(info
)))
1076 && (euid
!= p
->suid
) && (euid
!= p
->uid
)
1077 && (uid
!= p
->suid
) && (uid
!= p
->uid
)) {
1081 ret
= security_task_kill(p
, info
, sig
, secid
);
1084 if (sig
&& p
->sighand
) {
1085 unsigned long flags
;
1086 spin_lock_irqsave(&p
->sighand
->siglock
, flags
);
1087 ret
= __group_send_sig_info(sig
, info
, p
);
1088 spin_unlock_irqrestore(&p
->sighand
->siglock
, flags
);
1091 read_unlock(&tasklist_lock
);
1094 EXPORT_SYMBOL_GPL(kill_pid_info_as_uid
);
/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct siginfo *info, int pid)
{
	int ret;

	if (!pid) {
		ret = kill_pgrp_info(sig, info, task_pgrp(current));
	} else if (pid == -1) {
		int retval = 0, count = 0;
		struct task_struct * p;

		read_lock(&tasklist_lock);
		for_each_process(p) {
			if (p->pid > 1 && p->tgid != current->tgid) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		read_unlock(&tasklist_lock);
		ret = count ? retval : -ESRCH;
	} else if (pid < 0) {
		ret = kill_pgrp_info(sig, info, find_pid(-pid));
	} else {
		ret = kill_pid_info(sig, info, find_pid(pid));
	}

	return ret;
}
/*
 * These are for backward compatibility with the rest of the kernel source.
 */

/*
 * These two are the most common entry points.  They send a signal
 * just to the specific thread.
 */
int
send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;
	unsigned long flags;

	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	/*
	 * We need the tasklist lock even for the specific
	 * thread case (when we don't need to follow the group
	 * lists) in order to avoid races with "p->sighand"
	 * going away or changing from under us.
	 */
	read_lock(&tasklist_lock);
	spin_lock_irqsave(&p->sighand->siglock, flags);
	ret = specific_send_sig_info(sig, info, p);
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	read_unlock(&tasklist_lock);
	return ret;
}

#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}

/*
 * This is the entry point for "process-wide" signals.
 * They will go to an appropriate thread in the thread group.
 */
int
send_group_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;
	read_lock(&tasklist_lock);
	ret = group_send_sig_info(sig, info, p);
	read_unlock(&tasklist_lock);
	return ret;
}

void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, SEND_SIG_PRIV, p);
}
/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
int
force_sigsegv(int sig, struct task_struct *p)
{
	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
	return 0;
}
int kill_pgrp(struct pid *pid, int sig, int priv)
{
	return kill_pgrp_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);
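/*
 * Illustrative sketch (not part of the original file): in-kernel
 * callers hold a struct pid reference and use these wrappers rather
 * than raw pid_t values, e.g.:
 *
 *	struct pid *pid = find_get_pid(nr);	// takes a reference
 *	if (pid) {
 *		kill_pid(pid, SIGTERM, 1);	// priv != 0: SEND_SIG_PRIV
 *		put_pid(pid);
 *	}
 */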
int
kill_proc(pid_t pid, int sig, int priv)
{
	return kill_proc_info(sig, __si_special(priv), pid);
}

/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of Posix Timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */

struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q;

	if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0)))
		q->flags |= SIGQUEUE_PREALLOC;
	return(q);
}
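/*
 * Illustrative sketch (not part of the original file): the POSIX timer
 * code pairs these calls roughly like this (simplified):
 *
 *	q = sigqueue_alloc();		// at timer_create(); NULL -> EAGAIN
 *	...
 *	q->info.si_code = SI_TIMER;
 *	send_sigqueue(sig, q, task);	// at each expiry, never allocates
 *	...
 *	sigqueue_free(q);		// at timer_delete()
 */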
void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * If the signal is still pending remove it from the
	 * pending queue.
	 */
	if (unlikely(!list_empty(&q->list))) {
		spinlock_t *lock = &current->sighand->siglock;
		read_lock(&tasklist_lock);
		spin_lock_irqsave(lock, flags);
		if (!list_empty(&q->list))
			list_del_init(&q->list);
		spin_unlock_irqrestore(lock, flags);
		read_unlock(&tasklist_lock);
	}
	q->flags &= ~SIGQUEUE_PREALLOC;
	__sigqueue_free(q);
}
int send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
{
	unsigned long flags;
	int ret = 0;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	/*
	 * The rcu based delayed sighand destroy makes it possible to
	 * run this without tasklist lock held. The task struct itself
	 * cannot go away as create_timer did get_task_struct().
	 *
	 * We return -1, when the task is marked exiting, so
	 * posix_timer_event can redirect it to the group leader
	 */
	rcu_read_lock();

	if (!likely(lock_task_sighand(p, &flags))) {
		ret = -1;
		goto out_err;
	}

	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queue just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		goto out;
	}
	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig)) {
		ret = 1;
		goto out;
	}
	/*
	 * Deliver the signal to listening signalfds. This must be called
	 * with the sighand lock held.
	 */
	signalfd_notify(p, sig);

	list_add_tail(&q->list, &p->pending.list);
	sigaddset(&p->pending.signal, sig);
	if (!sigismember(&p->blocked, sig))
		signal_wake_up(p, sig == SIGKILL);

out:
	unlock_task_sighand(p, &flags);
out_err:
	rcu_read_unlock();

	return ret;
}
int
send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
{
	unsigned long flags;
	int ret = 0;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	read_lock(&tasklist_lock);
	/* Since it_lock is held, p->sighand cannot be NULL. */
	spin_lock_irqsave(&p->sighand->siglock, flags);
	handle_stop_signal(sig, p);

	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig)) {
		ret = 1;
		goto out;
	}

	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queue just increment
		 * the overrun count.  Other uses should not try to
		 * send the signal multiple times.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		goto out;
	}
	/*
	 * Deliver the signal to listening signalfds. This must be called
	 * with the sighand lock held.
	 */
	signalfd_notify(p, sig);

	/*
	 * Put this signal on the shared-pending queue.
	 * We always use the shared queue for process-wide signals,
	 * to avoid several races.
	 */
	list_add_tail(&q->list, &p->signal->shared_pending.list);
	sigaddset(&p->signal->shared_pending.signal, sig);

	__group_complete_signal(sig, p);
out:
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	read_unlock(&tasklist_lock);
	return ret;
}
/*
 * Wake up any threads in the parent blocked in wait* syscalls.
 */
static inline void __wake_up_parent(struct task_struct *p,
				    struct task_struct *parent)
{
	wake_up_interruptible_sync(&parent->signal->wait_chldexit);
}

/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 */

void do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead.  */
	BUG_ON(tsk->state & (TASK_STOPPED|TASK_TRACED));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_pid = tsk->pid;
	info.si_uid = tsk->uid;

	/* FIXME: find out whether or not this is supposed to be c*time. */
	info.si_utime = cputime_to_jiffies(cputime_add(tsk->utime,
						       tsk->signal->utime));
	info.si_stime = cputime_to_jiffies(cputime_add(tsk->stime,
						       tsk->signal->stime));

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		tsk->exit_signal = -1;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	if (valid_signal(sig) && sig > 0)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);
}
static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
{
	struct siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;

	if (tsk->ptrace & PT_PTRACED)
		parent = tsk->parent;
	else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	info.si_pid = tsk->pid;
	info.si_uid = tsk->uid;

	/* FIXME: find out whether or not this is supposed to be c*time. */
	info.si_utime = cputime_to_jiffies(tsk->utime);
	info.si_stime = cputime_to_jiffies(tsk->stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}
static inline int may_ptrace_stop(void)
{
	if (!likely(current->ptrace & PT_PTRACED))
		return 0;

	if (unlikely(current->parent == current->real_parent &&
		    (current->ptrace & PT_ATTACHED)))
		return 0;

	if (unlikely(current->signal == current->parent->signal) &&
	    unlikely(current->signal->flags & SIGNAL_GROUP_EXIT))
		return 0;

	/*
	 * Are we in the middle of do_coredump?
	 * If so and our tracer is also part of the coredump stopping
	 * is a deadlock situation, and pointless because our tracer
	 * is dead so don't allow us to stop.
	 * If SIGKILL was already sent before the caller unlocked
	 * ->siglock we must see ->core_waiters != 0. Otherwise it
	 * is safe to enter schedule().
	 */
	if (unlikely(current->mm->core_waiters) &&
	    unlikely(current->mm == current->parent->mm))
		return 0;

	return 1;
}
/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer is gone,
 * we leave nostop_code in current->exit_code.
 */
static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
{
	/*
	 * If there is a group stop in progress,
	 * we must participate in the bookkeeping.
	 */
	if (current->signal->group_stop_count > 0)
		--current->signal->group_stop_count;

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/* Let the debugger run.  */
	set_current_state(TASK_TRACED);
	spin_unlock_irq(&current->sighand->siglock);
	try_to_freeze();
	read_lock(&tasklist_lock);
	if (may_ptrace_stop()) {
		do_notify_parent_cldstop(current, CLD_TRAPPED);
		read_unlock(&tasklist_lock);
		schedule();
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't stop here.
		 */
		read_unlock(&tasklist_lock);
		set_current_state(TASK_RUNNING);
		current->exit_code = nostop_code;
	}

	/*
	 * We are back.  Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 * This sets TIF_SIGPENDING, but never clears it.
	 */
	recalc_sigpending_tsk(current);
}
void ptrace_notify(int exit_code)
{
	siginfo_t info;

	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);

	memset(&info, 0, sizeof info);
	info.si_signo = SIGTRAP;
	info.si_code = exit_code;
	info.si_pid = current->pid;
	info.si_uid = current->uid;

	/* Let the debugger run.  */
	spin_lock_irq(&current->sighand->siglock);
	ptrace_stop(exit_code, 0, &info);
	spin_unlock_irq(&current->sighand->siglock);
}

static void
finish_stop(int stop_count)
{
	/*
	 * If there are no other threads in the group, or if there is
	 * a group stop in progress and we are the last to stop,
	 * report to the parent.  When ptraced, every thread reports itself.
	 */
	if (stop_count == 0 || (current->ptrace & PT_PTRACED)) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current, CLD_STOPPED);
		read_unlock(&tasklist_lock);
	}

	do {
		schedule();
	} while (try_to_freeze());
	/*
	 * Now we don't run again until continued.
	 */
	current->exit_code = 0;
}
/*
 * This performs the stopping for SIGSTOP and other stop signals.
 * We have to stop all threads in the thread group.
 * Returns nonzero if we've actually stopped and released the siglock.
 * Returns zero if we didn't stop and still hold the siglock.
 */
static int do_signal_stop(int signr)
{
	struct signal_struct *sig = current->signal;
	int stop_count;

	if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED))
		return 0;

	if (sig->group_stop_count > 0) {
		/*
		 * There is a group stop in progress.  We don't need to
		 * start another one.
		 */
		stop_count = --sig->group_stop_count;
	} else {
		/*
		 * There is no group stop already in progress.
		 * We must initiate one now.
		 */
		struct task_struct *t;

		sig->group_exit_code = signr;

		stop_count = 0;
		for (t = next_thread(current); t != current; t = next_thread(t))
			/*
			 * Setting state to TASK_STOPPED for a group
			 * stop is always done with the siglock held,
			 * so this check has no races.
			 */
			if (!t->exit_state &&
			    !(t->state & (TASK_STOPPED|TASK_TRACED))) {
				stop_count++;
				signal_wake_up(t, 0);
			}
		sig->group_stop_count = stop_count;
	}

	if (stop_count == 0)
		sig->flags = SIGNAL_STOP_STOPPED;
	current->exit_code = sig->group_exit_code;
	__set_current_state(TASK_STOPPED);

	spin_unlock_irq(&current->sighand->siglock);
	finish_stop(stop_count);
	return 1;
}
/*
 * Do appropriate magic when group_stop_count > 0.
 * We return nonzero if we stopped, after releasing the siglock.
 * We return zero if we still hold the siglock and should look
 * for another signal without checking group_stop_count again.
 */
static int handle_group_stop(void)
{
	int stop_count;

	if (current->signal->group_exit_task == current) {
		/*
		 * Group stop is so we can do a core dump,
		 * We are the initiating thread, so get on with it.
		 */
		current->signal->group_exit_task = NULL;
		return 0;
	}

	if (current->signal->flags & SIGNAL_GROUP_EXIT)
		/*
		 * Group stop is so another thread can do a core dump,
		 * or else we are racing against a death signal.
		 * Just punt the stop so we can get the next signal.
		 */
		return 0;

	/*
	 * There is a group stop in progress.  We stop
	 * without any associated signal being in our queue.
	 */
	stop_count = --current->signal->group_stop_count;
	if (stop_count == 0)
		current->signal->flags = SIGNAL_STOP_STOPPED;
	current->exit_code = current->signal->group_exit_code;
	set_current_state(TASK_STOPPED);
	spin_unlock_irq(&current->sighand->siglock);
	finish_stop(stop_count);
	return 1;
}
int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
			  struct pt_regs *regs, void *cookie)
{
	sigset_t *mask = &current->blocked;
	int signr = 0;

	try_to_freeze();

relock:
	spin_lock_irq(&current->sighand->siglock);
	for (;;) {
		struct k_sigaction *ka;

		if (unlikely(current->signal->group_stop_count > 0) &&
		    handle_group_stop())
			goto relock;

		signr = dequeue_signal(current, mask, info);

		if (!signr)
			break; /* will return 0 */

		if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
			ptrace_signal_deliver(regs, cookie);

			/* Let the debugger run.  */
			ptrace_stop(signr, signr, info);

			/* We're back.  Did the debugger cancel the sig?  */
			signr = current->exit_code;
			if (!signr)
				continue;

			current->exit_code = 0;

			/* Update the siginfo structure if the signal has
			   changed.  If the debugger wanted something
			   specific in the siginfo structure then it should
			   have updated *info via PTRACE_SETSIGINFO.  */
			if (signr != info->si_signo) {
				info->si_signo = signr;
				info->si_errno = 0;
				info->si_code = SI_USER;
				info->si_pid = current->parent->pid;
				info->si_uid = current->parent->uid;
			}

			/* If the (new) signal is now blocked, requeue it.  */
			if (sigismember(&current->blocked, signr)) {
				specific_send_sig_info(signr, info, current);
				continue;
			}
		}

		ka = &current->sighand->action[signr-1];
		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
			continue;
		if (ka->sa.sa_handler != SIG_DFL) {
			/* Run the handler.  */
			*return_ka = *ka;

			if (ka->sa.sa_flags & SA_ONESHOT)
				ka->sa.sa_handler = SIG_DFL;

			break; /* will return non-zero "signr" value */
		}

		/*
		 * Now we are doing the default action for this signal.
		 */
		if (sig_kernel_ignore(signr)) /* Default is nothing. */
			continue;

		/*
		 * Init of a pid space gets no signals it doesn't want from
		 * within that pid space. It can of course get signals from
		 * its parent pid space.
		 */
		if (current == child_reaper(current))
			continue;

		if (sig_kernel_stop(signr)) {
			/*
			 * The default action is to stop all threads in
			 * the thread group.  The job control signals
			 * do nothing in an orphaned pgrp, but SIGSTOP
			 * always works.  Note that siglock needs to be
			 * dropped during the call to is_orphaned_pgrp()
			 * because of lock ordering with tasklist_lock.
			 * This allows an intervening SIGCONT to be posted.
			 * We need to check for that and bail out if necessary.
			 */
			if (signr != SIGSTOP) {
				spin_unlock_irq(&current->sighand->siglock);

				/* signals can be posted during this window */

				if (is_current_pgrp_orphaned())
					goto relock;

				spin_lock_irq(&current->sighand->siglock);
			}

			if (likely(do_signal_stop(signr))) {
				/* It released the siglock.  */
				goto relock;
			}

			/*
			 * We didn't actually stop, due to a race
			 * with SIGCONT or something like that.
			 */
			continue;
		}

		spin_unlock_irq(&current->sighand->siglock);

		/*
		 * Anything else is fatal, maybe with a core dump.
		 */
		current->flags |= PF_SIGNALED;
		if (sig_kernel_coredump(signr)) {
			/*
			 * If it was able to dump core, this kills all
			 * other threads in the group and synchronizes with
			 * their demise.  If we lost the race with another
			 * thread getting here, it set group_exit_code
			 * first and our do_group_exit call below will use
			 * that value and ignore the one we pass it.
			 */
			do_coredump((long)signr, signr, regs);
		}

		/*
		 * Death signals, no core dump.
		 */
		do_group_exit(signr);
		/* NOTREACHED */
	}
	spin_unlock_irq(&current->sighand->siglock);
	return signr;
}
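/*
 * Illustrative sketch (not part of the original file): each arch's
 * do_signal() drives the function above in roughly this shape
 * (simplified, arch-specific frame setup omitted):
 *
 *	siginfo_t info;
 *	struct k_sigaction ka;
 *	int signr = get_signal_to_deliver(&info, &ka, regs, NULL);
 *	if (signr > 0)
 *		handle_signal(signr, &info, &ka, oldset, regs);
 */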
EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL_GPL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(kill_proc);
EXPORT_SYMBOL(ptrace_notify);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
EXPORT_SYMBOL(sigprocmask);
EXPORT_SYMBOL(block_all_signals);
EXPORT_SYMBOL(unblock_all_signals);

/*
 * System call entry points.
 */

asmlinkage long sys_restart_syscall(void)
{
	struct restart_block *restart = &current_thread_info()->restart_block;
	return restart->fn(restart);
}

long do_no_restart_syscall(struct restart_block *param)
{
	return -EINTR;
}
/*
 * We don't need to get the kernel lock - this is all local to this
 * particular thread.. (and that's good, because this is _heavily_
 * used by various programs)
 */

/*
 * This is also useful for kernel threads that want to temporarily
 * (or permanently) block certain signals.
 *
 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
 * interface happily blocks "unblockable" signals like SIGKILL
 * and friends.
 */
int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
{
	int error;

	spin_lock_irq(&current->sighand->siglock);
	if (oldset)
		*oldset = current->blocked;

	error = 0;
	switch (how) {
	case SIG_BLOCK:
		sigorsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_UNBLOCK:
		signandsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_SETMASK:
		current->blocked = *set;
		break;
	default:
		error = -EINVAL;
		break;
	}
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	return error;
}
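/*
 * Illustrative sketch (not part of the original file): a kernel thread
 * that wants to accept only SIGKILL could do:
 *
 *	sigset_t blocked;
 *
 *	siginitsetinv(&blocked, sigmask(SIGKILL));
 *	sigprocmask(SIG_SETMASK, &blocked, NULL);
 *
 * Unlike the userspace interface, this happily blocks "unblockable"
 * signals too, so the caller must leave SIGKILL out deliberately.
 */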
asmlinkage long
sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize)
{
	int error = -EINVAL;
	sigset_t old_set, new_set;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, &old_set);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked;
		spin_unlock_irq(&current->sighand->siglock);

	set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}
*set
, unsigned long sigsetsize
)
1982 long error
= -EINVAL
;
1985 if (sigsetsize
> sizeof(sigset_t
))
1988 spin_lock_irq(¤t
->sighand
->siglock
);
1989 sigorsets(&pending
, ¤t
->pending
.signal
,
1990 ¤t
->signal
->shared_pending
.signal
);
1991 spin_unlock_irq(¤t
->sighand
->siglock
);
1993 /* Outside the lock because only this thread touches it. */
1994 sigandsets(&pending
, ¤t
->blocked
, &pending
);
1997 if (!copy_to_user(set
, &pending
, sigsetsize
))
2005 sys_rt_sigpending(sigset_t __user
*set
, size_t sigsetsize
)
2007 return do_sigpending(set
, sigsetsize
);
#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER

int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
{
	int err;

	if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
		return -EFAULT;
	if (from->si_code < 0)
		return __copy_to_user(to, from, sizeof(siginfo_t))
			? -EFAULT : 0;
	/*
	 * If you change siginfo_t structure, please be sure
	 * this code is fixed accordingly.
	 * Please remember to update the signalfd_copyinfo() function
	 * inside fs/signalfd.c too, in case siginfo_t changes.
	 * It should never copy any pad contained in the structure
	 * to avoid security leaks, but must copy the generic
	 * 3 ints plus the relevant union member.
	 */
	err = __put_user(from->si_signo, &to->si_signo);
	err |= __put_user(from->si_errno, &to->si_errno);
	err |= __put_user((short)from->si_code, &to->si_code);
	switch (from->si_code & __SI_MASK) {
	case __SI_KILL:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	case __SI_TIMER:
		err |= __put_user(from->si_tid, &to->si_tid);
		err |= __put_user(from->si_overrun, &to->si_overrun);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	case __SI_POLL:
		err |= __put_user(from->si_band, &to->si_band);
		err |= __put_user(from->si_fd, &to->si_fd);
		break;
	case __SI_FAULT:
		err |= __put_user(from->si_addr, &to->si_addr);
#ifdef __ARCH_SI_TRAPNO
		err |= __put_user(from->si_trapno, &to->si_trapno);
#endif
		break;
	case __SI_CHLD:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_status, &to->si_status);
		err |= __put_user(from->si_utime, &to->si_utime);
		err |= __put_user(from->si_stime, &to->si_stime);
		break;
	case __SI_RT: /* This is not generated by the kernel as of now. */
	case __SI_MESGQ: /* But this is */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	default: /* this is just in case for now ... */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	}
	return err;
}

#endif
asmlinkage long
sys_rt_sigtimedwait(const sigset_t __user *uthese,
		    siginfo_t __user *uinfo,
		    const struct timespec __user *uts,
		    size_t sigsetsize)
{
	int ret, sig;
	sigset_t these;
	struct timespec ts;
	siginfo_t info;
	long timeout = 0;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&these, uthese, sizeof(these)))
		return -EFAULT;

	/*
	 * Invert the set of allowed signals to get those we
	 * want to block.
	 */
	sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
	signotset(&these);

	if (uts) {
		if (copy_from_user(&ts, uts, sizeof(ts)))
			return -EFAULT;
		if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
		    || ts.tv_sec < 0)
			return -EINVAL;
	}

	spin_lock_irq(&current->sighand->siglock);
	sig = dequeue_signal(current, &these, &info);
	if (!sig) {
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (uts)
			timeout = (timespec_to_jiffies(&ts)
				   + (ts.tv_sec || ts.tv_nsec));

		if (timeout) {
			/* None ready -- temporarily unblock those we're
			 * interested while we are sleeping in so that we'll
			 * be awakened when they arrive.  */
			current->real_blocked = current->blocked;
			sigandsets(&current->blocked, &current->blocked, &these);
			recalc_sigpending();
			spin_unlock_irq(&current->sighand->siglock);

			timeout = schedule_timeout_interruptible(timeout);

			spin_lock_irq(&current->sighand->siglock);
			sig = dequeue_signal(current, &these, &info);
			current->blocked = current->real_blocked;
			siginitset(&current->real_blocked, 0);
			recalc_sigpending();
		}
	}
	spin_unlock_irq(&current->sighand->siglock);

	if (sig) {
		ret = sig;
		if (uinfo) {
			if (copy_siginfo_to_user(uinfo, &info))
				ret = -EFAULT;
		}
	} else {
		ret = -EAGAIN;
		if (timeout)
			ret = -EINTR;
	}

	return ret;
}
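/*
 * Illustrative userspace sketch (not part of the original file) of the
 * call sequence this syscall implements:
 *
 *	sigset_t set;
 *	siginfo_t info;
 *	struct timespec ts = { .tv_sec = 5 };
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);	// must be blocked first
 *	if (sigtimedwait(&set, &info, &ts) == SIGUSR1)
 *		...				// info.si_pid is the sender
 */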
asmlinkage long
sys_kill(int pid, int sig)
{
	struct siginfo info;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_USER;
	info.si_pid = current->tgid;
	info.si_uid = current->uid;

	return kill_something_info(sig, &info, pid);
}
static int do_tkill(int tgid, int pid, int sig)
{
	int error;
	struct siginfo info;
	struct task_struct *p;

	error = -ESRCH;
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_TKILL;
	info.si_pid = current->tgid;
	info.si_uid = current->uid;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);
	if (p && (tgid <= 0 || p->tgid == tgid)) {
		error = check_kill_permission(sig, &info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe.  No signal is actually delivered.
		 */
		if (!error && sig && p->sighand) {
			spin_lock_irq(&p->sighand->siglock);
			handle_stop_signal(sig, p);
			error = specific_send_sig_info(sig, &info, p);
			spin_unlock_irq(&p->sighand->siglock);
		}
	}
	read_unlock(&tasklist_lock);

	return error;
}
/**
 *  sys_tgkill - send signal to one specific thread
 *  @tgid: the thread group ID of the thread
 *  @pid: the PID of the thread
 *  @sig: signal to be sent
 *
 *  This syscall also checks the @tgid and returns -ESRCH even if the PID
 *  exists but no longer belongs to the target process. This method
 *  solves the problem of threads exiting and PIDs getting reused.
 */
asmlinkage long sys_tgkill(int tgid, int pid, int sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	return do_tkill(tgid, pid, sig);
}

/*
 *  Send a signal to only one task, even if it's a CLONE_THREAD task.
 */
asmlinkage long
sys_tkill(int pid, int sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0)
		return -EINVAL;

	return do_tkill(0, pid, sig);
}
asmlinkage long
sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
{
	siginfo_t info;

	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;

	/* Not even root can pretend to send signals from the kernel.
	   Nor can they impersonate a kill(), which adds source info.  */
	if (info.si_code >= 0)
		return -EPERM;
	info.si_signo = sig;

	/* POSIX.1b doesn't mention process groups.  */
	return kill_proc_info(sig, &info, pid);
}
int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
{
	struct k_sigaction *k;
	sigset_t mask;

	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
		return -EINVAL;

	k = &current->sighand->action[sig-1];

	spin_lock_irq(&current->sighand->siglock);
	if (signal_pending(current)) {
		/*
		 * If there might be a fatal signal pending on multiple
		 * threads, make sure we take it before changing the action.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		return -ERESTARTNOINTR;
	}

	if (oact)
		*oact = *k;

	if (act) {
		sigdelsetmask(&act->sa.sa_mask,
			      sigmask(SIGKILL) | sigmask(SIGSTOP));
		*k = *act;
		/*
		 * POSIX 3.3.1.3:
		 *  "Setting a signal action to SIG_IGN for a signal that is
		 *   pending shall cause the pending signal to be discarded,
		 *   whether or not it is blocked."
		 *
		 *  "Setting a signal action to SIG_DFL for a signal that is
		 *   pending and whose default action is to ignore the signal
		 *   (for example, SIGCHLD), shall cause the pending signal to
		 *   be discarded, whether or not it is blocked"
		 */
		if (act->sa.sa_handler == SIG_IGN ||
		   (act->sa.sa_handler == SIG_DFL && sig_kernel_ignore(sig))) {
			struct task_struct *t = current;
			sigemptyset(&mask);
			sigaddset(&mask, sig);
			rm_from_queue_full(&mask, &t->signal->shared_pending);
			do {
				rm_from_queue_full(&mask, &t->pending);
				recalc_sigpending_and_wake(t);
				t = next_thread(t);
			} while (t != current);
		}
	}

	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}
int
do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
{
	stack_t oss;
	int error;

	if (uoss) {
		oss.ss_sp = (void __user *) current->sas_ss_sp;
		oss.ss_size = current->sas_ss_size;
		oss.ss_flags = sas_ss_flags(sp);
	}

	if (uss) {
		void __user *ss_sp;
		size_t ss_size;
		int ss_flags;

		error = -EFAULT;
		if (!access_ok(VERIFY_READ, uss, sizeof(*uss))
		    || __get_user(ss_sp, &uss->ss_sp)
		    || __get_user(ss_flags, &uss->ss_flags)
		    || __get_user(ss_size, &uss->ss_size))
			goto out;

		error = -EPERM;
		if (on_sig_stack(sp))
			goto out;

		error = -EINVAL;
		/*
		 * Note - this code used to test ss_flags incorrectly
		 *	  old code may have been written using ss_flags==0
		 *	  to mean ss_flags==SS_ONSTACK (as this was the only
		 *	  way that worked) - this fix preserves that older
		 *	  mechanism
		 */
		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
			goto out;

		if (ss_flags == SS_DISABLE) {
			ss_size = 0;
			ss_sp = NULL;
		} else {
			error = -ENOMEM;
			if (ss_size < MINSIGSTKSZ)
				goto out;
		}

		current->sas_ss_sp = (unsigned long) ss_sp;
		current->sas_ss_size = ss_size;
	}

	if (uoss) {
		error = -EFAULT;
		if (copy_to_user(uoss, &oss, sizeof(oss)))
			goto out;
	}

	error = 0;
out:
	return error;
}
2373 sys_sigpending(old_sigset_t __user
*set
)
2375 return do_sigpending(set
, sizeof(*set
));
2380 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
2381 /* Some platforms have their own version with special arguments others
2382 support only sys_rt_sigprocmask. */
2385 sys_sigprocmask(int how
, old_sigset_t __user
*set
, old_sigset_t __user
*oset
)
2388 old_sigset_t old_set
, new_set
;
2392 if (copy_from_user(&new_set
, set
, sizeof(*set
)))
2394 new_set
&= ~(sigmask(SIGKILL
) | sigmask(SIGSTOP
));
2396 spin_lock_irq(¤t
->sighand
->siglock
);
2397 old_set
= current
->blocked
.sig
[0];
2405 sigaddsetmask(¤t
->blocked
, new_set
);
2408 sigdelsetmask(¤t
->blocked
, new_set
);
2411 current
->blocked
.sig
[0] = new_set
;
2415 recalc_sigpending();
2416 spin_unlock_irq(¤t
->sighand
->siglock
);
2422 old_set
= current
->blocked
.sig
[0];
2425 if (copy_to_user(oset
, &old_set
, sizeof(*oset
)))
2432 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
#ifdef __ARCH_WANT_SYS_RT_SIGACTION
asmlinkage long
sys_rt_sigaction(int sig,
		 const struct sigaction __user *act,
		 struct sigaction __user *oact,
		 size_t sigsetsize)
{
	struct k_sigaction new_sa, old_sa;
	int ret = -EINVAL;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (act) {
		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);

	if (!ret && oact) {
		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
			return -EFAULT;
	}
out:
	return ret;
}
#endif /* __ARCH_WANT_SYS_RT_SIGACTION */
#ifdef __ARCH_WANT_SYS_SGETMASK

/*
 * For backwards compatibility.  Functionality superseded by sigprocmask.
 */
asmlinkage long
sys_sgetmask(void)
{
	/* SMP safe */
	return current->blocked.sig[0];
}

asmlinkage long
sys_ssetmask(int newmask)
{
	int old;

	spin_lock_irq(&current->sighand->siglock);
	old = current->blocked.sig[0];

	siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
						  sigmask(SIGSTOP)));
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	return old;
}
#endif /* __ARCH_WANT_SGETMASK */
#ifdef __ARCH_WANT_SYS_SIGNAL
/*
 * For backwards compatibility.  Functionality superseded by sigaction.
 */
asmlinkage unsigned long
sys_signal(int sig, __sighandler_t handler)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	new_sa.sa.sa_handler = handler;
	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
	sigemptyset(&new_sa.sa.sa_mask);

	ret = do_sigaction(sig, &new_sa, &old_sa);

	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
#endif /* __ARCH_WANT_SYS_SIGNAL */

#ifdef __ARCH_WANT_SYS_PAUSE

asmlinkage long
sys_pause(void)
{
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	return -ERESTARTNOHAND;
}

#endif
#ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize)
{
	sigset_t newset;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset, unewset, sizeof(newset)))
		return -EFAULT;
	sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));

	spin_lock_irq(&current->sighand->siglock);
	current->saved_sigmask = current->blocked;
	current->blocked = newset;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	current->state = TASK_INTERRUPTIBLE;
	schedule();
	set_thread_flag(TIF_RESTORE_SIGMASK);
	return -ERESTARTNOHAND;
}
#endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
__attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}

void __init signals_init(void)
{
	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
}