/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */

#include <linux/config.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/posix-timers.h>
#include <linux/signal.h>
#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
/*
 * SLAB caches for signal bits.
 */

static kmem_cache_t *sigqueue_cachep;
/*
 * In POSIX a signal is sent either to a specific thread (Linux task)
 * or to the process as a whole (Linux thread group).  How the signal
 * is sent determines whether it's to one thread or the whole group,
 * which determines which signal mask(s) are involved in blocking it
 * from being delivered until later.  When the signal is delivered,
 * either it's caught or ignored by a user handler or it has a default
 * effect that applies to the whole thread group (POSIX process).
 *
 * The possible effects an unblocked signal set to SIG_DFL can have are:
 *   ignore	- Nothing happens
 *   terminate	- kill the process, i.e. all threads in the group,
 *		  similar to exit_group.  The group leader (only) reports
 *		  WIFSIGNALED status to its parent.
 *   coredump	- write a core dump file describing all threads using
 *		  the same mm and then kill all those threads
 *   stop	- stop all the threads in the group, i.e. TASK_STOPPED state
 *
 * SIGKILL and SIGSTOP cannot be caught, blocked, or ignored.
 * Other signals when not blocked and set to SIG_DFL behave as follows.
 * The job control signals also have other special effects.
 *
 *	+--------------------+------------------+
 *	|  POSIX signal      |  default action  |
 *	+--------------------+------------------+
 *	|  SIGHUP            |  terminate       |
 *	|  SIGINT            |  terminate       |
 *	|  SIGQUIT           |  coredump        |
 *	|  SIGILL            |  coredump        |
 *	|  SIGTRAP           |  coredump        |
 *	|  SIGABRT/SIGIOT    |  coredump        |
 *	|  SIGBUS            |  coredump        |
 *	|  SIGFPE            |  coredump        |
 *	|  SIGKILL           |  terminate(+)    |
 *	|  SIGUSR1           |  terminate       |
 *	|  SIGSEGV           |  coredump        |
 *	|  SIGUSR2           |  terminate       |
 *	|  SIGPIPE           |  terminate       |
 *	|  SIGALRM           |  terminate       |
 *	|  SIGTERM           |  terminate       |
 *	|  SIGCHLD           |  ignore          |
 *	|  SIGCONT           |  ignore(*)       |
 *	|  SIGSTOP           |  stop(*)(+)      |
 *	|  SIGTSTP           |  stop(*)         |
 *	|  SIGTTIN           |  stop(*)         |
 *	|  SIGTTOU           |  stop(*)         |
 *	|  SIGURG            |  ignore          |
 *	|  SIGXCPU           |  coredump        |
 *	|  SIGXFSZ           |  coredump        |
 *	|  SIGVTALRM         |  terminate       |
 *	|  SIGPROF           |  terminate       |
 *	|  SIGPOLL/SIGIO     |  terminate       |
 *	|  SIGSYS/SIGUNUSED  |  coredump        |
 *	|  SIGSTKFLT         |  terminate       |
 *	|  SIGWINCH          |  ignore          |
 *	|  SIGPWR            |  terminate       |
 *	|  SIGRTMIN-SIGRTMAX |  terminate       |
 *	+--------------------+------------------+
 *	|  non-POSIX signal  |  default action  |
 *	+--------------------+------------------+
 *	|  SIGEMT            |  coredump        |
 *	+--------------------+------------------+
 *
 * (+) For SIGKILL and SIGSTOP the action is "always", not just "default".
 * (*) Special job control effects:
 * When SIGCONT is sent, it resumes the process (all threads in the group)
 * from TASK_STOPPED state and also clears any pending/queued stop signals
 * (any of those marked with "stop(*)").  This happens regardless of blocking,
 * catching, or ignoring SIGCONT.  When any stop signal is sent, it clears
 * any pending/queued SIGCONT signals; this happens regardless of blocking,
 * catching, or ignoring the stop signal, though (except for SIGSTOP) the
 * default action of stopping the process may happen later or never.
 */
#ifdef SIGEMT
#define M_SIGEMT	M(SIGEMT)
#else
#define M_SIGEMT	0
#endif

#if SIGRTMIN > BITS_PER_LONG
#define M(sig) (1ULL << ((sig)-1))
#else
#define M(sig) (1UL << ((sig)-1))
#endif
#define T(sig, mask) (M(sig) & (mask))
#define SIG_KERNEL_ONLY_MASK (\
	M(SIGKILL)   |  M(SIGSTOP)                                   )

#define SIG_KERNEL_STOP_MASK (\
	M(SIGSTOP)   |  M(SIGTSTP)   |  M(SIGTTIN)   |  M(SIGTTOU)   )

#define SIG_KERNEL_COREDUMP_MASK (\
	M(SIGQUIT)   |  M(SIGILL)    |  M(SIGTRAP)   |  M(SIGABRT)   | \
	M(SIGFPE)    |  M(SIGSEGV)   |  M(SIGBUS)    |  M(SIGSYS)    | \
	M(SIGXCPU)   |  M(SIGXFSZ)   |  M_SIGEMT                     )

#define SIG_KERNEL_IGNORE_MASK (\
	M(SIGCONT)   |  M(SIGCHLD)   |  M(SIGWINCH)  |  M(SIGURG)    )

#define sig_kernel_only(sig) \
		(((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_ONLY_MASK))
#define sig_kernel_coredump(sig) \
		(((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_COREDUMP_MASK))
#define sig_kernel_ignore(sig) \
		(((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_IGNORE_MASK))
#define sig_kernel_stop(sig) \
		(((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_STOP_MASK))
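
/*
 * Illustrative note (editor's sketch, not part of the original source):
 * with SIGRTMIN <= BITS_PER_LONG, sig_kernel_stop(SIGTSTP) expands to
 *
 *	((SIGTSTP < SIGRTMIN) && ((1UL << (SIGTSTP-1)) & SIG_KERNEL_STOP_MASK))
 *
 * which is nonzero because SIGTSTP's bit is part of SIG_KERNEL_STOP_MASK,
 * while sig_kernel_stop(SIGKILL) evaluates to 0.  The "(sig) < SIGRTMIN"
 * guard keeps realtime signals out of all of these classifications.
 */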
#define sig_user_defined(t, signr) \
	(((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_DFL) &&	\
	 ((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_IGN))

#define sig_fatal(t, signr) \
	(!T(signr, SIG_KERNEL_IGNORE_MASK|SIG_KERNEL_STOP_MASK) && \
	 (t)->sighand->action[(signr)-1].sa.sa_handler == SIG_DFL)
static int sig_ignored(struct task_struct *t, int sig)
{
	void __user *handler;

	/*
	 * Tracers always want to know about signals..
	 */
	if (t->ptrace & PT_PTRACED)
		return 0;

	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig))
		return 0;

	/* Is it explicitly or implicitly ignored? */
	handler = t->sighand->action[sig-1].sa.sa_handler;
	return   handler == SIG_IGN ||
		(handler == SIG_DFL && sig_kernel_ignore(sig));
}
/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}
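
/*
 * Illustrative note (editor's sketch, not part of the original source):
 * the switch above is a hand-unrolled loop over the signal words.  On a
 * platform where _NSIG_WORDS == 2, only the "case 2" arm runs:
 *
 *	ready  = signal->sig[1] & ~blocked->sig[1];
 *	ready |= signal->sig[0] & ~blocked->sig[0];
 *
 * i.e. "is any pending signal not also blocked", one word at a time.
 */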
#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

fastcall void recalc_sigpending_tsk(struct task_struct *t)
{
	if (t->signal->group_stop_count > 0 ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked))
		set_tsk_thread_flag(t, TIF_SIGPENDING);
	else
		clear_tsk_thread_flag(t, TIF_SIGPENDING);
}

void recalc_sigpending(void)
{
	recalc_sigpending_tsk(current);
}
/* Given the mask, find the first available signal that should be serviced. */

static int
next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;
	switch (_NSIG_WORDS) {
	default:
		for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
			if ((x = *s &~ *m) != 0) {
				sig = ffz(~x) + i*_NSIG_BPW + 1;
				break;
			}
		break;

	case 2: if ((x = s[0] &~ m[0]) != 0)
			sig = 1;
		else if ((x = s[1] &~ m[1]) != 0)
			sig = _NSIG_BPW + 1;
		else
			break;
		sig += ffz(~x);
		break;

	case 1: if ((x = *s &~ *m) != 0)
			sig = ffz(~x) + 1;
		break;
	}

	return sig;
}
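
/*
 * Worked example (editor's sketch, not part of the original source):
 * suppose word 0 of the pending set is 0x102 (bit 1 = SIGINT, bit 8 =
 * SIGKILL) and the mask word is 0x002, blocking SIGINT.  Then
 *
 *	x = 0x102 & ~0x002 = 0x100
 *	sig = ffz(~0x100) + 0*_NSIG_BPW + 1 = 8 + 1 = 9	(SIGKILL)
 *
 * ffz() finds the first zero bit, so ffz(~x) is the index of the lowest
 * set bit in x: the lowest-numbered deliverable signal wins.
 */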
static struct sigqueue *__sigqueue_alloc(struct task_struct *t, unsigned int __nocast flags,
					 int override_rlimit)
{
	struct sigqueue *q = NULL;

	atomic_inc(&t->user->sigpending);
	if (override_rlimit ||
	    atomic_read(&t->user->sigpending) <=
			t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	if (unlikely(q == NULL)) {
		atomic_dec(&t->user->sigpending);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->lock = NULL;
		q->user = get_uid(t->user);
	}
	return(q);
}
static inline void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}
static void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue , list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}
/*
 * Flush all pending signals for a task.
 */

void
flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t,TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
/*
 * This function expects the tasklist_lock write-locked.
 */
void __exit_sighand(struct task_struct *tsk)
{
	struct sighand_struct * sighand = tsk->sighand;

	/* Ok, we're done with the signal handlers */
	tsk->sighand = NULL;
	if (atomic_dec_and_test(&sighand->count))
		kmem_cache_free(sighand_cachep, sighand);
}

void exit_sighand(struct task_struct *tsk)
{
	write_lock_irq(&tasklist_lock);
	__exit_sighand(tsk);
	write_unlock_irq(&tasklist_lock);
}
/*
 * This function expects the tasklist_lock write-locked.
 */
void __exit_signal(struct task_struct *tsk)
{
	struct signal_struct * sig = tsk->signal;
	struct sighand_struct * sighand = tsk->sighand;

	if (!sig)
		BUG();
	if (!atomic_read(&sig->count))
		BUG();
	spin_lock(&sighand->siglock);
	posix_cpu_timers_exit(tsk);
	if (atomic_dec_and_test(&sig->count)) {
		posix_cpu_timers_exit_group(tsk);
		if (tsk == sig->curr_target)
			sig->curr_target = next_thread(tsk);
		tsk->signal = NULL;
		spin_unlock(&sighand->siglock);
		flush_sigqueue(&sig->shared_pending);
	} else {
		/*
		 * If there is any task waiting for the group exit
		 * then notify it:
		 */
		if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count) {
			wake_up_process(sig->group_exit_task);
			sig->group_exit_task = NULL;
		}
		if (tsk == sig->curr_target)
			sig->curr_target = next_thread(tsk);
		tsk->signal = NULL;
		/*
		 * Accumulate here the counters for all threads but the
		 * group leader as they die, so they can be added into
		 * the process-wide totals when those are taken.
		 * The group leader stays around as a zombie as long
		 * as there are other threads.  When it gets reaped,
		 * the exit.c code will add its counts into these totals.
		 * We won't ever get here for the group leader, since it
		 * will have been the last reference on the signal_struct.
		 */
		sig->utime = cputime_add(sig->utime, tsk->utime);
		sig->stime = cputime_add(sig->stime, tsk->stime);
		sig->min_flt += tsk->min_flt;
		sig->maj_flt += tsk->maj_flt;
		sig->nvcsw += tsk->nvcsw;
		sig->nivcsw += tsk->nivcsw;
		sig->sched_time += tsk->sched_time;
		spin_unlock(&sighand->siglock);
		sig = NULL;	/* Marker for below. */
	}
	clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
	flush_sigqueue(&tsk->pending);
	if (sig) {
		/*
		 * We are cleaning up the signal_struct here.  We delayed
		 * calling exit_itimers until after flush_sigqueue, just in
		 * case our thread-local pending queue contained a queued
		 * timer signal that would have been cleared in
		 * exit_itimers.  When that called sigqueue_free, it would
		 * attempt to re-take the tasklist_lock and deadlock.  This
		 * can never happen if we ensure that all queues the
		 * timer's signal might be queued on have been flushed
		 * first.  The shared_pending queue, and our own pending
		 * queue are the only queues the timer could be on, since
		 * there are no other threads left in the group and timer
		 * signals are constrained to threads inside the group.
		 */
		exit_itimers(sig);
		exit_thread_group_keys(sig);
		kmem_cache_free(signal_cachep, sig);
	}
}
void exit_signal(struct task_struct *tsk)
{
	write_lock_irq(&tasklist_lock);
	__exit_signal(tsk);
	write_unlock_irq(&tasklist_lock);
}
/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}
/* Notify the system that a driver wants to block all signals for this
 * process, and wants to be notified if any signals at all were to be
 * sent/acted upon.  If the notifier routine returns non-zero, then the
 * signal will be acted upon after all.  If the notifier routine returns 0,
 * then the signal will be blocked.  Only one block per process is
 * allowed.  priv is a pointer to private data that the notifier routine
 * can use to determine if the signal should be blocked or not.  */

void
block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier_mask = mask;
	current->notifier_data = priv;
	current->notifier = notifier;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}

/* Notify the system that blocking has ended. */

void
unblock_all_signals(void)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier = NULL;
	current->notifier_data = NULL;
	recalc_sigpending();
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
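
#if 0
/*
 * Usage sketch (editor's illustration, not part of the original source;
 * the example_* names are hypothetical).  A driver -- the DRM layer is
 * the classic caller -- brackets a hardware critical section like this:
 */
struct example_private { int hw_lock_held; };

static int example_notifier(void *priv)
{
	struct example_private *dev = priv;
	/* Returning 0 keeps the signal blocked while the lock is held. */
	return !dev->hw_lock_held;
}

static void example_enter_critical(struct example_private *dev, sigset_t *mask)
{
	dev->hw_lock_held = 1;
	block_all_signals(example_notifier, dev, mask);
}

static void example_leave_critical(struct example_private *dev)
{
	dev->hw_lock_held = 0;
	unblock_all_signals();
}
#endif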
static inline int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
	struct sigqueue *q, *first = NULL;
	int still_pending = 0;

	if (unlikely(!sigismember(&list->signal, sig)))
		return 0;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first) {
				still_pending = 1;
				break;
			}
			first = q;
		}
	}
	if (first) {
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);
		__sigqueue_free(first);
		if (!still_pending)
			sigdelset(&list->signal, sig);
	} else {
		/* Ok, it wasn't in the queue.  This must be
		   a fast-pathed signal or we must have been
		   out of queue space.  So zero out the info.
		 */
		sigdelset(&list->signal, sig);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = 0;
		info->si_pid = 0;
		info->si_uid = 0;
	}
	return 1;
}
static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			siginfo_t *info)
{
	int sig = 0;

	sig = next_signal(pending, mask);
	if (sig) {
		if (current->notifier) {
			if (sigismember(current->notifier_mask, sig)) {
				if (!(current->notifier)(current->notifier_data)) {
					clear_thread_flag(TIF_SIGPENDING);
					return 0;
				}
			}
		}

		if (!collect_signal(sig, pending, info))
			sig = 0;
	}
	recalc_sigpending();

	return sig;
}
/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	int signr = __dequeue_signal(&tsk->pending, mask, info);
	if (!signr)
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info);
	if (signr && unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
	}
	if ( signr &&
	     ((info->si_code & __SI_MASK) == __SI_TIMER) &&
	     info->si_sys_private){
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		do_schedule_next_timer(info);
		spin_lock(&tsk->sighand->siglock);
	}
	return signr;
}
/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up(struct task_struct *t, int resume)
{
	unsigned int mask;

	set_tsk_thread_flag(t, TIF_SIGPENDING);

	/*
	 * For SIGKILL, we want to wake it up in the stopped/traced case.
	 * We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	mask = TASK_INTERRUPTIBLE;
	if (resume)
		mask |= TASK_STOPPED | TASK_TRACED;
	if (!wake_up_state(t, mask))
		kick_process(t);
}
/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int rm_from_queue(unsigned long mask, struct sigpending *s)
{
	struct sigqueue *q, *n;

	if (!sigtestsetmask(&s->signal, mask))
		return 0;

	sigdelsetmask(&s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (q->info.si_signo < SIGRTMIN &&
		    (mask & sigmask(q->info.si_signo))) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}
/*
 * Bad permissions for sending the signal
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	int error = -EINVAL;
	if (!valid_signal(sig))
		return error;
	error = -EPERM;
	if ((!info || ((unsigned long)info != 1 &&
			(unsigned long)info != 2 && SI_FROMUSER(info)))
	    && ((sig != SIGCONT) ||
		(current->signal->session != t->signal->session))
	    && (current->euid ^ t->suid) && (current->euid ^ t->uid)
	    && (current->uid ^ t->suid) && (current->uid ^ t->uid)
	    && !capable(CAP_KILL))
		return error;
	return security_task_kill(t, info, sig);
}
/* forward decl */
static void do_notify_parent_cldstop(struct task_struct *tsk,
				     struct task_struct *parent,
				     int why);
/*
 * Handle magic process-wide effects of stop/continue signals.
 * Unlike the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals.  The process stop is done as a signal action for SIG_DFL.
 */
static void handle_stop_signal(int sig, struct task_struct *p)
{
	struct task_struct *t;

	if (p->signal->flags & SIGNAL_GROUP_EXIT)
		/*
		 * The process is in the middle of dying already.
		 */
		return;

	if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		rm_from_queue(sigmask(SIGCONT), &p->signal->shared_pending);
		t = p;
		do {
			rm_from_queue(sigmask(SIGCONT), &t->pending);
			t = next_thread(t);
		} while (t != p);
	} else if (sig == SIGCONT) {
		/*
		 * Remove all stop signals from all queues,
		 * and wake all threads.
		 */
		if (unlikely(p->signal->group_stop_count > 0)) {
			/*
			 * There was a group stop in progress.  We'll
			 * pretend it finished before we got here.  We are
			 * obliged to report it to the parent: if the
			 * SIGSTOP happened "after" this SIGCONT, then it
			 * would have cleared this pending SIGCONT.  If it
			 * happened "before" this SIGCONT, then the parent
			 * got the SIGCHLD about the stop finishing before
			 * the continue happened.  We do the notification
			 * now, and it's as if the stop had finished and
			 * the SIGCHLD was pending on entry to this kill.
			 */
			p->signal->group_stop_count = 0;
			p->signal->flags = SIGNAL_STOP_CONTINUED;
			spin_unlock(&p->sighand->siglock);
			if (p->ptrace & PT_PTRACED)
				do_notify_parent_cldstop(p, p->parent,
							 CLD_STOPPED);
			else
				do_notify_parent_cldstop(
					p->group_leader,
					p->group_leader->real_parent,
					CLD_STOPPED);
			spin_lock(&p->sighand->siglock);
		}
		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
		t = p;
		do {
			unsigned int state;
			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);

			/*
			 * If there is a handler for SIGCONT, we must make
			 * sure that no thread returns to user mode before
			 * we post the signal, in case it was the only
			 * thread eligible to run the signal handler--then
			 * it must not do anything between resuming and
			 * running the handler.  With the TIF_SIGPENDING
			 * flag set, the thread will pause and acquire the
			 * siglock that we hold now and until we've queued
			 * the pending signal.
			 *
			 * Wake up the stopped thread _after_ setting
			 * TIF_SIGPENDING
			 */
			state = TASK_STOPPED;
			if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
				set_tsk_thread_flag(t, TIF_SIGPENDING);
				state |= TASK_INTERRUPTIBLE;
			}
			wake_up_state(t, state);

			t = next_thread(t);
		} while (t != p);

		if (p->signal->flags & SIGNAL_STOP_STOPPED) {
			/*
			 * We were in fact stopped, and are now continued.
			 * Notify the parent with CLD_CONTINUED.
			 */
			p->signal->flags = SIGNAL_STOP_CONTINUED;
			p->signal->group_exit_code = 0;
			spin_unlock(&p->sighand->siglock);
			if (p->ptrace & PT_PTRACED)
				do_notify_parent_cldstop(p, p->parent,
							 CLD_CONTINUED);
			else
				do_notify_parent_cldstop(
					p->group_leader,
					p->group_leader->real_parent,
					CLD_CONTINUED);
			spin_lock(&p->sighand->siglock);
		} else {
			/*
			 * We are not stopped, but there could be a stop
			 * signal in the middle of being processed after
			 * being removed from the queue.  Clear that too.
			 */
			p->signal->flags = 0;
		}
	} else if (sig == SIGKILL) {
		/*
		 * Make sure that any pending stop signal already dequeued
		 * is undone by the wakeup for SIGKILL.
		 */
		p->signal->flags = 0;
	}
}
static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			struct sigpending *signals)
{
	struct sigqueue * q = NULL;
	int ret = 0;

	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if ((unsigned long)info == 2)
		goto out_set;

	/* Real-time signals must be queued if sent by sigqueue, or
	   some other real-time mechanism.  It is implementation
	   defined whether kill() does so.  We attempt to do so, on
	   the principle of least surprise, but since kill is not
	   allowed to fail with EAGAIN when low on memory we just
	   make sure at least one signal gets delivered and don't
	   pass on the info struct.  */

	q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
					     ((unsigned long) info < 2 ||
					      info->si_code >= 0)));
	if (q) {
		list_add_tail(&q->list, &signals->list);
		switch ((unsigned long) info) {
		case 0:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = current->pid;
			q->info.si_uid = current->uid;
			break;
		case 1:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			break;
		}
	} else {
		if (sig >= SIGRTMIN && info && (unsigned long)info != 1
		   && info->si_code != SI_USER)
		/*
		 * Queue overflow, abort.  We may abort if the signal was rt
		 * and sent by user using something other than kill().
		 */
			return -EAGAIN;
		if (((unsigned long)info > 1) && (info->si_code == SI_TIMER))
			/*
			 * Set up a return to indicate that we dropped
			 * the signal.
			 */
			ret = info->si_sys_private;
	}

out_set:
	sigaddset(&signals->signal, sig);
	return ret;
}

#define LEGACY_QUEUE(sigptr, sig) \
	(((sig) < SIGRTMIN) && sigismember(&(sigptr)->signal, (sig)))
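
/*
 * Illustrative note (editor's sketch, not part of the original source):
 * LEGACY_QUEUE is what makes classic signals coalesce.  If SIGCHLD is
 * already set in t->pending.signal, LEGACY_QUEUE(&t->pending, SIGCHLD)
 * is true and a second send is dropped, so at most one instance of a
 * non-realtime signal is ever queued.  For sig >= SIGRTMIN it is always
 * false, so realtime signals accumulate individually as POSIX requires.
 */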
static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	int ret = 0;

	if (!irqs_disabled())
		BUG();
	assert_spin_locked(&t->sighand->siglock);

	if (((unsigned long)info > 2) && (info->si_code == SI_TIMER))
		/*
		 * Set up a return to indicate that we dropped the signal.
		 */
		ret = info->si_sys_private;

	/* Short-circuit ignored signals.  */
	if (sig_ignored(t, sig))
		goto out;

	/* Support queueing exactly one non-rt signal, so that we
	   can get more detailed information about the cause of
	   the signal. */
	if (LEGACY_QUEUE(&t->pending, sig))
		goto out;

	ret = send_signal(sig, info, t, &t->pending);
	if (!ret && !sigismember(&t->blocked, sig))
		signal_wake_up(t, sig == SIGKILL);
out:
	return ret;
}
/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 */

int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	if (sigismember(&t->blocked, sig) || t->sighand->action[sig-1].sa.sa_handler == SIG_IGN) {
		t->sighand->action[sig-1].sa.sa_handler = SIG_DFL;
		sigdelset(&t->blocked, sig);
		recalc_sigpending_tsk(t);
	}
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}
void
force_sig_specific(int sig, struct task_struct *t)
{
	unsigned long int flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	if (t->sighand->action[sig-1].sa.sa_handler == SIG_IGN)
		t->sighand->action[sig-1].sa.sa_handler = SIG_DFL;
	sigdelset(&t->blocked, sig);
	recalc_sigpending_tsk(t);
	specific_send_sig_info(sig, (void *)2, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
#define wants_signal(sig, p, mask)			\
	(!sigismember(&(p)->blocked, sig)		\
	 && !((p)->state & mask)			\
	 && !((p)->flags & PF_EXITING)			\
	 && (task_curr(p) || !signal_pending(p)))
static void
__group_complete_signal(int sig, struct task_struct *p)
{
	unsigned int mask;
	struct task_struct *t;

	/*
	 * Don't bother traced and stopped tasks (but
	 * SIGKILL will punch through that).
	 */
	mask = TASK_STOPPED | TASK_TRACED;
	if (sig == SIGKILL)
		mask = 0;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p, mask))
		t = p;
	else if (thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = p->signal->curr_target;
		if (t == NULL)
			/* restart balancing at this thread */
			t = p->signal->curr_target = p;
		BUG_ON(t->tgid != p->tgid);

		while (!wants_signal(sig, t, mask)) {
			t = next_thread(t);
			if (t == p->signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		p->signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) && !(p->signal->flags & SIGNAL_GROUP_EXIT) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			p->signal->flags = SIGNAL_GROUP_EXIT;
			p->signal->group_exit_code = sig;
			p->signal->group_stop_count = 0;
			t = p;
			do {
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
				t = next_thread(t);
			} while (t != p);
			return;
		}

		/*
		 * There will be a core dump.  We make all threads other
		 * than the chosen one go into a group stop so that nothing
		 * happens until it gets scheduled, takes the signal off
		 * the shared queue, and does the core dump.  This is a
		 * little more complicated than strictly necessary, but it
		 * keeps the signal state that winds up in the core dump
		 * unchanged from the death state, e.g. which thread had
		 * the core-dump signal unblocked.
		 */
		rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
		p->signal->group_stop_count = 0;
		p->signal->group_exit_task = t;
		t = p;
		do {
			p->signal->group_stop_count++;
			signal_wake_up(t, 0);
			t = next_thread(t);
		} while (t != p);
		wake_up_process(p->signal->group_exit_task);
		return;
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}
int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret = 0;

	assert_spin_locked(&p->sighand->siglock);
	handle_stop_signal(sig, p);

	if (((unsigned long)info > 2) && (info->si_code == SI_TIMER))
		/*
		 * Set up a return to indicate that we dropped the signal.
		 */
		ret = info->si_sys_private;

	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig))
		return ret;

	if (LEGACY_QUEUE(&p->signal->shared_pending, sig))
		/* This is a non-RT signal and we already have one queued.  */
		return ret;

	/*
	 * Put this signal on the shared-pending queue, or fail with EAGAIN.
	 * We always use the shared queue for process-wide signals,
	 * to avoid several races.
	 */
	ret = send_signal(sig, info, p, &p->signal->shared_pending);
	if (unlikely(ret))
		return ret;

	__group_complete_signal(sig, p);
	return 0;
}
/*
 * Nuke all other threads in the group.
 */
void zap_other_threads(struct task_struct *p)
{
	struct task_struct *t;

	p->signal->flags = SIGNAL_GROUP_EXIT;
	p->signal->group_stop_count = 0;

	if (thread_group_empty(p))
		return;

	for (t = next_thread(p); t != p; t = next_thread(t)) {
		/*
		 * Don't bother with already dead threads
		 */
		if (t->exit_state)
			continue;

		/*
		 * We don't want to notify the parent, since we are
		 * killed as part of a thread group due to another
		 * thread doing an execve() or similar. So set the
		 * exit signal to -1 to allow immediate reaping of
		 * the process.  But don't detach the thread group
		 * leader.
		 */
		if (t != p->group_leader)
			t->exit_signal = -1;

		sigaddset(&t->pending.signal, SIGKILL);
		rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
		signal_wake_up(t, 1);
	}
}
/*
 * Must be called with the tasklist_lock held for reading!
 */
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	unsigned long flags;
	int ret;

	ret = check_kill_permission(sig, info, p);
	if (!ret && sig && p->sighand) {
		spin_lock_irqsave(&p->sighand->siglock, flags);
		ret = __group_send_sig_info(sig, info, p);
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}

	return ret;
}
/*
 * kill_pg_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 */

int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	if (pgrp <= 0)
		return -EINVAL;

	success = 0;
	retval = -ESRCH;
	do_each_task_pid(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p);
		success |= !err;
		retval = err;
	} while_each_task_pid(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}
int
kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
{
	int retval;

	read_lock(&tasklist_lock);
	retval = __kill_pg_info(sig, info, pgrp);
	read_unlock(&tasklist_lock);

	return retval;
}
int
kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;
	struct task_struct *p;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);
	error = -ESRCH;
	if (p)
		error = group_send_sig_info(sig, info, p);
	read_unlock(&tasklist_lock);
	return error;
}
/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct siginfo *info, int pid)
{
	if (!pid) {
		return kill_pg_info(sig, info, process_group(current));
	} else if (pid == -1) {
		int retval = 0, count = 0;
		struct task_struct * p;

		read_lock(&tasklist_lock);
		for_each_process(p) {
			if (p->pid > 1 && p->tgid != current->tgid) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		read_unlock(&tasklist_lock);
		return count ? retval : -ESRCH;
	} else if (pid < 0) {
		return kill_pg_info(sig, info, -pid);
	} else {
		return kill_proc_info(sig, info, pid);
	}
}
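
/*
 * Summary (editor's note, not part of the original source) of the pid
 * interpretation above, matching kill(2):
 *
 *	pid == 0	signal every process in the caller's process group
 *	pid == -1	signal every process except init (pid 1) and the
 *			caller's own thread group
 *	pid < -1	signal every process in process group -pid
 *	pid > 0		signal just the process (thread group) pid
 */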
/*
 * These are for backward compatibility with the rest of the kernel source.
 */

/*
 * These two are the most common entry points.  They send a signal
 * just to the specific thread.
 */
int
send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;
	unsigned long flags;

	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	/*
	 * We need the tasklist lock even for the specific
	 * thread case (when we don't need to follow the group
	 * lists) in order to avoid races with "p->sighand"
	 * going away or changing from under us.
	 */
	read_lock(&tasklist_lock);
	spin_lock_irqsave(&p->sighand->siglock, flags);
	ret = specific_send_sig_info(sig, info, p);
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	read_unlock(&tasklist_lock);
	return ret;
}
int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, (void*)(long)(priv != 0), p);
}
/*
 * This is the entry point for "process-wide" signals.
 * They will go to an appropriate thread in the thread group.
 */
int
send_group_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = group_send_sig_info(sig, info, p);
	read_unlock(&tasklist_lock);
	return ret;
}
void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, (void*)1L, p);
}
/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
int
force_sigsegv(int sig, struct task_struct *p)
{
	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
	return 0;
}
int
kill_pg(pid_t pgrp, int sig, int priv)
{
	return kill_pg_info(sig, (void *)(long)(priv != 0), pgrp);
}

int
kill_proc(pid_t pid, int sig, int priv)
{
	return kill_proc_info(sig, (void *)(long)(priv != 0), pid);
}
/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of Posix Timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */

struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q;

	if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0)))
		q->flags |= SIGQUEUE_PREALLOC;
	return(q);
}
void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * If the signal is still pending remove it from the
	 * pending queue.
	 */
	if (unlikely(!list_empty(&q->list))) {
		read_lock(&tasklist_lock);
		spin_lock_irqsave(q->lock, flags);
		if (!list_empty(&q->list))
			list_del_init(&q->list);
		spin_unlock_irqrestore(q->lock, flags);
		read_unlock(&tasklist_lock);
	}
	q->flags &= ~SIGQUEUE_PREALLOC;
	__sigqueue_free(q);
}
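
/*
 * Usage sketch (editor's note, not part of the original source): the
 * posix-timers code is the intended client of this pair.  Roughly,
 * timer_create() calls sigqueue_alloc() and can fail cleanly with
 * EAGAIN; each expiry then passes the same sigqueue to send_sigqueue()
 * or send_group_sigqueue(), so delivery never needs an allocation; and
 * timer_delete() ends with sigqueue_free(), which unlinks the entry if
 * it is still pending somewhere.
 */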
int
send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
{
	unsigned long flags;
	int ret = 0;

	/*
	 * We need the tasklist lock even for the specific
	 * thread case (when we don't need to follow the group
	 * lists) in order to avoid races with "p->sighand"
	 * going away or changing from under us.
	 */
	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	read_lock(&tasklist_lock);
	spin_lock_irqsave(&p->sighand->siglock, flags);

	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.
		 */
		if (q->info.si_code != SI_TIMER)
			BUG();
		q->info.si_overrun++;
		goto out;
	}
	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig)) {
		ret = 1;
		goto out;
	}

	q->lock = &p->sighand->siglock;
	list_add_tail(&q->list, &p->pending.list);
	sigaddset(&p->pending.signal, sig);
	if (!sigismember(&p->blocked, sig))
		signal_wake_up(p, sig == SIGKILL);

out:
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	read_unlock(&tasklist_lock);
	return(ret);
}
int
send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
{
	unsigned long flags;
	int ret = 0;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	read_lock(&tasklist_lock);
	spin_lock_irqsave(&p->sighand->siglock, flags);
	handle_stop_signal(sig, p);

	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig)) {
		ret = 1;
		goto out;
	}

	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.  Other uses should not try to
		 * send the signal multiple times.
		 */
		if (q->info.si_code != SI_TIMER)
			BUG();
		q->info.si_overrun++;
		goto out;
	}

	/*
	 * Put this signal on the shared-pending queue.
	 * We always use the shared queue for process-wide signals,
	 * to avoid several races.
	 */
	q->lock = &p->sighand->siglock;
	list_add_tail(&q->list, &p->signal->shared_pending.list);
	sigaddset(&p->signal->shared_pending.signal, sig);

	__group_complete_signal(sig, p);
out:
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	read_unlock(&tasklist_lock);
	return(ret);
}
/*
 * Wake up any threads in the parent blocked in wait* syscalls.
 */
static inline void __wake_up_parent(struct task_struct *p,
				    struct task_struct *parent)
{
	wake_up_interruptible_sync(&parent->signal->wait_chldexit);
}
/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 */

void do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead.  */
	BUG_ON(tsk->state & (TASK_STOPPED|TASK_TRACED));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_pid = tsk->pid;
	info.si_uid = tsk->uid;

	/* FIXME: find out whether or not this is supposed to be c*time. */
	info.si_utime = cputime_to_jiffies(cputime_add(tsk->utime,
						       tsk->signal->utime));
	info.si_stime = cputime_to_jiffies(cputime_add(tsk->stime,
						       tsk->signal->stime));

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		tsk->exit_signal = -1;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	if (valid_signal(sig) && sig > 0)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);
}
static void
do_notify_parent_cldstop(struct task_struct *tsk, struct task_struct *parent,
			 int why)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *sighand;

	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	info.si_pid = tsk->pid;
	info.si_uid = tsk->uid;

	/* FIXME: find out whether or not this is supposed to be c*time. */
	info.si_utime = cputime_to_jiffies(tsk->utime);
	info.si_stime = cputime_to_jiffies(tsk->stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}
/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer is gone,
 * we leave nostop_code in current->exit_code.
 */
static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
{
	/*
	 * If there is a group stop in progress,
	 * we must participate in the bookkeeping.
	 */
	if (current->signal->group_stop_count > 0)
		--current->signal->group_stop_count;

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/* Let the debugger run.  */
	set_current_state(TASK_TRACED);
	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);
	if (likely(current->ptrace & PT_PTRACED) &&
	    likely(current->parent != current->real_parent ||
		   !(current->ptrace & PT_ATTACHED)) &&
	    (likely(current->parent->signal != current->signal) ||
	     !unlikely(current->signal->flags & SIGNAL_GROUP_EXIT))) {
		do_notify_parent_cldstop(current, current->parent,
					 CLD_TRAPPED);
		read_unlock(&tasklist_lock);
		schedule();
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't stop here.
		 */
		read_unlock(&tasklist_lock);
		set_current_state(TASK_RUNNING);
		current->exit_code = nostop_code;
	}

	/*
	 * We are back.  Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 */
	recalc_sigpending();
}
void ptrace_notify(int exit_code)
{
	siginfo_t info;

	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);

	memset(&info, 0, sizeof info);
	info.si_signo = SIGTRAP;
	info.si_code = exit_code;
	info.si_pid = current->pid;
	info.si_uid = current->uid;

	/* Let the debugger run.  */
	spin_lock_irq(&current->sighand->siglock);
	ptrace_stop(exit_code, 0, &info);
	spin_unlock_irq(&current->sighand->siglock);
}
static void
finish_stop(int stop_count)
{
	/*
	 * If there are no other threads in the group, or if there is
	 * a group stop in progress and we are the last to stop,
	 * report to the parent.  When ptraced, every thread reports itself.
	 */
	if (stop_count < 0 || (current->ptrace & PT_PTRACED)) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current, current->parent,
					 CLD_STOPPED);
		read_unlock(&tasklist_lock);
	}
	else if (stop_count == 0) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current->group_leader,
					 current->group_leader->real_parent,
					 CLD_STOPPED);
		read_unlock(&tasklist_lock);
	}

	schedule();
	/*
	 * Now we don't run again until continued.
	 */
	current->exit_code = 0;
}
/*
 * This performs the stopping for SIGSTOP and other stop signals.
 * We have to stop all threads in the thread group.
 * Returns nonzero if we've actually stopped and released the siglock.
 * Returns zero if we didn't stop and still hold the siglock.
 */
static int
do_signal_stop(int signr)
{
	struct signal_struct *sig = current->signal;
	struct sighand_struct *sighand = current->sighand;
	int stop_count = -1;

	if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED))
		return 0;

	if (sig->group_stop_count > 0) {
		/*
		 * There is a group stop in progress.  We don't need to
		 * start another one.
		 */
		signr = sig->group_exit_code;
		stop_count = --sig->group_stop_count;
		current->exit_code = signr;
		set_current_state(TASK_STOPPED);
		if (stop_count == 0)
			sig->flags = SIGNAL_STOP_STOPPED;
		spin_unlock_irq(&sighand->siglock);
	}
	else if (thread_group_empty(current)) {
		/*
		 * Lock must be held through transition to stopped state.
		 */
		current->exit_code = current->signal->group_exit_code = signr;
		set_current_state(TASK_STOPPED);
		sig->flags = SIGNAL_STOP_STOPPED;
		spin_unlock_irq(&sighand->siglock);
	}
	else {
		/*
		 * There is no group stop already in progress.
		 * We must initiate one now, but that requires
		 * dropping siglock to get both the tasklist lock
		 * and siglock again in the proper order.  Note that
		 * this allows an intervening SIGCONT to be posted.
		 * We need to check for that and bail out if necessary.
		 */
		struct task_struct *t;

		spin_unlock_irq(&sighand->siglock);

		/* signals can be posted during this window */

		read_lock(&tasklist_lock);
		spin_lock_irq(&sighand->siglock);

		if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED)) {
			/*
			 * Another stop or continue happened while we
			 * didn't have the lock.  We can just swallow this
			 * signal now.  If we raced with a SIGCONT, that
			 * should have just cleared it now.  If we raced
			 * with another processor delivering a stop signal,
			 * then the SIGCONT that wakes us up should clear it.
			 */
			read_unlock(&tasklist_lock);
			return 0;
		}

		if (sig->group_stop_count == 0) {
			sig->group_exit_code = signr;
			stop_count = 0;
			for (t = next_thread(current); t != current;
			     t = next_thread(t))
				/*
				 * Setting state to TASK_STOPPED for a group
				 * stop is always done with the siglock held,
				 * so this check has no races.
				 */
				if (t->state < TASK_STOPPED) {
					stop_count++;
					signal_wake_up(t, 0);
				}
			sig->group_stop_count = stop_count;
		}
		else {
			/* A race with another thread while unlocked.  */
			signr = sig->group_exit_code;
			stop_count = --sig->group_stop_count;
		}

		current->exit_code = signr;
		set_current_state(TASK_STOPPED);
		if (stop_count == 0)
			sig->flags = SIGNAL_STOP_STOPPED;

		spin_unlock_irq(&sighand->siglock);
		read_unlock(&tasklist_lock);
	}

	finish_stop(stop_count);
	return 1;
}
/*
 * Do appropriate magic when group_stop_count > 0.
 * We return nonzero if we stopped, after releasing the siglock.
 * We return zero if we still hold the siglock and should look
 * for another signal without checking group_stop_count again.
 */
static inline int handle_group_stop(void)
{
	int stop_count;

	if (current->signal->group_exit_task == current) {
		/*
		 * Group stop is so we can do a core dump,
		 * We are the initiating thread, so get on with it.
		 */
		current->signal->group_exit_task = NULL;
		return 0;
	}

	if (current->signal->flags & SIGNAL_GROUP_EXIT)
		/*
		 * Group stop is so another thread can do a core dump,
		 * or else we are racing against a death signal.
		 * Just punt the stop so we can get the next signal.
		 */
		return 0;

	/*
	 * There is a group stop in progress.  We stop
	 * without any associated signal being in our queue.
	 */
	stop_count = --current->signal->group_stop_count;
	if (stop_count == 0)
		current->signal->flags = SIGNAL_STOP_STOPPED;
	current->exit_code = current->signal->group_exit_code;
	set_current_state(TASK_STOPPED);
	spin_unlock_irq(&current->sighand->siglock);
	finish_stop(stop_count);
	return 1;
}
int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
			  struct pt_regs *regs, void *cookie)
{
	sigset_t *mask = &current->blocked;
	int signr = 0;

relock:
	spin_lock_irq(&current->sighand->siglock);
	for (;;) {
		struct k_sigaction *ka;

		if (unlikely(current->signal->group_stop_count > 0) &&
		    handle_group_stop())
			goto relock;

		signr = dequeue_signal(current, mask, info);

		if (!signr)
			break; /* will return 0 */

		if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
			ptrace_signal_deliver(regs, cookie);

			/* Let the debugger run.  */
			ptrace_stop(signr, signr, info);

			/* We're back.  Did the debugger cancel the sig?  */
			signr = current->exit_code;
			if (signr == 0)
				continue;

			current->exit_code = 0;

			/* Update the siginfo structure if the signal has
			   changed.  If the debugger wanted something
			   specific in the siginfo structure then it should
			   have updated *info via PTRACE_SETSIGINFO.  */
			if (signr != info->si_signo) {
				info->si_signo = signr;
				info->si_errno = 0;
				info->si_code = SI_USER;
				info->si_pid = current->parent->pid;
				info->si_uid = current->parent->uid;
			}

			/* If the (new) signal is now blocked, requeue it.  */
			if (sigismember(&current->blocked, signr)) {
				specific_send_sig_info(signr, info, current);
				continue;
			}
		}

		ka = &current->sighand->action[signr-1];
		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
			continue;
		if (ka->sa.sa_handler != SIG_DFL) {
			/* Run the handler.  */
			*return_ka = *ka;

			if (ka->sa.sa_flags & SA_ONESHOT)
				ka->sa.sa_handler = SIG_DFL;

			break; /* will return non-zero "signr" value */
		}

		/*
		 * Now we are doing the default action for this signal.
		 */
		if (sig_kernel_ignore(signr)) /* Default is nothing. */
			continue;

		/* Init gets no signals it doesn't want.  */
		if (current->pid == 1)
			continue;

		if (sig_kernel_stop(signr)) {
			/*
			 * The default action is to stop all threads in
			 * the thread group.  The job control signals
			 * do nothing in an orphaned pgrp, but SIGSTOP
			 * always works.  Note that siglock needs to be
			 * dropped during the call to is_orphaned_pgrp()
			 * because of lock ordering with tasklist_lock.
			 * This allows an intervening SIGCONT to be posted.
			 * We need to check for that and bail out if necessary.
			 */
			if (signr != SIGSTOP) {
				spin_unlock_irq(&current->sighand->siglock);

				/* signals can be posted during this window */

				if (is_orphaned_pgrp(process_group(current)))
					goto relock;

				spin_lock_irq(&current->sighand->siglock);
			}

			if (likely(do_signal_stop(signr))) {
				/* It released the siglock.  */
				goto relock;
			}

			/*
			 * We didn't actually stop, due to a race
			 * with SIGCONT or something like that.
			 */
			continue;
		}

		spin_unlock_irq(&current->sighand->siglock);

		/*
		 * Anything else is fatal, maybe with a core dump.
		 */
		current->flags |= PF_SIGNALED;
		if (sig_kernel_coredump(signr)) {
			/*
			 * If it was able to dump core, this kills all
			 * other threads in the group and synchronizes with
			 * their demise.  If we lost the race with another
			 * thread getting here, it set group_exit_code
			 * first and our do_group_exit call below will use
			 * that value and ignore the one we pass it.
			 */
			do_coredump((long)signr, signr, regs);
		}

		/*
		 * Death signals, no core dump.
		 */
		do_group_exit(signr);
		/* NOTREACHED */
	}
	spin_unlock_irq(&current->sighand->siglock);
	return signr;
}
EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL_GPL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(kill_pg);
EXPORT_SYMBOL(kill_proc);
EXPORT_SYMBOL(ptrace_notify);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
EXPORT_SYMBOL(sigprocmask);
EXPORT_SYMBOL(block_all_signals);
EXPORT_SYMBOL(unblock_all_signals);
/*
 * System call entry points.
 */

asmlinkage long sys_restart_syscall(void)
{
	struct restart_block *restart = &current_thread_info()->restart_block;
	return restart->fn(restart);
}

long do_no_restart_syscall(struct restart_block *param)
{
	return -EINTR;
}
/*
 * We don't need to get the kernel lock - this is all local to this
 * particular thread.. (and that's good, because this is _heavily_
 * used by various programs)
 */

/*
 * This is also useful for kernel threads that want to temporarily
 * (or permanently) block certain signals.
 *
 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
 * interface happily blocks "unblockable" signals like SIGKILL
 * and friends.
 */
int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
{
	int error;
	sigset_t old_block;

	spin_lock_irq(&current->sighand->siglock);
	old_block = current->blocked;
	error = 0;
	switch (how) {
	case SIG_BLOCK:
		sigorsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_UNBLOCK:
		signandsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_SETMASK:
		current->blocked = *set;
		break;
	default:
		error = -EINVAL;
	}
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	if (oldset)
		*oldset = old_block;
	return error;
}
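
#if 0
/*
 * Usage sketch (editor's illustration, not part of the original source):
 * a kernel thread blocking everything but SIGKILL.  Note that, per the
 * comment above, this kernel-side sigprocmask() would also block SIGKILL
 * if asked to -- the sigdelsetmask() below is the caller's choice.
 */
static void example_block_most_signals(void)
{
	sigset_t blocked;

	sigfillset(&blocked);
	sigdelsetmask(&blocked, sigmask(SIGKILL));
	sigprocmask(SIG_SETMASK, &blocked, NULL);
}
#endif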
asmlinkage long
sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize)
{
	int error = -EINVAL;
	sigset_t old_set, new_set;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, &old_set);
		if (error)
			goto out;
	} else {
		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked;
		spin_unlock_irq(&current->sighand->siglock);
	}

	if (oset) {
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}
long do_sigpending(void __user *set, unsigned long sigsetsize)
{
	long error = -EINVAL;
	sigset_t pending;

	if (sigsetsize > sizeof(sigset_t))
		goto out;

	spin_lock_irq(&current->sighand->siglock);
	sigorsets(&pending, &current->pending.signal,
		  &current->signal->shared_pending.signal);
	spin_unlock_irq(&current->sighand->siglock);

	/* Outside the lock because only this thread touches it.  */
	sigandsets(&pending, &current->blocked, &pending);

	error = -EFAULT;
	if (!copy_to_user(set, &pending, sigsetsize))
		error = 0;
out:
	return error;
}

asmlinkage long
sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize)
{
	return do_sigpending(set, sigsetsize);
}
#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER

int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
{
	int err;

	if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
		return -EFAULT;
	if (from->si_code < 0)
		return __copy_to_user(to, from, sizeof(siginfo_t))
			? -EFAULT : 0;
	/*
	 * If you change siginfo_t structure, please be sure
	 * this code is fixed accordingly.
	 * It should never copy any pad contained in the structure
	 * to avoid security leaks, but must copy the generic
	 * 3 ints plus the relevant union member.
	 */
	err = __put_user(from->si_signo, &to->si_signo);
	err |= __put_user(from->si_errno, &to->si_errno);
	err |= __put_user((short)from->si_code, &to->si_code);
	switch (from->si_code & __SI_MASK) {
	case __SI_KILL:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	case __SI_TIMER:
		err |= __put_user(from->si_tid, &to->si_tid);
		err |= __put_user(from->si_overrun, &to->si_overrun);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	case __SI_POLL:
		err |= __put_user(from->si_band, &to->si_band);
		err |= __put_user(from->si_fd, &to->si_fd);
		break;
	case __SI_FAULT:
		err |= __put_user(from->si_addr, &to->si_addr);
#ifdef __ARCH_SI_TRAPNO
		err |= __put_user(from->si_trapno, &to->si_trapno);
#endif
		break;
	case __SI_CHLD:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_status, &to->si_status);
		err |= __put_user(from->si_utime, &to->si_utime);
		err |= __put_user(from->si_stime, &to->si_stime);
		break;
	case __SI_RT: /* This is not generated by the kernel as of now. */
	case __SI_MESGQ: /* But this is */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	default: /* this is just in case for now ... */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	}
	return err;
}

#endif
asmlinkage long
sys_rt_sigtimedwait(const sigset_t __user *uthese,
		    siginfo_t __user *uinfo,
		    const struct timespec __user *uts,
		    size_t sigsetsize)
{
	int ret, sig;
	sigset_t these;
	struct timespec ts;
	siginfo_t info;
	long timeout = 0;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&these, uthese, sizeof(these)))
		return -EFAULT;

	/*
	 * Invert the set of allowed signals to get those we
	 * want to block.
	 */
	sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
	signotset(&these);

	if (uts) {
		if (copy_from_user(&ts, uts, sizeof(ts)))
			return -EFAULT;
		if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
		    || ts.tv_sec < 0)
			return -EINVAL;
	}

	spin_lock_irq(&current->sighand->siglock);
	sig = dequeue_signal(current, &these, &info);
	if (!sig) {
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (uts)
			timeout = (timespec_to_jiffies(&ts)
				   + (ts.tv_sec || ts.tv_nsec));

		if (timeout) {
			/*
			 * None ready -- temporarily unblock the signals
			 * we're interested in while we sleep, so that
			 * we'll be awakened when they arrive.
			 */
			current->real_blocked = current->blocked;
			sigandsets(&current->blocked, &current->blocked, &these);
			recalc_sigpending();
			spin_unlock_irq(&current->sighand->siglock);

			current->state = TASK_INTERRUPTIBLE;
			timeout = schedule_timeout(timeout);

			if (current->flags & PF_FREEZE)
				refrigerator(PF_FREEZE);
			spin_lock_irq(&current->sighand->siglock);
			sig = dequeue_signal(current, &these, &info);
			current->blocked = current->real_blocked;
			siginitset(&current->real_blocked, 0);
			recalc_sigpending();
		}
	}
	spin_unlock_irq(&current->sighand->siglock);

	if (sig) {
		ret = sig;
		if (uinfo) {
			if (copy_siginfo_to_user(uinfo, &info))
				ret = -EFAULT;
		}
	} else {
		ret = -EAGAIN;
		if (timeout)
			ret = -EINTR;
	}

	return ret;
}
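/*
 * Example: waiting synchronously with a timeout from userspace (a sketch,
 * not part of this file). The signal must be blocked first, which is
 * exactly the situation the temporary-unblock dance above is designed
 * for; on timeout the -EAGAIN above surfaces as errno == EAGAIN.
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *	#include <time.h>
 *
 *	int main(void)
 *	{
 *		sigset_t set;
 *		siginfo_t info;
 *		struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
 *		int sig;
 *
 *		sigemptyset(&set);
 *		sigaddset(&set, SIGUSR1);
 *		sigprocmask(SIG_BLOCK, &set, NULL);
 *
 *		sig = sigtimedwait(&set, &info, &ts);
 *		if (sig < 0)
 *			perror("sigtimedwait");
 *		else
 *			printf("signal %d from pid %d\n",
 *			       sig, (int)info.si_pid);
 *		return 0;
 *	}
 */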
asmlinkage long
sys_kill(int pid, int sig)
{
	struct siginfo info;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_USER;
	info.si_pid = current->tgid;
	info.si_uid = current->uid;

	return kill_something_info(sig, &info, pid);
}
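/*
 * Example: kill_something_info() gives kill(2) its pid conventions; a
 * hypothetical userspace helper (not part of this file) relying on them:
 *
 *	#include <signal.h>
 *	#include <sys/types.h>
 *
 *	// pid > 0: that process;  pid == 0: caller's process group;
 *	// pid == -1: everything we may signal;  pid < -1: group -pid.
 *	int probe(pid_t pid)
 *	{
 *		return kill(pid, 0);	// null signal: existence/permission probe
 *	}
 */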
/**
 *  sys_tgkill - send signal to one specific thread
 *  @tgid: the thread group ID of the thread
 *  @pid: the PID of the thread
 *  @sig: signal to be sent
 *
 *  This syscall also checks the tgid and returns -ESRCH even if the PID
 *  exists but no longer belongs to the target process; this solves the
 *  problem of threads exiting and their PIDs getting reused.
 */
asmlinkage long sys_tgkill(int tgid, int pid, int sig)
{
	struct siginfo info;
	int error;
	struct task_struct *p;

	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_TKILL;
	info.si_pid = current->tgid;
	info.si_uid = current->uid;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);
	error = -ESRCH;
	if (p && (p->tgid == tgid)) {
		error = check_kill_permission(sig, &info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe.  No signal is actually delivered.
		 */
		if (!error && sig && p->sighand) {
			spin_lock_irq(&p->sighand->siglock);
			handle_stop_signal(sig, p);
			error = specific_send_sig_info(sig, &info, p);
			spin_unlock_irq(&p->sighand->siglock);
		}
	}
	read_unlock(&tasklist_lock);
	return error;
}
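/*
 * Example: glibc of this era does not wrap tgkill, so userspace goes
 * through syscall(2); my_tgkill() is a hypothetical helper name, and
 * the sketch assumes the headers define SYS_tgkill (not part of this
 * file).
 *
 *	#define _GNU_SOURCE
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static int my_tgkill(int tgid, int tid, int sig)
 *	{
 *		return syscall(SYS_tgkill, tgid, tid, sig);
 *	}
 */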
/*
 * Send a signal to only one task, even if it's a CLONE_THREAD task.
 */
asmlinkage long
sys_tkill(int pid, int sig)
{
	struct siginfo info;
	int error;
	struct task_struct *p;

	/* This is only valid for single tasks */
	if (pid <= 0)
		return -EINVAL;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_TKILL;
	info.si_pid = current->tgid;
	info.si_uid = current->uid;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);
	error = -ESRCH;
	if (p) {
		error = check_kill_permission(sig, &info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe.  No signal is actually delivered.
		 */
		if (!error && sig && p->sighand) {
			spin_lock_irq(&p->sighand->siglock);
			handle_stop_signal(sig, p);
			error = specific_send_sig_info(sig, &info, p);
			spin_unlock_irq(&p->sighand->siglock);
		}
	}
	read_unlock(&tasklist_lock);
	return error;
}
asmlinkage long
sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
{
	siginfo_t info;

	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;

	/*
	 * Not even root can pretend to send signals from the kernel.
	 * Nor can they impersonate a kill(), which adds source info.
	 */
	if (info.si_code >= 0)
		return -EPERM;
	info.si_signo = sig;

	/* POSIX.1b doesn't mention process groups. */
	return kill_proc_info(sig, &info, pid);
}
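/*
 * Example: the usual way into this syscall is glibc's sigqueue(), which
 * builds the siginfo on the caller's behalf (si_code = SI_QUEUE, which
 * is negative, so the check above passes); a sketch, not part of this
 * file, with send_value() as an illustrative name.
 *
 *	#include <signal.h>
 *	#include <sys/types.h>
 *
 *	int send_value(pid_t pid, int value)
 *	{
 *		union sigval sv;
 *
 *		sv.sival_int = value;
 *		return sigqueue(pid, SIGUSR1, sv);
 *	}
 */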
int
do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact)
{
	struct k_sigaction *k;

	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
		return -EINVAL;

	k = &current->sighand->action[sig-1];

	spin_lock_irq(&current->sighand->siglock);
	if (signal_pending(current)) {
		/*
		 * If there might be a fatal signal pending on multiple
		 * threads, make sure we take it before changing the action.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		return -ERESTARTNOINTR;
	}

	if (oact)
		*oact = *k;

	if (act) {
		/*
		 * POSIX 3.3.1.3:
		 *  "Setting a signal action to SIG_IGN for a signal that is
		 *   pending shall cause the pending signal to be discarded,
		 *   whether or not it is blocked."
		 *
		 *  "Setting a signal action to SIG_DFL for a signal that is
		 *   pending and whose default action is to ignore the signal
		 *   (for example, SIGCHLD), shall cause the pending signal to
		 *   be discarded, whether or not it is blocked."
		 */
		if (act->sa.sa_handler == SIG_IGN ||
		    (act->sa.sa_handler == SIG_DFL &&
		     sig_kernel_ignore(sig))) {
			/*
			 * This is a fairly rare case, so we only take the
			 * tasklist_lock once we're sure we'll need it.
			 * Now we must do this little unlock and relock
			 * dance to maintain the lock hierarchy.
			 */
			struct task_struct *t = current;
			spin_unlock_irq(&t->sighand->siglock);
			read_lock(&tasklist_lock);
			spin_lock_irq(&t->sighand->siglock);
			*k = *act;
			sigdelsetmask(&k->sa.sa_mask,
				      sigmask(SIGKILL) | sigmask(SIGSTOP));
			rm_from_queue(sigmask(sig), &t->signal->shared_pending);
			do {
				rm_from_queue(sigmask(sig), &t->pending);
				recalc_sigpending_tsk(t);
				t = next_thread(t);
			} while (t != current);
			spin_unlock_irq(&current->sighand->siglock);
			read_unlock(&tasklist_lock);
			return 0;
		}

		*k = *act;
		sigdelsetmask(&k->sa.sa_mask,
			      sigmask(SIGKILL) | sigmask(SIGSTOP));
	}

	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}
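/*
 * Example: the POSIX discard rule implemented above, observed from
 * userspace (a sketch, not part of this file) -- a blocked, pending
 * SIGUSR1 disappears the moment its action becomes SIG_IGN:
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		sigset_t block, pending;
 *		struct sigaction sa = { .sa_handler = SIG_IGN };
 *
 *		sigemptyset(&sa.sa_mask);
 *		sigemptyset(&block);
 *		sigaddset(&block, SIGUSR1);
 *		sigprocmask(SIG_BLOCK, &block, NULL);
 *		raise(SIGUSR1);			// pending and blocked
 *
 *		sigaction(SIGUSR1, &sa, NULL);	// discards the pending signal
 *		sigpending(&pending);
 *		printf("%d\n", sigismember(&pending, SIGUSR1));	// prints 0
 *		return 0;
 *	}
 */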
int
do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
{
	stack_t oss;
	int error;

	if (uoss) {
		oss.ss_sp = (void __user *) current->sas_ss_sp;
		oss.ss_size = current->sas_ss_size;
		oss.ss_flags = sas_ss_flags(sp);
	}

	if (uss) {
		void __user *ss_sp;
		size_t ss_size;
		int ss_flags;

		error = -EFAULT;
		if (!access_ok(VERIFY_READ, uss, sizeof(*uss))
		    || __get_user(ss_sp, &uss->ss_sp)
		    || __get_user(ss_flags, &uss->ss_flags)
		    || __get_user(ss_size, &uss->ss_size))
			goto out;

		error = -EPERM;
		if (on_sig_stack(sp))
			goto out;

		error = -EINVAL;
		/*
		 * Note - this code used to test ss_flags incorrectly;
		 * old code may have been written using ss_flags == 0
		 * to mean ss_flags == SS_ONSTACK (as this was the only
		 * way that worked) - this fix preserves that older
		 * mechanism.
		 */
		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
			goto out;

		if (ss_flags == SS_DISABLE) {
			ss_size = 0;
			ss_sp = NULL;
		} else {
			error = -ENOMEM;
			if (ss_size < MINSIGSTKSZ)
				goto out;
		}

		current->sas_ss_sp = (unsigned long) ss_sp;
		current->sas_ss_size = ss_size;
	}

	if (uoss) {
		error = -EFAULT;
		if (copy_to_user(uoss, &oss, sizeof(oss)))
			goto out;
	}

	error = 0;
out:
	return error;
}
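/*
 * Example: installing an alternate stack from userspace so an SA_ONSTACK
 * handler can still run when the normal stack is gone (a sketch, not part
 * of this file; setup_altstack() is an illustrative name). Note ss_flags
 * of 0 is accepted here for the compatibility reason documented above.
 *
 *	#include <signal.h>
 *	#include <stdlib.h>
 *
 *	int setup_altstack(void)
 *	{
 *		stack_t ss;
 *
 *		ss.ss_sp = malloc(SIGSTKSZ);
 *		if (!ss.ss_sp)
 *			return -1;
 *		ss.ss_size = SIGSTKSZ;
 *		ss.ss_flags = 0;
 *		return sigaltstack(&ss, NULL);
 *	}
 */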
#ifdef __ARCH_WANT_SYS_SIGPENDING

asmlinkage long
sys_sigpending(old_sigset_t __user *set)
{
	return do_sigpending(set, sizeof(*set));
}

#endif /* __ARCH_WANT_SYS_SIGPENDING */
#ifdef __ARCH_WANT_SYS_SIGPROCMASK
/*
 * Some platforms have their own version with special arguments;
 * others support only sys_rt_sigprocmask.
 */

asmlinkage long
sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset)
{
	int error;
	old_sigset_t old_set, new_set;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));

		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked.sig[0];

		error = 0;
		switch (how) {
		default:
			error = -EINVAL;
			break;
		case SIG_BLOCK:
			sigaddsetmask(&current->blocked, new_set);
			break;
		case SIG_UNBLOCK:
			sigdelsetmask(&current->blocked, new_set);
			break;
		case SIG_SETMASK:
			current->blocked.sig[0] = new_set;
			break;
		}

		recalc_sigpending();
		spin_unlock_irq(&current->sighand->siglock);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		old_set = current->blocked.sig[0];
	set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}
#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
#ifdef __ARCH_WANT_SYS_RT_SIGACTION
asmlinkage long
sys_rt_sigaction(int sig,
		 const struct sigaction __user *act,
		 struct sigaction __user *oact,
		 size_t sigsetsize)
{
	struct k_sigaction new_sa, old_sa;
	int ret = -EINVAL;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (act) {
		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);

	if (!ret && oact) {
		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
			return -EFAULT;
	}
out:
	return ret;
}
#endif /* __ARCH_WANT_SYS_RT_SIGACTION */
#ifdef __ARCH_WANT_SYS_SGETMASK

/*
 * For backwards compatibility.  Functionality superseded by sigprocmask.
 */
asmlinkage long
sys_sgetmask(void)
{
	/* SMP safe */
	return current->blocked.sig[0];
}

asmlinkage long
sys_ssetmask(int newmask)
{
	int old;

	spin_lock_irq(&current->sighand->siglock);
	old = current->blocked.sig[0];

	siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
						  sigmask(SIGSTOP)));
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	return old;
}
#endif /* __ARCH_WANT_SYS_SGETMASK */
#ifdef __ARCH_WANT_SYS_SIGNAL
/*
 * For backwards compatibility.  Functionality superseded by sigaction.
 */
asmlinkage unsigned long
sys_signal(int sig, __sighandler_t handler)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	new_sa.sa.sa_handler = handler;
	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
	sigemptyset(&new_sa.sa.sa_mask);	/* don't pass garbage to do_sigaction */

	ret = do_sigaction(sig, &new_sa, &old_sa);

	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
#endif /* __ARCH_WANT_SYS_SIGNAL */
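/*
 * Example: SA_ONESHOT | SA_NOMASK is what gives signal() its SysV feel;
 * the same semantics spelled out with sigaction() from userspace (a
 * sketch, not part of this file; sysv_signal_like() is an illustrative
 * name, and SA_RESETHAND/SA_NODEFER are the modern names for the same
 * two flags).
 *
 *	#include <signal.h>
 *
 *	int sysv_signal_like(int sig, void (*handler)(int))
 *	{
 *		struct sigaction sa;
 *
 *		sa.sa_handler = handler;
 *		sa.sa_flags = SA_RESETHAND | SA_NODEFER;
 *		sigemptyset(&sa.sa_mask);
 *		return sigaction(sig, &sa, NULL);
 *	}
 */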
#ifdef __ARCH_WANT_SYS_PAUSE

asmlinkage long
sys_pause(void)
{
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	return -ERESTARTNOHAND;
}

#endif /* __ARCH_WANT_SYS_PAUSE */
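/*
 * Example: the -ERESTARTNOHAND above means pause() is never restarted
 * once a handler has run; userspace sees -1 with errno == EINTR (a
 * sketch, not part of this file).
 *
 *	#include <signal.h>
 *	#include <unistd.h>
 *
 *	static void on_alarm(int sig) { }	// just interrupt pause()
 *
 *	int main(void)
 *	{
 *		signal(SIGALRM, on_alarm);
 *		alarm(1);
 *		pause();	// returns -1/EINTR after the handler runs
 *		return 0;
 *	}
 */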
void __init signals_init(void)
{
	sigqueue_cachep =
		kmem_cache_create("sigqueue",
				  sizeof(struct sigqueue),
				  __alignof__(struct sigqueue),
				  SLAB_PANIC, NULL, NULL);
}