/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 */
9 #include <linux/module.h>
10 #include <linux/sched.h>
11 #include <linux/kernel.h>
12 #include <linux/signal.h>
13 #include <linux/errno.h>
14 #include <linux/wait.h>
15 #include <linux/ptrace.h>
16 #include <linux/unistd.h>
18 #include <linux/smp.h>
19 #include <linux/smp_lock.h>
20 #include <linux/slab.h>
21 #include <linux/init.h>
23 #include <asm/uaccess.h>
/*
 * SLAB caches for signal bits.
 */
/* Compile-time debug switch: 0 disables the printk tracing and the slab
   debug flags.  NOTE(review): the #if/#else/#endif guard was lost in this
   copy (two unconditional #defines of the same macro would not compile);
   reconstructed from the surrounding code — verify against upstream. */
#define DEBUG_SIG 0

#if DEBUG_SIG
#define SIG_SLAB_DEBUG	(SLAB_DEBUG_FREE | SLAB_RED_ZONE /* | SLAB_POISON */)
#else
#define SIG_SLAB_DEBUG	0
#endif
37 static kmem_cache_t
*signal_queue_cachep
;
39 static int nr_queued_signals
;
40 static int max_queued_signals
= 1024;
42 void __init
signals_init(void)
45 kmem_cache_create("signal_queue",
46 sizeof(struct signal_queue
),
47 __alignof__(struct signal_queue
),
48 SIG_SLAB_DEBUG
, NULL
, NULL
);
53 * Flush all pending signals for a task.
57 flush_signals(struct task_struct
*t
)
59 struct signal_queue
*q
, *n
;
62 sigemptyset(&t
->signal
);
65 t
->sigqueue_tail
= &t
->sigqueue
;
69 kmem_cache_free(signal_queue_cachep
, q
);
76 * Flush all handlers for a task.
80 flush_signal_handlers(struct task_struct
*t
)
83 struct k_sigaction
*ka
= &t
->sig
->action
[0];
84 for (i
= _NSIG
; i
!= 0 ; i
--) {
85 if (ka
->sa
.sa_handler
!= SIG_IGN
)
86 ka
->sa
.sa_handler
= SIG_DFL
;
88 sigemptyset(&ka
->sa
.sa_mask
);
94 * Dequeue a signal and return the element to the caller, which is
95 * expected to free it.
97 * All callers of must be holding current->sigmask_lock.
101 dequeue_signal(sigset_t
*mask
, siginfo_t
*info
)
103 unsigned long i
, *s
, *m
, x
;
107 printk("SIG dequeue (%s:%d): %d ", current
->comm
, current
->pid
,
108 signal_pending(current
));
111 /* Find the first desired signal that is pending. */
112 s
= current
->signal
.sig
;
114 switch (_NSIG_WORDS
) {
116 for (i
= 0; i
< _NSIG_WORDS
; ++i
, ++s
, ++m
)
117 if ((x
= *s
&~ *m
) != 0) {
118 sig
= ffz(~x
) + i
*_NSIG_BPW
+ 1;
123 case 2: if ((x
= s
[0] &~ m
[0]) != 0)
125 else if ((x
= s
[1] &~ m
[1]) != 0)
132 case 1: if ((x
= *s
&~ *m
) != 0)
140 /* Collect the siginfo appropriate to this signal. */
141 if (sig
< SIGRTMIN
) {
142 /* XXX: As an extension, support queueing exactly
143 one non-rt signal if SA_SIGINFO is set, so that
144 we can get more detailed information about the
145 cause of the signal. */
146 /* Deciding not to init these couple of fields is
147 more expensive that just initializing them. */
148 info
->si_signo
= sig
;
154 struct signal_queue
*q
, **pp
;
155 pp
= ¤t
->sigqueue
;
156 q
= current
->sigqueue
;
158 /* Find the one we're interested in ... */
159 for ( ; q
; pp
= &q
->next
, q
= q
->next
)
160 if (q
->info
.si_signo
== sig
)
163 if ((*pp
= q
->next
) == NULL
)
164 current
->sigqueue_tail
= pp
;
166 kmem_cache_free(signal_queue_cachep
,q
);
169 /* then see if this signal is still pending. */
172 if (q
->info
.si_signo
== sig
) {
179 /* Ok, it wasn't in the queue. It must have
180 been sent either by a non-rt mechanism and
181 we ran out of queue space. So zero out the
183 info
->si_signo
= sig
;
192 sigdelset(¤t
->signal
, sig
);
193 recalc_sigpending(current
);
195 /* XXX: Once POSIX.1b timers are in, if si_code == SI_TIMER,
196 we need to xchg out the timer overrun values. */
198 /* XXX: Once CLONE_PID is in to join those "threads" that are
199 part of the same "process", look for signals sent to the
200 "process" as well. */
202 /* Sanity check... */
203 if (mask
== ¤t
->blocked
&& signal_pending(current
)) {
204 printk(KERN_CRIT
"SIG: sigpending lied\n");
205 current
->sigpending
= 0;
210 printk(" %d -> %d\n", signal_pending(current
), sig
);
217 send_sig_info(int sig
, struct siginfo
*info
, struct task_struct
*t
)
219 struct k_sigaction
*ka
;
224 printk("SIG queue (%s:%d): %d ", t
->comm
, t
->pid
, sig
);
228 if (sig
< 0 || sig
> _NSIG
)
231 /* If t->sig is gone, we must be trying to kill the task. So
232 pretend that it doesn't exist anymore. */
237 /* The somewhat baroque permissions check... */
239 if ((!info
|| ((unsigned long)info
!= 1 && SI_FROMUSER(info
)))
240 && ((sig
!= SIGCONT
) || (current
->session
!= t
->session
))
241 && (current
->euid
^ t
->suid
) && (current
->euid
^ t
->uid
)
242 && (current
->uid
^ t
->suid
) && (current
->uid
^ t
->uid
)
243 && !capable(CAP_SYS_ADMIN
))
246 /* The null signal is a permissions and process existance probe.
247 No signal is actually delivered. */
252 ka
= &t
->sig
->action
[sig
-1];
253 spin_lock_irqsave(&t
->sigmask_lock
, flags
);
256 case SIGKILL
: case SIGCONT
:
257 /* Wake up the process if stopped. */
258 if (t
->state
== TASK_STOPPED
)
261 sigdelsetmask(&t
->signal
, (sigmask(SIGSTOP
)|sigmask(SIGTSTP
)|
262 sigmask(SIGTTOU
)|sigmask(SIGTTIN
)));
263 /* Inflict this corner case with recalculations, not mainline */
264 recalc_sigpending(t
);
267 case SIGSTOP
: case SIGTSTP
:
268 case SIGTTIN
: case SIGTTOU
:
269 /* If we're stopping again, cancel SIGCONT */
270 sigdelset(&t
->signal
, SIGCONT
);
271 /* Inflict this corner case with recalculations, not mainline */
272 recalc_sigpending(t
);
276 /* Optimize away the signal, if it's a signal that can be
277 handled immediately (ie non-blocked and untraced) and
278 that is ignored (either explicitly or by default). */
280 if (!(t
->flags
& PF_PTRACED
) && !sigismember(&t
->blocked
, sig
)
281 /* Don't bother with ignored sigs (SIGCHLD is special) */
282 && ((ka
->sa
.sa_handler
== SIG_IGN
&& sig
!= SIGCHLD
)
283 /* Some signals are ignored by default.. (but SIGCONT
284 already did its deed) */
285 || (ka
->sa
.sa_handler
== SIG_DFL
286 && (sig
== SIGCONT
|| sig
== SIGCHLD
287 || sig
== SIGWINCH
|| sig
== SIGURG
)))) {
291 if (sig
< SIGRTMIN
) {
292 /* Non-real-time signals are not queued. */
293 /* XXX: As an extension, support queueing exactly one
294 non-rt signal if SA_SIGINFO is set, so that we can
295 get more detailed information about the cause of
297 if (sigismember(&t
->signal
, sig
))
300 /* Real-time signals must be queued if sent by sigqueue, or
301 some other real-time mechanism. It is implementation
302 defined whether kill() does so. We attempt to do so, on
303 the principle of least surprise, but since kill is not
304 allowed to fail with EAGAIN when low on memory we just
305 make sure at least one signal gets delivered and don't
306 pass on the info struct. */
308 struct signal_queue
*q
= 0;
310 if (nr_queued_signals
< max_queued_signals
) {
311 q
= (struct signal_queue
*)
312 kmem_cache_alloc(signal_queue_cachep
, GFP_KERNEL
);
318 *t
->sigqueue_tail
= q
;
319 t
->sigqueue_tail
= &q
->next
;
320 switch ((unsigned long) info
) {
322 q
->info
.si_signo
= sig
;
323 q
->info
.si_errno
= 0;
324 q
->info
.si_code
= SI_USER
;
325 q
->info
.si_pid
= current
->pid
;
326 q
->info
.si_uid
= current
->uid
;
329 q
->info
.si_signo
= sig
;
330 q
->info
.si_errno
= 0;
331 q
->info
.si_code
= SI_KERNEL
;
340 /* If this was sent by a rt mechanism, try again. */
341 if (info
->si_code
< 0) {
345 /* Otherwise, mention that the signal is pending,
346 but don't queue the info. */
350 sigaddset(&t
->signal
, sig
);
351 if (!sigismember(&t
->blocked
, sig
))
355 spin_unlock_irqrestore(&t
->sigmask_lock
, flags
);
356 if (t
->state
== TASK_INTERRUPTIBLE
&& signal_pending(t
))
361 printk(" %d -> %d\n", signal_pending(t
), ret
);
368 * Force a signal that the process can't ignore: if necessary
369 * we unblock the signal and change any SIG_IGN to SIG_DFL.
373 force_sig_info(int sig
, struct siginfo
*info
, struct task_struct
*t
)
378 if (t
->sig
->action
[sig
-1].sa
.sa_handler
== SIG_IGN
)
379 t
->sig
->action
[sig
-1].sa
.sa_handler
= SIG_DFL
;
380 sigdelset(&t
->blocked
, sig
);
382 return send_sig_info(sig
, info
, t
);
386 * kill_pg() sends a signal to a process group: this is what the tty
387 * control characters do (^C, ^Z etc)
391 kill_pg_info(int sig
, struct siginfo
*info
, pid_t pgrp
)
393 int retval
= -EINVAL
;
395 struct task_struct
*p
;
399 read_lock(&tasklist_lock
);
401 if (p
->pgrp
== pgrp
) {
402 int err
= send_sig_info(sig
, info
, p
);
409 read_unlock(&tasklist_lock
);
417 * kill_sl() sends a signal to the session leader: this is used
418 * to send SIGHUP to the controlling process of a terminal when
419 * the connection is lost.
423 kill_sl_info(int sig
, struct siginfo
*info
, pid_t sess
)
425 int retval
= -EINVAL
;
427 struct task_struct
*p
;
431 read_lock(&tasklist_lock
);
433 if (p
->leader
&& p
->session
== sess
) {
434 int err
= send_sig_info(sig
, info
, p
);
441 read_unlock(&tasklist_lock
);
449 kill_proc_info(int sig
, struct siginfo
*info
, pid_t pid
)
452 struct task_struct
*p
;
454 read_lock(&tasklist_lock
);
455 p
= find_task_by_pid(pid
);
458 error
= send_sig_info(sig
, info
, p
);
459 read_unlock(&tasklist_lock
);
464 * kill_something() interprets pid in interesting ways just like kill(2).
466 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
467 * is probably wrong. Should make it like BSD or SYSV.
471 kill_something_info(int sig
, struct siginfo
*info
, int pid
)
474 return kill_pg_info(sig
, info
, current
->pgrp
);
475 } else if (pid
== -1) {
476 int retval
= 0, count
= 0;
477 struct task_struct
* p
;
479 read_lock(&tasklist_lock
);
481 if (p
->pid
> 1 && p
!= current
) {
482 int err
= send_sig_info(sig
, info
, p
);
488 read_unlock(&tasklist_lock
);
489 return count
? retval
: -ESRCH
;
490 } else if (pid
< 0) {
491 return kill_pg_info(sig
, info
, -pid
);
493 return kill_proc_info(sig
, info
, pid
);
/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int
send_sig(int sig, struct task_struct *p, int priv)
{
	/* info encoded as 0 (user) or 1 (kernel-privileged) sentinel. */
	return send_sig_info(sig, (void*)(long)(priv != 0), p);
}
void
force_sig(int sig, struct task_struct *p)
{
	/* info == 1: kernel-originated, bypasses SI_FROMUSER checks. */
	force_sig_info(sig, (void*)1L, p);
}
514 kill_pg(pid_t pgrp
, int sig
, int priv
)
516 return kill_pg_info(sig
, (void *)(long)(priv
!= 0), pgrp
);
520 kill_sl(pid_t sess
, int sig
, int priv
)
522 return kill_sl_info(sig
, (void *)(long)(priv
!= 0), sess
);
526 kill_proc(pid_t pid
, int sig
, int priv
)
528 return kill_proc_info(sig
, (void *)(long)(priv
!= 0), pid
);
532 * Let a parent know about a status change of a child.
536 notify_parent(struct task_struct
*tsk
, int sig
)
543 info
.si_pid
= tsk
->pid
;
545 /* FIXME: find out whether or not this is supposed to be c*time. */
546 info
.si_utime
= tsk
->times
.tms_utime
;
547 info
.si_stime
= tsk
->times
.tms_stime
;
549 why
= SI_KERNEL
; /* shouldn't happen */
550 switch (tsk
->state
) {
552 if (tsk
->exit_code
& 0x80)
554 else if (tsk
->exit_code
& 0x7f)
560 /* FIXME -- can we deduce CLD_TRAPPED or CLD_CONTINUED? */
565 printk(KERN_DEBUG
"eh? notify_parent with state %ld?\n",
571 send_sig_info(sig
, &info
, tsk
->p_pptr
);
572 wake_up_interruptible(&tsk
->p_pptr
->wait_chldexit
);
EXPORT_SYMBOL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(force_sig_info);
EXPORT_SYMBOL(kill_pg);
EXPORT_SYMBOL(kill_pg_info);
EXPORT_SYMBOL(kill_proc);
EXPORT_SYMBOL(kill_proc_info);
EXPORT_SYMBOL(kill_sl);
EXPORT_SYMBOL(kill_sl_info);
EXPORT_SYMBOL(notify_parent);
EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
/*
 * System call entry points.
 */
596 * We don't need to get the kernel lock - this is all local to this
597 * particular thread.. (and that's good, because this is _heavily_
598 * used by various programs)
602 sys_rt_sigprocmask(int how
, sigset_t
*set
, sigset_t
*oset
, size_t sigsetsize
)
605 sigset_t old_set
, new_set
;
607 /* XXX: Don't preclude handling different sized sigset_t's. */
608 if (sigsetsize
!= sizeof(sigset_t
))
613 if (copy_from_user(&new_set
, set
, sizeof(*set
)))
615 sigdelsetmask(&new_set
, sigmask(SIGKILL
)|sigmask(SIGSTOP
));
617 spin_lock_irq(¤t
->sigmask_lock
);
618 old_set
= current
->blocked
;
626 sigorsets(&new_set
, &old_set
, &new_set
);
629 signandsets(&new_set
, &old_set
, &new_set
);
635 current
->blocked
= new_set
;
636 recalc_sigpending(current
);
637 spin_unlock_irq(¤t
->sigmask_lock
);
643 spin_lock_irq(¤t
->sigmask_lock
);
644 old_set
= current
->blocked
;
645 spin_unlock_irq(¤t
->sigmask_lock
);
649 if (copy_to_user(oset
, &old_set
, sizeof(*oset
)))
658 sys_rt_sigpending(sigset_t
*set
, size_t sigsetsize
)
663 /* XXX: Don't preclude handling different sized sigset_t's. */
664 if (sigsetsize
!= sizeof(sigset_t
))
667 spin_lock_irq(¤t
->sigmask_lock
);
668 sigandsets(&pending
, ¤t
->blocked
, ¤t
->signal
);
669 spin_unlock_irq(¤t
->sigmask_lock
);
672 if (!copy_to_user(set
, &pending
, sizeof(*set
)))
679 sys_rt_sigtimedwait(const sigset_t
*uthese
, siginfo_t
*uinfo
,
680 const struct timespec
*uts
, size_t sigsetsize
)
687 /* XXX: Don't preclude handling different sized sigset_t's. */
688 if (sigsetsize
!= sizeof(sigset_t
))
691 if (copy_from_user(&these
, uthese
, sizeof(these
)))
694 /* Invert the set of allowed signals to get those we
700 if (copy_from_user(&ts
, uts
, sizeof(ts
)))
702 if (ts
.tv_nsec
>= 1000000000L || ts
.tv_nsec
< 0
707 spin_lock_irq(¤t
->sigmask_lock
);
708 sig
= dequeue_signal(&these
, &info
);
710 /* None ready -- temporarily unblock those we're interested
711 in so that we'll be awakened when they arrive. */
712 unsigned long expire
;
713 sigset_t oldblocked
= current
->blocked
;
714 sigandsets(¤t
->blocked
, ¤t
->blocked
, &these
);
715 recalc_sigpending(current
);
716 spin_unlock_irq(¤t
->sigmask_lock
);
720 expire
= (timespec_to_jiffies(&ts
)
721 + (ts
.tv_sec
|| ts
.tv_nsec
));
724 current
->timeout
= expire
;
726 current
->state
= TASK_INTERRUPTIBLE
;
729 spin_lock_irq(¤t
->sigmask_lock
);
730 sig
= dequeue_signal(&these
, &info
);
731 current
->blocked
= oldblocked
;
732 recalc_sigpending(current
);
734 spin_unlock_irq(¤t
->sigmask_lock
);
739 if (copy_to_user(uinfo
, &info
, sizeof(siginfo_t
)))
744 if (current
->timeout
!= 0) {
745 current
->timeout
= 0;
754 sys_kill(int pid
, int sig
)
760 info
.si_code
= SI_USER
;
761 info
.si_pid
= current
->pid
;
762 info
.si_uid
= current
->uid
;
764 return kill_something_info(sig
, &info
, pid
);
768 sys_rt_sigqueueinfo(int pid
, int sig
, siginfo_t
*uinfo
)
772 if (copy_from_user(&info
, uinfo
, sizeof(siginfo_t
)))
775 /* Not even root can pretend to send signals from the kernel.
776 Nor can they impersonate a kill(), which adds source info. */
777 if (info
.si_code
>= 0)
781 /* POSIX.1b doesn't mention process groups. */
782 return kill_proc_info(sig
, &info
, pid
);
786 do_sigaction(int sig
, const struct k_sigaction
*act
, struct k_sigaction
*oact
)
788 struct k_sigaction
*k
;
790 if (sig
< 1 || sig
> _NSIG
||
791 (act
&& (sig
== SIGKILL
|| sig
== SIGSTOP
)))
794 spin_lock_irq(¤t
->sigmask_lock
);
795 k
= ¤t
->sig
->action
[sig
-1];
797 if (oact
) *oact
= *k
;
801 sigdelsetmask(&k
->sa
.sa_mask
, sigmask(SIGKILL
) | sigmask(SIGSTOP
));
805 * "Setting a signal action to SIG_IGN for a signal that is
806 * pending shall cause the pending signal to be discarded,
807 * whether or not it is blocked."
809 * "Setting a signal action to SIG_DFL for a signal that is
810 * pending and whose default action is to ignore the signal
811 * (for example, SIGCHLD), shall cause the pending signal to
812 * be discarded, whether or not it is blocked"
814 * Note the silly behaviour of SIGCHLD: SIG_IGN means that the
815 * signal isn't actually ignored, but does automatic child
816 * reaping, while SIG_DFL is explicitly said by POSIX to force
817 * the signal to be ignored.
820 if (k
->sa
.sa_handler
== SIG_IGN
821 || (k
->sa
.sa_handler
== SIG_DFL
822 && (sig
== SIGCONT
||
825 /* So dequeue any that might be pending.
826 XXX: process-wide signals? */
827 if (sig
>= SIGRTMIN
&&
828 sigismember(¤t
->signal
, sig
)) {
829 struct signal_queue
*q
, **pp
;
830 pp
= ¤t
->sigqueue
;
831 q
= current
->sigqueue
;
833 if (q
->info
.si_signo
!= sig
)
837 kmem_cache_free(signal_queue_cachep
, q
);
844 sigdelset(¤t
->signal
, sig
);
845 recalc_sigpending(current
);
849 spin_unlock_irq(¤t
->sigmask_lock
);
855 do_sigaltstack (const stack_t
*uss
, stack_t
*uoss
, unsigned long sp
)
861 oss
.ss_sp
= (void *) current
->sas_ss_sp
;
862 oss
.ss_size
= current
->sas_ss_size
;
863 oss
.ss_flags
= sas_ss_flags(sp
);
872 if (verify_area(VERIFY_READ
, uss
, sizeof(*uss
))
873 || __get_user(ss_sp
, &uss
->ss_sp
)
874 || __get_user(ss_flags
, &uss
->ss_flags
)
875 || __get_user(ss_size
, &uss
->ss_size
))
879 if (on_sig_stack (sp
))
883 if (ss_flags
& ~SS_DISABLE
)
886 if (ss_flags
& SS_DISABLE
) {
891 if (ss_size
< MINSIGSTKSZ
)
895 current
->sas_ss_sp
= (unsigned long) ss_sp
;
896 current
->sas_ss_size
= ss_size
;
901 if (copy_to_user(uoss
, &oss
, sizeof(oss
)))
910 #if !defined(__alpha__)
911 /* Alpha has its own versions with special arguments. */
914 sys_sigprocmask(int how
, old_sigset_t
*set
, old_sigset_t
*oset
)
917 old_sigset_t old_set
, new_set
;
921 if (copy_from_user(&new_set
, set
, sizeof(*set
)))
923 new_set
&= ~(sigmask(SIGKILL
)|sigmask(SIGSTOP
));
925 spin_lock_irq(¤t
->sigmask_lock
);
926 old_set
= current
->blocked
.sig
[0];
934 sigaddsetmask(¤t
->blocked
, new_set
);
937 sigdelsetmask(¤t
->blocked
, new_set
);
940 current
->blocked
.sig
[0] = new_set
;
944 recalc_sigpending(current
);
945 spin_unlock_irq(¤t
->sigmask_lock
);
951 old_set
= current
->blocked
.sig
[0];
954 if (copy_to_user(oset
, &old_set
, sizeof(*oset
)))
963 sys_sigpending(old_sigset_t
*set
)
966 old_sigset_t pending
;
968 spin_lock_irq(¤t
->sigmask_lock
);
969 pending
= current
->blocked
.sig
[0] & current
->signal
.sig
[0];
970 spin_unlock_irq(¤t
->sigmask_lock
);
973 if (!copy_to_user(set
, &pending
, sizeof(*set
)))
980 sys_rt_sigaction(int sig
, const struct sigaction
*act
, struct sigaction
*oact
,
983 struct k_sigaction new_sa
, old_sa
;
986 /* XXX: Don't preclude handling different sized sigset_t's. */
987 if (sigsetsize
!= sizeof(sigset_t
))
991 if (copy_from_user(&new_sa
.sa
, act
, sizeof(new_sa
.sa
)))
995 ret
= do_sigaction(sig
, act
? &new_sa
: NULL
, oact
? &old_sa
: NULL
);
998 if (copy_to_user(oact
, &old_sa
.sa
, sizeof(old_sa
.sa
)))
1004 #endif /* __sparc__ */
1007 #if !defined(__alpha__)
1009 * For backwards compatibility. Functionality superseded by sigprocmask.
1015 return current
->blocked
.sig
[0];
1019 sys_ssetmask(int newmask
)
1023 spin_lock_irq(¤t
->sigmask_lock
);
1024 old
= current
->blocked
.sig
[0];
1026 siginitset(¤t
->blocked
, newmask
& ~(sigmask(SIGKILL
)|
1028 recalc_sigpending(current
);
1029 spin_unlock_irq(¤t
->sigmask_lock
);
1035 * For backwards compatibility. Functionality superseded by sigaction.
1037 asmlinkage
unsigned long
1038 sys_signal(int sig
, __sighandler_t handler
)
1040 struct k_sigaction new_sa
, old_sa
;
1043 new_sa
.sa
.sa_handler
= handler
;
1044 new_sa
.sa
.sa_flags
= SA_ONESHOT
| SA_NOMASK
;
1046 ret
= do_sigaction(sig
, &new_sa
, &old_sa
);
1048 return ret
? ret
: (unsigned long)old_sa
.sa
.sa_handler
;