/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 */
9 #include <linux/config.h>
10 #include <linux/slab.h>
11 #include <linux/module.h>
12 #include <linux/unistd.h>
13 #include <linux/smp_lock.h>
14 #include <linux/init.h>
15 #include <linux/sched.h>
17 #include <asm/uaccess.h>
20 * SLAB caches for signal bits.
26 #define SIG_SLAB_DEBUG (SLAB_DEBUG_FREE | SLAB_RED_ZONE /* | SLAB_POISON */)
28 #define SIG_SLAB_DEBUG 0
31 static kmem_cache_t
*sigqueue_cachep
;
33 atomic_t nr_queued_signals
;
34 int max_queued_signals
= 1024;
36 void __init
signals_init(void)
39 kmem_cache_create("sigqueue",
40 sizeof(struct sigqueue
),
41 __alignof__(struct sigqueue
),
42 SIG_SLAB_DEBUG
, NULL
, NULL
);
44 panic("signals_init(): cannot create sigueue SLAB cache");
48 /* Given the mask, find the first available signal that should be serviced. */
51 next_signal(struct task_struct
*tsk
, sigset_t
*mask
)
53 unsigned long i
, *s
, *m
, x
;
56 s
= tsk
->pending
.signal
.sig
;
58 switch (_NSIG_WORDS
) {
60 for (i
= 0; i
< _NSIG_WORDS
; ++i
, ++s
, ++m
)
61 if ((x
= *s
&~ *m
) != 0) {
62 sig
= ffz(~x
) + i
*_NSIG_BPW
+ 1;
67 case 2: if ((x
= s
[0] &~ m
[0]) != 0)
69 else if ((x
= s
[1] &~ m
[1]) != 0)
76 case 1: if ((x
= *s
&~ *m
) != 0)
84 static void flush_sigqueue(struct sigpending
*queue
)
86 struct sigqueue
*q
, *n
;
88 sigemptyset(&queue
->signal
);
91 queue
->tail
= &queue
->head
;
95 kmem_cache_free(sigqueue_cachep
, q
);
96 atomic_dec(&nr_queued_signals
);
102 * Flush all pending signals for a task.
106 flush_signals(struct task_struct
*t
)
109 flush_sigqueue(&t
->pending
);
112 void exit_sighand(struct task_struct
*tsk
)
114 struct signal_struct
* sig
= tsk
->sig
;
116 spin_lock_irq(&tsk
->sigmask_lock
);
119 if (atomic_dec_and_test(&sig
->count
))
120 kmem_cache_free(sigact_cachep
, sig
);
123 flush_sigqueue(&tsk
->pending
);
124 spin_unlock_irq(&tsk
->sigmask_lock
);
128 * Flush all handlers for a task.
132 flush_signal_handlers(struct task_struct
*t
)
135 struct k_sigaction
*ka
= &t
->sig
->action
[0];
136 for (i
= _NSIG
; i
!= 0 ; i
--) {
137 if (ka
->sa
.sa_handler
!= SIG_IGN
)
138 ka
->sa
.sa_handler
= SIG_DFL
;
140 sigemptyset(&ka
->sa
.sa_mask
);
145 /* Notify the system that a driver wants to block all signals for this
146 * process, and wants to be notified if any signals at all were to be
147 * sent/acted upon. If the notifier routine returns non-zero, then the
148 * signal will be acted upon after all. If the notifier routine returns 0,
149 * then then signal will be blocked. Only one block per process is
150 * allowed. priv is a pointer to private data that the notifier routine
151 * can use to determine if the signal should be blocked or not. */
154 block_all_signals(int (*notifier
)(void *priv
), void *priv
, sigset_t
*mask
)
158 spin_lock_irqsave(¤t
->sigmask_lock
, flags
);
159 current
->notifier_mask
= mask
;
160 current
->notifier_data
= priv
;
161 current
->notifier
= notifier
;
162 spin_unlock_irqrestore(¤t
->sigmask_lock
, flags
);
165 /* Notify the system that blocking has ended. */
168 unblock_all_signals(void)
172 spin_lock_irqsave(¤t
->sigmask_lock
, flags
);
173 current
->notifier
= NULL
;
174 current
->notifier_data
= NULL
;
175 recalc_sigpending(current
);
176 spin_unlock_irqrestore(¤t
->sigmask_lock
, flags
);
179 static int collect_signal(int sig
, struct sigpending
*list
, siginfo_t
*info
)
181 if (sigismember(&list
->signal
, sig
)) {
182 /* Collect the siginfo appropriate to this signal. */
183 struct sigqueue
*q
, **pp
;
185 while ((q
= *pp
) != NULL
) {
186 if (q
->info
.si_signo
== sig
)
191 /* Ok, it wasn't in the queue. We must have
192 been out of queue space. So zero out the
194 info
->si_signo
= sig
;
202 if ((*pp
= q
->next
) == NULL
)
205 /* Copy the sigqueue information and free the queue entry */
206 copy_siginfo(info
, &q
->info
);
207 kmem_cache_free(sigqueue_cachep
,q
);
208 atomic_dec(&nr_queued_signals
);
210 /* Non-RT signals can exist multiple times.. */
211 if (sig
>= SIGRTMIN
) {
212 while ((q
= *pp
) != NULL
) {
213 if (q
->info
.si_signo
== sig
)
219 sigdelset(&list
->signal
, sig
);
227 * Dequeue a signal and return the element to the caller, which is
228 * expected to free it.
230 * All callers must be holding current->sigmask_lock.
234 dequeue_signal(sigset_t
*mask
, siginfo_t
*info
)
239 printk("SIG dequeue (%s:%d): %d ", current
->comm
, current
->pid
,
240 signal_pending(current
));
243 sig
= next_signal(current
, mask
);
244 if (current
->notifier
) {
245 if (sigismember(current
->notifier_mask
, sig
)) {
246 if (!(current
->notifier
)(current
->notifier_data
)) {
247 current
->sigpending
= 0;
254 if (!collect_signal(sig
, ¤t
->pending
, info
))
257 /* XXX: Once POSIX.1b timers are in, if si_code == SI_TIMER,
258 we need to xchg out the timer overrun values. */
260 recalc_sigpending(current
);
263 printk(" %d -> %d\n", signal_pending(current
), sig
);
269 static int rm_from_queue(int sig
, struct sigpending
*s
)
271 struct sigqueue
*q
, **pp
;
273 if (!sigismember(&s
->signal
, sig
))
276 sigdelset(&s
->signal
, sig
);
280 while ((q
= *pp
) != NULL
) {
281 if (q
->info
.si_signo
== sig
) {
282 if ((*pp
= q
->next
) == NULL
)
284 kmem_cache_free(sigqueue_cachep
,q
);
285 atomic_dec(&nr_queued_signals
);
294 * Remove signal sig from t->pending.
295 * Returns 1 if sig was found.
297 * All callers must be holding t->sigmask_lock.
299 static int rm_sig_from_queue(int sig
, struct task_struct
*t
)
301 return rm_from_queue(sig
, &t
->pending
);
305 * Bad permissions for sending the signal
307 int bad_signal(int sig
, struct siginfo
*info
, struct task_struct
*t
)
309 return (!info
|| ((unsigned long)info
!= 1 && SI_FROMUSER(info
)))
310 && ((sig
!= SIGCONT
) || (current
->session
!= t
->session
))
311 && (current
->euid
^ t
->suid
) && (current
->euid
^ t
->uid
)
312 && (current
->uid
^ t
->suid
) && (current
->uid
^ t
->uid
)
313 && !capable(CAP_KILL
);
318 * < 0 : global action (kill - spread to all non-blocked threads)
322 static int signal_type(int sig
, struct signal_struct
*signals
)
324 unsigned long handler
;
329 handler
= (unsigned long) signals
->action
[sig
-1].sa
.sa_handler
;
333 /* "Ignore" handler.. Illogical, but that has an implicit handler for SIGCHLD */
335 return sig
== SIGCHLD
;
337 /* Default handler. Normally lethal, but.. */
341 case SIGCONT
: case SIGWINCH
:
342 case SIGCHLD
: case SIGURG
:
345 /* Implicit behaviour */
346 case SIGTSTP
: case SIGTTIN
: case SIGTTOU
:
349 /* Implicit actions (kill or do special stuff) */
357 * Determine whether a signal should be posted or not.
359 * Signals with SIG_IGN can be ignored, except for the
360 * special case of a SIGCHLD.
362 * Some signals with SIG_DFL default to a non-action.
364 static int ignored_signal(int sig
, struct task_struct
*t
)
366 /* Don't ignore traced or blocked signals */
367 if ((t
->ptrace
& PT_PTRACED
) || sigismember(&t
->blocked
, sig
))
370 return signal_type(sig
, t
->sig
) == 0;
374 * Handle TASK_STOPPED cases etc implicit behaviour
375 * of certain magical signals.
377 * SIGKILL gets spread out to every thread.
379 static void handle_stop_signal(int sig
, struct task_struct
*t
)
382 case SIGKILL
: case SIGCONT
:
383 /* Wake up the process if stopped. */
384 if (t
->state
== TASK_STOPPED
)
387 rm_sig_from_queue(SIGSTOP
, t
);
388 rm_sig_from_queue(SIGTSTP
, t
);
389 rm_sig_from_queue(SIGTTOU
, t
);
390 rm_sig_from_queue(SIGTTIN
, t
);
393 case SIGSTOP
: case SIGTSTP
:
394 case SIGTTIN
: case SIGTTOU
:
395 /* If we're stopping again, cancel SIGCONT */
396 rm_sig_from_queue(SIGCONT
, t
);
401 static int send_signal(int sig
, struct siginfo
*info
, struct sigpending
*signals
)
403 struct sigqueue
* q
= NULL
;
405 /* Real-time signals must be queued if sent by sigqueue, or
406 some other real-time mechanism. It is implementation
407 defined whether kill() does so. We attempt to do so, on
408 the principle of least surprise, but since kill is not
409 allowed to fail with EAGAIN when low on memory we just
410 make sure at least one signal gets delivered and don't
411 pass on the info struct. */
413 if (atomic_read(&nr_queued_signals
) < max_queued_signals
) {
414 q
= kmem_cache_alloc(sigqueue_cachep
, GFP_ATOMIC
);
418 atomic_inc(&nr_queued_signals
);
421 signals
->tail
= &q
->next
;
422 switch ((unsigned long) info
) {
424 q
->info
.si_signo
= sig
;
425 q
->info
.si_errno
= 0;
426 q
->info
.si_code
= SI_USER
;
427 q
->info
.si_pid
= current
->pid
;
428 q
->info
.si_uid
= current
->uid
;
431 q
->info
.si_signo
= sig
;
432 q
->info
.si_errno
= 0;
433 q
->info
.si_code
= SI_KERNEL
;
438 copy_siginfo(&q
->info
, info
);
441 } else if (sig
>= SIGRTMIN
&& info
&& (unsigned long)info
!= 1
442 && info
->si_code
!= SI_USER
) {
444 * Queue overflow, abort. We may abort if the signal was rt
445 * and sent by user using something other than kill().
450 sigaddset(&signals
->signal
, sig
);
455 * Tell a process that it has a new active signal..
457 * NOTE! we rely on the previous spin_lock to
458 * lock interrupts for us! We can only be called with
459 * "sigmask_lock" held, and the local interrupt must
460 * have been disabled when that got aquired!
462 * No need to set need_resched since signal event passing
463 * goes through ->blocked
465 static inline void signal_wake_up(struct task_struct
*t
)
469 if (t
->state
& TASK_INTERRUPTIBLE
) {
476 * If the task is running on a different CPU
477 * force a reschedule on the other CPU to make
478 * it notice the new signal quickly.
480 * The code below is a tad loose and might occasionally
481 * kick the wrong CPU if we catch the process in the
482 * process of changing - but no harm is done by that
483 * other than doing an extra (lightweight) IPI interrupt.
485 spin_lock(&runqueue_lock
);
486 if (t
->has_cpu
&& t
->processor
!= smp_processor_id())
487 smp_send_reschedule(t
->processor
);
488 spin_unlock(&runqueue_lock
);
489 #endif /* CONFIG_SMP */
492 static int deliver_signal(int sig
, struct siginfo
*info
, struct task_struct
*t
)
494 int retval
= send_signal(sig
, info
, &t
->pending
);
496 if (!retval
&& !sigismember(&t
->blocked
, sig
))
503 send_sig_info(int sig
, struct siginfo
*info
, struct task_struct
*t
)
510 printk("SIG queue (%s:%d): %d ", t
->comm
, t
->pid
, sig
);
514 if (sig
< 0 || sig
> _NSIG
)
516 /* The somewhat baroque permissions check... */
518 if (bad_signal(sig
, info
, t
))
521 /* The null signal is a permissions and process existance probe.
522 No signal is actually delivered. Same goes for zombies. */
527 spin_lock_irqsave(&t
->sigmask_lock
, flags
);
528 handle_stop_signal(sig
, t
);
530 /* Optimize away the signal, if it's a signal that can be
531 handled immediately (ie non-blocked and untraced) and
532 that is ignored (either explicitly or by default). */
534 if (ignored_signal(sig
, t
))
537 /* Support queueing exactly one non-rt signal, so that we
538 can get more detailed information about the cause of
540 if (sig
< SIGRTMIN
&& sigismember(&t
->pending
.signal
, sig
))
543 ret
= deliver_signal(sig
, info
, t
);
545 spin_unlock_irqrestore(&t
->sigmask_lock
, flags
);
546 if ((t
->state
& TASK_INTERRUPTIBLE
) && signal_pending(t
))
550 printk(" %d -> %d\n", signal_pending(t
), ret
);
557 * Force a signal that the process can't ignore: if necessary
558 * we unblock the signal and change any SIG_IGN to SIG_DFL.
562 force_sig_info(int sig
, struct siginfo
*info
, struct task_struct
*t
)
564 unsigned long int flags
;
566 spin_lock_irqsave(&t
->sigmask_lock
, flags
);
567 if (t
->sig
== NULL
) {
568 spin_unlock_irqrestore(&t
->sigmask_lock
, flags
);
572 if (t
->sig
->action
[sig
-1].sa
.sa_handler
== SIG_IGN
)
573 t
->sig
->action
[sig
-1].sa
.sa_handler
= SIG_DFL
;
574 sigdelset(&t
->blocked
, sig
);
575 recalc_sigpending(t
);
576 spin_unlock_irqrestore(&t
->sigmask_lock
, flags
);
578 return send_sig_info(sig
, info
, t
);
582 * kill_pg_info() sends a signal to a process group: this is what the tty
583 * control characters do (^C, ^Z etc)
587 kill_pg_info(int sig
, struct siginfo
*info
, pid_t pgrp
)
589 int retval
= -EINVAL
;
591 struct task_struct
*p
;
594 read_lock(&tasklist_lock
);
596 if (p
->pgrp
== pgrp
) {
597 int err
= send_sig_info(sig
, info
, p
);
602 read_unlock(&tasklist_lock
);
608 * kill_sl_info() sends a signal to the session leader: this is used
609 * to send SIGHUP to the controlling process of a terminal when
610 * the connection is lost.
614 kill_sl_info(int sig
, struct siginfo
*info
, pid_t sess
)
616 int retval
= -EINVAL
;
618 struct task_struct
*p
;
621 read_lock(&tasklist_lock
);
623 if (p
->leader
&& p
->session
== sess
) {
624 int err
= send_sig_info(sig
, info
, p
);
629 read_unlock(&tasklist_lock
);
635 kill_proc_info(int sig
, struct siginfo
*info
, pid_t pid
)
638 struct task_struct
*p
;
640 read_lock(&tasklist_lock
);
641 p
= find_task_by_pid(pid
);
644 error
= send_sig_info(sig
, info
, p
);
645 read_unlock(&tasklist_lock
);
651 * kill_something_info() interprets pid in interesting ways just like kill(2).
653 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
654 * is probably wrong. Should make it like BSD or SYSV.
657 static int kill_something_info(int sig
, struct siginfo
*info
, int pid
)
660 return kill_pg_info(sig
, info
, current
->pgrp
);
661 } else if (pid
== -1) {
662 int retval
= 0, count
= 0;
663 struct task_struct
* p
;
665 read_lock(&tasklist_lock
);
667 if (p
->pid
> 1 && p
!= current
) {
668 int err
= send_sig_info(sig
, info
, p
);
674 read_unlock(&tasklist_lock
);
675 return count
? retval
: -ESRCH
;
676 } else if (pid
< 0) {
677 return kill_pg_info(sig
, info
, -pid
);
679 return kill_proc_info(sig
, info
, pid
);
/*
 * These are for backward compatibility with the rest of the kernel source.
 * priv != 0 marks the signal as kernel-originated (info pointer 1).
 */

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, (void*)(long)(priv != 0), p);
}
/* Compatibility wrapper: force a kernel-originated signal. */
void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, (void*)1L, p);
}
700 kill_pg(pid_t pgrp
, int sig
, int priv
)
702 return kill_pg_info(sig
, (void *)(long)(priv
!= 0), pgrp
);
706 kill_sl(pid_t sess
, int sig
, int priv
)
708 return kill_sl_info(sig
, (void *)(long)(priv
!= 0), sess
);
712 kill_proc(pid_t pid
, int sig
, int priv
)
714 return kill_proc_info(sig
, (void *)(long)(priv
!= 0), pid
);
718 * Joy. Or not. Pthread wants us to wake up every thread
719 * in our parent group.
721 static void wake_up_parent(struct task_struct
*parent
)
723 struct task_struct
*tsk
= parent
;
726 wake_up_interruptible(&tsk
->wait_chldexit
);
727 tsk
= next_thread(tsk
);
728 } while (tsk
!= parent
);
732 * Let a parent know about a status change of a child.
735 void do_notify_parent(struct task_struct
*tsk
, int sig
)
742 info
.si_pid
= tsk
->pid
;
743 info
.si_uid
= tsk
->uid
;
745 /* FIXME: find out whether or not this is supposed to be c*time. */
746 info
.si_utime
= tsk
->times
.tms_utime
;
747 info
.si_stime
= tsk
->times
.tms_stime
;
749 status
= tsk
->exit_code
& 0x7f;
750 why
= SI_KERNEL
; /* shouldn't happen */
751 switch (tsk
->state
) {
753 if (tsk
->exit_code
& 0x80)
755 else if (tsk
->exit_code
& 0x7f)
759 status
= tsk
->exit_code
>> 8;
763 /* FIXME -- can we deduce CLD_TRAPPED or CLD_CONTINUED? */
764 if (tsk
->ptrace
& PT_PTRACED
)
771 printk(KERN_DEBUG
"eh? notify_parent with state %ld?\n",
776 info
.si_status
= status
;
778 send_sig_info(sig
, &info
, tsk
->p_pptr
);
779 wake_up_parent(tsk
->p_pptr
);
784 * We need the tasklist lock because it's the only
785 * thing that protects out "parent" pointer.
787 * exit.c calls "do_notify_parent()" directly, because
788 * it already has the tasklist lock.
791 notify_parent(struct task_struct
*tsk
, int sig
)
793 read_lock(&tasklist_lock
);
794 do_notify_parent(tsk
, sig
);
795 read_unlock(&tasklist_lock
);
EXPORT_SYMBOL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(force_sig_info);
EXPORT_SYMBOL(kill_pg);
EXPORT_SYMBOL(kill_pg_info);
EXPORT_SYMBOL(kill_proc);
EXPORT_SYMBOL(kill_proc_info);
EXPORT_SYMBOL(kill_sl);
EXPORT_SYMBOL(kill_sl_info);
EXPORT_SYMBOL(notify_parent);
EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
EXPORT_SYMBOL(block_all_signals);
EXPORT_SYMBOL(unblock_all_signals);
817 * System call entry points.
821 * We don't need to get the kernel lock - this is all local to this
822 * particular thread.. (and that's good, because this is _heavily_
823 * used by various programs)
827 sys_rt_sigprocmask(int how
, sigset_t
*set
, sigset_t
*oset
, size_t sigsetsize
)
830 sigset_t old_set
, new_set
;
832 /* XXX: Don't preclude handling different sized sigset_t's. */
833 if (sigsetsize
!= sizeof(sigset_t
))
838 if (copy_from_user(&new_set
, set
, sizeof(*set
)))
840 sigdelsetmask(&new_set
, sigmask(SIGKILL
)|sigmask(SIGSTOP
));
842 spin_lock_irq(¤t
->sigmask_lock
);
843 old_set
= current
->blocked
;
851 sigorsets(&new_set
, &old_set
, &new_set
);
854 signandsets(&new_set
, &old_set
, &new_set
);
860 current
->blocked
= new_set
;
861 recalc_sigpending(current
);
862 spin_unlock_irq(¤t
->sigmask_lock
);
868 spin_lock_irq(¤t
->sigmask_lock
);
869 old_set
= current
->blocked
;
870 spin_unlock_irq(¤t
->sigmask_lock
);
874 if (copy_to_user(oset
, &old_set
, sizeof(*oset
)))
882 long do_sigpending(void *set
, unsigned long sigsetsize
)
884 long error
= -EINVAL
;
887 if (sigsetsize
> sizeof(sigset_t
))
890 spin_lock_irq(¤t
->sigmask_lock
);
891 sigandsets(&pending
, ¤t
->blocked
, ¤t
->pending
.signal
);
892 spin_unlock_irq(¤t
->sigmask_lock
);
895 if (!copy_to_user(set
, &pending
, sigsetsize
))
902 sys_rt_sigpending(sigset_t
*set
, size_t sigsetsize
)
904 return do_sigpending(set
, sigsetsize
);
908 sys_rt_sigtimedwait(const sigset_t
*uthese
, siginfo_t
*uinfo
,
909 const struct timespec
*uts
, size_t sigsetsize
)
917 /* XXX: Don't preclude handling different sized sigset_t's. */
918 if (sigsetsize
!= sizeof(sigset_t
))
921 if (copy_from_user(&these
, uthese
, sizeof(these
)))
925 * Invert the set of allowed signals to get those we
928 sigdelsetmask(&these
, sigmask(SIGKILL
)|sigmask(SIGSTOP
));
932 if (copy_from_user(&ts
, uts
, sizeof(ts
)))
934 if (ts
.tv_nsec
>= 1000000000L || ts
.tv_nsec
< 0
939 spin_lock_irq(¤t
->sigmask_lock
);
940 sig
= dequeue_signal(&these
, &info
);
942 /* None ready -- temporarily unblock those we're interested
943 in so that we'll be awakened when they arrive. */
944 sigset_t oldblocked
= current
->blocked
;
945 sigandsets(¤t
->blocked
, ¤t
->blocked
, &these
);
946 recalc_sigpending(current
);
947 spin_unlock_irq(¤t
->sigmask_lock
);
949 timeout
= MAX_SCHEDULE_TIMEOUT
;
951 timeout
= (timespec_to_jiffies(&ts
)
952 + (ts
.tv_sec
|| ts
.tv_nsec
));
954 current
->state
= TASK_INTERRUPTIBLE
;
955 timeout
= schedule_timeout(timeout
);
957 spin_lock_irq(¤t
->sigmask_lock
);
958 sig
= dequeue_signal(&these
, &info
);
959 current
->blocked
= oldblocked
;
960 recalc_sigpending(current
);
962 spin_unlock_irq(¤t
->sigmask_lock
);
967 if (copy_siginfo_to_user(uinfo
, &info
))
980 sys_kill(int pid
, int sig
)
986 info
.si_code
= SI_USER
;
987 info
.si_pid
= current
->pid
;
988 info
.si_uid
= current
->uid
;
990 return kill_something_info(sig
, &info
, pid
);
994 sys_rt_sigqueueinfo(int pid
, int sig
, siginfo_t
*uinfo
)
998 if (copy_from_user(&info
, uinfo
, sizeof(siginfo_t
)))
1001 /* Not even root can pretend to send signals from the kernel.
1002 Nor can they impersonate a kill(), which adds source info. */
1003 if (info
.si_code
>= 0)
1005 info
.si_signo
= sig
;
1007 /* POSIX.1b doesn't mention process groups. */
1008 return kill_proc_info(sig
, &info
, pid
);
1012 do_sigaction(int sig
, const struct k_sigaction
*act
, struct k_sigaction
*oact
)
1014 struct k_sigaction
*k
;
1016 if (sig
< 1 || sig
> _NSIG
||
1017 (act
&& (sig
== SIGKILL
|| sig
== SIGSTOP
)))
1020 k
= ¤t
->sig
->action
[sig
-1];
1022 spin_lock(¤t
->sig
->siglock
);
1029 sigdelsetmask(&k
->sa
.sa_mask
, sigmask(SIGKILL
) | sigmask(SIGSTOP
));
1033 * "Setting a signal action to SIG_IGN for a signal that is
1034 * pending shall cause the pending signal to be discarded,
1035 * whether or not it is blocked."
1037 * "Setting a signal action to SIG_DFL for a signal that is
1038 * pending and whose default action is to ignore the signal
1039 * (for example, SIGCHLD), shall cause the pending signal to
1040 * be discarded, whether or not it is blocked"
1042 * Note the silly behaviour of SIGCHLD: SIG_IGN means that the
1043 * signal isn't actually ignored, but does automatic child
1044 * reaping, while SIG_DFL is explicitly said by POSIX to force
1045 * the signal to be ignored.
1048 if (k
->sa
.sa_handler
== SIG_IGN
1049 || (k
->sa
.sa_handler
== SIG_DFL
1050 && (sig
== SIGCONT
||
1052 sig
== SIGWINCH
))) {
1053 spin_lock_irq(¤t
->sigmask_lock
);
1054 if (rm_sig_from_queue(sig
, current
))
1055 recalc_sigpending(current
);
1056 spin_unlock_irq(¤t
->sigmask_lock
);
1060 spin_unlock(¤t
->sig
->siglock
);
1065 do_sigaltstack (const stack_t
*uss
, stack_t
*uoss
, unsigned long sp
)
1071 oss
.ss_sp
= (void *) current
->sas_ss_sp
;
1072 oss
.ss_size
= current
->sas_ss_size
;
1073 oss
.ss_flags
= sas_ss_flags(sp
);
1082 if (verify_area(VERIFY_READ
, uss
, sizeof(*uss
))
1083 || __get_user(ss_sp
, &uss
->ss_sp
)
1084 || __get_user(ss_flags
, &uss
->ss_flags
)
1085 || __get_user(ss_size
, &uss
->ss_size
))
1089 if (on_sig_stack (sp
))
1095 * Note - this code used to test ss_flags incorrectly
1096 * old code may have been written using ss_flags==0
1097 * to mean ss_flags==SS_ONSTACK (as this was the only
1098 * way that worked) - this fix preserves that older
1101 if (ss_flags
!= SS_DISABLE
&& ss_flags
!= SS_ONSTACK
&& ss_flags
!= 0)
1104 if (ss_flags
== SS_DISABLE
) {
1109 if (ss_size
< MINSIGSTKSZ
)
1113 current
->sas_ss_sp
= (unsigned long) ss_sp
;
1114 current
->sas_ss_size
= ss_size
;
1119 if (copy_to_user(uoss
, &oss
, sizeof(oss
)))
1129 sys_sigpending(old_sigset_t
*set
)
1131 return do_sigpending(set
, sizeof(*set
));
1134 #if !defined(__alpha__)
1135 /* Alpha has its own versions with special arguments. */
1138 sys_sigprocmask(int how
, old_sigset_t
*set
, old_sigset_t
*oset
)
1141 old_sigset_t old_set
, new_set
;
1145 if (copy_from_user(&new_set
, set
, sizeof(*set
)))
1147 new_set
&= ~(sigmask(SIGKILL
)|sigmask(SIGSTOP
));
1149 spin_lock_irq(¤t
->sigmask_lock
);
1150 old_set
= current
->blocked
.sig
[0];
1158 sigaddsetmask(¤t
->blocked
, new_set
);
1161 sigdelsetmask(¤t
->blocked
, new_set
);
1164 current
->blocked
.sig
[0] = new_set
;
1168 recalc_sigpending(current
);
1169 spin_unlock_irq(¤t
->sigmask_lock
);
1175 old_set
= current
->blocked
.sig
[0];
1178 if (copy_to_user(oset
, &old_set
, sizeof(*oset
)))
1188 sys_rt_sigaction(int sig
, const struct sigaction
*act
, struct sigaction
*oact
,
1191 struct k_sigaction new_sa
, old_sa
;
1194 /* XXX: Don't preclude handling different sized sigset_t's. */
1195 if (sigsetsize
!= sizeof(sigset_t
))
1199 if (copy_from_user(&new_sa
.sa
, act
, sizeof(new_sa
.sa
)))
1203 ret
= do_sigaction(sig
, act
? &new_sa
: NULL
, oact
? &old_sa
: NULL
);
1206 if (copy_to_user(oact
, &old_sa
.sa
, sizeof(old_sa
.sa
)))
1212 #endif /* __sparc__ */
1215 #if !defined(__alpha__) && !defined(__ia64__)
1217 * For backwards compatibility. Functionality superseded by sigprocmask.
1223 return current
->blocked
.sig
[0];
1227 sys_ssetmask(int newmask
)
1231 spin_lock_irq(¤t
->sigmask_lock
);
1232 old
= current
->blocked
.sig
[0];
1234 siginitset(¤t
->blocked
, newmask
& ~(sigmask(SIGKILL
)|
1236 recalc_sigpending(current
);
1237 spin_unlock_irq(¤t
->sigmask_lock
);
1241 #endif /* !defined(__alpha__) */
1243 #if !defined(__alpha__) && !defined(__ia64__) && !defined(__mips__)
1245 * For backwards compatibility. Functionality superseded by sigaction.
1247 asmlinkage
unsigned long
1248 sys_signal(int sig
, __sighandler_t handler
)
1250 struct k_sigaction new_sa
, old_sa
;
1253 new_sa
.sa
.sa_handler
= handler
;
1254 new_sa
.sa
.sa_flags
= SA_ONESHOT
| SA_NOMASK
;
1256 ret
= do_sigaction(sig
, &new_sa
, &old_sa
);
1258 return ret
? ret
: (unsigned long)old_sa
.sa
.sa_handler
;
1260 #endif /* !alpha && !__ia64__ && !defined(__mips__) */