/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 */
9 #include <linux/config.h>
10 #include <linux/slab.h>
11 #include <linux/module.h>
12 #include <linux/unistd.h>
13 #include <linux/smp_lock.h>
14 #include <linux/init.h>
15 #include <linux/sched.h>
17 #include <asm/uaccess.h>
20 * SLAB caches for signal bits.
26 #define SIG_SLAB_DEBUG (SLAB_DEBUG_FREE | SLAB_RED_ZONE /* | SLAB_POISON */)
28 #define SIG_SLAB_DEBUG 0
31 static kmem_cache_t
*sigqueue_cachep
;
33 atomic_t nr_queued_signals
;
34 int max_queued_signals
= 1024;
36 void __init
signals_init(void)
39 kmem_cache_create("sigqueue",
40 sizeof(struct sigqueue
),
41 __alignof__(struct sigqueue
),
42 SIG_SLAB_DEBUG
, NULL
, NULL
);
44 panic("signals_init(): cannot create sigqueue SLAB cache");
48 /* Given the mask, find the first available signal that should be serviced. */
51 next_signal(struct task_struct
*tsk
, sigset_t
*mask
)
53 unsigned long i
, *s
, *m
, x
;
56 s
= tsk
->pending
.signal
.sig
;
58 switch (_NSIG_WORDS
) {
60 for (i
= 0; i
< _NSIG_WORDS
; ++i
, ++s
, ++m
)
61 if ((x
= *s
&~ *m
) != 0) {
62 sig
= ffz(~x
) + i
*_NSIG_BPW
+ 1;
67 case 2: if ((x
= s
[0] &~ m
[0]) != 0)
69 else if ((x
= s
[1] &~ m
[1]) != 0)
76 case 1: if ((x
= *s
&~ *m
) != 0)
84 static void flush_sigqueue(struct sigpending
*queue
)
86 struct sigqueue
*q
, *n
;
88 sigemptyset(&queue
->signal
);
91 queue
->tail
= &queue
->head
;
95 kmem_cache_free(sigqueue_cachep
, q
);
96 atomic_dec(&nr_queued_signals
);
102 * Flush all pending signals for a task.
106 flush_signals(struct task_struct
*t
)
109 flush_sigqueue(&t
->pending
);
112 void exit_sighand(struct task_struct
*tsk
)
114 struct signal_struct
* sig
= tsk
->sig
;
116 spin_lock_irq(&tsk
->sigmask_lock
);
119 if (atomic_dec_and_test(&sig
->count
))
120 kmem_cache_free(sigact_cachep
, sig
);
123 flush_sigqueue(&tsk
->pending
);
124 spin_unlock_irq(&tsk
->sigmask_lock
);
128 * Flush all handlers for a task.
132 flush_signal_handlers(struct task_struct
*t
)
135 struct k_sigaction
*ka
= &t
->sig
->action
[0];
136 for (i
= _NSIG
; i
!= 0 ; i
--) {
137 if (ka
->sa
.sa_handler
!= SIG_IGN
)
138 ka
->sa
.sa_handler
= SIG_DFL
;
140 sigemptyset(&ka
->sa
.sa_mask
);
145 /* Notify the system that a driver wants to block all signals for this
146 * process, and wants to be notified if any signals at all were to be
147 * sent/acted upon. If the notifier routine returns non-zero, then the
148 * signal will be acted upon after all. If the notifier routine returns 0,
149 * then then signal will be blocked. Only one block per process is
150 * allowed. priv is a pointer to private data that the notifier routine
151 * can use to determine if the signal should be blocked or not. */
154 block_all_signals(int (*notifier
)(void *priv
), void *priv
, sigset_t
*mask
)
158 spin_lock_irqsave(¤t
->sigmask_lock
, flags
);
159 current
->notifier_mask
= mask
;
160 current
->notifier_data
= priv
;
161 current
->notifier
= notifier
;
162 spin_unlock_irqrestore(¤t
->sigmask_lock
, flags
);
165 /* Notify the system that blocking has ended. */
168 unblock_all_signals(void)
172 spin_lock_irqsave(¤t
->sigmask_lock
, flags
);
173 current
->notifier
= NULL
;
174 current
->notifier_data
= NULL
;
175 recalc_sigpending(current
);
176 spin_unlock_irqrestore(¤t
->sigmask_lock
, flags
);
179 static int collect_signal(int sig
, struct sigpending
*list
, siginfo_t
*info
)
181 if (sigismember(&list
->signal
, sig
)) {
182 /* Collect the siginfo appropriate to this signal. */
183 struct sigqueue
*q
, **pp
;
185 while ((q
= *pp
) != NULL
) {
186 if (q
->info
.si_signo
== sig
)
191 /* Ok, it wasn't in the queue. We must have
192 been out of queue space. So zero out the
194 sigdelset(&list
->signal
, sig
);
195 info
->si_signo
= sig
;
203 if ((*pp
= q
->next
) == NULL
)
206 /* Copy the sigqueue information and free the queue entry */
207 copy_siginfo(info
, &q
->info
);
208 kmem_cache_free(sigqueue_cachep
,q
);
209 atomic_dec(&nr_queued_signals
);
211 /* Non-RT signals can exist multiple times.. */
212 if (sig
>= SIGRTMIN
) {
213 while ((q
= *pp
) != NULL
) {
214 if (q
->info
.si_signo
== sig
)
220 sigdelset(&list
->signal
, sig
);
228 * Dequeue a signal and return the element to the caller, which is
229 * expected to free it.
231 * All callers must be holding current->sigmask_lock.
235 dequeue_signal(sigset_t
*mask
, siginfo_t
*info
)
240 printk("SIG dequeue (%s:%d): %d ", current
->comm
, current
->pid
,
241 signal_pending(current
));
244 sig
= next_signal(current
, mask
);
245 if (current
->notifier
) {
246 if (sigismember(current
->notifier_mask
, sig
)) {
247 if (!(current
->notifier
)(current
->notifier_data
)) {
248 current
->sigpending
= 0;
255 if (!collect_signal(sig
, ¤t
->pending
, info
))
258 /* XXX: Once POSIX.1b timers are in, if si_code == SI_TIMER,
259 we need to xchg out the timer overrun values. */
261 recalc_sigpending(current
);
264 printk(" %d -> %d\n", signal_pending(current
), sig
);
270 static int rm_from_queue(int sig
, struct sigpending
*s
)
272 struct sigqueue
*q
, **pp
;
274 if (!sigismember(&s
->signal
, sig
))
277 sigdelset(&s
->signal
, sig
);
281 while ((q
= *pp
) != NULL
) {
282 if (q
->info
.si_signo
== sig
) {
283 if ((*pp
= q
->next
) == NULL
)
285 kmem_cache_free(sigqueue_cachep
,q
);
286 atomic_dec(&nr_queued_signals
);
295 * Remove signal sig from t->pending.
296 * Returns 1 if sig was found.
298 * All callers must be holding t->sigmask_lock.
300 static int rm_sig_from_queue(int sig
, struct task_struct
*t
)
302 return rm_from_queue(sig
, &t
->pending
);
306 * Bad permissions for sending the signal
308 int bad_signal(int sig
, struct siginfo
*info
, struct task_struct
*t
)
310 return (!info
|| ((unsigned long)info
!= 1 && SI_FROMUSER(info
)))
311 && ((sig
!= SIGCONT
) || (current
->session
!= t
->session
))
312 && (current
->euid
^ t
->suid
) && (current
->euid
^ t
->uid
)
313 && (current
->uid
^ t
->suid
) && (current
->uid
^ t
->uid
)
314 && !capable(CAP_KILL
);
319 * < 0 : global action (kill - spread to all non-blocked threads)
323 static int signal_type(int sig
, struct signal_struct
*signals
)
325 unsigned long handler
;
330 handler
= (unsigned long) signals
->action
[sig
-1].sa
.sa_handler
;
334 /* "Ignore" handler.. Illogical, but that has an implicit handler for SIGCHLD */
336 return sig
== SIGCHLD
;
338 /* Default handler. Normally lethal, but.. */
342 case SIGCONT
: case SIGWINCH
:
343 case SIGCHLD
: case SIGURG
:
346 /* Implicit behaviour */
347 case SIGTSTP
: case SIGTTIN
: case SIGTTOU
:
350 /* Implicit actions (kill or do special stuff) */
358 * Determine whether a signal should be posted or not.
360 * Signals with SIG_IGN can be ignored, except for the
361 * special case of a SIGCHLD.
363 * Some signals with SIG_DFL default to a non-action.
365 static int ignored_signal(int sig
, struct task_struct
*t
)
367 /* Don't ignore traced or blocked signals */
368 if ((t
->ptrace
& PT_PTRACED
) || sigismember(&t
->blocked
, sig
))
371 return signal_type(sig
, t
->sig
) == 0;
375 * Handle TASK_STOPPED cases etc implicit behaviour
376 * of certain magical signals.
378 * SIGKILL gets spread out to every thread.
380 static void handle_stop_signal(int sig
, struct task_struct
*t
)
383 case SIGKILL
: case SIGCONT
:
384 /* Wake up the process if stopped. */
385 if (t
->state
== TASK_STOPPED
)
388 rm_sig_from_queue(SIGSTOP
, t
);
389 rm_sig_from_queue(SIGTSTP
, t
);
390 rm_sig_from_queue(SIGTTOU
, t
);
391 rm_sig_from_queue(SIGTTIN
, t
);
394 case SIGSTOP
: case SIGTSTP
:
395 case SIGTTIN
: case SIGTTOU
:
396 /* If we're stopping again, cancel SIGCONT */
397 rm_sig_from_queue(SIGCONT
, t
);
402 static int send_signal(int sig
, struct siginfo
*info
, struct sigpending
*signals
)
404 struct sigqueue
* q
= NULL
;
406 /* Real-time signals must be queued if sent by sigqueue, or
407 some other real-time mechanism. It is implementation
408 defined whether kill() does so. We attempt to do so, on
409 the principle of least surprise, but since kill is not
410 allowed to fail with EAGAIN when low on memory we just
411 make sure at least one signal gets delivered and don't
412 pass on the info struct. */
414 if (atomic_read(&nr_queued_signals
) < max_queued_signals
) {
415 q
= kmem_cache_alloc(sigqueue_cachep
, GFP_ATOMIC
);
419 atomic_inc(&nr_queued_signals
);
422 signals
->tail
= &q
->next
;
423 switch ((unsigned long) info
) {
425 q
->info
.si_signo
= sig
;
426 q
->info
.si_errno
= 0;
427 q
->info
.si_code
= SI_USER
;
428 q
->info
.si_pid
= current
->pid
;
429 q
->info
.si_uid
= current
->uid
;
432 q
->info
.si_signo
= sig
;
433 q
->info
.si_errno
= 0;
434 q
->info
.si_code
= SI_KERNEL
;
439 copy_siginfo(&q
->info
, info
);
442 } else if (sig
>= SIGRTMIN
&& info
&& (unsigned long)info
!= 1
443 && info
->si_code
!= SI_USER
) {
445 * Queue overflow, abort. We may abort if the signal was rt
446 * and sent by user using something other than kill().
451 sigaddset(&signals
->signal
, sig
);
456 * Tell a process that it has a new active signal..
458 * NOTE! we rely on the previous spin_lock to
459 * lock interrupts for us! We can only be called with
460 * "sigmask_lock" held, and the local interrupt must
461 * have been disabled when that got acquired!
463 * No need to set need_resched since signal event passing
464 * goes through ->blocked
466 static inline void signal_wake_up(struct task_struct
*t
)
470 if (t
->state
& TASK_INTERRUPTIBLE
) {
477 * If the task is running on a different CPU
478 * force a reschedule on the other CPU to make
479 * it notice the new signal quickly.
481 * The code below is a tad loose and might occasionally
482 * kick the wrong CPU if we catch the process in the
483 * process of changing - but no harm is done by that
484 * other than doing an extra (lightweight) IPI interrupt.
486 spin_lock(&runqueue_lock
);
487 if (t
->has_cpu
&& t
->processor
!= smp_processor_id())
488 smp_send_reschedule(t
->processor
);
489 spin_unlock(&runqueue_lock
);
490 #endif /* CONFIG_SMP */
493 static int deliver_signal(int sig
, struct siginfo
*info
, struct task_struct
*t
)
495 int retval
= send_signal(sig
, info
, &t
->pending
);
497 if (!retval
&& !sigismember(&t
->blocked
, sig
))
504 send_sig_info(int sig
, struct siginfo
*info
, struct task_struct
*t
)
511 printk("SIG queue (%s:%d): %d ", t
->comm
, t
->pid
, sig
);
515 if (sig
< 0 || sig
> _NSIG
)
517 /* The somewhat baroque permissions check... */
519 if (bad_signal(sig
, info
, t
))
522 /* The null signal is a permissions and process existance probe.
523 No signal is actually delivered. Same goes for zombies. */
528 spin_lock_irqsave(&t
->sigmask_lock
, flags
);
529 handle_stop_signal(sig
, t
);
531 /* Optimize away the signal, if it's a signal that can be
532 handled immediately (ie non-blocked and untraced) and
533 that is ignored (either explicitly or by default). */
535 if (ignored_signal(sig
, t
))
538 /* Support queueing exactly one non-rt signal, so that we
539 can get more detailed information about the cause of
541 if (sig
< SIGRTMIN
&& sigismember(&t
->pending
.signal
, sig
))
544 ret
= deliver_signal(sig
, info
, t
);
546 spin_unlock_irqrestore(&t
->sigmask_lock
, flags
);
547 if ((t
->state
& TASK_INTERRUPTIBLE
) && signal_pending(t
))
551 printk(" %d -> %d\n", signal_pending(t
), ret
);
558 * Force a signal that the process can't ignore: if necessary
559 * we unblock the signal and change any SIG_IGN to SIG_DFL.
563 force_sig_info(int sig
, struct siginfo
*info
, struct task_struct
*t
)
565 unsigned long int flags
;
567 spin_lock_irqsave(&t
->sigmask_lock
, flags
);
568 if (t
->sig
== NULL
) {
569 spin_unlock_irqrestore(&t
->sigmask_lock
, flags
);
573 if (t
->sig
->action
[sig
-1].sa
.sa_handler
== SIG_IGN
)
574 t
->sig
->action
[sig
-1].sa
.sa_handler
= SIG_DFL
;
575 sigdelset(&t
->blocked
, sig
);
576 recalc_sigpending(t
);
577 spin_unlock_irqrestore(&t
->sigmask_lock
, flags
);
579 return send_sig_info(sig
, info
, t
);
583 * kill_pg_info() sends a signal to a process group: this is what the tty
584 * control characters do (^C, ^Z etc)
588 kill_pg_info(int sig
, struct siginfo
*info
, pid_t pgrp
)
590 int retval
= -EINVAL
;
592 struct task_struct
*p
;
595 read_lock(&tasklist_lock
);
597 if (p
->pgrp
== pgrp
) {
598 int err
= send_sig_info(sig
, info
, p
);
603 read_unlock(&tasklist_lock
);
609 * kill_sl_info() sends a signal to the session leader: this is used
610 * to send SIGHUP to the controlling process of a terminal when
611 * the connection is lost.
615 kill_sl_info(int sig
, struct siginfo
*info
, pid_t sess
)
617 int retval
= -EINVAL
;
619 struct task_struct
*p
;
622 read_lock(&tasklist_lock
);
624 if (p
->leader
&& p
->session
== sess
) {
625 int err
= send_sig_info(sig
, info
, p
);
630 read_unlock(&tasklist_lock
);
636 kill_proc_info(int sig
, struct siginfo
*info
, pid_t pid
)
639 struct task_struct
*p
;
641 read_lock(&tasklist_lock
);
642 p
= find_task_by_pid(pid
);
645 error
= send_sig_info(sig
, info
, p
);
646 read_unlock(&tasklist_lock
);
652 * kill_something_info() interprets pid in interesting ways just like kill(2).
654 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
655 * is probably wrong. Should make it like BSD or SYSV.
658 static int kill_something_info(int sig
, struct siginfo
*info
, int pid
)
661 return kill_pg_info(sig
, info
, current
->pgrp
);
662 } else if (pid
== -1) {
663 int retval
= 0, count
= 0;
664 struct task_struct
* p
;
666 read_lock(&tasklist_lock
);
668 if (p
->pid
> 1 && p
!= current
) {
669 int err
= send_sig_info(sig
, info
, p
);
675 read_unlock(&tasklist_lock
);
676 return count
? retval
: -ESRCH
;
677 } else if (pid
< 0) {
678 return kill_pg_info(sig
, info
, -pid
);
680 return kill_proc_info(sig
, info
, pid
);
/*
 * These are for backward compatibility with the rest of the kernel source.
 * The (void*)(long)(priv != 0) cookie encodes "sent by kernel" (1) vs
 * "sent by user" (0) in place of a real siginfo pointer.
 */
int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, (void*)(long)(priv != 0), p);
}
/* Backward-compatible wrapper: force a kernel-originated signal on p. */
void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, (void*)1L, p);
}
701 kill_pg(pid_t pgrp
, int sig
, int priv
)
703 return kill_pg_info(sig
, (void *)(long)(priv
!= 0), pgrp
);
707 kill_sl(pid_t sess
, int sig
, int priv
)
709 return kill_sl_info(sig
, (void *)(long)(priv
!= 0), sess
);
713 kill_proc(pid_t pid
, int sig
, int priv
)
715 return kill_proc_info(sig
, (void *)(long)(priv
!= 0), pid
);
719 * Joy. Or not. Pthread wants us to wake up every thread
720 * in our parent group.
722 static void wake_up_parent(struct task_struct
*parent
)
724 struct task_struct
*tsk
= parent
;
727 wake_up_interruptible(&tsk
->wait_chldexit
);
728 tsk
= next_thread(tsk
);
729 } while (tsk
!= parent
);
733 * Let a parent know about a status change of a child.
736 void do_notify_parent(struct task_struct
*tsk
, int sig
)
743 info
.si_pid
= tsk
->pid
;
744 info
.si_uid
= tsk
->uid
;
746 /* FIXME: find out whether or not this is supposed to be c*time. */
747 info
.si_utime
= tsk
->times
.tms_utime
;
748 info
.si_stime
= tsk
->times
.tms_stime
;
750 status
= tsk
->exit_code
& 0x7f;
751 why
= SI_KERNEL
; /* shouldn't happen */
752 switch (tsk
->state
) {
754 if (tsk
->exit_code
& 0x80)
756 else if (tsk
->exit_code
& 0x7f)
760 status
= tsk
->exit_code
>> 8;
764 /* FIXME -- can we deduce CLD_TRAPPED or CLD_CONTINUED? */
765 if (tsk
->ptrace
& PT_PTRACED
)
772 printk(KERN_DEBUG
"eh? notify_parent with state %ld?\n",
777 info
.si_status
= status
;
779 send_sig_info(sig
, &info
, tsk
->p_pptr
);
780 wake_up_parent(tsk
->p_pptr
);
785 * We need the tasklist lock because it's the only
786 * thing that protects out "parent" pointer.
788 * exit.c calls "do_notify_parent()" directly, because
789 * it already has the tasklist lock.
792 notify_parent(struct task_struct
*tsk
, int sig
)
794 read_lock(&tasklist_lock
);
795 do_notify_parent(tsk
, sig
);
796 read_unlock(&tasklist_lock
);
/* Symbols exported to loadable modules. */
EXPORT_SYMBOL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(force_sig_info);
EXPORT_SYMBOL(kill_pg);
EXPORT_SYMBOL(kill_pg_info);
EXPORT_SYMBOL(kill_proc);
EXPORT_SYMBOL(kill_proc_info);
EXPORT_SYMBOL(kill_sl);
EXPORT_SYMBOL(kill_sl_info);
EXPORT_SYMBOL(notify_parent);
EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
EXPORT_SYMBOL(block_all_signals);
EXPORT_SYMBOL(unblock_all_signals);
818 * System call entry points.
822 * We don't need to get the kernel lock - this is all local to this
823 * particular thread.. (and that's good, because this is _heavily_
824 * used by various programs)
828 sys_rt_sigprocmask(int how
, sigset_t
*set
, sigset_t
*oset
, size_t sigsetsize
)
831 sigset_t old_set
, new_set
;
833 /* XXX: Don't preclude handling different sized sigset_t's. */
834 if (sigsetsize
!= sizeof(sigset_t
))
839 if (copy_from_user(&new_set
, set
, sizeof(*set
)))
841 sigdelsetmask(&new_set
, sigmask(SIGKILL
)|sigmask(SIGSTOP
));
843 spin_lock_irq(¤t
->sigmask_lock
);
844 old_set
= current
->blocked
;
852 sigorsets(&new_set
, &old_set
, &new_set
);
855 signandsets(&new_set
, &old_set
, &new_set
);
861 current
->blocked
= new_set
;
862 recalc_sigpending(current
);
863 spin_unlock_irq(¤t
->sigmask_lock
);
869 spin_lock_irq(¤t
->sigmask_lock
);
870 old_set
= current
->blocked
;
871 spin_unlock_irq(¤t
->sigmask_lock
);
875 if (copy_to_user(oset
, &old_set
, sizeof(*oset
)))
883 long do_sigpending(void *set
, unsigned long sigsetsize
)
885 long error
= -EINVAL
;
888 if (sigsetsize
> sizeof(sigset_t
))
891 spin_lock_irq(¤t
->sigmask_lock
);
892 sigandsets(&pending
, ¤t
->blocked
, ¤t
->pending
.signal
);
893 spin_unlock_irq(¤t
->sigmask_lock
);
896 if (!copy_to_user(set
, &pending
, sigsetsize
))
903 sys_rt_sigpending(sigset_t
*set
, size_t sigsetsize
)
905 return do_sigpending(set
, sigsetsize
);
909 sys_rt_sigtimedwait(const sigset_t
*uthese
, siginfo_t
*uinfo
,
910 const struct timespec
*uts
, size_t sigsetsize
)
918 /* XXX: Don't preclude handling different sized sigset_t's. */
919 if (sigsetsize
!= sizeof(sigset_t
))
922 if (copy_from_user(&these
, uthese
, sizeof(these
)))
926 * Invert the set of allowed signals to get those we
929 sigdelsetmask(&these
, sigmask(SIGKILL
)|sigmask(SIGSTOP
));
933 if (copy_from_user(&ts
, uts
, sizeof(ts
)))
935 if (ts
.tv_nsec
>= 1000000000L || ts
.tv_nsec
< 0
940 spin_lock_irq(¤t
->sigmask_lock
);
941 sig
= dequeue_signal(&these
, &info
);
943 timeout
= MAX_SCHEDULE_TIMEOUT
;
945 timeout
= (timespec_to_jiffies(&ts
)
946 + (ts
.tv_sec
|| ts
.tv_nsec
));
949 /* None ready -- temporarily unblock those we're
950 * interested while we are sleeping in so that we'll
951 * be awakened when they arrive. */
952 sigset_t oldblocked
= current
->blocked
;
953 sigandsets(¤t
->blocked
, ¤t
->blocked
, &these
);
954 recalc_sigpending(current
);
955 spin_unlock_irq(¤t
->sigmask_lock
);
957 current
->state
= TASK_INTERRUPTIBLE
;
958 timeout
= schedule_timeout(timeout
);
960 spin_lock_irq(¤t
->sigmask_lock
);
961 sig
= dequeue_signal(&these
, &info
);
962 current
->blocked
= oldblocked
;
963 recalc_sigpending(current
);
966 spin_unlock_irq(¤t
->sigmask_lock
);
971 if (copy_siginfo_to_user(uinfo
, &info
))
984 sys_kill(int pid
, int sig
)
990 info
.si_code
= SI_USER
;
991 info
.si_pid
= current
->pid
;
992 info
.si_uid
= current
->uid
;
994 return kill_something_info(sig
, &info
, pid
);
998 sys_rt_sigqueueinfo(int pid
, int sig
, siginfo_t
*uinfo
)
1002 if (copy_from_user(&info
, uinfo
, sizeof(siginfo_t
)))
1005 /* Not even root can pretend to send signals from the kernel.
1006 Nor can they impersonate a kill(), which adds source info. */
1007 if (info
.si_code
>= 0)
1009 info
.si_signo
= sig
;
1011 /* POSIX.1b doesn't mention process groups. */
1012 return kill_proc_info(sig
, &info
, pid
);
1016 do_sigaction(int sig
, const struct k_sigaction
*act
, struct k_sigaction
*oact
)
1018 struct k_sigaction
*k
;
1020 if (sig
< 1 || sig
> _NSIG
||
1021 (act
&& (sig
== SIGKILL
|| sig
== SIGSTOP
)))
1024 k
= ¤t
->sig
->action
[sig
-1];
1026 spin_lock(¤t
->sig
->siglock
);
1033 sigdelsetmask(&k
->sa
.sa_mask
, sigmask(SIGKILL
) | sigmask(SIGSTOP
));
1037 * "Setting a signal action to SIG_IGN for a signal that is
1038 * pending shall cause the pending signal to be discarded,
1039 * whether or not it is blocked."
1041 * "Setting a signal action to SIG_DFL for a signal that is
1042 * pending and whose default action is to ignore the signal
1043 * (for example, SIGCHLD), shall cause the pending signal to
1044 * be discarded, whether or not it is blocked"
1046 * Note the silly behaviour of SIGCHLD: SIG_IGN means that the
1047 * signal isn't actually ignored, but does automatic child
1048 * reaping, while SIG_DFL is explicitly said by POSIX to force
1049 * the signal to be ignored.
1052 if (k
->sa
.sa_handler
== SIG_IGN
1053 || (k
->sa
.sa_handler
== SIG_DFL
1054 && (sig
== SIGCONT
||
1056 sig
== SIGWINCH
))) {
1057 spin_lock_irq(¤t
->sigmask_lock
);
1058 if (rm_sig_from_queue(sig
, current
))
1059 recalc_sigpending(current
);
1060 spin_unlock_irq(¤t
->sigmask_lock
);
1064 spin_unlock(¤t
->sig
->siglock
);
1069 do_sigaltstack (const stack_t
*uss
, stack_t
*uoss
, unsigned long sp
)
1075 oss
.ss_sp
= (void *) current
->sas_ss_sp
;
1076 oss
.ss_size
= current
->sas_ss_size
;
1077 oss
.ss_flags
= sas_ss_flags(sp
);
1086 if (verify_area(VERIFY_READ
, uss
, sizeof(*uss
))
1087 || __get_user(ss_sp
, &uss
->ss_sp
)
1088 || __get_user(ss_flags
, &uss
->ss_flags
)
1089 || __get_user(ss_size
, &uss
->ss_size
))
1093 if (on_sig_stack (sp
))
1099 * Note - this code used to test ss_flags incorrectly
1100 * old code may have been written using ss_flags==0
1101 * to mean ss_flags==SS_ONSTACK (as this was the only
1102 * way that worked) - this fix preserves that older
1105 if (ss_flags
!= SS_DISABLE
&& ss_flags
!= SS_ONSTACK
&& ss_flags
!= 0)
1108 if (ss_flags
== SS_DISABLE
) {
1113 if (ss_size
< MINSIGSTKSZ
)
1117 current
->sas_ss_sp
= (unsigned long) ss_sp
;
1118 current
->sas_ss_size
= ss_size
;
1123 if (copy_to_user(uoss
, &oss
, sizeof(oss
)))
1133 sys_sigpending(old_sigset_t
*set
)
1135 return do_sigpending(set
, sizeof(*set
));
1138 #if !defined(__alpha__)
1139 /* Alpha has its own versions with special arguments. */
1142 sys_sigprocmask(int how
, old_sigset_t
*set
, old_sigset_t
*oset
)
1145 old_sigset_t old_set
, new_set
;
1149 if (copy_from_user(&new_set
, set
, sizeof(*set
)))
1151 new_set
&= ~(sigmask(SIGKILL
)|sigmask(SIGSTOP
));
1153 spin_lock_irq(¤t
->sigmask_lock
);
1154 old_set
= current
->blocked
.sig
[0];
1162 sigaddsetmask(¤t
->blocked
, new_set
);
1165 sigdelsetmask(¤t
->blocked
, new_set
);
1168 current
->blocked
.sig
[0] = new_set
;
1172 recalc_sigpending(current
);
1173 spin_unlock_irq(¤t
->sigmask_lock
);
1179 old_set
= current
->blocked
.sig
[0];
1182 if (copy_to_user(oset
, &old_set
, sizeof(*oset
)))
1192 sys_rt_sigaction(int sig
, const struct sigaction
*act
, struct sigaction
*oact
,
1195 struct k_sigaction new_sa
, old_sa
;
1198 /* XXX: Don't preclude handling different sized sigset_t's. */
1199 if (sigsetsize
!= sizeof(sigset_t
))
1203 if (copy_from_user(&new_sa
.sa
, act
, sizeof(new_sa
.sa
)))
1207 ret
= do_sigaction(sig
, act
? &new_sa
: NULL
, oact
? &old_sa
: NULL
);
1210 if (copy_to_user(oact
, &old_sa
.sa
, sizeof(old_sa
.sa
)))
1216 #endif /* __sparc__ */
1219 #if !defined(__alpha__) && !defined(__ia64__)
1221 * For backwards compatibility. Functionality superseded by sigprocmask.
1227 return current
->blocked
.sig
[0];
1231 sys_ssetmask(int newmask
)
1235 spin_lock_irq(¤t
->sigmask_lock
);
1236 old
= current
->blocked
.sig
[0];
1238 siginitset(¤t
->blocked
, newmask
& ~(sigmask(SIGKILL
)|
1240 recalc_sigpending(current
);
1241 spin_unlock_irq(¤t
->sigmask_lock
);
1245 #endif /* !defined(__alpha__) */
1247 #if !defined(__alpha__) && !defined(__ia64__) && !defined(__mips__)
1249 * For backwards compatibility. Functionality superseded by sigaction.
1251 asmlinkage
unsigned long
1252 sys_signal(int sig
, __sighandler_t handler
)
1254 struct k_sigaction new_sa
, old_sa
;
1257 new_sa
.sa
.sa_handler
= handler
;
1258 new_sa
.sa
.sa_flags
= SA_ONESHOT
| SA_NOMASK
;
1260 ret
= do_sigaction(sig
, &new_sa
, &old_sa
);
1262 return ret
? ret
: (unsigned long)old_sa
.sa
.sa_handler
;
1264 #endif /* !alpha && !__ia64__ && !defined(__mips__) */