2 * linux/kernel/signal.c
4 * Copyright (C) 1991, 1992 Linus Torvalds
6 * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson
9 #include <linux/slab.h>
10 #include <linux/module.h>
11 #include <linux/unistd.h>
12 #include <linux/smp_lock.h>
13 #include <linux/init.h>
14 #include <linux/sched.h>
16 #include <asm/uaccess.h>
19 * SLAB caches for signal bits.
25 #define SIG_SLAB_DEBUG (SLAB_DEBUG_FREE | SLAB_RED_ZONE /* | SLAB_POISON */)
27 #define SIG_SLAB_DEBUG 0
/* SLAB cache from which queued real-time signal entries (struct
 * signal_queue) are allocated; created in signals_init(). */
30 static kmem_cache_t
*signal_queue_cachep
;
/* Current count of queued signal entries system-wide (atomic because
 * signals are queued/freed from multiple contexts). */
32 atomic_t nr_queued_signals
;
/* Upper bound on queued signals before allocation is refused; see the
 * overflow handling in send_sig_info(). */
33 int max_queued_signals
= 1024;
/*
 * Boot-time initialization: create the SLAB cache for struct
 * signal_queue entries.
 * NOTE(review): this extract elides lines (embedded original line
 * numbers jump 35->38); the assignment of the result into
 * signal_queue_cachep and any failure check are not visible here.
 */
35 void __init
signals_init(void)
38 kmem_cache_create("signal_queue",
39 sizeof(struct signal_queue
),
40 __alignof__(struct signal_queue
),
41 SIG_SLAB_DEBUG
, NULL
, NULL
);
46 * Flush all pending signals for a task.
/*
 * Discard all pending signals for task t: clear the pending set and
 * free every queued signal entry.
 * NOTE(review): lines are elided in this extract (50->52->55->58->62);
 * the loop header that walks the queue (using q/n) and the reset of
 * t->sigqueue itself are not visible.
 */
50 flush_signals(struct task_struct
*t
)
52 struct signal_queue
*q
, *n
;
/* Empty the pending-signal bitmask. */
55 sigemptyset(&t
->signal
);
/* Reset the queue tail pointer back to the (now empty) list head. */
58 t
->sigqueue_tail
= &t
->sigqueue
;
/* Free each queued entry and keep the global count in sync. */
62 kmem_cache_free(signal_queue_cachep
, q
);
63 atomic_dec(&nr_queued_signals
);
69 * Flush all handlers for a task.
/*
 * Reset every caught signal handler of task t to SIG_DFL and clear each
 * action's blocked-signal mask; handlers set to SIG_IGN are left alone.
 * Presumably used across exec — TODO confirm against callers (not
 * visible in this extract).
 */
73 flush_signal_handlers(struct task_struct
*t
)
76 struct k_sigaction
*ka
= &t
->sig
->action
[0];
/* Walk all _NSIG actions; ka advances on an elided line of the loop. */
77 for (i
= _NSIG
; i
!= 0 ; i
--) {
/* SIG_IGN is deliberately preserved; everything else reverts to default. */
78 if (ka
->sa
.sa_handler
!= SIG_IGN
)
79 ka
->sa
.sa_handler
= SIG_DFL
;
81 sigemptyset(&ka
->sa
.sa_mask
);
87 * Dequeue a signal and return the element to the caller, which is
88 * expected to free it.
90 * All callers of must be holding current->sigmask_lock.
/*
 * Dequeue one pending signal not blocked by *mask and fill *info for
 * the caller (which owns freeing any resources). Callers must hold
 * current->sigmask_lock (per the file's own comment above this block).
 *
 * FIX(review): repaired encoding corruption — the mojibake sequence
 * "&curren;t" (rendered ¤t) was a mangled "&current"; restored at the
 * three occurrences below. No other token changed.
 * NOTE(review): many lines are elided in this extract (original line
 * numbers jump); case labels, breaks and the rt-queue copy path are
 * partially missing.
 */
94 dequeue_signal(sigset_t
*mask
, siginfo_t
*info
)
96 unsigned long i
, *s
, *m
, x
;
/* Debug trace of who is dequeuing and whether a signal is pending. */
100 printk("SIG dequeue (%s:%d): %d ", current
->comm
, current
->pid
,
101 signal_pending(current
));
104 /* Find the first desired signal that is pending. */
105 s
= current
->signal
.sig
;
/* Word-at-a-time scan of pending &~ blocked, sized by _NSIG_WORDS. */
107 switch (_NSIG_WORDS
) {
109 for (i
= 0; i
< _NSIG_WORDS
; ++i
, ++s
, ++m
)
110 if ((x
= *s
&~ *m
) != 0) {
/* ffz(~x) finds the lowest set bit; +1 converts to signal number. */
111 sig
= ffz(~x
) + i
*_NSIG_BPW
+ 1;
116 case 2: if ((x
= s
[0] &~ m
[0]) != 0)
118 else if ((x
= s
[1] &~ m
[1]) != 0)
125 case 1: if ((x
= *s
&~ *m
) != 0)
133 /* Collect the siginfo appropriate to this signal. */
134 if (sig
< SIGRTMIN
) {
135 /* XXX: As an extension, support queueing exactly
136 one non-rt signal if SA_SIGINFO is set, so that
137 we can get more detailed information about the
138 cause of the signal. */
139 /* Deciding not to init these couple of fields is
140 more expensive that just initializing them. */
141 info
->si_signo
= sig
;
/* Real-time path: unlink the matching entry from current->sigqueue. */
147 struct signal_queue
*q
, **pp
;
148 pp
= &current
->sigqueue
;
149 q
= current
->sigqueue
;
151 /* Find the one we're interested in ... */
152 for ( ; q
; pp
= &q
->next
, q
= q
->next
)
153 if (q
->info
.si_signo
== sig
)
/* If we removed the last entry, pull the tail pointer back. */
156 if ((*pp
= q
->next
) == NULL
)
157 current
->sigqueue_tail
= pp
;
159 kmem_cache_free(signal_queue_cachep
,q
);
160 atomic_dec(&nr_queued_signals
);
162 /* then see if this signal is still pending. */
165 if (q
->info
.si_signo
== sig
) {
172 /* Ok, it wasn't in the queue. It must have
173 been sent either by a non-rt mechanism and
174 we ran out of queue space. So zero out the
176 info
->si_signo
= sig
;
/* Clear the bit and recompute the task's sigpending flag. */
185 sigdelset(&current
->signal
, sig
);
186 recalc_sigpending(current
);
188 /* XXX: Once POSIX.1b timers are in, if si_code == SI_TIMER,
189 we need to xchg out the timer overrun values. */
191 /* XXX: Once CLONE_PID is in to join those "threads" that are
192 part of the same "process", look for signals sent to the
193 "process" as well. */
195 /* Sanity check... */
196 if (mask
== &current
->blocked
&& signal_pending(current
)) {
197 printk(KERN_CRIT
"SIG: sigpending lied\n");
198 current
->sigpending
= 0;
203 printk(" %d -> %d\n", signal_pending(current
), sig
);
210 * Determine whether a signal should be posted or not.
212 * Signals with SIG_IGN can be ignored, except for the
213 * special case of a SIGCHLD.
215 * Some signals with SIG_DFL default to a non-action.
/*
 * Decide whether signal sig sent to task t would be discarded without
 * ever needing delivery: never for traced or blocked signals; SIG_IGN
 * ignores (SIGCHLD special case handled elsewhere per the comment
 * above); SIG_DFL ignores for the no-action defaults.
 * NOTE(review): the assignment of `signals` and the per-case returns
 * are on lines elided from this extract.
 */
217 static int ignored_signal(int sig
, struct task_struct
*t
)
219 struct signal_struct
*signals
;
220 struct k_sigaction
*ka
;
222 /* Don't ignore traced or blocked signals */
223 if ((t
->flags
& PF_PTRACED
) || sigismember(&t
->blocked
, sig
))
/* Look up the installed action for this signal (1-based -> 0-based). */
230 ka
= &signals
->action
[sig
-1];
231 switch ((unsigned long) ka
->sa
.sa_handler
) {
232 case (unsigned long) SIG_DFL
:
/* Default action: SIGCONT (and, presumably, the other no-op
 * defaults on elided lines) counts as ignored. */
233 if (sig
== SIGCONT
||
240 case (unsigned long) SIG_IGN
:
/*
 * Deliver signal sig with optional siginfo to task t. Performs the
 * permission check, the SIGKILL/SIGCONT vs. stop-signal interplay,
 * the ignored-signal fast path, rt-signal queueing, and finally wakes
 * the target if it is interruptibly sleeping.
 * NOTE(review): this extract elides many lines — the function's switch
 * header, returns, wake_up calls and error paths are incomplete; the
 * comments below describe only what is visible.
 */
251 send_sig_info(int sig
, struct siginfo
*info
, struct task_struct
*t
)
/* Debug trace of target task and signal number. */
257 printk("SIG queue (%s:%d): %d ", t
->comm
, t
->pid
, sig
);
/* Reject out-of-range signal numbers. */
261 if (sig
< 0 || sig
> _NSIG
)
263 /* The somewhat baroque permissions check... */
/* info==NULL / info==1 mark kernel-originated signals which bypass the
 * uid/session checks; user-originated sends need matching ids or
 * CAP_KILL, with a SIGCONT-within-session exception. */
265 if ((!info
|| ((unsigned long)info
!= 1 && SI_FROMUSER(info
)))
266 && ((sig
!= SIGCONT
) || (current
->session
!= t
->session
))
267 && (current
->euid
^ t
->suid
) && (current
->euid
^ t
->uid
)
268 && (current
->uid
^ t
->suid
) && (current
->uid
^ t
->uid
)
269 && !capable(CAP_KILL
))
272 /* The null signal is a permissions and process existance probe.
273 No signal is actually delivered. Same goes for zombies. */
/* From here on the target's signal state is manipulated under its
 * sigmask_lock with interrupts off. */
278 spin_lock_irqsave(&t
->sigmask_lock
, flags
);
280 case SIGKILL
: case SIGCONT
:
281 /* Wake up the process if stopped. */
282 if (t
->state
== TASK_STOPPED
)
/* A kill/continue cancels any pending stop signals. */
285 sigdelsetmask(&t
->signal
, (sigmask(SIGSTOP
)|sigmask(SIGTSTP
)|
286 sigmask(SIGTTOU
)|sigmask(SIGTTIN
)));
287 /* Inflict this corner case with recalculations, not mainline */
288 recalc_sigpending(t
);
291 case SIGSTOP
: case SIGTSTP
:
292 case SIGTTIN
: case SIGTTOU
:
293 /* If we're stopping again, cancel SIGCONT */
294 sigdelset(&t
->signal
, SIGCONT
);
295 /* Inflict this corner case with recalculations, not mainline */
296 recalc_sigpending(t
);
300 /* Optimize away the signal, if it's a signal that can be
301 handled immediately (ie non-blocked and untraced) and
302 that is ignored (either explicitly or by default). */
304 if (ignored_signal(sig
, t
))
307 if (sig
< SIGRTMIN
) {
308 /* Non-real-time signals are not queued. */
309 /* XXX: As an extension, support queueing exactly one
310 non-rt signal if SA_SIGINFO is set, so that we can
311 get more detailed information about the cause of
/* A non-rt signal already pending is simply coalesced. */
313 if (sigismember(&t
->signal
, sig
))
316 /* Real-time signals must be queued if sent by sigqueue, or
317 some other real-time mechanism. It is implementation
318 defined whether kill() does so. We attempt to do so, on
319 the principle of least surprise, but since kill is not
320 allowed to fail with EAGAIN when low on memory we just
321 make sure at least one signal gets delivered and don't
322 pass on the info struct. */
324 struct signal_queue
*q
= 0;
/* Only allocate a queue entry while under the global cap. */
326 if (atomic_read(&nr_queued_signals
) < max_queued_signals
) {
/* GFP_ATOMIC: we hold a spinlock with interrupts disabled. */
327 q
= (struct signal_queue
*)
328 kmem_cache_alloc(signal_queue_cachep
, GFP_ATOMIC
);
332 atomic_inc(&nr_queued_signals
);
/* Append the new entry at the tail of t's signal queue. */
334 *t
->sigqueue_tail
= q
;
335 t
->sigqueue_tail
= &q
->next
;
/* info encodes its origin: the sentinel values 0/1 mean user/kernel
 * send without a caller-provided siginfo. */
336 switch ((unsigned long) info
) {
338 q
->info
.si_signo
= sig
;
339 q
->info
.si_errno
= 0;
340 q
->info
.si_code
= SI_USER
;
341 q
->info
.si_pid
= current
->pid
;
342 q
->info
.si_uid
= current
->uid
;
345 q
->info
.si_signo
= sig
;
346 q
->info
.si_errno
= 0;
347 q
->info
.si_code
= SI_KERNEL
;
356 /* Queue overflow, we have to abort. */
/* Mark the signal pending; if it is deliverable (not blocked), make
 * sure a remotely-running target notices. */
362 sigaddset(&t
->signal
, sig
);
363 if (!sigismember(&t
->blocked
, sig
)) {
367 * If the task is running on a different CPU
368 * force a reschedule on the other CPU - note that
369 * the code below is a tad loose and might occasionally
370 * kick the wrong CPU if we catch the process in the
371 * process of changing - but no harm is done by that
372 * other than doing an extra (lightweight) IPI interrupt.
374 * note that we rely on the previous spin_lock to
375 * lock interrupts for us! No need to set need_resched
376 * since signal event passing goes through ->blocked.
378 spin_lock(&runqueue_lock
);
379 if (t
->has_cpu
&& t
->processor
!= smp_processor_id())
380 smp_send_reschedule(t
->processor
);
381 spin_unlock(&runqueue_lock
);
386 spin_unlock_irqrestore(&t
->sigmask_lock
, flags
);
/* Wake an interruptibly-sleeping target so it can take the signal. */
387 if ((t
->state
& TASK_INTERRUPTIBLE
) && signal_pending(t
))
392 printk(" %d -> %d\n", signal_pending(t
), ret
);
399 * Force a signal that the process can't ignore: if necessary
400 * we unblock the signal and change any SIG_IGN to SIG_DFL.
/*
 * Deliver sig to t in a way it cannot ignore: flip a SIG_IGN handler
 * back to SIG_DFL and unblock the signal before handing off to
 * send_sig_info(). Bails out early if t has no signal table
 * (presumably a task in the middle of release — TODO confirm).
 */
404 force_sig_info(int sig
, struct siginfo
*info
, struct task_struct
*t
)
406 unsigned long int flags
;
408 spin_lock_irqsave(&t
->sigmask_lock
, flags
);
409 if (t
->sig
== NULL
) {
410 spin_unlock_irqrestore(&t
->sigmask_lock
, flags
);
/* Ignored -> default, so the signal will actually be acted on. */
414 if (t
->sig
->action
[sig
-1].sa
.sa_handler
== SIG_IGN
)
415 t
->sig
->action
[sig
-1].sa
.sa_handler
= SIG_DFL
;
/* Force the signal deliverable even if the target had blocked it. */
416 sigdelset(&t
->blocked
, sig
);
417 recalc_sigpending(t
);
418 spin_unlock_irqrestore(&t
->sigmask_lock
, flags
);
420 return send_sig_info(sig
, info
, t
);
424 * kill_pg() sends a signal to a process group: this is what the tty
425 * control characters do (^C, ^Z etc)
/*
 * Send sig to every member of process group pgrp under tasklist_lock.
 * Starts from -EINVAL; per-task send results feed retval on lines
 * elided from this extract (the for_each_task loop header is missing).
 */
429 kill_pg_info(int sig
, struct siginfo
*info
, pid_t pgrp
)
431 int retval
= -EINVAL
;
433 struct task_struct
*p
;
437 read_lock(&tasklist_lock
);
439 if (p
->pgrp
== pgrp
) {
440 int err
= send_sig_info(sig
, info
, p
);
447 read_unlock(&tasklist_lock
);
455 * kill_sl() sends a signal to the session leader: this is used
456 * to send SIGHUP to the controlling process of a terminal when
457 * the connection is lost.
/*
 * Send sig to the session leader of session sess (used, per the file
 * comment above, to SIGHUP a terminal's controlling process).
 * NOTE(review): the task-list iteration header and the retval
 * aggregation are on lines elided from this extract.
 */
461 kill_sl_info(int sig
, struct siginfo
*info
, pid_t sess
)
463 int retval
= -EINVAL
;
465 struct task_struct
*p
;
469 read_lock(&tasklist_lock
);
/* Only the leader of the matching session is targeted. */
471 if (p
->leader
&& p
->session
== sess
) {
472 int err
= send_sig_info(sig
, info
, p
);
479 read_unlock(&tasklist_lock
);
/*
 * Send sig to the single task with the given pid. Lookup and send both
 * happen under tasklist_lock; the not-found error path (presumably
 * -ESRCH) is on a line elided from this extract.
 */
487 kill_proc_info(int sig
, struct siginfo
*info
, pid_t pid
)
490 struct task_struct
*p
;
492 read_lock(&tasklist_lock
);
493 p
= find_task_by_pid(pid
);
496 error
= send_sig_info(sig
, info
, p
);
497 read_unlock(&tasklist_lock
);
502 * kill_something() interprets pid in interesting ways just like kill(2).
504 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
505 * is probably wrong. Should make it like BSD or SYSV.
/*
 * kill(2)-style pid dispatch: pid==0 -> caller's process group;
 * pid==-1 -> every task except init and self; pid<0 -> process group
 * -pid; otherwise a single process. The pid==0 branch condition is on
 * a line elided from this extract.
 */
509 kill_something_info(int sig
, struct siginfo
*info
, int pid
)
512 return kill_pg_info(sig
, info
, current
->pgrp
);
513 } else if (pid
== -1) {
514 int retval
= 0, count
= 0;
515 struct task_struct
* p
;
517 read_lock(&tasklist_lock
);
/* Skip init (pid 1) and the sender itself. */
519 if (p
->pid
> 1 && p
!= current
) {
520 int err
= send_sig_info(sig
, info
, p
);
526 read_unlock(&tasklist_lock
);
/* No eligible target at all means -ESRCH. */
527 return count
? retval
: -ESRCH
;
528 } else if (pid
< 0) {
529 return kill_pg_info(sig
, info
, -pid
);
531 return kill_proc_info(sig
, info
, pid
);
536 * These are for backward compatibility with the rest of the kernel source.
/* Back-compat wrapper: priv!=0 is encoded as the info sentinel 1
 * (kernel-originated), 0 as NULL (user-originated). */
540 send_sig(int sig
, struct task_struct
*p
, int priv
)
542 return send_sig_info(sig
, (void*)(long)(priv
!= 0), p
);
/* Back-compat wrapper: always uses the kernel-origin info sentinel 1. */
546 force_sig(int sig
, struct task_struct
*p
)
548 force_sig_info(sig
, (void*)1L, p
);
/* Back-compat wrapper around kill_pg_info(); priv selects the
 * kernel/user info sentinel as in send_sig(). */
552 kill_pg(pid_t pgrp
, int sig
, int priv
)
554 return kill_pg_info(sig
, (void *)(long)(priv
!= 0), pgrp
);
/* Back-compat wrapper around kill_sl_info(); priv selects the
 * kernel/user info sentinel as in send_sig(). */
558 kill_sl(pid_t sess
, int sig
, int priv
)
560 return kill_sl_info(sig
, (void *)(long)(priv
!= 0), sess
);
/* Back-compat wrapper around kill_proc_info(); priv selects the
 * kernel/user info sentinel as in send_sig(). */
564 kill_proc(pid_t pid
, int sig
, int priv
)
566 return kill_proc_info(sig
, (void *)(long)(priv
!= 0), pid
);
570 * Let a parent know about a status change of a child.
/*
 * Notify tsk's parent of a status change: build a siginfo describing
 * why (derived from tsk->state / exit_code), send sig to the parent,
 * and wake any wait()-er. The siginfo declaration, the si_code
 * assignments per branch, and the switch cases are on lines elided
 * from this extract.
 */
574 notify_parent(struct task_struct
*tsk
, int sig
)
581 info
.si_pid
= tsk
->pid
;
583 /* FIXME: find out whether or not this is supposed to be c*time. */
584 info
.si_utime
= tsk
->times
.tms_utime
;
585 info
.si_stime
= tsk
->times
.tms_stime
;
587 why
= SI_KERNEL
; /* shouldn't happen */
588 switch (tsk
->state
) {
/* exit_code bit 0x80 distinguishes core dump; low 7 bits the signal. */
590 if (tsk
->exit_code
& 0x80)
592 else if (tsk
->exit_code
& 0x7f)
598 /* FIXME -- can we deduce CLD_TRAPPED or CLD_CONTINUED? */
603 printk(KERN_DEBUG
"eh? notify_parent with state %ld?\n",
609 send_sig_info(sig
, &info
, tsk
->p_pptr
);
/* Wake a parent blocked in wait4()/waitpid(). */
610 wake_up_interruptible(&tsk
->p_pptr
->wait_chldexit
);
/* Export the signal API to loadable kernel modules. */
613 EXPORT_SYMBOL(dequeue_signal
);
614 EXPORT_SYMBOL(flush_signals
);
615 EXPORT_SYMBOL(force_sig
);
616 EXPORT_SYMBOL(force_sig_info
);
617 EXPORT_SYMBOL(kill_pg
);
618 EXPORT_SYMBOL(kill_pg_info
);
619 EXPORT_SYMBOL(kill_proc
);
620 EXPORT_SYMBOL(kill_proc_info
);
621 EXPORT_SYMBOL(kill_sl
);
622 EXPORT_SYMBOL(kill_sl_info
);
623 EXPORT_SYMBOL(notify_parent
);
624 EXPORT_SYMBOL(recalc_sigpending
);
625 EXPORT_SYMBOL(send_sig
);
626 EXPORT_SYMBOL(send_sig_info
);
630 * System call entry points.
634 * We don't need to get the kernel lock - this is all local to this
635 * particular thread.. (and that's good, because this is _heavily_
636 * used by various programs)
/*
 * rt_sigprocmask(2): change current->blocked per `how`, optionally
 * returning the previous mask in *oset. SIGKILL/SIGSTOP are stripped
 * from the new mask so they can never be blocked.
 *
 * FIX(review): repaired encoding corruption — "&curren;t" (rendered
 * ¤t) restored to "&current" in the four spin_lock/spin_unlock calls.
 * No other token changed.
 * NOTE(review): the switch-on-how header, SIG_SETMASK branch and error
 * returns are on lines elided from this extract.
 */
640 sys_rt_sigprocmask(int how
, sigset_t
*set
, sigset_t
*oset
, size_t sigsetsize
)
643 sigset_t old_set
, new_set
;
645 /* XXX: Don't preclude handling different sized sigset_t's. */
646 if (sigsetsize
!= sizeof(sigset_t
))
651 if (copy_from_user(&new_set
, set
, sizeof(*set
)))
/* Never allow SIGKILL or SIGSTOP to be blocked. */
653 sigdelsetmask(&new_set
, sigmask(SIGKILL
)|sigmask(SIGSTOP
));
655 spin_lock_irq(&current
->sigmask_lock
);
656 old_set
= current
->blocked
;
/* SIG_BLOCK: union of old and new. */
664 sigorsets(&new_set
, &old_set
, &new_set
);
/* SIG_UNBLOCK: old minus new. */
667 signandsets(&new_set
, &old_set
, &new_set
);
673 current
->blocked
= new_set
;
674 recalc_sigpending(current
);
675 spin_unlock_irq(&current
->sigmask_lock
);
/* set==NULL path: just snapshot the current mask for oset. */
681 spin_lock_irq(&current
->sigmask_lock
);
682 old_set
= current
->blocked
;
683 spin_unlock_irq(&current
->sigmask_lock
);
687 if (copy_to_user(oset
, &old_set
, sizeof(*oset
)))
/*
 * rt_sigpending(2): report the set of signals both pending and blocked
 * for the calling task, copied out to *set.
 *
 * FIX(review): repaired encoding corruption — "&curren;t" (rendered
 * ¤t) restored to "&current" in the four occurrences below. No other
 * token changed.
 * NOTE(review): the `pending` declaration and the return statements
 * are on lines elided from this extract.
 */
696 sys_rt_sigpending(sigset_t
*set
, size_t sigsetsize
)
701 /* XXX: Don't preclude handling different sized sigset_t's. */
702 if (sigsetsize
!= sizeof(sigset_t
))
705 spin_lock_irq(&current
->sigmask_lock
);
/* pending = blocked & raised, computed under the sigmask lock. */
706 sigandsets(&pending
, &current
->blocked
, &current
->signal
);
707 spin_unlock_irq(&current
->sigmask_lock
);
710 if (!copy_to_user(set
, &pending
, sizeof(*set
)))
/*
 * rt_sigtimedwait(2): wait (with optional timeout *uts) for any signal
 * in *uthese; on success the dequeued siginfo is copied to *uinfo.
 * Strategy: try a dequeue; if nothing is ready, temporarily unblock
 * the wanted signals, sleep interruptibly, then retry and restore the
 * original mask.
 *
 * FIX(review): repaired encoding corruption — "&curren;t" (rendered
 * ¤t) restored to "&current" in the six occurrences below. No other
 * token changed.
 * NOTE(review): the declarations (these, info, ts, sig, timeout), the
 * mask inversion, and the return paths are on lines elided from this
 * extract.
 */
717 sys_rt_sigtimedwait(const sigset_t
*uthese
, siginfo_t
*uinfo
,
718 const struct timespec
*uts
, size_t sigsetsize
)
726 /* XXX: Don't preclude handling different sized sigset_t's. */
727 if (sigsetsize
!= sizeof(sigset_t
))
730 if (copy_from_user(&these
, uthese
, sizeof(these
)))
733 /* Invert the set of allowed signals to get those we
/* Validate the user-supplied timeout before using it. */
739 if (copy_from_user(&ts
, uts
, sizeof(ts
)))
741 if (ts
.tv_nsec
>= 1000000000L || ts
.tv_nsec
< 0
746 spin_lock_irq(&current
->sigmask_lock
);
747 sig
= dequeue_signal(&these
, &info
);
749 /* None ready -- temporarily unblock those we're interested
750 in so that we'll be awakened when they arrive. */
751 sigset_t oldblocked
= current
->blocked
;
752 sigandsets(&current
->blocked
, &current
->blocked
, &these
);
753 recalc_sigpending(current
);
754 spin_unlock_irq(&current
->sigmask_lock
);
/* No timeout supplied: sleep indefinitely. */
756 timeout
= MAX_SCHEDULE_TIMEOUT
;
/* +1 jiffy (the || term) rounds a nonzero timeout up so we never
 * sleep for less than requested. */
758 timeout
= (timespec_to_jiffies(&ts
)
759 + (ts
.tv_sec
|| ts
.tv_nsec
));
761 current
->state
= TASK_INTERRUPTIBLE
;
762 timeout
= schedule_timeout(timeout
);
/* Retry the dequeue, then restore the caller's blocked mask. */
764 spin_lock_irq(&current
->sigmask_lock
);
765 sig
= dequeue_signal(&these
, &info
);
766 current
->blocked
= oldblocked
;
767 recalc_sigpending(current
);
769 spin_unlock_irq(&current
->sigmask_lock
);
774 if (copy_to_user(uinfo
, &info
, sizeof(siginfo_t
)))
/*
 * kill(2): build a SI_USER siginfo identifying the sender and dispatch
 * via kill_something_info() (which interprets pid<=0 specially).
 * NOTE(review): the siginfo declaration and si_signo/si_errno
 * assignments are on lines elided from this extract.
 */
787 sys_kill(int pid
, int sig
)
791 memset(&info
, 0, sizeof(info
));
795 info
.si_code
= SI_USER
;
796 info
.si_pid
= current
->pid
;
797 info
.si_uid
= current
->uid
;
799 return kill_something_info(sig
, &info
, pid
);
/*
 * rt_sigqueueinfo(2): send sig with a caller-supplied siginfo to pid.
 * Rejects si_code >= 0 so userspace cannot forge kernel- or
 * kill()-originated info (those codes are reserved for the kernel).
 * NOTE(review): the siginfo declaration and the -EPERM return are on
 * lines elided from this extract.
 */
803 sys_rt_sigqueueinfo(int pid
, int sig
, siginfo_t
*uinfo
)
807 if (copy_from_user(&info
, uinfo
, sizeof(siginfo_t
)))
810 /* Not even root can pretend to send signals from the kernel.
811 Nor can they impersonate a kill(), which adds source info. */
812 if (info
.si_code
>= 0)
816 /* POSIX.1b doesn't mention process groups. */
817 return kill_proc_info(sig
, &info
, pid
);
/*
 * Install (and/or read back) the k_sigaction for sig on the current
 * task. Rejects out-of-range signals and any attempt to change the
 * action for SIGKILL/SIGSTOP. When the new disposition ignores the
 * signal (POSIX rules quoted below), discards any matching pending
 * signal, including queued rt entries.
 *
 * FIX(review): repaired encoding corruption — "&curren;t" (rendered
 * ¤t) restored to "&current" in the six occurrences below. No other
 * token changed.
 * NOTE(review): the act-install assignment, loop headers for the
 * rt-queue walk, and the return are on lines elided from this extract.
 */
821 do_sigaction(int sig
, const struct k_sigaction
*act
, struct k_sigaction
*oact
)
823 struct k_sigaction
*k
;
825 if (sig
< 1 || sig
> _NSIG
||
826 (act
&& (sig
== SIGKILL
|| sig
== SIGSTOP
)))
829 spin_lock_irq(&current
->sigmask_lock
);
830 k
= &current
->sig
->action
[sig
-1];
832 if (oact
) *oact
= *k
;
/* Keep SIGKILL/SIGSTOP out of the action's sa_mask. */
836 sigdelsetmask(&k
->sa
.sa_mask
, sigmask(SIGKILL
) | sigmask(SIGSTOP
));
840 * "Setting a signal action to SIG_IGN for a signal that is
841 * pending shall cause the pending signal to be discarded,
842 * whether or not it is blocked."
844 * "Setting a signal action to SIG_DFL for a signal that is
845 * pending and whose default action is to ignore the signal
846 * (for example, SIGCHLD), shall cause the pending signal to
847 * be discarded, whether or not it is blocked"
849 * Note the silly behaviour of SIGCHLD: SIG_IGN means that the
850 * signal isn't actually ignored, but does automatic child
851 * reaping, while SIG_DFL is explicitly said by POSIX to force
852 * the signal to be ignored.
855 if (k
->sa
.sa_handler
== SIG_IGN
856 || (k
->sa
.sa_handler
== SIG_DFL
857 && (sig
== SIGCONT
||
860 /* So dequeue any that might be pending.
861 XXX: process-wide signals? */
862 if (sig
>= SIGRTMIN
&&
863 sigismember(&current
->signal
, sig
)) {
864 struct signal_queue
*q
, **pp
;
865 pp
= &current
->sigqueue
;
866 q
= current
->sigqueue
;
/* Entries for other signals are skipped; matches are freed. */
868 if (q
->info
.si_signo
!= sig
)
872 kmem_cache_free(signal_queue_cachep
, q
);
873 atomic_dec(&nr_queued_signals
);
/* Finally drop the pending bit and recompute sigpending. */
879 sigdelset(&current
->signal
, sig
);
880 recalc_sigpending(current
);
884 spin_unlock_irq(&current
->sigmask_lock
);
/*
 * sigaltstack backend: optionally install a new alternate signal stack
 * from *uss and/or report the old one to *uoss. sp is the caller's
 * current stack pointer, used to refuse changes while already running
 * on the alternate stack.
 * NOTE(review): declarations (oss, ss_sp, ss_flags, ss_size), the
 * SS_DISABLE branch body, and all error returns are on lines elided
 * from this extract.
 */
890 do_sigaltstack (const stack_t
*uss
, stack_t
*uoss
, unsigned long sp
)
/* Snapshot the current settings before any change, for uoss. */
896 oss
.ss_sp
= (void *) current
->sas_ss_sp
;
897 oss
.ss_size
= current
->sas_ss_size
;
898 oss
.ss_flags
= sas_ss_flags(sp
);
907 if (verify_area(VERIFY_READ
, uss
, sizeof(*uss
))
908 || __get_user(ss_sp
, &uss
->ss_sp
)
909 || __get_user(ss_flags
, &uss
->ss_flags
)
910 || __get_user(ss_size
, &uss
->ss_size
))
/* Cannot change the alt stack while executing on it. */
914 if (on_sig_stack (sp
))
/* Only SS_DISABLE is a valid flag here. */
918 if (ss_flags
& ~SS_DISABLE
)
921 if (ss_flags
& SS_DISABLE
) {
/* Enforce the minimum usable stack size. */
926 if (ss_size
< MINSIGSTKSZ
)
930 current
->sas_ss_sp
= (unsigned long) ss_sp
;
931 current
->sas_ss_size
= ss_size
;
936 if (copy_to_user(uoss
, &oss
, sizeof(oss
)))
945 #if !defined(__alpha__)
946 /* Alpha has its own versions with special arguments. */
/*
 * Old-style sigprocmask(2) operating on the legacy single-word
 * old_sigset_t (only the first word of current->blocked). SIGKILL and
 * SIGSTOP are masked out of the new set so they can never be blocked.
 *
 * FIX(review): repaired encoding corruption — "&curren;t" (rendered
 * ¤t) restored to "&current" in the four occurrences below. No other
 * token changed.
 * NOTE(review): the switch-on-how header, error returns, and the
 * set==NULL branch structure are on lines elided from this extract.
 */
949 sys_sigprocmask(int how
, old_sigset_t
*set
, old_sigset_t
*oset
)
952 old_sigset_t old_set
, new_set
;
956 if (copy_from_user(&new_set
, set
, sizeof(*set
)))
/* Never allow SIGKILL or SIGSTOP to be blocked. */
958 new_set
&= ~(sigmask(SIGKILL
)|sigmask(SIGSTOP
));
960 spin_lock_irq(&current
->sigmask_lock
);
961 old_set
= current
->blocked
.sig
[0];
/* SIG_BLOCK: add bits to the blocked mask. */
969 sigaddsetmask(&current
->blocked
, new_set
);
/* SIG_UNBLOCK: remove bits from the blocked mask. */
972 sigdelsetmask(&current
->blocked
, new_set
);
/* SIG_SETMASK: replace the first word outright. */
975 current
->blocked
.sig
[0] = new_set
;
979 recalc_sigpending(current
);
980 spin_unlock_irq(&current
->sigmask_lock
);
/* set==NULL path: only report the old mask. */
986 old_set
= current
->blocked
.sig
[0];
989 if (copy_to_user(oset
, &old_set
, sizeof(*oset
)))
/*
 * Old-style sigpending(2): report, in the legacy single-word mask, the
 * signals that are both raised and blocked for the calling task.
 *
 * FIX(review): repaired encoding corruption — "&curren;t" (rendered
 * ¤t) restored to "&current" in the two spin lock calls below. No
 * other token changed.
 * NOTE(review): the return statements are on lines elided from this
 * extract.
 */
998 sys_sigpending(old_sigset_t
*set
)
1001 old_sigset_t pending
;
1003 spin_lock_irq(&current
->sigmask_lock
);
/* First word only: blocked & raised, taken under the sigmask lock. */
1004 pending
= current
->blocked
.sig
[0] & current
->signal
.sig
[0];
1005 spin_unlock_irq(&current
->sigmask_lock
);
1008 if (!copy_to_user(set
, &pending
, sizeof(*set
)))
/*
 * rt_sigaction(2): marshal struct sigaction between userspace and the
 * kernel's k_sigaction, delegating the real work to do_sigaction().
 * NOTE(review): the sigsetsize parameter line, the local `ret`
 * declaration and the returns are on lines elided from this extract.
 */
1015 sys_rt_sigaction(int sig
, const struct sigaction
*act
, struct sigaction
*oact
,
1018 struct k_sigaction new_sa
, old_sa
;
1021 /* XXX: Don't preclude handling different sized sigset_t's. */
1022 if (sigsetsize
!= sizeof(sigset_t
))
1026 if (copy_from_user(&new_sa
.sa
, act
, sizeof(new_sa
.sa
)))
/* NULL act/oact are legal: query-only or set-only calls. */
1030 ret
= do_sigaction(sig
, act
? &new_sa
: NULL
, oact
? &old_sa
: NULL
);
1033 if (copy_to_user(oact
, &old_sa
.sa
, sizeof(old_sa
.sa
)))
1039 #endif /* __sparc__ */
1042 #if !defined(__alpha__) && !defined(__ia64__)
1044 * For backwards compatibility. Functionality superseded by sigprocmask.
/* Body fragment of sys_sgetmask() (signature elided from this
 * extract): returns the legacy first word of the blocked mask. */
1050 return current
->blocked
.sig
[0];
/*
 * Legacy ssetmask(2): replace the first word of the blocked mask with
 * newmask (SIGKILL — and, on the elided continuation line, presumably
 * SIGSTOP — forced unblockable) and return the old word.
 *
 * FIX(review): repaired encoding corruption — "&curren;t" (rendered
 * ¤t) restored to "&current" in the three occurrences below. No other
 * token changed.
 * NOTE(review): the `old` declaration, the rest of the siginitset
 * argument, and the return are on lines elided from this extract.
 */
1054 sys_ssetmask(int newmask
)
1058 spin_lock_irq(&current
->sigmask_lock
);
1059 old
= current
->blocked
.sig
[0];
1061 siginitset(&current
->blocked
, newmask
& ~(sigmask(SIGKILL
)|
1063 recalc_sigpending(current
);
1064 spin_unlock_irq(&current
->sigmask_lock
);
1068 #endif /* !defined(__alpha__) */
1070 #if !defined(__alpha__) && !defined(__mips__)
1072 * For backwards compatibility. Functionality superseded by sigaction.
/*
 * Legacy signal(2): install handler with historical SysV semantics
 * (SA_ONESHOT resets to default on delivery, SA_NOMASK doesn't block
 * the signal while handling). Returns the previous handler, or the
 * (negative) error from do_sigaction() cast to unsigned long.
 * NOTE(review): the clearing of new_sa.sa.sa_mask and the `ret`
 * declaration are on lines elided from this extract.
 */
1074 asmlinkage
unsigned long
1075 sys_signal(int sig
, __sighandler_t handler
)
1077 struct k_sigaction new_sa
, old_sa
;
1080 new_sa
.sa
.sa_handler
= handler
;
1081 new_sa
.sa
.sa_flags
= SA_ONESHOT
| SA_NOMASK
;
1083 ret
= do_sigaction(sig
, &new_sa
, &old_sa
);
1085 return ret
? ret
: (unsigned long)old_sa
.sa
.sa_handler
;
1087 #endif /* !alpha && !__ia64__ && !defined(__mips__) */