2 * linux/kernel/signal.c
4 * Copyright (C) 1991, 1992 Linus Torvalds
6 * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson
9 #include <linux/slab.h>
10 #include <linux/module.h>
11 #include <linux/unistd.h>
12 #include <linux/smp_lock.h>
13 #include <linux/init.h>
14 #include <linux/sched.h>
16 #include <asm/uaccess.h>
19 * SLAB caches for signal bits.
25 #define SIG_SLAB_DEBUG (SLAB_DEBUG_FREE | SLAB_RED_ZONE /* | SLAB_POISON */)
27 #define SIG_SLAB_DEBUG 0
30 static kmem_cache_t
*signal_queue_cachep
;
32 atomic_t nr_queued_signals
;
33 int max_queued_signals
= 1024;
35 void __init
signals_init(void)
38 kmem_cache_create("signal_queue",
39 sizeof(struct signal_queue
),
40 __alignof__(struct signal_queue
),
41 SIG_SLAB_DEBUG
, NULL
, NULL
);
46 * Flush all pending signals for a task.
50 flush_signals(struct task_struct
*t
)
52 struct signal_queue
*q
, *n
;
55 sigemptyset(&t
->signal
);
58 t
->sigqueue_tail
= &t
->sigqueue
;
62 kmem_cache_free(signal_queue_cachep
, q
);
63 atomic_dec(&nr_queued_signals
);
69 * Flush all handlers for a task.
73 flush_signal_handlers(struct task_struct
*t
)
76 struct k_sigaction
*ka
= &t
->sig
->action
[0];
77 for (i
= _NSIG
; i
!= 0 ; i
--) {
78 if (ka
->sa
.sa_handler
!= SIG_IGN
)
79 ka
->sa
.sa_handler
= SIG_DFL
;
81 sigemptyset(&ka
->sa
.sa_mask
);
87 * Dequeue a signal and return the element to the caller, which is
88 * expected to free it.
90 * All callers of must be holding current->sigmask_lock.
94 dequeue_signal(sigset_t
*mask
, siginfo_t
*info
)
96 unsigned long i
, *s
, *m
, x
;
100 printk("SIG dequeue (%s:%d): %d ", current
->comm
, current
->pid
,
101 signal_pending(current
));
104 /* Find the first desired signal that is pending. */
105 s
= current
->signal
.sig
;
107 switch (_NSIG_WORDS
) {
109 for (i
= 0; i
< _NSIG_WORDS
; ++i
, ++s
, ++m
)
110 if ((x
= *s
&~ *m
) != 0) {
111 sig
= ffz(~x
) + i
*_NSIG_BPW
+ 1;
116 case 2: if ((x
= s
[0] &~ m
[0]) != 0)
118 else if ((x
= s
[1] &~ m
[1]) != 0)
125 case 1: if ((x
= *s
&~ *m
) != 0)
133 /* Collect the siginfo appropriate to this signal. */
134 if (sig
< SIGRTMIN
) {
135 *info
= current
->nrt_info
[sig
];
137 struct signal_queue
*q
, **pp
;
138 pp
= ¤t
->sigqueue
;
139 q
= current
->sigqueue
;
141 /* Find the one we're interested in ... */
142 for ( ; q
; pp
= &q
->next
, q
= q
->next
)
143 if (q
->info
.si_signo
== sig
)
146 if ((*pp
= q
->next
) == NULL
)
147 current
->sigqueue_tail
= pp
;
149 kmem_cache_free(signal_queue_cachep
,q
);
150 atomic_dec(&nr_queued_signals
);
152 /* then see if this signal is still pending. */
155 if (q
->info
.si_signo
== sig
) {
162 /* Ok, it wasn't in the queue. It must have
163 been sent either by a non-rt mechanism and
164 we ran out of queue space. So zero out the
166 info
->si_signo
= sig
;
175 sigdelset(¤t
->signal
, sig
);
176 recalc_sigpending(current
);
179 /* XXX: Once CLONE_PID is in to join those "threads" that are
180 part of the same "process", look for signals sent to the
181 "process" as well. */
183 /* Sanity check... */
184 if (mask
== ¤t
->blocked
&& signal_pending(current
)) {
185 printk(KERN_CRIT
"SIG: sigpending lied\n");
186 current
->sigpending
= 0;
191 printk(" %d -> %d\n", signal_pending(current
), sig
);
198 * Determine whether a signal should be posted or not.
200 * Signals with SIG_IGN can be ignored, except for the
201 * special case of a SIGCHLD.
203 * Some signals with SIG_DFL default to a non-action.
205 static int ignored_signal(int sig
, struct task_struct
*t
)
207 struct signal_struct
*signals
;
208 struct k_sigaction
*ka
;
210 /* Don't ignore traced or blocked signals */
211 if ((t
->flags
& PF_PTRACED
) || sigismember(&t
->blocked
, sig
))
218 ka
= &signals
->action
[sig
-1];
219 switch ((unsigned long) ka
->sa
.sa_handler
) {
220 case (unsigned long) SIG_DFL
:
221 if (sig
== SIGCONT
||
228 case (unsigned long) SIG_IGN
:
238 static void set_siginfo(siginfo_t
*dst
, const siginfo_t
*src
, int sig
)
240 switch ((unsigned long)src
) {
244 dst
->si_code
= SI_USER
;
245 dst
->si_pid
= current
->pid
;
246 dst
->si_uid
= current
->uid
;
251 dst
->si_code
= SI_KERNEL
;
262 send_sig_info(int sig
, struct siginfo
*info
, struct task_struct
*t
)
268 printk("SIG queue (%s:%d): %d ", t
->comm
, t
->pid
, sig
);
272 if (sig
< 0 || sig
> _NSIG
)
274 /* The somewhat baroque permissions check... */
276 if ((!info
|| ((unsigned long)info
!= 1 && SI_FROMUSER(info
)))
277 && ((sig
!= SIGCONT
) || (current
->session
!= t
->session
))
278 && (current
->euid
^ t
->suid
) && (current
->euid
^ t
->uid
)
279 && (current
->uid
^ t
->suid
) && (current
->uid
^ t
->uid
)
280 && !capable(CAP_KILL
))
283 /* The null signal is a permissions and process existance probe.
284 No signal is actually delivered. Same goes for zombies. */
289 spin_lock_irqsave(&t
->sigmask_lock
, flags
);
291 case SIGKILL
: case SIGCONT
:
292 /* Wake up the process if stopped. */
293 if (t
->state
== TASK_STOPPED
)
296 sigdelsetmask(&t
->signal
, (sigmask(SIGSTOP
)|sigmask(SIGTSTP
)|
297 sigmask(SIGTTOU
)|sigmask(SIGTTIN
)));
298 /* Inflict this corner case with recalculations, not mainline */
299 recalc_sigpending(t
);
302 case SIGSTOP
: case SIGTSTP
:
303 case SIGTTIN
: case SIGTTOU
:
304 /* If we're stopping again, cancel SIGCONT */
305 sigdelset(&t
->signal
, SIGCONT
);
306 /* Inflict this corner case with recalculations, not mainline */
307 recalc_sigpending(t
);
311 /* Optimize away the signal, if it's a signal that can be
312 handled immediately (ie non-blocked and untraced) and
313 that is ignored (either explicitly or by default). */
315 if (ignored_signal(sig
, t
))
318 if (sig
< SIGRTMIN
) {
319 /* Non-real-time signals are not queued. */
320 if (sigismember(&t
->signal
, sig
))
322 set_siginfo(&t
->nrt_info
[sig
], info
, sig
);
325 /* Real-time signals must be queued if sent by sigqueue, or
326 some other real-time mechanism. It is implementation
327 defined whether kill() does so. We attempt to do so, on
328 the principle of least surprise, but since kill is not
329 allowed to fail with EAGAIN when low on memory we just
330 make sure at least one signal gets delivered and don't
331 pass on the info struct. */
333 struct signal_queue
*q
= 0;
335 /* In case of a POSIX timer generated signal you must check
336 if a signal from this timer is already in the queue */
337 if (info
&& (info
->si_code
== SI_TIMER
)) {
338 for (q
= t
->sigqueue
; q
; q
= q
->next
) {
339 if ((q
->info
.si_code
== SI_TIMER
) &&
340 (q
->info
.si_timer1
== info
->si_timer1
)) {
341 /* this special value (1) is recognized
342 only by posix_timer_fn() in
350 if (atomic_read(&nr_queued_signals
) < max_queued_signals
) {
351 q
= (struct signal_queue
*)
352 kmem_cache_alloc(signal_queue_cachep
, GFP_ATOMIC
);
356 atomic_inc(&nr_queued_signals
);
358 *t
->sigqueue_tail
= q
;
359 t
->sigqueue_tail
= &q
->next
;
360 set_siginfo(&q
->info
, info
, sig
);
362 /* If this was sent by a rt mechanism, try again. */
363 if (info
->si_code
< 0) {
367 /* Otherwise, mention that the signal is pending,
368 but don't queue the info. */
372 sigaddset(&t
->signal
, sig
);
373 if (!sigismember(&t
->blocked
, sig
)) {
377 * If the task is running on a different CPU
378 * force a reschedule on the other CPU - note that
379 * the code below is a tad loose and might occasionally
380 * kick the wrong CPU if we catch the process in the
381 * process of changing - but no harm is done by that
382 * other than doing an extra (lightweight) IPI interrupt.
384 * note that we rely on the previous spin_lock to
385 * lock interrupts for us! No need to set need_resched
386 * since signal event passing goes through ->blocked.
388 spin_lock(&runqueue_lock
);
389 if (t
->has_cpu
&& t
->processor
!= smp_processor_id())
390 smp_send_reschedule(t
->processor
);
391 spin_unlock(&runqueue_lock
);
396 spin_unlock_irqrestore(&t
->sigmask_lock
, flags
);
397 if ((t
->state
& TASK_INTERRUPTIBLE
) && signal_pending(t
))
402 printk(" %d -> %d\n", signal_pending(t
), ret
);
409 * Force a signal that the process can't ignore: if necessary
410 * we unblock the signal and change any SIG_IGN to SIG_DFL.
414 force_sig_info(int sig
, struct siginfo
*info
, struct task_struct
*t
)
416 unsigned long int flags
;
418 spin_lock_irqsave(&t
->sigmask_lock
, flags
);
419 if (t
->sig
== NULL
) {
420 spin_unlock_irqrestore(&t
->sigmask_lock
, flags
);
424 if (t
->sig
->action
[sig
-1].sa
.sa_handler
== SIG_IGN
)
425 t
->sig
->action
[sig
-1].sa
.sa_handler
= SIG_DFL
;
426 sigdelset(&t
->blocked
, sig
);
427 recalc_sigpending(t
);
428 spin_unlock_irqrestore(&t
->sigmask_lock
, flags
);
430 return send_sig_info(sig
, info
, t
);
434 * kill_pg() sends a signal to a process group: this is what the tty
435 * control characters do (^C, ^Z etc)
439 kill_pg_info(int sig
, struct siginfo
*info
, pid_t pgrp
)
441 int retval
= -EINVAL
;
443 struct task_struct
*p
;
447 read_lock(&tasklist_lock
);
449 if (p
->pgrp
== pgrp
) {
450 int err
= send_sig_info(sig
, info
, p
);
457 read_unlock(&tasklist_lock
);
465 * kill_sl() sends a signal to the session leader: this is used
466 * to send SIGHUP to the controlling process of a terminal when
467 * the connection is lost.
471 kill_sl_info(int sig
, struct siginfo
*info
, pid_t sess
)
473 int retval
= -EINVAL
;
475 struct task_struct
*p
;
479 read_lock(&tasklist_lock
);
481 if (p
->leader
&& p
->session
== sess
) {
482 int err
= send_sig_info(sig
, info
, p
);
489 read_unlock(&tasklist_lock
);
497 kill_proc_info(int sig
, struct siginfo
*info
, pid_t pid
)
500 struct task_struct
*p
;
502 read_lock(&tasklist_lock
);
503 p
= find_task_by_pid(pid
);
506 error
= send_sig_info(sig
, info
, p
);
507 read_unlock(&tasklist_lock
);
512 * kill_something() interprets pid in interesting ways just like kill(2).
514 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
515 * is probably wrong. Should make it like BSD or SYSV.
519 kill_something_info(int sig
, struct siginfo
*info
, int pid
)
522 return kill_pg_info(sig
, info
, current
->pgrp
);
523 } else if (pid
== -1) {
524 int retval
= 0, count
= 0;
525 struct task_struct
* p
;
527 read_lock(&tasklist_lock
);
529 if (p
->pid
> 1 && p
!= current
) {
530 int err
= send_sig_info(sig
, info
, p
);
536 read_unlock(&tasklist_lock
);
537 return count
? retval
: -ESRCH
;
538 } else if (pid
< 0) {
539 return kill_pg_info(sig
, info
, -pid
);
541 return kill_proc_info(sig
, info
, pid
);
/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int
send_sig(int sig, struct task_struct *p, int priv)
{
	/* Encode the priv flag as the magic info values 0/1. */
	return send_sig_info(sig, (void*)(long)(priv != 0), p);
}
void
force_sig(int sig, struct task_struct *p)
{
	/* The magic value 1 means "sent by the kernel, no siginfo". */
	force_sig_info(sig, (void*)1L, p);
}
562 kill_pg(pid_t pgrp
, int sig
, int priv
)
564 return kill_pg_info(sig
, (void *)(long)(priv
!= 0), pgrp
);
568 kill_sl(pid_t sess
, int sig
, int priv
)
570 return kill_sl_info(sig
, (void *)(long)(priv
!= 0), sess
);
574 kill_proc(pid_t pid
, int sig
, int priv
)
576 return kill_proc_info(sig
, (void *)(long)(priv
!= 0), pid
);
580 * Let a parent know about a status change of a child.
584 notify_parent(struct task_struct
*tsk
, int sig
)
591 info
.si_pid
= tsk
->pid
;
593 /* FIXME: find out whether or not this is supposed to be c*time. */
594 info
.si_utime
= tsk
->times
.tms_utime
;
595 info
.si_stime
= tsk
->times
.tms_stime
;
597 why
= SI_KERNEL
; /* shouldn't happen */
598 switch (tsk
->state
) {
600 if (tsk
->exit_code
& 0x80)
602 else if (tsk
->exit_code
& 0x7f)
608 /* FIXME -- can we deduce CLD_TRAPPED or CLD_CONTINUED? */
613 printk(KERN_DEBUG
"eh? notify_parent with state %ld?\n",
619 send_sig_info(sig
, &info
, tsk
->p_pptr
);
620 wake_up_interruptible(&tsk
->p_pptr
->wait_chldexit
);
/* Symbols exported to modules. */
EXPORT_SYMBOL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(force_sig_info);
EXPORT_SYMBOL(kill_pg);
EXPORT_SYMBOL(kill_pg_info);
EXPORT_SYMBOL(kill_proc);
EXPORT_SYMBOL(kill_proc_info);
EXPORT_SYMBOL(kill_sl);
EXPORT_SYMBOL(kill_sl_info);
EXPORT_SYMBOL(notify_parent);
EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
640 * System call entry points.
644 * We don't need to get the kernel lock - this is all local to this
645 * particular thread.. (and that's good, because this is _heavily_
646 * used by various programs)
650 sys_rt_sigprocmask(int how
, sigset_t
*set
, sigset_t
*oset
, size_t sigsetsize
)
653 sigset_t old_set
, new_set
;
655 /* XXX: Don't preclude handling different sized sigset_t's. */
656 if (sigsetsize
!= sizeof(sigset_t
))
661 if (copy_from_user(&new_set
, set
, sizeof(*set
)))
663 sigdelsetmask(&new_set
, sigmask(SIGKILL
)|sigmask(SIGSTOP
));
665 spin_lock_irq(¤t
->sigmask_lock
);
666 old_set
= current
->blocked
;
674 sigorsets(&new_set
, &old_set
, &new_set
);
677 signandsets(&new_set
, &old_set
, &new_set
);
683 current
->blocked
= new_set
;
684 recalc_sigpending(current
);
685 spin_unlock_irq(¤t
->sigmask_lock
);
691 spin_lock_irq(¤t
->sigmask_lock
);
692 old_set
= current
->blocked
;
693 spin_unlock_irq(¤t
->sigmask_lock
);
697 if (copy_to_user(oset
, &old_set
, sizeof(*oset
)))
706 sys_rt_sigpending(sigset_t
*set
, size_t sigsetsize
)
711 /* XXX: Don't preclude handling different sized sigset_t's. */
712 if (sigsetsize
!= sizeof(sigset_t
))
715 spin_lock_irq(¤t
->sigmask_lock
);
716 sigandsets(&pending
, ¤t
->blocked
, ¤t
->signal
);
717 spin_unlock_irq(¤t
->sigmask_lock
);
720 if (!copy_to_user(set
, &pending
, sizeof(*set
)))
727 sys_rt_sigtimedwait(const sigset_t
*uthese
, siginfo_t
*uinfo
,
728 const struct timespec
*uts
, size_t sigsetsize
)
736 /* XXX: Don't preclude handling different sized sigset_t's. */
737 if (sigsetsize
!= sizeof(sigset_t
))
740 if (copy_from_user(&these
, uthese
, sizeof(these
)))
743 /* Invert the set of allowed signals to get those we
749 if (copy_from_user(&ts
, uts
, sizeof(ts
)))
751 if (ts
.tv_nsec
>= 1000000000L || ts
.tv_nsec
< 0
756 spin_lock_irq(¤t
->sigmask_lock
);
757 sig
= dequeue_signal(&these
, &info
);
759 /* None ready -- temporarily unblock those we're interested
760 in so that we'll be awakened when they arrive. */
761 sigset_t oldblocked
= current
->blocked
;
762 sigandsets(¤t
->blocked
, ¤t
->blocked
, &these
);
763 recalc_sigpending(current
);
764 spin_unlock_irq(¤t
->sigmask_lock
);
766 timeout
= MAX_SCHEDULE_TIMEOUT
;
768 timeout
= (timespec_to_jiffies(&ts
)
769 + (ts
.tv_sec
|| ts
.tv_nsec
));
771 current
->state
= TASK_INTERRUPTIBLE
;
772 timeout
= schedule_timeout(timeout
);
774 spin_lock_irq(¤t
->sigmask_lock
);
775 sig
= dequeue_signal(&these
, &info
);
776 current
->blocked
= oldblocked
;
777 recalc_sigpending(current
);
779 spin_unlock_irq(¤t
->sigmask_lock
);
784 if (copy_to_user(uinfo
, &info
, sizeof(siginfo_t
)))
797 sys_kill(int pid
, int sig
)
803 info
.si_code
= SI_USER
;
804 info
.si_pid
= current
->pid
;
805 info
.si_uid
= current
->uid
;
807 return kill_something_info(sig
, &info
, pid
);
811 sys_rt_sigqueueinfo(int pid
, int sig
, siginfo_t
*uinfo
)
815 if (copy_from_user(&info
, uinfo
, sizeof(siginfo_t
)))
818 /* Not even root can pretend to send signals from the kernel.
819 Nor can they impersonate a kill(), which adds source info. */
820 if (info
.si_code
>= 0)
824 /* POSIX.1b doesn't mention process groups. */
825 return kill_proc_info(sig
, &info
, pid
);
829 do_sigaction(int sig
, const struct k_sigaction
*act
, struct k_sigaction
*oact
)
831 struct k_sigaction
*k
;
833 if (sig
< 1 || sig
> _NSIG
||
834 (act
&& (sig
== SIGKILL
|| sig
== SIGSTOP
)))
837 spin_lock_irq(¤t
->sigmask_lock
);
838 k
= ¤t
->sig
->action
[sig
-1];
840 if (oact
) *oact
= *k
;
844 sigdelsetmask(&k
->sa
.sa_mask
, sigmask(SIGKILL
) | sigmask(SIGSTOP
));
848 * "Setting a signal action to SIG_IGN for a signal that is
849 * pending shall cause the pending signal to be discarded,
850 * whether or not it is blocked."
852 * "Setting a signal action to SIG_DFL for a signal that is
853 * pending and whose default action is to ignore the signal
854 * (for example, SIGCHLD), shall cause the pending signal to
855 * be discarded, whether or not it is blocked"
857 * Note the silly behaviour of SIGCHLD: SIG_IGN means that the
858 * signal isn't actually ignored, but does automatic child
859 * reaping, while SIG_DFL is explicitly said by POSIX to force
860 * the signal to be ignored.
863 if (k
->sa
.sa_handler
== SIG_IGN
864 || (k
->sa
.sa_handler
== SIG_DFL
865 && (sig
== SIGCONT
||
868 /* So dequeue any that might be pending.
869 XXX: process-wide signals? */
870 if (sig
>= SIGRTMIN
&&
871 sigismember(¤t
->signal
, sig
)) {
872 struct signal_queue
*q
, **pp
;
873 pp
= ¤t
->sigqueue
;
874 q
= current
->sigqueue
;
876 if (q
->info
.si_signo
!= sig
)
880 kmem_cache_free(signal_queue_cachep
, q
);
881 atomic_dec(&nr_queued_signals
);
887 sigdelset(¤t
->signal
, sig
);
888 recalc_sigpending(current
);
892 spin_unlock_irq(¤t
->sigmask_lock
);
898 do_sigaltstack (const stack_t
*uss
, stack_t
*uoss
, unsigned long sp
)
904 oss
.ss_sp
= (void *) current
->sas_ss_sp
;
905 oss
.ss_size
= current
->sas_ss_size
;
906 oss
.ss_flags
= sas_ss_flags(sp
);
915 if (verify_area(VERIFY_READ
, uss
, sizeof(*uss
))
916 || __get_user(ss_sp
, &uss
->ss_sp
)
917 || __get_user(ss_flags
, &uss
->ss_flags
)
918 || __get_user(ss_size
, &uss
->ss_size
))
922 if (on_sig_stack (sp
))
926 if (ss_flags
& ~SS_DISABLE
)
929 if (ss_flags
& SS_DISABLE
) {
934 if (ss_size
< MINSIGSTKSZ
)
938 current
->sas_ss_sp
= (unsigned long) ss_sp
;
939 current
->sas_ss_size
= ss_size
;
944 if (copy_to_user(uoss
, &oss
, sizeof(oss
)))
953 #if !defined(__alpha__)
954 /* Alpha has its own versions with special arguments. */
957 sys_sigprocmask(int how
, old_sigset_t
*set
, old_sigset_t
*oset
)
960 old_sigset_t old_set
, new_set
;
964 if (copy_from_user(&new_set
, set
, sizeof(*set
)))
966 new_set
&= ~(sigmask(SIGKILL
)|sigmask(SIGSTOP
));
968 spin_lock_irq(¤t
->sigmask_lock
);
969 old_set
= current
->blocked
.sig
[0];
977 sigaddsetmask(¤t
->blocked
, new_set
);
980 sigdelsetmask(¤t
->blocked
, new_set
);
983 current
->blocked
.sig
[0] = new_set
;
987 recalc_sigpending(current
);
988 spin_unlock_irq(¤t
->sigmask_lock
);
994 old_set
= current
->blocked
.sig
[0];
997 if (copy_to_user(oset
, &old_set
, sizeof(*oset
)))
1006 sys_sigpending(old_sigset_t
*set
)
1009 old_sigset_t pending
;
1011 spin_lock_irq(¤t
->sigmask_lock
);
1012 pending
= current
->blocked
.sig
[0] & current
->signal
.sig
[0];
1013 spin_unlock_irq(¤t
->sigmask_lock
);
1016 if (!copy_to_user(set
, &pending
, sizeof(*set
)))
1023 sys_rt_sigaction(int sig
, const struct sigaction
*act
, struct sigaction
*oact
,
1026 struct k_sigaction new_sa
, old_sa
;
1029 /* XXX: Don't preclude handling different sized sigset_t's. */
1030 if (sigsetsize
!= sizeof(sigset_t
))
1034 if (copy_from_user(&new_sa
.sa
, act
, sizeof(new_sa
.sa
)))
1038 ret
= do_sigaction(sig
, act
? &new_sa
: NULL
, oact
? &old_sa
: NULL
);
1041 if (copy_to_user(oact
, &old_sa
.sa
, sizeof(old_sa
.sa
)))
1047 #endif /* __sparc__ */
1050 #if !defined(__alpha__) && !defined(__ia64__)
1052 * For backwards compatibility. Functionality superseded by sigprocmask.
1058 return current
->blocked
.sig
[0];
1062 sys_ssetmask(int newmask
)
1066 spin_lock_irq(¤t
->sigmask_lock
);
1067 old
= current
->blocked
.sig
[0];
1069 siginitset(¤t
->blocked
, newmask
& ~(sigmask(SIGKILL
)|
1071 recalc_sigpending(current
);
1072 spin_unlock_irq(¤t
->sigmask_lock
);
1076 #endif /* !defined(__alpha__) */
1078 #if !defined(__alpha__) && !defined(__mips__)
1080 * For backwards compatibility. Functionality superseded by sigaction.
1082 asmlinkage
unsigned long
1083 sys_signal(int sig
, __sighandler_t handler
)
1085 struct k_sigaction new_sa
, old_sa
;
1088 new_sa
.sa
.sa_handler
= handler
;
1089 new_sa
.sa
.sa_flags
= SA_ONESHOT
| SA_NOMASK
;
1091 ret
= do_sigaction(sig
, &new_sa
, &old_sa
);
1093 return ret
? ret
: (unsigned long)old_sa
.sa
.sa_handler
;
1095 #endif /* !alpha && !__ia64__ && !defined(__mips__) */