/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 */
9 #include <linux/module.h>
10 #include <linux/sched.h>
11 #include <linux/kernel.h>
12 #include <linux/signal.h>
13 #include <linux/errno.h>
14 #include <linux/wait.h>
15 #include <linux/ptrace.h>
16 #include <linux/unistd.h>
18 #include <linux/smp.h>
19 #include <linux/smp_lock.h>
20 #include <linux/slab.h>
21 #include <linux/init.h>
23 #include <asm/uaccess.h>
26 * SLAB caches for signal bits.
32 #define SIG_SLAB_DEBUG (SLAB_DEBUG_FREE | SLAB_RED_ZONE /* | SLAB_POISON */)
34 #define SIG_SLAB_DEBUG 0
37 static kmem_cache_t
*signal_queue_cachep
;
39 static int nr_queued_signals
;
40 static int max_queued_signals
= 1024;
42 void __init
signals_init(void)
45 kmem_cache_create("signal_queue",
46 sizeof(struct signal_queue
),
47 __alignof__(struct signal_queue
),
48 SIG_SLAB_DEBUG
, NULL
, NULL
);
53 * Flush all pending signals for a task.
57 flush_signals(struct task_struct
*t
)
59 struct signal_queue
*q
, *n
;
62 sigemptyset(&t
->signal
);
65 t
->sigqueue_tail
= &t
->sigqueue
;
69 kmem_cache_free(signal_queue_cachep
, q
);
76 * Flush all handlers for a task.
80 flush_signal_handlers(struct task_struct
*t
)
83 struct k_sigaction
*ka
= &t
->sig
->action
[0];
84 for (i
= _NSIG
; i
!= 0 ; i
--) {
85 if (ka
->sa
.sa_handler
!= SIG_IGN
)
86 ka
->sa
.sa_handler
= SIG_DFL
;
88 sigemptyset(&ka
->sa
.sa_mask
);
94 * Dequeue a signal and return the element to the caller, which is
95 * expected to free it.
97 * All callers of must be holding current->sigmask_lock.
101 dequeue_signal(sigset_t
*mask
, siginfo_t
*info
)
103 unsigned long i
, *s
, *m
, x
;
107 printk("SIG dequeue (%s:%d): %d ", current
->comm
, current
->pid
,
108 signal_pending(current
));
111 /* Find the first desired signal that is pending. */
112 s
= current
->signal
.sig
;
114 switch (_NSIG_WORDS
) {
116 for (i
= 0; i
< _NSIG_WORDS
; ++i
, ++s
, ++m
)
117 if ((x
= *s
&~ *m
) != 0) {
118 sig
= ffz(~x
) + i
*_NSIG_BPW
+ 1;
123 case 2: if ((x
= s
[0] &~ m
[0]) != 0)
125 else if ((x
= s
[1] &~ m
[1]) != 0)
132 case 1: if ((x
= *s
&~ *m
) != 0)
140 /* Collect the siginfo appropriate to this signal. */
141 if (sig
< SIGRTMIN
) {
142 /* XXX: As an extension, support queueing exactly
143 one non-rt signal if SA_SIGINFO is set, so that
144 we can get more detailed information about the
145 cause of the signal. */
146 /* Deciding not to init these couple of fields is
147 more expensive that just initializing them. */
148 info
->si_signo
= sig
;
154 struct signal_queue
*q
, **pp
;
155 pp
= ¤t
->sigqueue
;
156 q
= current
->sigqueue
;
158 /* Find the one we're interested in ... */
159 for ( ; q
; pp
= &q
->next
, q
= q
->next
)
160 if (q
->info
.si_signo
== sig
)
163 if ((*pp
= q
->next
) == NULL
)
164 current
->sigqueue_tail
= pp
;
166 kmem_cache_free(signal_queue_cachep
,q
);
169 /* then see if this signal is still pending. */
172 if (q
->info
.si_signo
== sig
) {
179 /* Ok, it wasn't in the queue. It must have
180 been sent either by a non-rt mechanism and
181 we ran out of queue space. So zero out the
183 info
->si_signo
= sig
;
192 sigdelset(¤t
->signal
, sig
);
193 recalc_sigpending(current
);
195 /* XXX: Once POSIX.1b timers are in, if si_code == SI_TIMER,
196 we need to xchg out the timer overrun values. */
198 /* XXX: Once CLONE_PID is in to join those "threads" that are
199 part of the same "process", look for signals sent to the
200 "process" as well. */
202 /* Sanity check... */
203 if (mask
== ¤t
->blocked
&& signal_pending(current
)) {
204 printk(KERN_CRIT
"SIG: sigpending lied\n");
205 current
->sigpending
= 0;
210 printk(" %d -> %d\n", signal_pending(current
), sig
);
217 * Determine whether a signal should be posted or not.
219 * Signals with SIG_IGN can be ignored, except for the
220 * special case of a SIGCHLD.
222 * Some signals with SIG_DFL default to a non-action.
224 static int ignored_signal(int sig
, struct task_struct
*t
)
226 struct signal_struct
*signals
;
227 struct k_sigaction
*ka
;
229 /* Don't ignore traced or blocked signals */
230 if ((t
->flags
& PF_PTRACED
) || sigismember(&t
->blocked
, sig
))
237 ka
= &signals
->action
[sig
-1];
238 switch ((unsigned long) ka
->sa
.sa_handler
) {
239 case (unsigned long) SIG_DFL
:
240 if (sig
== SIGCONT
||
247 case (unsigned long) SIG_IGN
:
258 send_sig_info(int sig
, struct siginfo
*info
, struct task_struct
*t
)
264 printk("SIG queue (%s:%d): %d ", t
->comm
, t
->pid
, sig
);
268 if (sig
< 0 || sig
> _NSIG
)
270 /* The somewhat baroque permissions check... */
272 if ((!info
|| ((unsigned long)info
!= 1 && SI_FROMUSER(info
)))
273 && ((sig
!= SIGCONT
) || (current
->session
!= t
->session
))
274 && (current
->euid
^ t
->suid
) && (current
->euid
^ t
->uid
)
275 && (current
->uid
^ t
->suid
) && (current
->uid
^ t
->uid
)
276 && !capable(CAP_SYS_ADMIN
))
279 /* The null signal is a permissions and process existance probe.
280 No signal is actually delivered. */
285 spin_lock_irqsave(&t
->sigmask_lock
, flags
);
287 case SIGKILL
: case SIGCONT
:
288 /* Wake up the process if stopped. */
289 if (t
->state
== TASK_STOPPED
)
292 sigdelsetmask(&t
->signal
, (sigmask(SIGSTOP
)|sigmask(SIGTSTP
)|
293 sigmask(SIGTTOU
)|sigmask(SIGTTIN
)));
294 /* Inflict this corner case with recalculations, not mainline */
295 recalc_sigpending(t
);
298 case SIGSTOP
: case SIGTSTP
:
299 case SIGTTIN
: case SIGTTOU
:
300 /* If we're stopping again, cancel SIGCONT */
301 sigdelset(&t
->signal
, SIGCONT
);
302 /* Inflict this corner case with recalculations, not mainline */
303 recalc_sigpending(t
);
307 /* Optimize away the signal, if it's a signal that can be
308 handled immediately (ie non-blocked and untraced) and
309 that is ignored (either explicitly or by default). */
311 if (ignored_signal(sig
, t
))
314 if (sig
< SIGRTMIN
) {
315 /* Non-real-time signals are not queued. */
316 /* XXX: As an extension, support queueing exactly one
317 non-rt signal if SA_SIGINFO is set, so that we can
318 get more detailed information about the cause of
320 if (sigismember(&t
->signal
, sig
))
323 /* Real-time signals must be queued if sent by sigqueue, or
324 some other real-time mechanism. It is implementation
325 defined whether kill() does so. We attempt to do so, on
326 the principle of least surprise, but since kill is not
327 allowed to fail with EAGAIN when low on memory we just
328 make sure at least one signal gets delivered and don't
329 pass on the info struct. */
331 struct signal_queue
*q
= 0;
333 if (nr_queued_signals
< max_queued_signals
) {
334 q
= (struct signal_queue
*)
335 kmem_cache_alloc(signal_queue_cachep
, GFP_KERNEL
);
341 *t
->sigqueue_tail
= q
;
342 t
->sigqueue_tail
= &q
->next
;
343 switch ((unsigned long) info
) {
345 q
->info
.si_signo
= sig
;
346 q
->info
.si_errno
= 0;
347 q
->info
.si_code
= SI_USER
;
348 q
->info
.si_pid
= current
->pid
;
349 q
->info
.si_uid
= current
->uid
;
352 q
->info
.si_signo
= sig
;
353 q
->info
.si_errno
= 0;
354 q
->info
.si_code
= SI_KERNEL
;
363 /* If this was sent by a rt mechanism, try again. */
364 if (info
->si_code
< 0) {
368 /* Otherwise, mention that the signal is pending,
369 but don't queue the info. */
373 sigaddset(&t
->signal
, sig
);
374 if (!sigismember(&t
->blocked
, sig
))
378 spin_unlock_irqrestore(&t
->sigmask_lock
, flags
);
379 if (t
->state
== TASK_INTERRUPTIBLE
&& signal_pending(t
))
384 printk(" %d -> %d\n", signal_pending(t
), ret
);
391 * Force a signal that the process can't ignore: if necessary
392 * we unblock the signal and change any SIG_IGN to SIG_DFL.
396 force_sig_info(int sig
, struct siginfo
*info
, struct task_struct
*t
)
398 unsigned long int flags
;
400 spin_lock_irqsave(&t
->sigmask_lock
, flags
);
401 if (t
->sig
== NULL
) {
402 spin_unlock_irqrestore(&t
->sigmask_lock
, flags
);
406 if (t
->sig
->action
[sig
-1].sa
.sa_handler
== SIG_IGN
)
407 t
->sig
->action
[sig
-1].sa
.sa_handler
= SIG_DFL
;
408 sigdelset(&t
->blocked
, sig
);
409 spin_unlock_irqrestore(&t
->sigmask_lock
, flags
);
411 return send_sig_info(sig
, info
, t
);
415 * kill_pg() sends a signal to a process group: this is what the tty
416 * control characters do (^C, ^Z etc)
420 kill_pg_info(int sig
, struct siginfo
*info
, pid_t pgrp
)
422 int retval
= -EINVAL
;
424 struct task_struct
*p
;
428 read_lock(&tasklist_lock
);
430 if (p
->pgrp
== pgrp
) {
431 int err
= send_sig_info(sig
, info
, p
);
438 read_unlock(&tasklist_lock
);
446 * kill_sl() sends a signal to the session leader: this is used
447 * to send SIGHUP to the controlling process of a terminal when
448 * the connection is lost.
452 kill_sl_info(int sig
, struct siginfo
*info
, pid_t sess
)
454 int retval
= -EINVAL
;
456 struct task_struct
*p
;
460 read_lock(&tasklist_lock
);
462 if (p
->leader
&& p
->session
== sess
) {
463 int err
= send_sig_info(sig
, info
, p
);
470 read_unlock(&tasklist_lock
);
478 kill_proc_info(int sig
, struct siginfo
*info
, pid_t pid
)
481 struct task_struct
*p
;
483 read_lock(&tasklist_lock
);
484 p
= find_task_by_pid(pid
);
487 error
= send_sig_info(sig
, info
, p
);
488 read_unlock(&tasklist_lock
);
493 * kill_something() interprets pid in interesting ways just like kill(2).
495 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
496 * is probably wrong. Should make it like BSD or SYSV.
500 kill_something_info(int sig
, struct siginfo
*info
, int pid
)
503 return kill_pg_info(sig
, info
, current
->pgrp
);
504 } else if (pid
== -1) {
505 int retval
= 0, count
= 0;
506 struct task_struct
* p
;
508 read_lock(&tasklist_lock
);
510 if (p
->pid
> 1 && p
!= current
) {
511 int err
= send_sig_info(sig
, info
, p
);
517 read_unlock(&tasklist_lock
);
518 return count
? retval
: -ESRCH
;
519 } else if (pid
< 0) {
520 return kill_pg_info(sig
, info
, -pid
);
522 return kill_proc_info(sig
, info
, pid
);
/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int
send_sig(int sig, struct task_struct *p, int priv)
{
	/* priv != 0 is encoded as the magic (void*)1 "from kernel" info. */
	return send_sig_info(sig, (void*)(long)(priv != 0), p);
}
/* Force delivery of sig to p, treating it as sent by the kernel. */
void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, (void*)1L, p);
}
543 kill_pg(pid_t pgrp
, int sig
, int priv
)
545 return kill_pg_info(sig
, (void *)(long)(priv
!= 0), pgrp
);
549 kill_sl(pid_t sess
, int sig
, int priv
)
551 return kill_sl_info(sig
, (void *)(long)(priv
!= 0), sess
);
555 kill_proc(pid_t pid
, int sig
, int priv
)
557 return kill_proc_info(sig
, (void *)(long)(priv
!= 0), pid
);
561 * Let a parent know about a status change of a child.
565 notify_parent(struct task_struct
*tsk
, int sig
)
572 info
.si_pid
= tsk
->pid
;
574 /* FIXME: find out whether or not this is supposed to be c*time. */
575 info
.si_utime
= tsk
->times
.tms_utime
;
576 info
.si_stime
= tsk
->times
.tms_stime
;
578 why
= SI_KERNEL
; /* shouldn't happen */
579 switch (tsk
->state
) {
581 if (tsk
->exit_code
& 0x80)
583 else if (tsk
->exit_code
& 0x7f)
589 /* FIXME -- can we deduce CLD_TRAPPED or CLD_CONTINUED? */
594 printk(KERN_DEBUG
"eh? notify_parent with state %ld?\n",
600 send_sig_info(sig
, &info
, tsk
->p_pptr
);
601 wake_up_interruptible(&tsk
->p_pptr
->wait_chldexit
);
/* Symbols exported to modules. */
EXPORT_SYMBOL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(force_sig_info);
EXPORT_SYMBOL(kill_pg);
EXPORT_SYMBOL(kill_pg_info);
EXPORT_SYMBOL(kill_proc);
EXPORT_SYMBOL(kill_proc_info);
EXPORT_SYMBOL(kill_sl);
EXPORT_SYMBOL(kill_sl_info);
EXPORT_SYMBOL(notify_parent);
EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
621 * System call entry points.
625 * We don't need to get the kernel lock - this is all local to this
626 * particular thread.. (and that's good, because this is _heavily_
627 * used by various programs)
631 sys_rt_sigprocmask(int how
, sigset_t
*set
, sigset_t
*oset
, size_t sigsetsize
)
634 sigset_t old_set
, new_set
;
636 /* XXX: Don't preclude handling different sized sigset_t's. */
637 if (sigsetsize
!= sizeof(sigset_t
))
642 if (copy_from_user(&new_set
, set
, sizeof(*set
)))
644 sigdelsetmask(&new_set
, sigmask(SIGKILL
)|sigmask(SIGSTOP
));
646 spin_lock_irq(¤t
->sigmask_lock
);
647 old_set
= current
->blocked
;
655 sigorsets(&new_set
, &old_set
, &new_set
);
658 signandsets(&new_set
, &old_set
, &new_set
);
664 current
->blocked
= new_set
;
665 recalc_sigpending(current
);
666 spin_unlock_irq(¤t
->sigmask_lock
);
672 spin_lock_irq(¤t
->sigmask_lock
);
673 old_set
= current
->blocked
;
674 spin_unlock_irq(¤t
->sigmask_lock
);
678 if (copy_to_user(oset
, &old_set
, sizeof(*oset
)))
687 sys_rt_sigpending(sigset_t
*set
, size_t sigsetsize
)
692 /* XXX: Don't preclude handling different sized sigset_t's. */
693 if (sigsetsize
!= sizeof(sigset_t
))
696 spin_lock_irq(¤t
->sigmask_lock
);
697 sigandsets(&pending
, ¤t
->blocked
, ¤t
->signal
);
698 spin_unlock_irq(¤t
->sigmask_lock
);
701 if (!copy_to_user(set
, &pending
, sizeof(*set
)))
708 sys_rt_sigtimedwait(const sigset_t
*uthese
, siginfo_t
*uinfo
,
709 const struct timespec
*uts
, size_t sigsetsize
)
716 /* XXX: Don't preclude handling different sized sigset_t's. */
717 if (sigsetsize
!= sizeof(sigset_t
))
720 if (copy_from_user(&these
, uthese
, sizeof(these
)))
723 /* Invert the set of allowed signals to get those we
729 if (copy_from_user(&ts
, uts
, sizeof(ts
)))
731 if (ts
.tv_nsec
>= 1000000000L || ts
.tv_nsec
< 0
736 spin_lock_irq(¤t
->sigmask_lock
);
737 sig
= dequeue_signal(&these
, &info
);
739 /* None ready -- temporarily unblock those we're interested
740 in so that we'll be awakened when they arrive. */
741 unsigned long expire
;
742 sigset_t oldblocked
= current
->blocked
;
743 sigandsets(¤t
->blocked
, ¤t
->blocked
, &these
);
744 recalc_sigpending(current
);
745 spin_unlock_irq(¤t
->sigmask_lock
);
749 expire
= (timespec_to_jiffies(&ts
)
750 + (ts
.tv_sec
|| ts
.tv_nsec
));
753 current
->timeout
= expire
;
755 current
->state
= TASK_INTERRUPTIBLE
;
758 spin_lock_irq(¤t
->sigmask_lock
);
759 sig
= dequeue_signal(&these
, &info
);
760 current
->blocked
= oldblocked
;
761 recalc_sigpending(current
);
763 spin_unlock_irq(¤t
->sigmask_lock
);
768 if (copy_to_user(uinfo
, &info
, sizeof(siginfo_t
)))
773 if (current
->timeout
!= 0) {
774 current
->timeout
= 0;
783 sys_kill(int pid
, int sig
)
789 info
.si_code
= SI_USER
;
790 info
.si_pid
= current
->pid
;
791 info
.si_uid
= current
->uid
;
793 return kill_something_info(sig
, &info
, pid
);
797 sys_rt_sigqueueinfo(int pid
, int sig
, siginfo_t
*uinfo
)
801 if (copy_from_user(&info
, uinfo
, sizeof(siginfo_t
)))
804 /* Not even root can pretend to send signals from the kernel.
805 Nor can they impersonate a kill(), which adds source info. */
806 if (info
.si_code
>= 0)
810 /* POSIX.1b doesn't mention process groups. */
811 return kill_proc_info(sig
, &info
, pid
);
815 do_sigaction(int sig
, const struct k_sigaction
*act
, struct k_sigaction
*oact
)
817 struct k_sigaction
*k
;
819 if (sig
< 1 || sig
> _NSIG
||
820 (act
&& (sig
== SIGKILL
|| sig
== SIGSTOP
)))
823 spin_lock_irq(¤t
->sigmask_lock
);
824 k
= ¤t
->sig
->action
[sig
-1];
826 if (oact
) *oact
= *k
;
830 sigdelsetmask(&k
->sa
.sa_mask
, sigmask(SIGKILL
) | sigmask(SIGSTOP
));
834 * "Setting a signal action to SIG_IGN for a signal that is
835 * pending shall cause the pending signal to be discarded,
836 * whether or not it is blocked."
838 * "Setting a signal action to SIG_DFL for a signal that is
839 * pending and whose default action is to ignore the signal
840 * (for example, SIGCHLD), shall cause the pending signal to
841 * be discarded, whether or not it is blocked"
843 * Note the silly behaviour of SIGCHLD: SIG_IGN means that the
844 * signal isn't actually ignored, but does automatic child
845 * reaping, while SIG_DFL is explicitly said by POSIX to force
846 * the signal to be ignored.
849 if (k
->sa
.sa_handler
== SIG_IGN
850 || (k
->sa
.sa_handler
== SIG_DFL
851 && (sig
== SIGCONT
||
854 /* So dequeue any that might be pending.
855 XXX: process-wide signals? */
856 if (sig
>= SIGRTMIN
&&
857 sigismember(¤t
->signal
, sig
)) {
858 struct signal_queue
*q
, **pp
;
859 pp
= ¤t
->sigqueue
;
860 q
= current
->sigqueue
;
862 if (q
->info
.si_signo
!= sig
)
866 kmem_cache_free(signal_queue_cachep
, q
);
873 sigdelset(¤t
->signal
, sig
);
874 recalc_sigpending(current
);
878 spin_unlock_irq(¤t
->sigmask_lock
);
884 do_sigaltstack (const stack_t
*uss
, stack_t
*uoss
, unsigned long sp
)
890 oss
.ss_sp
= (void *) current
->sas_ss_sp
;
891 oss
.ss_size
= current
->sas_ss_size
;
892 oss
.ss_flags
= sas_ss_flags(sp
);
901 if (verify_area(VERIFY_READ
, uss
, sizeof(*uss
))
902 || __get_user(ss_sp
, &uss
->ss_sp
)
903 || __get_user(ss_flags
, &uss
->ss_flags
)
904 || __get_user(ss_size
, &uss
->ss_size
))
908 if (on_sig_stack (sp
))
912 if (ss_flags
& ~SS_DISABLE
)
915 if (ss_flags
& SS_DISABLE
) {
920 if (ss_size
< MINSIGSTKSZ
)
924 current
->sas_ss_sp
= (unsigned long) ss_sp
;
925 current
->sas_ss_size
= ss_size
;
930 if (copy_to_user(uoss
, &oss
, sizeof(oss
)))
939 #if !defined(__alpha__)
940 /* Alpha has its own versions with special arguments. */
943 sys_sigprocmask(int how
, old_sigset_t
*set
, old_sigset_t
*oset
)
946 old_sigset_t old_set
, new_set
;
950 if (copy_from_user(&new_set
, set
, sizeof(*set
)))
952 new_set
&= ~(sigmask(SIGKILL
)|sigmask(SIGSTOP
));
954 spin_lock_irq(¤t
->sigmask_lock
);
955 old_set
= current
->blocked
.sig
[0];
963 sigaddsetmask(¤t
->blocked
, new_set
);
966 sigdelsetmask(¤t
->blocked
, new_set
);
969 current
->blocked
.sig
[0] = new_set
;
973 recalc_sigpending(current
);
974 spin_unlock_irq(¤t
->sigmask_lock
);
980 old_set
= current
->blocked
.sig
[0];
983 if (copy_to_user(oset
, &old_set
, sizeof(*oset
)))
992 sys_sigpending(old_sigset_t
*set
)
995 old_sigset_t pending
;
997 spin_lock_irq(¤t
->sigmask_lock
);
998 pending
= current
->blocked
.sig
[0] & current
->signal
.sig
[0];
999 spin_unlock_irq(¤t
->sigmask_lock
);
1002 if (!copy_to_user(set
, &pending
, sizeof(*set
)))
1009 sys_rt_sigaction(int sig
, const struct sigaction
*act
, struct sigaction
*oact
,
1012 struct k_sigaction new_sa
, old_sa
;
1015 /* XXX: Don't preclude handling different sized sigset_t's. */
1016 if (sigsetsize
!= sizeof(sigset_t
))
1020 if (copy_from_user(&new_sa
.sa
, act
, sizeof(new_sa
.sa
)))
1024 ret
= do_sigaction(sig
, act
? &new_sa
: NULL
, oact
? &old_sa
: NULL
);
1027 if (copy_to_user(oact
, &old_sa
.sa
, sizeof(old_sa
.sa
)))
1033 #endif /* __sparc__ */
1036 #if !defined(__alpha__)
1038 * For backwards compatibility. Functionality superseded by sigprocmask.
1044 return current
->blocked
.sig
[0];
1048 sys_ssetmask(int newmask
)
1052 spin_lock_irq(¤t
->sigmask_lock
);
1053 old
= current
->blocked
.sig
[0];
1055 siginitset(¤t
->blocked
, newmask
& ~(sigmask(SIGKILL
)|
1057 recalc_sigpending(current
);
1058 spin_unlock_irq(¤t
->sigmask_lock
);
1064 * For backwards compatibility. Functionality superseded by sigaction.
1066 asmlinkage
unsigned long
1067 sys_signal(int sig
, __sighandler_t handler
)
1069 struct k_sigaction new_sa
, old_sa
;
1072 new_sa
.sa
.sa_handler
= handler
;
1073 new_sa
.sa
.sa_flags
= SA_ONESHOT
| SA_NOMASK
;
1075 ret
= do_sigaction(sig
, &new_sa
, &old_sa
);
1077 return ret
? ret
: (unsigned long)old_sa
.sa
.sa_handler
;