/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 */
9 #include <linux/module.h>
10 #include <linux/sched.h>
11 #include <linux/kernel.h>
12 #include <linux/signal.h>
13 #include <linux/errno.h>
14 #include <linux/wait.h>
15 #include <linux/ptrace.h>
16 #include <linux/unistd.h>
18 #include <linux/smp.h>
19 #include <linux/smp_lock.h>
20 #include <linux/slab.h>
21 #include <linux/init.h>
23 #include <asm/uaccess.h>
26 * SLAB caches for signal bits.
32 #define SIG_SLAB_DEBUG (SLAB_DEBUG_FREE | SLAB_RED_ZONE /* | SLAB_POISON */)
34 #define SIG_SLAB_DEBUG 0
37 static kmem_cache_t
*signal_queue_cachep
;
39 static int nr_queued_signals
;
40 static int max_queued_signals
= 1024;
42 void __init
signals_init(void)
45 kmem_cache_create("signal_queue",
46 sizeof(struct signal_queue
),
47 __alignof__(struct signal_queue
),
48 SIG_SLAB_DEBUG
, NULL
, NULL
);
53 * Flush all pending signals for a task.
57 flush_signals(struct task_struct
*t
)
59 struct signal_queue
*q
, *n
;
62 sigemptyset(&t
->signal
);
65 t
->sigqueue_tail
= &t
->sigqueue
;
69 kmem_cache_free(signal_queue_cachep
, q
);
76 * Flush all handlers for a task.
80 flush_signal_handlers(struct task_struct
*t
)
83 struct k_sigaction
*ka
= &t
->sig
->action
[0];
84 for (i
= _NSIG
; i
!= 0 ; i
--) {
85 if (ka
->sa
.sa_handler
!= SIG_IGN
)
86 ka
->sa
.sa_handler
= SIG_DFL
;
88 sigemptyset(&ka
->sa
.sa_mask
);
94 * Dequeue a signal and return the element to the caller, which is
95 * expected to free it.
97 * All callers of must be holding current->sigmask_lock.
101 dequeue_signal(sigset_t
*mask
, siginfo_t
*info
)
103 unsigned long i
, *s
, *m
, x
;
107 printk("SIG dequeue (%s:%d): %d ", current
->comm
, current
->pid
,
108 signal_pending(current
));
111 /* Find the first desired signal that is pending. */
112 s
= current
->signal
.sig
;
114 switch (_NSIG_WORDS
) {
116 for (i
= 0; i
< _NSIG_WORDS
; ++i
, ++s
, ++m
)
117 if ((x
= *s
&~ *m
) != 0) {
118 sig
= ffz(~x
) + i
*_NSIG_BPW
+ 1;
123 case 2: if ((x
= s
[0] &~ m
[0]) != 0)
125 else if ((x
= s
[1] &~ m
[1]) != 0)
132 case 1: if ((x
= *s
&~ *m
) != 0)
140 /* Collect the siginfo appropriate to this signal. */
141 if (sig
< SIGRTMIN
) {
142 /* XXX: As an extension, support queueing exactly
143 one non-rt signal if SA_SIGINFO is set, so that
144 we can get more detailed information about the
145 cause of the signal. */
146 /* Deciding not to init these couple of fields is
147 more expensive that just initializing them. */
148 info
->si_signo
= sig
;
154 struct signal_queue
*q
, **pp
;
155 pp
= ¤t
->sigqueue
;
156 q
= current
->sigqueue
;
158 /* Find the one we're interested in ... */
159 for ( ; q
; pp
= &q
->next
, q
= q
->next
)
160 if (q
->info
.si_signo
== sig
)
163 if ((*pp
= q
->next
) == NULL
)
164 current
->sigqueue_tail
= pp
;
166 kmem_cache_free(signal_queue_cachep
,q
);
169 /* then see if this signal is still pending. */
172 if (q
->info
.si_signo
== sig
) {
179 /* Ok, it wasn't in the queue. It must have
180 been sent either by a non-rt mechanism and
181 we ran out of queue space. So zero out the
183 info
->si_signo
= sig
;
192 sigdelset(¤t
->signal
, sig
);
193 recalc_sigpending(current
);
195 /* XXX: Once POSIX.1b timers are in, if si_code == SI_TIMER,
196 we need to xchg out the timer overrun values. */
198 /* XXX: Once CLONE_PID is in to join those "threads" that are
199 part of the same "process", look for signals sent to the
200 "process" as well. */
202 /* Sanity check... */
203 if (mask
== ¤t
->blocked
&& signal_pending(current
)) {
204 printk(KERN_CRIT
"SIG: sigpending lied\n");
205 current
->sigpending
= 0;
210 printk(" %d -> %d\n", signal_pending(current
), sig
);
217 * Determine whether a signal should be posted or not.
219 * Signals with SIG_IGN can be ignored, except for the
220 * special case of a SIGCHLD.
222 * Some signals with SIG_DFL default to a non-action.
224 static int ignored_signal(int sig
, struct task_struct
*t
)
226 struct signal_struct
*signals
;
227 struct k_sigaction
*ka
;
229 /* Don't ignore traced or blocked signals */
230 if ((t
->flags
& PF_PTRACED
) || sigismember(&t
->blocked
, sig
))
237 ka
= &signals
->action
[sig
-1];
238 switch ((unsigned long) ka
->sa
.sa_handler
) {
239 case (unsigned long) SIG_DFL
:
240 if (sig
== SIGCONT
||
247 case (unsigned long) SIG_IGN
:
258 send_sig_info(int sig
, struct siginfo
*info
, struct task_struct
*t
)
264 printk("SIG queue (%s:%d): %d ", t
->comm
, t
->pid
, sig
);
268 if (sig
< 0 || sig
> _NSIG
)
270 /* The somewhat baroque permissions check... */
272 if ((!info
|| ((unsigned long)info
!= 1 && SI_FROMUSER(info
)))
273 && ((sig
!= SIGCONT
) || (current
->session
!= t
->session
))
274 && (current
->euid
^ t
->suid
) && (current
->euid
^ t
->uid
)
275 && (current
->uid
^ t
->suid
) && (current
->uid
^ t
->uid
)
276 && !capable(CAP_SYS_ADMIN
))
279 /* The null signal is a permissions and process existance probe.
280 No signal is actually delivered. */
285 spin_lock_irqsave(&t
->sigmask_lock
, flags
);
287 case SIGKILL
: case SIGCONT
:
288 /* Wake up the process if stopped. */
289 if (t
->state
== TASK_STOPPED
)
292 sigdelsetmask(&t
->signal
, (sigmask(SIGSTOP
)|sigmask(SIGTSTP
)|
293 sigmask(SIGTTOU
)|sigmask(SIGTTIN
)));
294 /* Inflict this corner case with recalculations, not mainline */
295 recalc_sigpending(t
);
298 case SIGSTOP
: case SIGTSTP
:
299 case SIGTTIN
: case SIGTTOU
:
300 /* If we're stopping again, cancel SIGCONT */
301 sigdelset(&t
->signal
, SIGCONT
);
302 /* Inflict this corner case with recalculations, not mainline */
303 recalc_sigpending(t
);
307 /* Optimize away the signal, if it's a signal that can be
308 handled immediately (ie non-blocked and untraced) and
309 that is ignored (either explicitly or by default). */
311 if (ignored_signal(sig
, t
))
314 if (sig
< SIGRTMIN
) {
315 /* Non-real-time signals are not queued. */
316 /* XXX: As an extension, support queueing exactly one
317 non-rt signal if SA_SIGINFO is set, so that we can
318 get more detailed information about the cause of
320 if (sigismember(&t
->signal
, sig
))
323 /* Real-time signals must be queued if sent by sigqueue, or
324 some other real-time mechanism. It is implementation
325 defined whether kill() does so. We attempt to do so, on
326 the principle of least surprise, but since kill is not
327 allowed to fail with EAGAIN when low on memory we just
328 make sure at least one signal gets delivered and don't
329 pass on the info struct. */
331 struct signal_queue
*q
= 0;
333 if (nr_queued_signals
< max_queued_signals
) {
334 q
= (struct signal_queue
*)
335 kmem_cache_alloc(signal_queue_cachep
, GFP_KERNEL
);
341 *t
->sigqueue_tail
= q
;
342 t
->sigqueue_tail
= &q
->next
;
343 switch ((unsigned long) info
) {
345 q
->info
.si_signo
= sig
;
346 q
->info
.si_errno
= 0;
347 q
->info
.si_code
= SI_USER
;
348 q
->info
.si_pid
= current
->pid
;
349 q
->info
.si_uid
= current
->uid
;
352 q
->info
.si_signo
= sig
;
353 q
->info
.si_errno
= 0;
354 q
->info
.si_code
= SI_KERNEL
;
363 /* If this was sent by a rt mechanism, try again. */
364 if (info
->si_code
< 0) {
368 /* Otherwise, mention that the signal is pending,
369 but don't queue the info. */
373 sigaddset(&t
->signal
, sig
);
374 if (!sigismember(&t
->blocked
, sig
))
378 spin_unlock_irqrestore(&t
->sigmask_lock
, flags
);
379 if (t
->state
== TASK_INTERRUPTIBLE
&& signal_pending(t
))
384 printk(" %d -> %d\n", signal_pending(t
), ret
);
391 * Force a signal that the process can't ignore: if necessary
392 * we unblock the signal and change any SIG_IGN to SIG_DFL.
396 force_sig_info(int sig
, struct siginfo
*info
, struct task_struct
*t
)
398 unsigned long int flags
;
400 spin_lock_irqsave(&t
->sigmask_lock
, flags
);
401 if (t
->sig
== NULL
) {
402 spin_unlock_irqrestore(&t
->sigmask_lock
, flags
);
406 if (t
->sig
->action
[sig
-1].sa
.sa_handler
== SIG_IGN
)
407 t
->sig
->action
[sig
-1].sa
.sa_handler
= SIG_DFL
;
408 sigdelset(&t
->blocked
, sig
);
409 spin_unlock_irqrestore(&t
->sigmask_lock
, flags
);
411 return send_sig_info(sig
, info
, t
);
415 * kill_pg() sends a signal to a process group: this is what the tty
416 * control characters do (^C, ^Z etc)
420 kill_pg_info(int sig
, struct siginfo
*info
, pid_t pgrp
)
422 int retval
= -EINVAL
;
424 struct task_struct
*p
;
428 read_lock(&tasklist_lock
);
430 if (p
->pgrp
== pgrp
) {
431 int err
= send_sig_info(sig
, info
, p
);
438 read_unlock(&tasklist_lock
);
446 * kill_sl() sends a signal to the session leader: this is used
447 * to send SIGHUP to the controlling process of a terminal when
448 * the connection is lost.
452 kill_sl_info(int sig
, struct siginfo
*info
, pid_t sess
)
454 int retval
= -EINVAL
;
456 struct task_struct
*p
;
460 read_lock(&tasklist_lock
);
462 if (p
->leader
&& p
->session
== sess
) {
463 int err
= send_sig_info(sig
, info
, p
);
470 read_unlock(&tasklist_lock
);
478 kill_proc_info(int sig
, struct siginfo
*info
, pid_t pid
)
481 struct task_struct
*p
;
483 read_lock(&tasklist_lock
);
484 p
= find_task_by_pid(pid
);
487 error
= send_sig_info(sig
, info
, p
);
488 read_unlock(&tasklist_lock
);
493 * kill_something() interprets pid in interesting ways just like kill(2).
495 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
496 * is probably wrong. Should make it like BSD or SYSV.
500 kill_something_info(int sig
, struct siginfo
*info
, int pid
)
503 return kill_pg_info(sig
, info
, current
->pgrp
);
504 } else if (pid
== -1) {
505 int retval
= 0, count
= 0;
506 struct task_struct
* p
;
508 read_lock(&tasklist_lock
);
510 if (p
->pid
> 1 && p
!= current
) {
511 int err
= send_sig_info(sig
, info
, p
);
517 read_unlock(&tasklist_lock
);
518 return count
? retval
: -ESRCH
;
519 } else if (pid
< 0) {
520 return kill_pg_info(sig
, info
, -pid
);
522 return kill_proc_info(sig
, info
, pid
);
/*
 * These are for backward compatibility with the rest of the kernel source.
 */

/* Wrapper: priv != 0 marks the signal as kernel-originated. */
int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, (void*)(long)(priv != 0), p);
}
/* Force a kernel-originated signal that the target cannot ignore. */
void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, (void*)1L, p);
}
543 kill_pg(pid_t pgrp
, int sig
, int priv
)
545 return kill_pg_info(sig
, (void *)(long)(priv
!= 0), pgrp
);
549 kill_sl(pid_t sess
, int sig
, int priv
)
551 return kill_sl_info(sig
, (void *)(long)(priv
!= 0), sess
);
555 kill_proc(pid_t pid
, int sig
, int priv
)
557 return kill_proc_info(sig
, (void *)(long)(priv
!= 0), pid
);
561 * Let a parent know about a status change of a child.
565 notify_parent(struct task_struct
*tsk
, int sig
)
572 info
.si_pid
= tsk
->pid
;
574 /* FIXME: find out whether or not this is supposed to be c*time. */
575 info
.si_utime
= tsk
->times
.tms_utime
;
576 info
.si_stime
= tsk
->times
.tms_stime
;
578 why
= SI_KERNEL
; /* shouldn't happen */
579 switch (tsk
->state
) {
581 if (tsk
->exit_code
& 0x80)
583 else if (tsk
->exit_code
& 0x7f)
589 /* FIXME -- can we deduce CLD_TRAPPED or CLD_CONTINUED? */
594 printk(KERN_DEBUG
"eh? notify_parent with state %ld?\n",
600 send_sig_info(sig
, &info
, tsk
->p_pptr
);
601 wake_up_interruptible(&tsk
->p_pptr
->wait_chldexit
);
/* Symbols exported to loadable modules. */
EXPORT_SYMBOL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(force_sig_info);
EXPORT_SYMBOL(kill_pg);
EXPORT_SYMBOL(kill_pg_info);
EXPORT_SYMBOL(kill_proc);
EXPORT_SYMBOL(kill_proc_info);
EXPORT_SYMBOL(kill_sl);
EXPORT_SYMBOL(kill_sl_info);
EXPORT_SYMBOL(notify_parent);
EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
621 * System call entry points.
625 * We don't need to get the kernel lock - this is all local to this
626 * particular thread.. (and that's good, because this is _heavily_
627 * used by various programs)
631 sys_rt_sigprocmask(int how
, sigset_t
*set
, sigset_t
*oset
, size_t sigsetsize
)
634 sigset_t old_set
, new_set
;
636 /* XXX: Don't preclude handling different sized sigset_t's. */
637 if (sigsetsize
!= sizeof(sigset_t
))
642 if (copy_from_user(&new_set
, set
, sizeof(*set
)))
644 sigdelsetmask(&new_set
, sigmask(SIGKILL
)|sigmask(SIGSTOP
));
646 spin_lock_irq(¤t
->sigmask_lock
);
647 old_set
= current
->blocked
;
655 sigorsets(&new_set
, &old_set
, &new_set
);
658 signandsets(&new_set
, &old_set
, &new_set
);
664 current
->blocked
= new_set
;
665 recalc_sigpending(current
);
666 spin_unlock_irq(¤t
->sigmask_lock
);
672 spin_lock_irq(¤t
->sigmask_lock
);
673 old_set
= current
->blocked
;
674 spin_unlock_irq(¤t
->sigmask_lock
);
678 if (copy_to_user(oset
, &old_set
, sizeof(*oset
)))
687 sys_rt_sigpending(sigset_t
*set
, size_t sigsetsize
)
692 /* XXX: Don't preclude handling different sized sigset_t's. */
693 if (sigsetsize
!= sizeof(sigset_t
))
696 spin_lock_irq(¤t
->sigmask_lock
);
697 sigandsets(&pending
, ¤t
->blocked
, ¤t
->signal
);
698 spin_unlock_irq(¤t
->sigmask_lock
);
701 if (!copy_to_user(set
, &pending
, sizeof(*set
)))
708 sys_rt_sigtimedwait(const sigset_t
*uthese
, siginfo_t
*uinfo
,
709 const struct timespec
*uts
, size_t sigsetsize
)
717 /* XXX: Don't preclude handling different sized sigset_t's. */
718 if (sigsetsize
!= sizeof(sigset_t
))
721 if (copy_from_user(&these
, uthese
, sizeof(these
)))
724 /* Invert the set of allowed signals to get those we
730 if (copy_from_user(&ts
, uts
, sizeof(ts
)))
732 if (ts
.tv_nsec
>= 1000000000L || ts
.tv_nsec
< 0
737 spin_lock_irq(¤t
->sigmask_lock
);
738 sig
= dequeue_signal(&these
, &info
);
740 /* None ready -- temporarily unblock those we're interested
741 in so that we'll be awakened when they arrive. */
742 sigset_t oldblocked
= current
->blocked
;
743 sigandsets(¤t
->blocked
, ¤t
->blocked
, &these
);
744 recalc_sigpending(current
);
745 spin_unlock_irq(¤t
->sigmask_lock
);
747 timeout
= MAX_SCHEDULE_TIMEOUT
;
749 timeout
= (timespec_to_jiffies(&ts
)
750 + (ts
.tv_sec
|| ts
.tv_nsec
));
752 current
->state
= TASK_INTERRUPTIBLE
;
753 timeout
= schedule_timeout(timeout
);
755 spin_lock_irq(¤t
->sigmask_lock
);
756 sig
= dequeue_signal(&these
, &info
);
757 current
->blocked
= oldblocked
;
758 recalc_sigpending(current
);
760 spin_unlock_irq(¤t
->sigmask_lock
);
765 if (copy_to_user(uinfo
, &info
, sizeof(siginfo_t
)))
778 sys_kill(int pid
, int sig
)
784 info
.si_code
= SI_USER
;
785 info
.si_pid
= current
->pid
;
786 info
.si_uid
= current
->uid
;
788 return kill_something_info(sig
, &info
, pid
);
792 sys_rt_sigqueueinfo(int pid
, int sig
, siginfo_t
*uinfo
)
796 if (copy_from_user(&info
, uinfo
, sizeof(siginfo_t
)))
799 /* Not even root can pretend to send signals from the kernel.
800 Nor can they impersonate a kill(), which adds source info. */
801 if (info
.si_code
>= 0)
805 /* POSIX.1b doesn't mention process groups. */
806 return kill_proc_info(sig
, &info
, pid
);
810 do_sigaction(int sig
, const struct k_sigaction
*act
, struct k_sigaction
*oact
)
812 struct k_sigaction
*k
;
814 if (sig
< 1 || sig
> _NSIG
||
815 (act
&& (sig
== SIGKILL
|| sig
== SIGSTOP
)))
818 spin_lock_irq(¤t
->sigmask_lock
);
819 k
= ¤t
->sig
->action
[sig
-1];
821 if (oact
) *oact
= *k
;
825 sigdelsetmask(&k
->sa
.sa_mask
, sigmask(SIGKILL
) | sigmask(SIGSTOP
));
829 * "Setting a signal action to SIG_IGN for a signal that is
830 * pending shall cause the pending signal to be discarded,
831 * whether or not it is blocked."
833 * "Setting a signal action to SIG_DFL for a signal that is
834 * pending and whose default action is to ignore the signal
835 * (for example, SIGCHLD), shall cause the pending signal to
836 * be discarded, whether or not it is blocked"
838 * Note the silly behaviour of SIGCHLD: SIG_IGN means that the
839 * signal isn't actually ignored, but does automatic child
840 * reaping, while SIG_DFL is explicitly said by POSIX to force
841 * the signal to be ignored.
844 if (k
->sa
.sa_handler
== SIG_IGN
845 || (k
->sa
.sa_handler
== SIG_DFL
846 && (sig
== SIGCONT
||
849 /* So dequeue any that might be pending.
850 XXX: process-wide signals? */
851 if (sig
>= SIGRTMIN
&&
852 sigismember(¤t
->signal
, sig
)) {
853 struct signal_queue
*q
, **pp
;
854 pp
= ¤t
->sigqueue
;
855 q
= current
->sigqueue
;
857 if (q
->info
.si_signo
!= sig
)
861 kmem_cache_free(signal_queue_cachep
, q
);
868 sigdelset(¤t
->signal
, sig
);
869 recalc_sigpending(current
);
873 spin_unlock_irq(¤t
->sigmask_lock
);
879 do_sigaltstack (const stack_t
*uss
, stack_t
*uoss
, unsigned long sp
)
885 oss
.ss_sp
= (void *) current
->sas_ss_sp
;
886 oss
.ss_size
= current
->sas_ss_size
;
887 oss
.ss_flags
= sas_ss_flags(sp
);
896 if (verify_area(VERIFY_READ
, uss
, sizeof(*uss
))
897 || __get_user(ss_sp
, &uss
->ss_sp
)
898 || __get_user(ss_flags
, &uss
->ss_flags
)
899 || __get_user(ss_size
, &uss
->ss_size
))
903 if (on_sig_stack (sp
))
907 if (ss_flags
& ~SS_DISABLE
)
910 if (ss_flags
& SS_DISABLE
) {
915 if (ss_size
< MINSIGSTKSZ
)
919 current
->sas_ss_sp
= (unsigned long) ss_sp
;
920 current
->sas_ss_size
= ss_size
;
925 if (copy_to_user(uoss
, &oss
, sizeof(oss
)))
934 #if !defined(__alpha__)
935 /* Alpha has its own versions with special arguments. */
938 sys_sigprocmask(int how
, old_sigset_t
*set
, old_sigset_t
*oset
)
941 old_sigset_t old_set
, new_set
;
945 if (copy_from_user(&new_set
, set
, sizeof(*set
)))
947 new_set
&= ~(sigmask(SIGKILL
)|sigmask(SIGSTOP
));
949 spin_lock_irq(¤t
->sigmask_lock
);
950 old_set
= current
->blocked
.sig
[0];
958 sigaddsetmask(¤t
->blocked
, new_set
);
961 sigdelsetmask(¤t
->blocked
, new_set
);
964 current
->blocked
.sig
[0] = new_set
;
968 recalc_sigpending(current
);
969 spin_unlock_irq(¤t
->sigmask_lock
);
975 old_set
= current
->blocked
.sig
[0];
978 if (copy_to_user(oset
, &old_set
, sizeof(*oset
)))
987 sys_sigpending(old_sigset_t
*set
)
990 old_sigset_t pending
;
992 spin_lock_irq(¤t
->sigmask_lock
);
993 pending
= current
->blocked
.sig
[0] & current
->signal
.sig
[0];
994 spin_unlock_irq(¤t
->sigmask_lock
);
997 if (!copy_to_user(set
, &pending
, sizeof(*set
)))
1004 sys_rt_sigaction(int sig
, const struct sigaction
*act
, struct sigaction
*oact
,
1007 struct k_sigaction new_sa
, old_sa
;
1010 /* XXX: Don't preclude handling different sized sigset_t's. */
1011 if (sigsetsize
!= sizeof(sigset_t
))
1015 if (copy_from_user(&new_sa
.sa
, act
, sizeof(new_sa
.sa
)))
1019 ret
= do_sigaction(sig
, act
? &new_sa
: NULL
, oact
? &old_sa
: NULL
);
1022 if (copy_to_user(oact
, &old_sa
.sa
, sizeof(old_sa
.sa
)))
1028 #endif /* __sparc__ */
1031 #if !defined(__alpha__)
1033 * For backwards compatibility. Functionality superseded by sigprocmask.
1039 return current
->blocked
.sig
[0];
1043 sys_ssetmask(int newmask
)
1047 spin_lock_irq(¤t
->sigmask_lock
);
1048 old
= current
->blocked
.sig
[0];
1050 siginitset(¤t
->blocked
, newmask
& ~(sigmask(SIGKILL
)|
1052 recalc_sigpending(current
);
1053 spin_unlock_irq(¤t
->sigmask_lock
);
1059 * For backwards compatibility. Functionality superseded by sigaction.
1061 asmlinkage
unsigned long
1062 sys_signal(int sig
, __sighandler_t handler
)
1064 struct k_sigaction new_sa
, old_sa
;
1067 new_sa
.sa
.sa_handler
= handler
;
1068 new_sa
.sa
.sa_flags
= SA_ONESHOT
| SA_NOMASK
;
1070 ret
= do_sigaction(sig
, &new_sa
, &old_sa
);
1072 return ret
? ret
: (unsigned long)old_sa
.sa
.sa_handler
;