2 * linux/kernel/signal.c
4 * Copyright (C) 1991, 1992 Linus Torvalds
6 * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson
9 #include <linux/slab.h>
10 #include <linux/module.h>
11 #include <linux/unistd.h>
12 #include <linux/smp_lock.h>
13 #include <linux/init.h>
15 #include <asm/uaccess.h>
18 * SLAB caches for signal bits.
24 #define SIG_SLAB_DEBUG (SLAB_DEBUG_FREE | SLAB_RED_ZONE /* | SLAB_POISON */)
26 #define SIG_SLAB_DEBUG 0
/*
 * File-scope state: the slab cache from which queued signal entries
 * (struct signal_queue) are allocated, a count of entries currently
 * queued, and the system-wide cap on queued signals.
 * NOTE(review): this extraction is line-mangled; code tokens below are
 * preserved verbatim.
 */
29 static kmem_cache_t
*signal_queue_cachep
;
/* Number of signal_queue entries currently allocated. */
31 int nr_queued_signals
;
/* Upper bound on queued signals; 1024 is the compiled-in default. */
32 int max_queued_signals
= 1024;
34 void __init
signals_init(void)
37 kmem_cache_create("signal_queue",
38 sizeof(struct signal_queue
),
39 __alignof__(struct signal_queue
),
40 SIG_SLAB_DEBUG
, NULL
, NULL
);
45 * Flush all pending signals for a task.
/*
 * Discard every pending signal for task t: clear the pending bitmask,
 * reset the siginfo queue, and free each queued entry back to the slab
 * cache.  NOTE(review): extraction is mangled and missing lines (the
 * loop that walks the queue is not visible); tokens kept verbatim.
 */
49 flush_signals(struct task_struct
*t
)
51 struct signal_queue
*q
, *n
;
/* Clear the bitmask of pending signals. */
54 sigemptyset(&t
->signal
)
;
/* Point the tail back at the (now empty) queue head. */
57 t
->sigqueue_tail
= &t
->sigqueue
;
/* Return a queued entry to the slab cache. */
61 kmem_cache_free(signal_queue_cachep
, q
);
68 * Flush all handlers for a task.
/*
 * Walk all _NSIG signal actions of task t, resetting every handler
 * that is not SIG_IGN back to SIG_DFL and clearing each action's
 * sa_mask.  NOTE(review): presumably used at exec() time -- confirm
 * against callers; extraction is mangled, tokens kept verbatim.
 */
72 flush_signal_handlers(struct task_struct
*t
)
75 struct k_sigaction
*ka
= &t
->sig
->action
[0];
76 for (i
= _NSIG
; i
!= 0 ; i
--) {
/* SIG_IGN dispositions are deliberately left untouched. */
77 if (ka
->sa
.sa_handler
!= SIG_IGN
)
78 ka
->sa
.sa_handler
= SIG_DFL
;
80 sigemptyset(&ka
->sa
.sa_mask
);
86 * Dequeue a signal and return the element to the caller, which is
87 * expected to free it.
89 * All callers must be holding current->sigmask_lock.
/*
 * Pick the lowest-numbered pending signal not excluded by *mask,
 * remove it from the pending state, and fill *info for the caller.
 * For signals >= SIGRTMIN the queued siginfo entry is unlinked and
 * freed; for classic signals a minimal siginfo is synthesized.
 * NOTE(review): extraction is line-mangled and missing lines, and
 * "&current" appears mis-encoded as "¤t"; tokens kept verbatim.
 */
93 dequeue_signal(sigset_t
*mask
, siginfo_t
*info
)
95 unsigned long i
, *s
, *m
, x
;
99 printk("SIG dequeue (%s:%d): %d ", current
->comm
, current
->pid
,
100 signal_pending(current
));
103 /* Find the first desired signal that is pending. */
104 s
= current
->signal
.sig
;
/* Unrolled per-word scan of the pending &~ blocked bits. */
106 switch (_NSIG_WORDS
) {
108 for (i
= 0; i
< _NSIG_WORDS
; ++i
, ++s
, ++m
)
109 if ((x
= *s
&~ *m
) != 0) {
/* ffz(~x) yields the index of the lowest set bit in x. */
110 sig
= ffz(~x
) + i
*_NSIG_BPW
+ 1;
115 case 2: if ((x
= s
[0] &~ m
[0]) != 0)
117 else if ((x
= s
[1] &~ m
[1]) != 0)
124 case 1: if ((x
= *s
&~ *m
) != 0)
132 /* Collect the siginfo appropriate to this signal. */
133 if (sig
< SIGRTMIN
) {
134 /* XXX: As an extension, support queueing exactly
135 one non-rt signal if SA_SIGINFO is set, so that
136 we can get more detailed information about the
137 cause of the signal. */
138 /* Deciding not to init these couple of fields is
139 more expensive than just initializing them. */
140 info
->si_signo
= sig
;
/* Real-time signal: unlink the matching queue entry. */
146 struct signal_queue
*q
, **pp
;
147 pp
= ¤t
->sigqueue
;
148 q
= current
->sigqueue
;
150 /* Find the one we're interested in ... */
151 for ( ; q
; pp
= &q
->next
, q
= q
->next
)
152 if (q
->info
.si_signo
== sig
)
/* If we removed the last entry, repair the tail pointer. */
155 if ((*pp
= q
->next
) == NULL
)
156 current
->sigqueue_tail
= pp
;
158 kmem_cache_free(signal_queue_cachep
,q
);
161 /* then see if this signal is still pending. */
164 if (q
->info
.si_signo
== sig
) {
171 /* Ok, it wasn't in the queue. It must have
172 been sent either by a non-rt mechanism and
173 we ran out of queue space. So zero out the
175 info
->si_signo
= sig
;
/* No more instances pending: clear the bit and recompute. */
184 sigdelset(¤t
->signal
, sig
);
185 recalc_sigpending(current
);
187 /* XXX: Once POSIX.1b timers are in, if si_code == SI_TIMER,
188 we need to xchg out the timer overrun values. */
190 /* XXX: Once CLONE_PID is in to join those "threads" that are
191 part of the same "process", look for signals sent to the
192 "process" as well. */
194 /* Sanity check... */
195 if (mask
== ¤t
->blocked
&& signal_pending(current
)) {
196 printk(KERN_CRIT
"SIG: sigpending lied\n");
197 current
->sigpending
= 0;
202 printk(" %d -> %d\n", signal_pending(current
), sig
);
209 * Determine whether a signal should be posted or not.
211 * Signals with SIG_IGN can be ignored, except for the
212 * special case of a SIGCHLD.
214 * Some signals with SIG_DFL default to a non-action.
/*
 * Decide whether delivering sig to t would be a no-op: traced or
 * blocked signals are never ignored; otherwise the current handler
 * (SIG_DFL / SIG_IGN / custom) determines the answer.
 * NOTE(review): extraction is mangled and missing lines (return
 * statements and the SIG_DFL default-action cases are not visible);
 * tokens kept verbatim.
 */
216 static int ignored_signal(int sig
, struct task_struct
*t
)
218 struct signal_struct
*signals
;
219 struct k_sigaction
*ka
;
221 /* Don't ignore traced or blocked signals */
222 if ((t
->flags
& PF_PTRACED
) || sigismember(&t
->blocked
, sig
))
229 ka
= &signals
->action
[sig
-1];
230 switch ((unsigned long) ka
->sa
.sa_handler
) {
231 case (unsigned long) SIG_DFL
:
232 if (sig
== SIGCONT
||
239 case (unsigned long) SIG_IGN
:
/*
 * Core signal delivery: validate sig, apply the permission check,
 * handle the SIGKILL/SIGCONT and stop-signal interactions, queue the
 * siginfo for real-time signals (or just set the pending bit for
 * classic ones), and kick the target task/CPU if it can now run the
 * signal.  NOTE(review): extraction is line-mangled with many lines
 * missing, and "&current" appears mis-encoded as "¤t"; tokens kept
 * verbatim.
 */
250 send_sig_info(int sig
, struct siginfo
*info
, struct task_struct
*t
)
256 printk("SIG queue (%s:%d): %d ", t
->comm
, t
->pid
, sig
);
260 if (sig
< 0 || sig
> _NSIG
)
262 /* The somewhat baroque permissions check... */
264 if ((!info
|| ((unsigned long)info
!= 1 && SI_FROMUSER(info
)))
265 && ((sig
!= SIGCONT
) || (current
->session
!= t
->session
))
266 && (current
->euid
^ t
->suid
) && (current
->euid
^ t
->uid
)
267 && (current
->uid
^ t
->suid
) && (current
->uid
^ t
->uid
)
268 && !capable(CAP_KILL
))
271 /* The null signal is a permissions and process existence probe.
272 No signal is actually delivered. Same goes for zombies. */
277 spin_lock_irqsave(&t
->sigmask_lock
, flags
);
279 case SIGKILL
: case SIGCONT
:
280 /* Wake up the process if stopped. */
281 if (t
->state
== TASK_STOPPED
)
/* Continuing cancels any pending stop signals. */
284 sigdelsetmask(&t
->signal
, (sigmask(SIGSTOP
)|sigmask(SIGTSTP
)|
285 sigmask(SIGTTOU
)|sigmask(SIGTTIN
)));
286 /* Inflict this corner case with recalculations, not mainline */
287 recalc_sigpending(t
);
290 case SIGSTOP
: case SIGTSTP
:
291 case SIGTTIN
: case SIGTTOU
:
292 /* If we're stopping again, cancel SIGCONT */
293 sigdelset(&t
->signal
, SIGCONT
);
294 /* Inflict this corner case with recalculations, not mainline */
295 recalc_sigpending(t
);
299 /* Optimize away the signal, if it's a signal that can be
300 handled immediately (ie non-blocked and untraced) and
301 that is ignored (either explicitly or by default). */
303 if (ignored_signal(sig
, t
))
306 if (sig
< SIGRTMIN
) {
307 /* Non-real-time signals are not queued. */
308 /* XXX: As an extension, support queueing exactly one
309 non-rt signal if SA_SIGINFO is set, so that we can
310 get more detailed information about the cause of
312 if (sigismember(&t
->signal
, sig
))
315 /* Real-time signals must be queued if sent by sigqueue, or
316 some other real-time mechanism. It is implementation
317 defined whether kill() does so. We attempt to do so, on
318 the principle of least surprise, but since kill is not
319 allowed to fail with EAGAIN when low on memory we just
320 make sure at least one signal gets delivered and don't
321 pass on the info struct. */
323 struct signal_queue
*q
= 0;
/* Only allocate while under the system-wide queue limit. */
325 if (nr_queued_signals
< max_queued_signals
) {
326 q
= (struct signal_queue
*)
327 kmem_cache_alloc(signal_queue_cachep
, GFP_KERNEL
);
/* Append the new entry at the queue tail. */
333 *t
->sigqueue_tail
= q
;
334 t
->sigqueue_tail
= &q
->next
;
/* info==0 means user kill(); info==1 means kernel-internal. */
335 switch ((unsigned long) info
) {
337 q
->info
.si_signo
= sig
;
338 q
->info
.si_errno
= 0;
339 q
->info
.si_code
= SI_USER
;
340 q
->info
.si_pid
= current
->pid
;
341 q
->info
.si_uid
= current
->uid
;
344 q
->info
.si_signo
= sig
;
345 q
->info
.si_errno
= 0;
346 q
->info
.si_code
= SI_KERNEL
;
355 /* If this was sent by a rt mechanism, try again. */
356 if (info
->si_code
< 0) {
360 /* Otherwise, mention that the signal is pending,
361 but don't queue the info. */
365 sigaddset(&t
->signal
, sig
);
366 if (!sigismember(&t
->blocked
, sig
)) {
370 * If the task is running on a different CPU
371 * force a reschedule on the other CPU - note that
372 * the code below is a tad loose and might occasionally
373 * kick the wrong CPU if we catch the process in the
374 * process of changing - but no harm is done by that
375 * other than doing an extra (lightweight) IPI interrupt.
377 * note that we rely on the previous spin_lock to
378 * lock interrupts for us! No need to set need_resched
379 * since signal event passing goes through ->blocked.
381 spin_lock(&runqueue_lock
);
382 if (t
->has_cpu
&& t
->processor
!= smp_processor_id())
383 smp_send_reschedule(t
->processor
);
384 spin_unlock(&runqueue_lock
);
389 spin_unlock_irqrestore(&t
->sigmask_lock
, flags
);
/* Wake an interruptible sleeper that now has a pending signal. */
390 if ((t
->state
& TASK_INTERRUPTIBLE
) && signal_pending(t
))
395 printk(" %d -> %d\n", signal_pending(t
), ret
);
402 * Force a signal that the process can't ignore: if necessary
403 * we unblock the signal and change any SIG_IGN to SIG_DFL.
/*
 * Deliver a signal the target cannot avoid: under t->sigmask_lock,
 * flip a SIG_IGN disposition back to SIG_DFL and remove sig from the
 * blocked set, then hand off to send_sig_info().  Bails out early if
 * the task has no signal table (t->sig == NULL).
 * NOTE(review): extraction is mangled; tokens kept verbatim.
 */
407 force_sig_info(int sig
, struct siginfo
*info
, struct task_struct
*t
)
409 unsigned long int flags
;
411 spin_lock_irqsave(&t
->sigmask_lock
, flags
);
412 if (t
->sig
== NULL
) {
413 spin_unlock_irqrestore(&t
->sigmask_lock
, flags
);
/* An ignored disposition would defeat the "force"; reset it. */
417 if (t
->sig
->action
[sig
-1].sa
.sa_handler
== SIG_IGN
)
418 t
->sig
->action
[sig
-1].sa
.sa_handler
= SIG_DFL
;
419 sigdelset(&t
->blocked
, sig
);
420 spin_unlock_irqrestore(&t
->sigmask_lock
, flags
);
422 return send_sig_info(sig
, info
, t
);
426 * kill_pg() sends a signal to a process group: this is what the tty
427 * control characters do (^C, ^Z etc)
/*
 * Send sig to every member of process group pgrp (tty ^C/^Z paths).
 * Walks the task list under tasklist_lock; retval starts at -EINVAL.
 * NOTE(review): extraction is mangled and missing lines (the
 * for_each_task loop header and error accounting are not visible);
 * tokens kept verbatim.
 */
431 kill_pg_info(int sig
, struct siginfo
*info
, pid_t pgrp
)
433 int retval
= -EINVAL
;
435 struct task_struct
*p
;
439 read_lock(&tasklist_lock
);
441 if (p
->pgrp
== pgrp
) {
442 int err
= send_sig_info(sig
, info
, p
);
449 read_unlock(&tasklist_lock
);
457 * kill_sl() sends a signal to the session leader: this is used
458 * to send SIGHUP to the controlling process of a terminal when
459 * the connection is lost.
/*
 * Send sig to the session leader of session sess (e.g. SIGHUP when a
 * controlling terminal is lost).  Walks the task list under
 * tasklist_lock; retval starts at -EINVAL.
 * NOTE(review): extraction is mangled and missing lines; tokens kept
 * verbatim.
 */
463 kill_sl_info(int sig
, struct siginfo
*info
, pid_t sess
)
465 int retval
= -EINVAL
;
467 struct task_struct
*p
;
471 read_lock(&tasklist_lock
);
/* Only the leader of the matching session receives the signal. */
473 if (p
->leader
&& p
->session
== sess
) {
474 int err
= send_sig_info(sig
, info
, p
);
481 read_unlock(&tasklist_lock
);
/*
 * Send sig to the single process with the given pid.  Looks the task
 * up under tasklist_lock; the not-found path (-ESRCH, presumably) is
 * not visible in this extraction.  Tokens kept verbatim.
 */
489 kill_proc_info(int sig
, struct siginfo
*info
, pid_t pid
)
492 struct task_struct
*p
;
494 read_lock(&tasklist_lock
);
495 p
= find_task_by_pid(pid
);
498 error
= send_sig_info(sig
, info
, p
);
499 read_unlock(&tasklist_lock
);
504 * kill_something() interprets pid in interesting ways just like kill(2).
506 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
507 * is probably wrong. Should make it like BSD or SYSV.
/*
 * kill(2)-style pid dispatch: pid == 0 -> own process group;
 * pid == -1 -> every process except init and self; pid < 0 -> process
 * group -pid; otherwise a single pid.  NOTE(review): extraction is
 * mangled and missing lines (the pid == 0 branch header and loop
 * headers are not visible); tokens kept verbatim.
 */
511 kill_something_info(int sig
, struct siginfo
*info
, int pid
)
514 return kill_pg_info(sig
, info
, current
->pgrp
);
515 } else if (pid
== -1) {
516 int retval
= 0, count
= 0;
517 struct task_struct
* p
;
519 read_lock(&tasklist_lock
);
/* Skip init (pid 1) and the sender itself. */
521 if (p
->pid
> 1 && p
!= current
) {
522 int err
= send_sig_info(sig
, info
, p
);
528 read_unlock(&tasklist_lock
);
/* No eligible target at all means ESRCH. */
529 return count
? retval
: -ESRCH
;
530 } else if (pid
< 0) {
531 return kill_pg_info(sig
, info
, -pid
);
533 return kill_proc_info(sig
, info
, pid
);
538 * These are for backward compatibility with the rest of the kernel source.
/*
 * Backward-compatible wrapper around send_sig_info().  A non-zero
 * `priv` marks the signal as kernel-generated; it is encoded as the
 * magic info-pointer value 1 (0 means an ordinary user kill()).
 */
int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, (void*)(long)(priv != 0), p);
}
/*
 * Backward-compatible wrapper around force_sig_info(); always passes
 * the kernel-internal marker (info pointer value 1).
 */
void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, (void*)1L, p);
}
554 kill_pg(pid_t pgrp
, int sig
, int priv
)
556 return kill_pg_info(sig
, (void *)(long)(priv
!= 0), pgrp
);
560 kill_sl(pid_t sess
, int sig
, int priv
)
562 return kill_sl_info(sig
, (void *)(long)(priv
!= 0), sess
);
566 kill_proc(pid_t pid
, int sig
, int priv
)
568 return kill_proc_info(sig
, (void *)(long)(priv
!= 0), pid
);
572 * Let a parent know about a status change of a child.
/*
 * Tell tsk's parent about a status change: build a siginfo (pid,
 * accumulated user/system times, and a CLD_* reason derived from
 * tsk->state / exit_code), send it to the parent, and wake any
 * wait()-er.  NOTE(review): extraction is mangled and missing lines
 * (the siginfo declaration, si_signo setup, and the case labels are
 * not visible); tokens kept verbatim.
 */
576 notify_parent(struct task_struct
*tsk
, int sig
)
583 info
.si_pid
= tsk
->pid
;
585 /* FIXME: find out whether or not this is supposed to be c*time. */
586 info
.si_utime
= tsk
->times
.tms_utime
;
587 info
.si_stime
= tsk
->times
.tms_stime
;
589 why
= SI_KERNEL
; /* shouldn't happen */
590 switch (tsk
->state
) {
/* 0x80 in exit_code flags a core dump; low 7 bits a killing signal. */
592 if (tsk
->exit_code
& 0x80)
594 else if (tsk
->exit_code
& 0x7f)
600 /* FIXME -- can we deduce CLD_TRAPPED or CLD_CONTINUED? */
605 printk(KERN_DEBUG
"eh? notify_parent with state %ld?\n",
611 send_sig_info(sig
, &info
, tsk
->p_pptr
);
612 wake_up_interruptible(&tsk
->p_pptr
->wait_chldexit
);
/* Symbols exported for use by loadable kernel modules. */
615 EXPORT_SYMBOL(dequeue_signal
);
616 EXPORT_SYMBOL(flush_signals
);
617 EXPORT_SYMBOL(force_sig
);
618 EXPORT_SYMBOL(force_sig_info
);
619 EXPORT_SYMBOL(kill_pg
);
620 EXPORT_SYMBOL(kill_pg_info
);
621 EXPORT_SYMBOL(kill_proc
);
622 EXPORT_SYMBOL(kill_proc_info
);
623 EXPORT_SYMBOL(kill_sl
);
624 EXPORT_SYMBOL(kill_sl_info
);
625 EXPORT_SYMBOL(notify_parent
);
626 EXPORT_SYMBOL(recalc_sigpending
);
627 EXPORT_SYMBOL(send_sig
);
628 EXPORT_SYMBOL(send_sig_info
);
632 * System call entry points.
636 * We don't need to get the kernel lock - this is all local to this
637 * particular thread.. (and that's good, because this is _heavily_
638 * used by various programs)
/*
 * rt_sigprocmask(2): copy the new set from user space, strip SIGKILL
 * and SIGSTOP (which can never be blocked), apply SIG_BLOCK /
 * SIG_UNBLOCK / SIG_SETMASK under sigmask_lock, and optionally return
 * the old mask.  NOTE(review): extraction is mangled, missing lines
 * (switch on `how`, error returns), and "&current" is mis-encoded as
 * "¤t"; tokens kept verbatim.
 */
642 sys_rt_sigprocmask(int how
, sigset_t
*set
, sigset_t
*oset
, size_t sigsetsize
)
645 sigset_t old_set
, new_set
;
647 /* XXX: Don't preclude handling different sized sigset_t's. */
648 if (sigsetsize
!= sizeof(sigset_t
))
653 if (copy_from_user(&new_set
, set
, sizeof(*set
)))
/* SIGKILL and SIGSTOP may never be blocked. */
655 sigdelsetmask(&new_set
, sigmask(SIGKILL
)|sigmask(SIGSTOP
));
657 spin_lock_irq(¤t
->sigmask_lock
);
658 old_set
= current
->blocked
;
/* SIG_BLOCK: union of old and new. */
666 sigorsets(&new_set
, &old_set
, &new_set
);
/* SIG_UNBLOCK: old minus new. */
669 signandsets(&new_set
, &old_set
, &new_set
);
675 current
->blocked
= new_set
;
676 recalc_sigpending(current
);
677 spin_unlock_irq(¤t
->sigmask_lock
);
/* No new set supplied: just snapshot the current mask. */
683 spin_lock_irq(¤t
->sigmask_lock
);
684 old_set
= current
->blocked
;
685 spin_unlock_irq(¤t
->sigmask_lock
);
689 if (copy_to_user(oset
, &old_set
, sizeof(*oset
)))
/*
 * rt_sigpending(2): report the signals that are both pending and
 * blocked, copied out to user space.  NOTE(review): extraction is
 * mangled and "&current" is mis-encoded as "¤t"; tokens kept
 * verbatim.
 */
698 sys_rt_sigpending(sigset_t
*set
, size_t sigsetsize
)
703 /* XXX: Don't preclude handling different sized sigset_t's. */
704 if (sigsetsize
!= sizeof(sigset_t
))
707 spin_lock_irq(¤t
->sigmask_lock
);
708 sigandsets(&pending
, ¤t
->blocked
, ¤t
->signal
);
709 spin_unlock_irq(¤t
->sigmask_lock
);
712 if (!copy_to_user(set
, &pending
, sizeof(*set
)))
/*
 * rt_sigtimedwait(2): wait (optionally with a timespec-bounded
 * timeout) for one of the signals in *uthese, dequeue it, and copy
 * its siginfo out to the caller.  While sleeping, the wanted signals
 * are temporarily removed from current->blocked so their arrival
 * wakes us; the original mask is restored afterwards.
 * NOTE(review): extraction is mangled, missing lines, and "&current"
 * is mis-encoded as "¤t"; tokens kept verbatim.
 */
719 sys_rt_sigtimedwait(const sigset_t
*uthese
, siginfo_t
*uinfo
,
720 const struct timespec
*uts
, size_t sigsetsize
)
728 /* XXX: Don't preclude handling different sized sigset_t's. */
729 if (sigsetsize
!= sizeof(sigset_t
))
732 if (copy_from_user(&these
, uthese
, sizeof(these
)))
735 /* Invert the set of allowed signals to get those we
/* Validate the user-supplied timeout before using it. */
741 if (copy_from_user(&ts
, uts
, sizeof(ts
)))
743 if (ts
.tv_nsec
>= 1000000000L || ts
.tv_nsec
< 0
748 spin_lock_irq(¤t
->sigmask_lock
);
749 sig
= dequeue_signal(&these
, &info
);
751 /* None ready -- temporarily unblock those we're interested
752 in so that we'll be awakened when they arrive. */
753 sigset_t oldblocked
= current
->blocked
;
754 sigandsets(¤t
->blocked
, ¤t
->blocked
, &these
);
755 recalc_sigpending(current
);
756 spin_unlock_irq(¤t
->sigmask_lock
);
758 timeout
= MAX_SCHEDULE_TIMEOUT
;
/* Round up so a non-zero timespec never becomes a zero wait. */
760 timeout
= (timespec_to_jiffies(&ts
)
761 + (ts
.tv_sec
|| ts
.tv_nsec
));
763 current
->state
= TASK_INTERRUPTIBLE
;
764 timeout
= schedule_timeout(timeout
);
/* Re-check for the signal after waking, then restore the mask. */
766 spin_lock_irq(¤t
->sigmask_lock
);
767 sig
= dequeue_signal(&these
, &info
);
768 current
->blocked
= oldblocked
;
769 recalc_sigpending(current
);
771 spin_unlock_irq(¤t
->sigmask_lock
);
776 if (copy_to_user(uinfo
, &info
, sizeof(siginfo_t
)))
789 sys_kill(int pid
, int sig
)
795 info
.si_code
= SI_USER
;
796 info
.si_pid
= current
->pid
;
797 info
.si_uid
= current
->uid
;
799 return kill_something_info(sig
, &info
, pid
);
/*
 * rt_sigqueueinfo(2): copy a caller-supplied siginfo from user space,
 * reject si_code values that would impersonate the kernel, and
 * deliver to the single process pid.  NOTE(review): extraction is
 * mangled and missing lines (the siginfo declaration and the -EPERM
 * return are not visible); tokens kept verbatim.
 */
803 sys_rt_sigqueueinfo(int pid
, int sig
, siginfo_t
*uinfo
)
807 if (copy_from_user(&info
, uinfo
, sizeof(siginfo_t
)))
810 /* Not even root can pretend to send signals from the kernel.
811 Nor can they impersonate a kill(), which adds source info. */
812 if (info
.si_code
>= 0)
816 /* POSIX.1b doesn't mention process groups. */
817 return kill_proc_info(sig
, &info
, pid
);
/*
 * Install a new disposition for sig (and/or fetch the old one) under
 * current->sigmask_lock.  SIGKILL/SIGSTOP may not be caught.  When
 * the new handler makes the signal effectively ignored, any pending
 * instance -- including queued rt entries -- is discarded per POSIX.
 * NOTE(review): extraction is mangled, missing lines, and "&current"
 * is mis-encoded as "¤t"; tokens kept verbatim.
 */
821 do_sigaction(int sig
, const struct k_sigaction
*act
, struct k_sigaction
*oact
)
823 struct k_sigaction
*k
;
825 if (sig
< 1 || sig
> _NSIG
||
826 (act
&& (sig
== SIGKILL
|| sig
== SIGSTOP
)))
829 spin_lock_irq(¤t
->sigmask_lock
);
830 k
= ¤t
->sig
->action
[sig
-1];
832 if (oact
) *oact
= *k
;
/* A handler may never mask SIGKILL or SIGSTOP. */
836 sigdelsetmask(&k
->sa
.sa_mask
, sigmask(SIGKILL
) | sigmask(SIGSTOP
));
840 * "Setting a signal action to SIG_IGN for a signal that is
841 * pending shall cause the pending signal to be discarded,
842 * whether or not it is blocked."
844 * "Setting a signal action to SIG_DFL for a signal that is
845 * pending and whose default action is to ignore the signal
846 * (for example, SIGCHLD), shall cause the pending signal to
847 * be discarded, whether or not it is blocked"
849 * Note the silly behaviour of SIGCHLD: SIG_IGN means that the
850 * signal isn't actually ignored, but does automatic child
851 * reaping, while SIG_DFL is explicitly said by POSIX to force
852 * the signal to be ignored.
855 if (k
->sa
.sa_handler
== SIG_IGN
856 || (k
->sa
.sa_handler
== SIG_DFL
857 && (sig
== SIGCONT
||
860 /* So dequeue any that might be pending.
861 XXX: process-wide signals? */
862 if (sig
>= SIGRTMIN
&&
863 sigismember(¤t
->signal
, sig
)) {
864 struct signal_queue
*q
, **pp
;
865 pp
= ¤t
->sigqueue
;
866 q
= current
->sigqueue
;
/* Unlink and free every queued entry carrying this signal. */
868 if (q
->info
.si_signo
!= sig
)
872 kmem_cache_free(signal_queue_cachep
, q
);
879 sigdelset(¤t
->signal
, sig
);
880 recalc_sigpending(current
);
884 spin_unlock_irq(¤t
->sigmask_lock
);
/*
 * sigaltstack(2) worker: snapshot the current alternate stack into
 * oss, then, if uss was supplied, validate and install the new one.
 * Changing the stack while executing on it, unknown ss_flags, or an
 * undersized stack are rejected.  NOTE(review): extraction is mangled
 * and missing lines (error returns and the SS_DISABLE branch body are
 * not visible); tokens kept verbatim.
 */
890 do_sigaltstack (const stack_t
*uss
, stack_t
*uoss
, unsigned long sp
)
/* Record the existing settings before any modification. */
896 oss
.ss_sp
= (void *) current
->sas_ss_sp
;
897 oss
.ss_size
= current
->sas_ss_size
;
898 oss
.ss_flags
= sas_ss_flags(sp
);
907 if (verify_area(VERIFY_READ
, uss
, sizeof(*uss
))
908 || __get_user(ss_sp
, &uss
->ss_sp
)
909 || __get_user(ss_flags
, &uss
->ss_flags
)
910 || __get_user(ss_size
, &uss
->ss_size
))
/* Cannot change the alternate stack while running on it. */
914 if (on_sig_stack (sp
))
918 if (ss_flags
& ~SS_DISABLE
)
921 if (ss_flags
& SS_DISABLE
) {
926 if (ss_size
< MINSIGSTKSZ
)
930 current
->sas_ss_sp
= (unsigned long) ss_sp
;
931 current
->sas_ss_size
= ss_size
;
936 if (copy_to_user(uoss
, &oss
, sizeof(oss
)))
945 #if !defined(__alpha__)
946 /* Alpha has its own versions with special arguments. */
/*
 * Old-style sigprocmask(2) operating on a single-word old_sigset_t
 * (sig[0] only).  Same semantics as the rt variant: strip
 * SIGKILL/SIGSTOP, apply block/unblock/setmask under sigmask_lock,
 * optionally return the previous word.  NOTE(review): extraction is
 * mangled, missing lines, and "&current" is mis-encoded as "¤t";
 * tokens kept verbatim.
 */
949 sys_sigprocmask(int how
, old_sigset_t
*set
, old_sigset_t
*oset
)
952 old_sigset_t old_set
, new_set
;
956 if (copy_from_user(&new_set
, set
, sizeof(*set
)))
/* SIGKILL and SIGSTOP may never be blocked. */
958 new_set
&= ~(sigmask(SIGKILL
)|sigmask(SIGSTOP
));
960 spin_lock_irq(¤t
->sigmask_lock
);
961 old_set
= current
->blocked
.sig
[0];
/* SIG_BLOCK adds bits ... */
969 sigaddsetmask(¤t
->blocked
, new_set
);
/* ... SIG_UNBLOCK removes them ... */
972 sigdelsetmask(¤t
->blocked
, new_set
);
/* ... SIG_SETMASK replaces the whole first word. */
975 current
->blocked
.sig
[0] = new_set
;
979 recalc_sigpending(current
);
980 spin_unlock_irq(¤t
->sigmask_lock
);
986 old_set
= current
->blocked
.sig
[0];
989 if (copy_to_user(oset
, &old_set
, sizeof(*oset
)))
/*
 * Old-style sigpending(2): report the first word of signals that are
 * both blocked and pending.  NOTE(review): extraction is mangled and
 * "&current" is mis-encoded as "¤t"; tokens kept verbatim.
 */
998 sys_sigpending(old_sigset_t
*set
)
1001 old_sigset_t pending
;
1003 spin_lock_irq(¤t
->sigmask_lock
);
1004 pending
= current
->blocked
.sig
[0] & current
->signal
.sig
[0];
1005 spin_unlock_irq(¤t
->sigmask_lock
);
1008 if (!copy_to_user(set
, &pending
, sizeof(*set
)))
/*
 * rt_sigaction(2): copy the user's struct sigaction in, delegate to
 * do_sigaction(), and copy the previous action back out if requested.
 * NOTE(review): extraction is mangled and missing lines (the
 * sigsetsize parameter declaration and error returns are not
 * visible); tokens kept verbatim.
 */
1015 sys_rt_sigaction(int sig
, const struct sigaction
*act
, struct sigaction
*oact
,
1018 struct k_sigaction new_sa
, old_sa
;
1021 /* XXX: Don't preclude handling different sized sigset_t's. */
1022 if (sigsetsize
!= sizeof(sigset_t
))
1026 if (copy_from_user(&new_sa
.sa
, act
, sizeof(new_sa
.sa
)))
1030 ret
= do_sigaction(sig
, act
? &new_sa
: NULL
, oact
? &old_sa
: NULL
);
1033 if (copy_to_user(oact
, &old_sa
.sa
, sizeof(old_sa
.sa
)))
1039 #endif /* __sparc__ */
1042 #if !defined(__alpha__)
1044 * For backwards compatibility. Functionality superseded by sigprocmask.
/* NOTE(review): presumably the body of sys_sgetmask() -- its
   signature line is missing from this extraction; confirm.  Returns
   the first word of the blocked-signal mask. */
1050 return current
->blocked
.sig
[0];
/*
 * Old ssetmask(2): replace the blocked-signal word with newmask
 * (minus the never-blockable SIGKILL, and presumably SIGSTOP on the
 * missing continuation line), returning the previous word.
 * NOTE(review): extraction is mangled, missing lines, and "&current"
 * is mis-encoded as "¤t"; tokens kept verbatim.
 */
1054 sys_ssetmask(int newmask
)
1058 spin_lock_irq(¤t
->sigmask_lock
);
1059 old
= current
->blocked
.sig
[0];
1061 siginitset(¤t
->blocked
, newmask
& ~(sigmask(SIGKILL
)|
1063 recalc_sigpending(current
);
1064 spin_unlock_irq(¤t
->sigmask_lock
);
1070 * For backwards compatibility. Functionality superseded by sigaction.
/*
 * Old signal(2): install `handler` with one-shot, no-mask semantics
 * (SA_ONESHOT | SA_NOMASK) via do_sigaction(), returning the previous
 * handler on success or the error code on failure.  NOTE(review):
 * extraction is mangled and the function's closing lines are not
 * visible; tokens kept verbatim.
 */
1072 asmlinkage
unsigned long
1073 sys_signal(int sig
, __sighandler_t handler
)
1075 struct k_sigaction new_sa
, old_sa
;
1078 new_sa
.sa
.sa_handler
= handler
;
1079 new_sa
.sa
.sa_flags
= SA_ONESHOT
| SA_NOMASK
;
1081 ret
= do_sigaction(sig
, &new_sa
, &old_sa
);
1083 return ret
? ret
: (unsigned long)old_sa
.sa
.sa_handler
;