/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 */
9 #include <linux/slab.h>
10 #include <linux/module.h>
11 #include <linux/unistd.h>
12 #include <linux/smp_lock.h>
13 #include <linux/init.h>
14 #include <linux/sched.h>
16 #include <asm/uaccess.h>
/* SLAB caches for signal bits. */
25 #define SIG_SLAB_DEBUG (SLAB_DEBUG_FREE | SLAB_RED_ZONE /* | SLAB_POISON */)
27 #define SIG_SLAB_DEBUG 0
30 static kmem_cache_t
*signal_queue_cachep
;
32 int nr_queued_signals
;
33 int max_queued_signals
= 1024;
35 void __init
signals_init(void)
38 kmem_cache_create("signal_queue",
39 sizeof(struct signal_queue
),
40 __alignof__(struct signal_queue
),
41 SIG_SLAB_DEBUG
, NULL
, NULL
);
/* Flush all pending signals for a task. */
50 flush_signals(struct task_struct
*t
)
52 struct signal_queue
*q
, *n
;
55 sigemptyset(&t
->signal
);
58 t
->sigqueue_tail
= &t
->sigqueue
;
62 kmem_cache_free(signal_queue_cachep
, q
);
/* Flush all handlers for a task. */
73 flush_signal_handlers(struct task_struct
*t
)
76 struct k_sigaction
*ka
= &t
->sig
->action
[0];
77 for (i
= _NSIG
; i
!= 0 ; i
--) {
78 if (ka
->sa
.sa_handler
!= SIG_IGN
)
79 ka
->sa
.sa_handler
= SIG_DFL
;
81 sigemptyset(&ka
->sa
.sa_mask
);
/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers must be holding current->sigmask_lock.
 */
94 dequeue_signal(sigset_t
*mask
, siginfo_t
*info
)
96 unsigned long i
, *s
, *m
, x
;
100 printk("SIG dequeue (%s:%d): %d ", current
->comm
, current
->pid
,
101 signal_pending(current
));
104 /* Find the first desired signal that is pending. */
105 s
= current
->signal
.sig
;
107 switch (_NSIG_WORDS
) {
109 for (i
= 0; i
< _NSIG_WORDS
; ++i
, ++s
, ++m
)
110 if ((x
= *s
&~ *m
) != 0) {
111 sig
= ffz(~x
) + i
*_NSIG_BPW
+ 1;
116 case 2: if ((x
= s
[0] &~ m
[0]) != 0)
118 else if ((x
= s
[1] &~ m
[1]) != 0)
125 case 1: if ((x
= *s
&~ *m
) != 0)
133 /* Collect the siginfo appropriate to this signal. */
134 if (sig
< SIGRTMIN
) {
135 /* XXX: As an extension, support queueing exactly
136 one non-rt signal if SA_SIGINFO is set, so that
137 we can get more detailed information about the
138 cause of the signal. */
139 /* Deciding not to init these couple of fields is
140 more expensive that just initializing them. */
141 info
->si_signo
= sig
;
147 struct signal_queue
*q
, **pp
;
148 pp
= ¤t
->sigqueue
;
149 q
= current
->sigqueue
;
151 /* Find the one we're interested in ... */
152 for ( ; q
; pp
= &q
->next
, q
= q
->next
)
153 if (q
->info
.si_signo
== sig
)
156 if ((*pp
= q
->next
) == NULL
)
157 current
->sigqueue_tail
= pp
;
159 kmem_cache_free(signal_queue_cachep
,q
);
162 /* then see if this signal is still pending. */
165 if (q
->info
.si_signo
== sig
) {
172 /* Ok, it wasn't in the queue. It must have
173 been sent either by a non-rt mechanism and
174 we ran out of queue space. So zero out the
176 info
->si_signo
= sig
;
185 sigdelset(¤t
->signal
, sig
);
186 recalc_sigpending(current
);
188 /* XXX: Once POSIX.1b timers are in, if si_code == SI_TIMER,
189 we need to xchg out the timer overrun values. */
191 /* XXX: Once CLONE_PID is in to join those "threads" that are
192 part of the same "process", look for signals sent to the
193 "process" as well. */
195 /* Sanity check... */
196 if (mask
== ¤t
->blocked
&& signal_pending(current
)) {
197 printk(KERN_CRIT
"SIG: sigpending lied\n");
198 current
->sigpending
= 0;
203 printk(" %d -> %d\n", signal_pending(current
), sig
);
/*
 * Determine whether a signal should be posted or not.
 *
 * Signals with SIG_IGN can be ignored, except for the
 * special case of a SIGCHLD.
 *
 * Some signals with SIG_DFL default to a non-action.
 */
217 static int ignored_signal(int sig
, struct task_struct
*t
)
219 struct signal_struct
*signals
;
220 struct k_sigaction
*ka
;
222 /* Don't ignore traced or blocked signals */
223 if ((t
->flags
& PF_PTRACED
) || sigismember(&t
->blocked
, sig
))
230 ka
= &signals
->action
[sig
-1];
231 switch ((unsigned long) ka
->sa
.sa_handler
) {
232 case (unsigned long) SIG_DFL
:
233 if (sig
== SIGCONT
||
240 case (unsigned long) SIG_IGN
:
251 send_sig_info(int sig
, struct siginfo
*info
, struct task_struct
*t
)
257 printk("SIG queue (%s:%d): %d ", t
->comm
, t
->pid
, sig
);
261 if (sig
< 0 || sig
> _NSIG
)
263 /* The somewhat baroque permissions check... */
265 if ((!info
|| ((unsigned long)info
!= 1 && SI_FROMUSER(info
)))
266 && ((sig
!= SIGCONT
) || (current
->session
!= t
->session
))
267 && (current
->euid
^ t
->suid
) && (current
->euid
^ t
->uid
)
268 && (current
->uid
^ t
->suid
) && (current
->uid
^ t
->uid
)
269 && !capable(CAP_KILL
))
272 /* The null signal is a permissions and process existance probe.
273 No signal is actually delivered. Same goes for zombies. */
278 spin_lock_irqsave(&t
->sigmask_lock
, flags
);
280 case SIGKILL
: case SIGCONT
:
281 /* Wake up the process if stopped. */
282 if (t
->state
== TASK_STOPPED
)
285 sigdelsetmask(&t
->signal
, (sigmask(SIGSTOP
)|sigmask(SIGTSTP
)|
286 sigmask(SIGTTOU
)|sigmask(SIGTTIN
)));
287 /* Inflict this corner case with recalculations, not mainline */
288 recalc_sigpending(t
);
291 case SIGSTOP
: case SIGTSTP
:
292 case SIGTTIN
: case SIGTTOU
:
293 /* If we're stopping again, cancel SIGCONT */
294 sigdelset(&t
->signal
, SIGCONT
);
295 /* Inflict this corner case with recalculations, not mainline */
296 recalc_sigpending(t
);
300 /* Optimize away the signal, if it's a signal that can be
301 handled immediately (ie non-blocked and untraced) and
302 that is ignored (either explicitly or by default). */
304 if (ignored_signal(sig
, t
))
307 if (sig
< SIGRTMIN
) {
308 /* Non-real-time signals are not queued. */
309 /* XXX: As an extension, support queueing exactly one
310 non-rt signal if SA_SIGINFO is set, so that we can
311 get more detailed information about the cause of
313 if (sigismember(&t
->signal
, sig
))
316 /* Real-time signals must be queued if sent by sigqueue, or
317 some other real-time mechanism. It is implementation
318 defined whether kill() does so. We attempt to do so, on
319 the principle of least surprise, but since kill is not
320 allowed to fail with EAGAIN when low on memory we just
321 make sure at least one signal gets delivered and don't
322 pass on the info struct. */
324 struct signal_queue
*q
= 0;
326 if (nr_queued_signals
< max_queued_signals
) {
327 q
= (struct signal_queue
*)
328 kmem_cache_alloc(signal_queue_cachep
, GFP_ATOMIC
);
334 *t
->sigqueue_tail
= q
;
335 t
->sigqueue_tail
= &q
->next
;
336 switch ((unsigned long) info
) {
338 q
->info
.si_signo
= sig
;
339 q
->info
.si_errno
= 0;
340 q
->info
.si_code
= SI_USER
;
341 q
->info
.si_pid
= current
->pid
;
342 q
->info
.si_uid
= current
->uid
;
345 q
->info
.si_signo
= sig
;
346 q
->info
.si_errno
= 0;
347 q
->info
.si_code
= SI_KERNEL
;
356 /* If this was sent by a rt mechanism, try again. */
357 if (info
->si_code
< 0) {
361 /* Otherwise, mention that the signal is pending,
362 but don't queue the info. */
366 sigaddset(&t
->signal
, sig
);
367 if (!sigismember(&t
->blocked
, sig
)) {
371 * If the task is running on a different CPU
372 * force a reschedule on the other CPU - note that
373 * the code below is a tad loose and might occasionally
374 * kick the wrong CPU if we catch the process in the
375 * process of changing - but no harm is done by that
376 * other than doing an extra (lightweight) IPI interrupt.
378 * note that we rely on the previous spin_lock to
379 * lock interrupts for us! No need to set need_resched
380 * since signal event passing goes through ->blocked.
382 spin_lock(&runqueue_lock
);
383 if (t
->has_cpu
&& t
->processor
!= smp_processor_id())
384 smp_send_reschedule(t
->processor
);
385 spin_unlock(&runqueue_lock
);
390 spin_unlock_irqrestore(&t
->sigmask_lock
, flags
);
391 if ((t
->state
& TASK_INTERRUPTIBLE
) && signal_pending(t
))
396 printk(" %d -> %d\n", signal_pending(t
), ret
);
/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 */
408 force_sig_info(int sig
, struct siginfo
*info
, struct task_struct
*t
)
410 unsigned long int flags
;
412 spin_lock_irqsave(&t
->sigmask_lock
, flags
);
413 if (t
->sig
== NULL
) {
414 spin_unlock_irqrestore(&t
->sigmask_lock
, flags
);
418 if (t
->sig
->action
[sig
-1].sa
.sa_handler
== SIG_IGN
)
419 t
->sig
->action
[sig
-1].sa
.sa_handler
= SIG_DFL
;
420 sigdelset(&t
->blocked
, sig
);
421 recalc_sigpending(t
);
422 spin_unlock_irqrestore(&t
->sigmask_lock
, flags
);
424 return send_sig_info(sig
, info
, t
);
/*
 * kill_pg() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc).
 */
433 kill_pg_info(int sig
, struct siginfo
*info
, pid_t pgrp
)
435 int retval
= -EINVAL
;
437 struct task_struct
*p
;
441 read_lock(&tasklist_lock
);
443 if (p
->pgrp
== pgrp
) {
444 int err
= send_sig_info(sig
, info
, p
);
451 read_unlock(&tasklist_lock
);
/*
 * kill_sl() sends a signal to the session leader: this is used
 * to send SIGHUP to the controlling process of a terminal when
 * the connection is lost.
 */
465 kill_sl_info(int sig
, struct siginfo
*info
, pid_t sess
)
467 int retval
= -EINVAL
;
469 struct task_struct
*p
;
473 read_lock(&tasklist_lock
);
475 if (p
->leader
&& p
->session
== sess
) {
476 int err
= send_sig_info(sig
, info
, p
);
483 read_unlock(&tasklist_lock
);
491 kill_proc_info(int sig
, struct siginfo
*info
, pid_t pid
)
494 struct task_struct
*p
;
496 read_lock(&tasklist_lock
);
497 p
= find_task_by_pid(pid
);
500 error
= send_sig_info(sig
, info
, p
);
501 read_unlock(&tasklist_lock
);
/*
 * kill_something_info() interprets pid in interesting ways just like
 * kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */
513 kill_something_info(int sig
, struct siginfo
*info
, int pid
)
516 return kill_pg_info(sig
, info
, current
->pgrp
);
517 } else if (pid
== -1) {
518 int retval
= 0, count
= 0;
519 struct task_struct
* p
;
521 read_lock(&tasklist_lock
);
523 if (p
->pid
> 1 && p
!= current
) {
524 int err
= send_sig_info(sig
, info
, p
);
530 read_unlock(&tasklist_lock
);
531 return count
? retval
: -ESRCH
;
532 } else if (pid
< 0) {
533 return kill_pg_info(sig
, info
, -pid
);
535 return kill_proc_info(sig
, info
, pid
);
/*
 * These are for backward compatibility with the rest of the kernel
 * source.
 */
544 send_sig(int sig
, struct task_struct
*p
, int priv
)
546 return send_sig_info(sig
, (void*)(long)(priv
!= 0), p
);
550 force_sig(int sig
, struct task_struct
*p
)
552 force_sig_info(sig
, (void*)1L, p
);
556 kill_pg(pid_t pgrp
, int sig
, int priv
)
558 return kill_pg_info(sig
, (void *)(long)(priv
!= 0), pgrp
);
562 kill_sl(pid_t sess
, int sig
, int priv
)
564 return kill_sl_info(sig
, (void *)(long)(priv
!= 0), sess
);
568 kill_proc(pid_t pid
, int sig
, int priv
)
570 return kill_proc_info(sig
, (void *)(long)(priv
!= 0), pid
);
/* Let a parent know about a status change of a child. */
578 notify_parent(struct task_struct
*tsk
, int sig
)
585 info
.si_pid
= tsk
->pid
;
587 /* FIXME: find out whether or not this is supposed to be c*time. */
588 info
.si_utime
= tsk
->times
.tms_utime
;
589 info
.si_stime
= tsk
->times
.tms_stime
;
591 why
= SI_KERNEL
; /* shouldn't happen */
592 switch (tsk
->state
) {
594 if (tsk
->exit_code
& 0x80)
596 else if (tsk
->exit_code
& 0x7f)
602 /* FIXME -- can we deduce CLD_TRAPPED or CLD_CONTINUED? */
607 printk(KERN_DEBUG
"eh? notify_parent with state %ld?\n",
613 send_sig_info(sig
, &info
, tsk
->p_pptr
);
614 wake_up_interruptible(&tsk
->p_pptr
->wait_chldexit
);
/* Interfaces exported for use by loadable kernel modules. */
EXPORT_SYMBOL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(force_sig_info);
EXPORT_SYMBOL(kill_pg);
EXPORT_SYMBOL(kill_pg_info);
EXPORT_SYMBOL(kill_proc);
EXPORT_SYMBOL(kill_proc_info);
EXPORT_SYMBOL(kill_sl);
EXPORT_SYMBOL(kill_sl_info);
EXPORT_SYMBOL(notify_parent);
EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
/* System call entry points. */
/*
 * We don't need to get the kernel lock - this is all local to this
 * particular thread.  (And that's good, because this is _heavily_
 * used by various programs.)
 */
644 sys_rt_sigprocmask(int how
, sigset_t
*set
, sigset_t
*oset
, size_t sigsetsize
)
647 sigset_t old_set
, new_set
;
649 /* XXX: Don't preclude handling different sized sigset_t's. */
650 if (sigsetsize
!= sizeof(sigset_t
))
655 if (copy_from_user(&new_set
, set
, sizeof(*set
)))
657 sigdelsetmask(&new_set
, sigmask(SIGKILL
)|sigmask(SIGSTOP
));
659 spin_lock_irq(¤t
->sigmask_lock
);
660 old_set
= current
->blocked
;
668 sigorsets(&new_set
, &old_set
, &new_set
);
671 signandsets(&new_set
, &old_set
, &new_set
);
677 current
->blocked
= new_set
;
678 recalc_sigpending(current
);
679 spin_unlock_irq(¤t
->sigmask_lock
);
685 spin_lock_irq(¤t
->sigmask_lock
);
686 old_set
= current
->blocked
;
687 spin_unlock_irq(¤t
->sigmask_lock
);
691 if (copy_to_user(oset
, &old_set
, sizeof(*oset
)))
700 sys_rt_sigpending(sigset_t
*set
, size_t sigsetsize
)
705 /* XXX: Don't preclude handling different sized sigset_t's. */
706 if (sigsetsize
!= sizeof(sigset_t
))
709 spin_lock_irq(¤t
->sigmask_lock
);
710 sigandsets(&pending
, ¤t
->blocked
, ¤t
->signal
);
711 spin_unlock_irq(¤t
->sigmask_lock
);
714 if (!copy_to_user(set
, &pending
, sizeof(*set
)))
721 sys_rt_sigtimedwait(const sigset_t
*uthese
, siginfo_t
*uinfo
,
722 const struct timespec
*uts
, size_t sigsetsize
)
730 /* XXX: Don't preclude handling different sized sigset_t's. */
731 if (sigsetsize
!= sizeof(sigset_t
))
734 if (copy_from_user(&these
, uthese
, sizeof(these
)))
737 /* Invert the set of allowed signals to get those we
743 if (copy_from_user(&ts
, uts
, sizeof(ts
)))
745 if (ts
.tv_nsec
>= 1000000000L || ts
.tv_nsec
< 0
750 spin_lock_irq(¤t
->sigmask_lock
);
751 sig
= dequeue_signal(&these
, &info
);
753 /* None ready -- temporarily unblock those we're interested
754 in so that we'll be awakened when they arrive. */
755 sigset_t oldblocked
= current
->blocked
;
756 sigandsets(¤t
->blocked
, ¤t
->blocked
, &these
);
757 recalc_sigpending(current
);
758 spin_unlock_irq(¤t
->sigmask_lock
);
760 timeout
= MAX_SCHEDULE_TIMEOUT
;
762 timeout
= (timespec_to_jiffies(&ts
)
763 + (ts
.tv_sec
|| ts
.tv_nsec
));
765 current
->state
= TASK_INTERRUPTIBLE
;
766 timeout
= schedule_timeout(timeout
);
768 spin_lock_irq(¤t
->sigmask_lock
);
769 sig
= dequeue_signal(&these
, &info
);
770 current
->blocked
= oldblocked
;
771 recalc_sigpending(current
);
773 spin_unlock_irq(¤t
->sigmask_lock
);
778 if (copy_to_user(uinfo
, &info
, sizeof(siginfo_t
)))
791 sys_kill(int pid
, int sig
)
797 info
.si_code
= SI_USER
;
798 info
.si_pid
= current
->pid
;
799 info
.si_uid
= current
->uid
;
801 return kill_something_info(sig
, &info
, pid
);
805 sys_rt_sigqueueinfo(int pid
, int sig
, siginfo_t
*uinfo
)
809 if (copy_from_user(&info
, uinfo
, sizeof(siginfo_t
)))
812 /* Not even root can pretend to send signals from the kernel.
813 Nor can they impersonate a kill(), which adds source info. */
814 if (info
.si_code
>= 0)
818 /* POSIX.1b doesn't mention process groups. */
819 return kill_proc_info(sig
, &info
, pid
);
823 do_sigaction(int sig
, const struct k_sigaction
*act
, struct k_sigaction
*oact
)
825 struct k_sigaction
*k
;
827 if (sig
< 1 || sig
> _NSIG
||
828 (act
&& (sig
== SIGKILL
|| sig
== SIGSTOP
)))
831 spin_lock_irq(¤t
->sigmask_lock
);
832 k
= ¤t
->sig
->action
[sig
-1];
834 if (oact
) *oact
= *k
;
838 sigdelsetmask(&k
->sa
.sa_mask
, sigmask(SIGKILL
) | sigmask(SIGSTOP
));
842 * "Setting a signal action to SIG_IGN for a signal that is
843 * pending shall cause the pending signal to be discarded,
844 * whether or not it is blocked."
846 * "Setting a signal action to SIG_DFL for a signal that is
847 * pending and whose default action is to ignore the signal
848 * (for example, SIGCHLD), shall cause the pending signal to
849 * be discarded, whether or not it is blocked"
851 * Note the silly behaviour of SIGCHLD: SIG_IGN means that the
852 * signal isn't actually ignored, but does automatic child
853 * reaping, while SIG_DFL is explicitly said by POSIX to force
854 * the signal to be ignored.
857 if (k
->sa
.sa_handler
== SIG_IGN
858 || (k
->sa
.sa_handler
== SIG_DFL
859 && (sig
== SIGCONT
||
862 /* So dequeue any that might be pending.
863 XXX: process-wide signals? */
864 if (sig
>= SIGRTMIN
&&
865 sigismember(¤t
->signal
, sig
)) {
866 struct signal_queue
*q
, **pp
;
867 pp
= ¤t
->sigqueue
;
868 q
= current
->sigqueue
;
870 if (q
->info
.si_signo
!= sig
)
874 kmem_cache_free(signal_queue_cachep
, q
);
881 sigdelset(¤t
->signal
, sig
);
882 recalc_sigpending(current
);
886 spin_unlock_irq(¤t
->sigmask_lock
);
892 do_sigaltstack (const stack_t
*uss
, stack_t
*uoss
, unsigned long sp
)
898 oss
.ss_sp
= (void *) current
->sas_ss_sp
;
899 oss
.ss_size
= current
->sas_ss_size
;
900 oss
.ss_flags
= sas_ss_flags(sp
);
909 if (verify_area(VERIFY_READ
, uss
, sizeof(*uss
))
910 || __get_user(ss_sp
, &uss
->ss_sp
)
911 || __get_user(ss_flags
, &uss
->ss_flags
)
912 || __get_user(ss_size
, &uss
->ss_size
))
916 if (on_sig_stack (sp
))
920 if (ss_flags
& ~SS_DISABLE
)
923 if (ss_flags
& SS_DISABLE
) {
928 if (ss_size
< MINSIGSTKSZ
)
932 current
->sas_ss_sp
= (unsigned long) ss_sp
;
933 current
->sas_ss_size
= ss_size
;
938 if (copy_to_user(uoss
, &oss
, sizeof(oss
)))
947 #if !defined(__alpha__)
948 /* Alpha has its own versions with special arguments. */
951 sys_sigprocmask(int how
, old_sigset_t
*set
, old_sigset_t
*oset
)
954 old_sigset_t old_set
, new_set
;
958 if (copy_from_user(&new_set
, set
, sizeof(*set
)))
960 new_set
&= ~(sigmask(SIGKILL
)|sigmask(SIGSTOP
));
962 spin_lock_irq(¤t
->sigmask_lock
);
963 old_set
= current
->blocked
.sig
[0];
971 sigaddsetmask(¤t
->blocked
, new_set
);
974 sigdelsetmask(¤t
->blocked
, new_set
);
977 current
->blocked
.sig
[0] = new_set
;
981 recalc_sigpending(current
);
982 spin_unlock_irq(¤t
->sigmask_lock
);
988 old_set
= current
->blocked
.sig
[0];
991 if (copy_to_user(oset
, &old_set
, sizeof(*oset
)))
1000 sys_sigpending(old_sigset_t
*set
)
1003 old_sigset_t pending
;
1005 spin_lock_irq(¤t
->sigmask_lock
);
1006 pending
= current
->blocked
.sig
[0] & current
->signal
.sig
[0];
1007 spin_unlock_irq(¤t
->sigmask_lock
);
1010 if (!copy_to_user(set
, &pending
, sizeof(*set
)))
1017 sys_rt_sigaction(int sig
, const struct sigaction
*act
, struct sigaction
*oact
,
1020 struct k_sigaction new_sa
, old_sa
;
1023 /* XXX: Don't preclude handling different sized sigset_t's. */
1024 if (sigsetsize
!= sizeof(sigset_t
))
1028 if (copy_from_user(&new_sa
.sa
, act
, sizeof(new_sa
.sa
)))
1032 ret
= do_sigaction(sig
, act
? &new_sa
: NULL
, oact
? &old_sa
: NULL
);
1035 if (copy_to_user(oact
, &old_sa
.sa
, sizeof(old_sa
.sa
)))
1041 #endif /* __sparc__ */
1044 #if !defined(__alpha__) && !defined(__ia64__)
/*
 * For backwards compatibility.  Functionality superseded by
 * sigprocmask.
 */
1052 return current
->blocked
.sig
[0];
1056 sys_ssetmask(int newmask
)
1060 spin_lock_irq(¤t
->sigmask_lock
);
1061 old
= current
->blocked
.sig
[0];
1063 siginitset(¤t
->blocked
, newmask
& ~(sigmask(SIGKILL
)|
1065 recalc_sigpending(current
);
1066 spin_unlock_irq(¤t
->sigmask_lock
);
/*
 * For backwards compatibility.  Functionality superseded by sigaction.
 */
1074 asmlinkage
unsigned long
1075 sys_signal(int sig
, __sighandler_t handler
)
1077 struct k_sigaction new_sa
, old_sa
;
1080 new_sa
.sa
.sa_handler
= handler
;
1081 new_sa
.sa
.sa_flags
= SA_ONESHOT
| SA_NOMASK
;
1083 ret
= do_sigaction(sig
, &new_sa
, &old_sa
);
1085 return ret
? ret
: (unsigned long)old_sa
.sa
.sa_handler
;
1087 #endif /* !alpha && !__ia64__ */