/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 */
9 #include <linux/config.h>
10 #include <linux/slab.h>
11 #include <linux/module.h>
12 #include <linux/unistd.h>
13 #include <linux/smp_lock.h>
14 #include <linux/init.h>
15 #include <linux/sched.h>
17 #include <asm/param.h>
18 #include <asm/uaccess.h>
21 * SLAB caches for signal bits.
27 #define SIG_SLAB_DEBUG (SLAB_DEBUG_FREE | SLAB_RED_ZONE /* | SLAB_POISON */)
29 #define SIG_SLAB_DEBUG 0
32 static kmem_cache_t
*signal_queue_cachep
;
34 atomic_t nr_queued_signals
;
35 int max_queued_signals
= 1024;
37 void __init
signals_init(void)
40 kmem_cache_create("signal_queue",
41 sizeof(struct signal_queue
),
42 __alignof__(struct signal_queue
),
43 SIG_SLAB_DEBUG
, NULL
, NULL
);
44 if (!signal_queue_cachep
)
45 panic("signals_init(): cannot create signal_queue SLAB cache");
50 * Flush all pending signals for a task.
54 flush_signals(struct task_struct
*t
)
56 struct signal_queue
*q
, *n
;
59 sigemptyset(&t
->signal
);
62 t
->sigqueue_tail
= &t
->sigqueue
;
66 kmem_cache_free(signal_queue_cachep
, q
);
67 atomic_dec(&nr_queued_signals
);
73 * Flush all handlers for a task.
77 flush_signal_handlers(struct task_struct
*t
)
80 struct k_sigaction
*ka
= &t
->sig
->action
[0];
81 for (i
= _NSIG
; i
!= 0 ; i
--) {
82 if (ka
->sa
.sa_handler
!= SIG_IGN
)
83 ka
->sa
.sa_handler
= SIG_DFL
;
85 sigemptyset(&ka
->sa
.sa_mask
);
91 * Dequeue a signal and return the element to the caller, which is
92 * expected to free it.
94 * All callers must be holding current->sigmask_lock.
98 dequeue_signal(sigset_t
*mask
, siginfo_t
*info
)
100 unsigned long i
, *s
, *m
, x
;
104 printk("SIG dequeue (%s:%d): %d ", current
->comm
, current
->pid
,
105 signal_pending(current
));
108 /* Find the first desired signal that is pending. */
109 s
= current
->signal
.sig
;
111 switch (_NSIG_WORDS
) {
113 for (i
= 0; i
< _NSIG_WORDS
; ++i
, ++s
, ++m
)
114 if ((x
= *s
&~ *m
) != 0) {
115 sig
= ffz(~x
) + i
*_NSIG_BPW
+ 1;
120 case 2: if ((x
= s
[0] &~ m
[0]) != 0)
122 else if ((x
= s
[1] &~ m
[1]) != 0)
129 case 1: if ((x
= *s
&~ *m
) != 0)
137 /* Collect the siginfo appropriate to this signal. */
138 struct signal_queue
*q
, **pp
;
139 pp
= ¤t
->sigqueue
;
140 q
= current
->sigqueue
;
142 /* Find the one we're interested in ... */
143 for ( ; q
; pp
= &q
->next
, q
= q
->next
)
144 if (q
->info
.si_signo
== sig
)
147 if ((*pp
= q
->next
) == NULL
)
148 current
->sigqueue_tail
= pp
;
149 copy_siginfo(info
, &q
->info
);
150 kmem_cache_free(signal_queue_cachep
,q
);
151 atomic_dec(&nr_queued_signals
);
153 /* Then see if this signal is still pending.
154 (Non rt signals may not be queued twice.)
157 for (q
= *pp
; q
; q
= q
->next
)
158 if (q
->info
.si_signo
== sig
) {
164 /* Ok, it wasn't in the queue. We must have
165 been out of queue space. So zero out the
167 info
->si_signo
= sig
;
175 sigdelset(¤t
->signal
, sig
);
176 recalc_sigpending(current
);
179 /* XXX: Once POSIX.1b timers are in, if si_code == SI_TIMER,
180 we need to xchg out the timer overrun values. */
182 /* XXX: Once CLONE_PID is in to join those "threads" that are
183 part of the same "process", look for signals sent to the
184 "process" as well. */
186 /* Sanity check... */
187 if (mask
== ¤t
->blocked
&& signal_pending(current
)) {
188 printk(KERN_CRIT
"SIG: sigpending lied\n");
189 current
->sigpending
= 0;
194 printk(" %d -> %d\n", signal_pending(current
), sig
);
201 * Remove signal sig from queue and from t->signal.
202 * Returns 1 if sig was found in t->signal.
204 * All callers must be holding t->sigmask_lock.
206 static int rm_sig_from_queue(int sig
, struct task_struct
*t
)
208 struct signal_queue
*q
, **pp
;
210 if (sig
>= SIGRTMIN
) {
211 printk(KERN_CRIT
"SIG: rm_sig_from_queue() doesn't support rt signals\n");
215 if (!sigismember(&t
->signal
, sig
))
218 sigdelset(&t
->signal
, sig
);
223 /* Find the one we're interested in ...
224 It may appear only once. */
225 for ( ; q
; pp
= &q
->next
, q
= q
->next
)
226 if (q
->info
.si_signo
== sig
)
229 if ((*pp
= q
->next
) == NULL
)
230 t
->sigqueue_tail
= pp
;
231 kmem_cache_free(signal_queue_cachep
,q
);
232 atomic_dec(&nr_queued_signals
);
238 * Determine whether a signal should be posted or not.
240 * Signals with SIG_IGN can be ignored, except for the
241 * special case of a SIGCHLD.
243 * Some signals with SIG_DFL default to a non-action.
245 static int ignored_signal(int sig
, struct task_struct
*t
)
247 struct signal_struct
*signals
;
248 struct k_sigaction
*ka
;
250 /* Don't ignore traced or blocked signals */
251 if ((t
->ptrace
& PT_PTRACED
) || sigismember(&t
->blocked
, sig
))
258 ka
= &signals
->action
[sig
-1];
259 switch ((unsigned long) ka
->sa
.sa_handler
) {
260 case (unsigned long) SIG_DFL
:
261 if (sig
== SIGCONT
||
268 case (unsigned long) SIG_IGN
:
279 send_sig_info(int sig
, struct siginfo
*info
, struct task_struct
*t
)
283 struct signal_queue
*q
= 0;
287 printk("SIG queue (%s:%d): %d ", t
->comm
, t
->pid
, sig
);
291 if (sig
< 0 || sig
> _NSIG
)
293 /* The somewhat baroque permissions check... */
295 if ((!info
|| ((unsigned long)info
!= 1 && SI_FROMUSER(info
)))
296 && ((sig
!= SIGCONT
) || (current
->session
!= t
->session
))
297 && (current
->euid
^ t
->suid
) && (current
->euid
^ t
->uid
)
298 && (current
->uid
^ t
->suid
) && (current
->uid
^ t
->uid
)
299 && !capable(CAP_KILL
))
302 /* The null signal is a permissions and process existance probe.
303 No signal is actually delivered. Same goes for zombies. */
308 spin_lock_irqsave(&t
->sigmask_lock
, flags
);
310 case SIGKILL
: case SIGCONT
:
311 /* Wake up the process if stopped. */
312 if (t
->state
== TASK_STOPPED
)
315 if (rm_sig_from_queue(SIGSTOP
, t
) || rm_sig_from_queue(SIGTSTP
, t
) ||
316 rm_sig_from_queue(SIGTTOU
, t
) || rm_sig_from_queue(SIGTTIN
, t
))
317 recalc_sigpending(t
);
320 case SIGSTOP
: case SIGTSTP
:
321 case SIGTTIN
: case SIGTTOU
:
322 /* If we're stopping again, cancel SIGCONT */
323 if (rm_sig_from_queue(SIGCONT
, t
))
324 recalc_sigpending(t
);
328 /* Optimize away the signal, if it's a signal that can be
329 handled immediately (ie non-blocked and untraced) and
330 that is ignored (either explicitly or by default). */
332 if (ignored_signal(sig
, t
))
335 /* Support queueing exactly one non-rt signal, so that we
336 can get more detailed information about the cause of
338 if (sig
< SIGRTMIN
&& sigismember(&t
->signal
, sig
))
341 /* Real-time signals must be queued if sent by sigqueue, or
342 some other real-time mechanism. It is implementation
343 defined whether kill() does so. We attempt to do so, on
344 the principle of least surprise, but since kill is not
345 allowed to fail with EAGAIN when low on memory we just
346 make sure at least one signal gets delivered and don't
347 pass on the info struct. */
349 if (atomic_read(&nr_queued_signals
) < max_queued_signals
) {
350 q
= (struct signal_queue
*)
351 kmem_cache_alloc(signal_queue_cachep
, GFP_ATOMIC
);
355 atomic_inc(&nr_queued_signals
);
357 *t
->sigqueue_tail
= q
;
358 t
->sigqueue_tail
= &q
->next
;
359 switch ((unsigned long) info
) {
361 q
->info
.si_signo
= sig
;
362 q
->info
.si_errno
= 0;
363 q
->info
.si_code
= SI_USER
;
364 q
->info
.si_pid
= current
->pid
;
365 q
->info
.si_uid
= current
->uid
;
368 q
->info
.si_signo
= sig
;
369 q
->info
.si_errno
= 0;
370 q
->info
.si_code
= SI_KERNEL
;
375 copy_siginfo(&q
->info
, info
);
378 } else if (sig
>= SIGRTMIN
&& info
&& (unsigned long)info
!= 1
379 && info
->si_code
!= SI_USER
) {
381 * Queue overflow, abort. We may abort if the signal was rt
382 * and sent by user using something other than kill().
388 sigaddset(&t
->signal
, sig
);
389 if (!sigismember(&t
->blocked
, sig
)) {
393 * If the task is running on a different CPU
394 * force a reschedule on the other CPU - note that
395 * the code below is a tad loose and might occasionally
396 * kick the wrong CPU if we catch the process in the
397 * process of changing - but no harm is done by that
398 * other than doing an extra (lightweight) IPI interrupt.
400 * note that we rely on the previous spin_lock to
401 * lock interrupts for us! No need to set need_resched
402 * since signal event passing goes through ->blocked.
404 spin_lock(&runqueue_lock
);
405 if (t
->has_cpu
&& t
->processor
!= smp_processor_id())
406 smp_send_reschedule(t
->processor
);
407 spin_unlock(&runqueue_lock
);
408 #endif /* CONFIG_SMP */
412 spin_unlock_irqrestore(&t
->sigmask_lock
, flags
);
413 if ((t
->state
& TASK_INTERRUPTIBLE
) && signal_pending(t
))
418 printk(" %d -> %d\n", signal_pending(t
), ret
);
425 * Force a signal that the process can't ignore: if necessary
426 * we unblock the signal and change any SIG_IGN to SIG_DFL.
430 force_sig_info(int sig
, struct siginfo
*info
, struct task_struct
*t
)
432 unsigned long int flags
;
434 spin_lock_irqsave(&t
->sigmask_lock
, flags
);
435 if (t
->sig
== NULL
) {
436 spin_unlock_irqrestore(&t
->sigmask_lock
, flags
);
440 if (t
->sig
->action
[sig
-1].sa
.sa_handler
== SIG_IGN
)
441 t
->sig
->action
[sig
-1].sa
.sa_handler
= SIG_DFL
;
442 sigdelset(&t
->blocked
, sig
);
443 recalc_sigpending(t
);
444 spin_unlock_irqrestore(&t
->sigmask_lock
, flags
);
446 return send_sig_info(sig
, info
, t
);
450 * kill_pg_info() sends a signal to a process group: this is what the tty
451 * control characters do (^C, ^Z etc)
455 kill_pg_info(int sig
, struct siginfo
*info
, pid_t pgrp
)
457 int retval
= -EINVAL
;
459 struct task_struct
*p
;
463 read_lock(&tasklist_lock
);
465 if (p
->pgrp
== pgrp
) {
466 int err
= send_sig_info(sig
, info
, p
);
473 read_unlock(&tasklist_lock
);
481 * kill_sl_info() sends a signal to the session leader: this is used
482 * to send SIGHUP to the controlling process of a terminal when
483 * the connection is lost.
487 kill_sl_info(int sig
, struct siginfo
*info
, pid_t sess
)
489 int retval
= -EINVAL
;
491 struct task_struct
*p
;
495 read_lock(&tasklist_lock
);
497 if (p
->leader
&& p
->session
== sess
) {
498 int err
= send_sig_info(sig
, info
, p
);
505 read_unlock(&tasklist_lock
);
513 kill_proc_info(int sig
, struct siginfo
*info
, pid_t pid
)
516 struct task_struct
*p
;
518 read_lock(&tasklist_lock
);
519 p
= find_task_by_pid(pid
);
522 error
= send_sig_info(sig
, info
, p
);
523 read_unlock(&tasklist_lock
);
528 * kill_something_info() interprets pid in interesting ways just like kill(2).
530 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
531 * is probably wrong. Should make it like BSD or SYSV.
535 kill_something_info(int sig
, struct siginfo
*info
, int pid
)
538 return kill_pg_info(sig
, info
, current
->pgrp
);
539 } else if (pid
== -1) {
540 int retval
= 0, count
= 0;
541 struct task_struct
* p
;
543 read_lock(&tasklist_lock
);
545 if (p
->pid
> 1 && p
!= current
) {
546 int err
= send_sig_info(sig
, info
, p
);
552 read_unlock(&tasklist_lock
);
553 return count
? retval
: -ESRCH
;
554 } else if (pid
< 0) {
555 return kill_pg_info(sig
, info
, -pid
);
557 return kill_proc_info(sig
, info
, pid
);
/*
 * These are for backward compatibility with the rest of the kernel source.
 * (void*)1 marks a kernel-originated signal, (void*)0 a user one.
 */

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, (void*)(long)(priv != 0), p);
}
/* Backward-compatible wrapper: force sig as a kernel-originated signal. */
void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, (void*)1L, p);
}
578 kill_pg(pid_t pgrp
, int sig
, int priv
)
580 return kill_pg_info(sig
, (void *)(long)(priv
!= 0), pgrp
);
584 kill_sl(pid_t sess
, int sig
, int priv
)
586 return kill_sl_info(sig
, (void *)(long)(priv
!= 0), sess
);
590 kill_proc(pid_t pid
, int sig
, int priv
)
592 return kill_proc_info(sig
, (void *)(long)(priv
!= 0), pid
);
596 * Let a parent know about a status change of a child.
600 notify_parent(struct task_struct
*tsk
, int sig
)
607 info
.si_pid
= tsk
->pid
;
608 info
.si_uid
= tsk
->uid
;
610 /* FIXME: find out whether or not this is supposed to be c*time. */
611 info
.si_utime
= hz_to_std(tsk
->times
.tms_utime
);
612 info
.si_stime
= hz_to_std(tsk
->times
.tms_stime
);
614 status
= tsk
->exit_code
& 0x7f;
615 why
= SI_KERNEL
; /* shouldn't happen */
616 switch (tsk
->state
) {
618 if (tsk
->exit_code
& 0x80)
620 else if (tsk
->exit_code
& 0x7f)
624 status
= tsk
->exit_code
>> 8;
628 /* FIXME -- can we deduce CLD_TRAPPED or CLD_CONTINUED? */
629 if (tsk
->ptrace
& PT_PTRACED
)
636 printk(KERN_DEBUG
"eh? notify_parent with state %ld?\n",
641 info
.si_status
= status
;
643 send_sig_info(sig
, &info
, tsk
->p_pptr
);
644 wake_up_interruptible(&tsk
->p_pptr
->wait_chldexit
);
EXPORT_SYMBOL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(force_sig_info);
EXPORT_SYMBOL(kill_pg);
EXPORT_SYMBOL(kill_pg_info);
EXPORT_SYMBOL(kill_proc);
EXPORT_SYMBOL(kill_proc_info);
EXPORT_SYMBOL(kill_sl);
EXPORT_SYMBOL(kill_sl_info);
EXPORT_SYMBOL(notify_parent);
EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
664 * System call entry points.
668 * We don't need to get the kernel lock - this is all local to this
669 * particular thread.. (and that's good, because this is _heavily_
670 * used by various programs)
674 sys_rt_sigprocmask(int how
, sigset_t
*set
, sigset_t
*oset
, size_t sigsetsize
)
677 sigset_t old_set
, new_set
;
679 /* XXX: Don't preclude handling different sized sigset_t's. */
680 if (sigsetsize
!= sizeof(sigset_t
))
685 if (copy_from_user(&new_set
, set
, sizeof(*set
)))
687 sigdelsetmask(&new_set
, sigmask(SIGKILL
)|sigmask(SIGSTOP
));
689 spin_lock_irq(¤t
->sigmask_lock
);
690 old_set
= current
->blocked
;
698 sigorsets(&new_set
, &old_set
, &new_set
);
701 signandsets(&new_set
, &old_set
, &new_set
);
707 current
->blocked
= new_set
;
708 recalc_sigpending(current
);
709 spin_unlock_irq(¤t
->sigmask_lock
);
715 spin_lock_irq(¤t
->sigmask_lock
);
716 old_set
= current
->blocked
;
717 spin_unlock_irq(¤t
->sigmask_lock
);
721 if (copy_to_user(oset
, &old_set
, sizeof(*oset
)))
730 sys_rt_sigpending(sigset_t
*set
, size_t sigsetsize
)
735 /* XXX: Don't preclude handling different sized sigset_t's. */
736 if (sigsetsize
!= sizeof(sigset_t
))
739 spin_lock_irq(¤t
->sigmask_lock
);
740 sigandsets(&pending
, ¤t
->blocked
, ¤t
->signal
);
741 spin_unlock_irq(¤t
->sigmask_lock
);
744 if (!copy_to_user(set
, &pending
, sizeof(*set
)))
751 sys_rt_sigtimedwait(const sigset_t
*uthese
, siginfo_t
*uinfo
,
752 const struct timespec
*uts
, size_t sigsetsize
)
760 /* XXX: Don't preclude handling different sized sigset_t's. */
761 if (sigsetsize
!= sizeof(sigset_t
))
764 if (copy_from_user(&these
, uthese
, sizeof(these
)))
768 * Invert the set of allowed signals to get those we
771 sigdelsetmask(&these
, sigmask(SIGKILL
)|sigmask(SIGSTOP
));
775 if (copy_from_user(&ts
, uts
, sizeof(ts
)))
777 if (ts
.tv_nsec
>= 1000000000L || ts
.tv_nsec
< 0
782 spin_lock_irq(¤t
->sigmask_lock
);
783 sig
= dequeue_signal(&these
, &info
);
785 /* None ready -- temporarily unblock those we're interested
786 in so that we'll be awakened when they arrive. */
787 sigset_t oldblocked
= current
->blocked
;
788 sigandsets(¤t
->blocked
, ¤t
->blocked
, &these
);
789 recalc_sigpending(current
);
790 spin_unlock_irq(¤t
->sigmask_lock
);
792 timeout
= MAX_SCHEDULE_TIMEOUT
;
794 timeout
= (timespec_to_jiffies(&ts
)
795 + (ts
.tv_sec
|| ts
.tv_nsec
));
797 current
->state
= TASK_INTERRUPTIBLE
;
798 timeout
= schedule_timeout(timeout
);
800 spin_lock_irq(¤t
->sigmask_lock
);
801 sig
= dequeue_signal(&these
, &info
);
802 current
->blocked
= oldblocked
;
803 recalc_sigpending(current
);
805 spin_unlock_irq(¤t
->sigmask_lock
);
810 if (copy_siginfo_to_user(uinfo
, &info
))
823 sys_kill(int pid
, int sig
)
829 info
.si_code
= SI_USER
;
830 info
.si_pid
= current
->pid
;
831 info
.si_uid
= current
->uid
;
833 return kill_something_info(sig
, &info
, pid
);
837 sys_rt_sigqueueinfo(int pid
, int sig
, siginfo_t
*uinfo
)
841 if (copy_from_user(&info
, uinfo
, sizeof(siginfo_t
)))
844 /* Not even root can pretend to send signals from the kernel.
845 Nor can they impersonate a kill(), which adds source info. */
846 if (info
.si_code
>= 0)
850 /* POSIX.1b doesn't mention process groups. */
851 return kill_proc_info(sig
, &info
, pid
);
855 do_sigaction(int sig
, const struct k_sigaction
*act
, struct k_sigaction
*oact
)
857 struct k_sigaction
*k
;
859 if (sig
< 1 || sig
> _NSIG
||
860 (act
&& (sig
== SIGKILL
|| sig
== SIGSTOP
)))
863 spin_lock_irq(¤t
->sigmask_lock
);
864 k
= ¤t
->sig
->action
[sig
-1];
866 if (oact
) *oact
= *k
;
870 sigdelsetmask(&k
->sa
.sa_mask
, sigmask(SIGKILL
) | sigmask(SIGSTOP
));
874 * "Setting a signal action to SIG_IGN for a signal that is
875 * pending shall cause the pending signal to be discarded,
876 * whether or not it is blocked."
878 * "Setting a signal action to SIG_DFL for a signal that is
879 * pending and whose default action is to ignore the signal
880 * (for example, SIGCHLD), shall cause the pending signal to
881 * be discarded, whether or not it is blocked"
883 * Note the silly behaviour of SIGCHLD: SIG_IGN means that the
884 * signal isn't actually ignored, but does automatic child
885 * reaping, while SIG_DFL is explicitly said by POSIX to force
886 * the signal to be ignored.
889 if (k
->sa
.sa_handler
== SIG_IGN
890 || (k
->sa
.sa_handler
== SIG_DFL
891 && (sig
== SIGCONT
||
894 /* So dequeue any that might be pending.
895 XXX: process-wide signals? */
896 if (sig
>= SIGRTMIN
&&
897 sigismember(¤t
->signal
, sig
)) {
898 struct signal_queue
*q
, **pp
;
899 pp
= ¤t
->sigqueue
;
900 q
= current
->sigqueue
;
902 if (q
->info
.si_signo
!= sig
)
905 if ((*pp
= q
->next
) == NULL
)
906 current
->sigqueue_tail
= pp
;
907 kmem_cache_free(signal_queue_cachep
, q
);
908 atomic_dec(&nr_queued_signals
);
914 sigdelset(¤t
->signal
, sig
);
915 recalc_sigpending(current
);
919 spin_unlock_irq(¤t
->sigmask_lock
);
925 do_sigaltstack (const stack_t
*uss
, stack_t
*uoss
, unsigned long sp
)
931 oss
.ss_sp
= (void *) current
->sas_ss_sp
;
932 oss
.ss_size
= current
->sas_ss_size
;
933 oss
.ss_flags
= sas_ss_flags(sp
);
942 if (verify_area(VERIFY_READ
, uss
, sizeof(*uss
))
943 || __get_user(ss_sp
, &uss
->ss_sp
)
944 || __get_user(ss_flags
, &uss
->ss_flags
)
945 || __get_user(ss_size
, &uss
->ss_size
))
949 if (on_sig_stack (sp
))
955 * Note - this code used to test ss_flags incorrectly
956 * old code may have been written using ss_flags==0
957 * to mean ss_flags==SS_ONSTACK (as this was the only
958 * way that worked) - this fix preserves that older
961 if (ss_flags
!= SS_DISABLE
&& ss_flags
!= SS_ONSTACK
&& ss_flags
!= 0)
964 if (ss_flags
== SS_DISABLE
) {
969 if (ss_size
< MINSIGSTKSZ
)
973 current
->sas_ss_sp
= (unsigned long) ss_sp
;
974 current
->sas_ss_size
= ss_size
;
979 if (copy_to_user(uoss
, &oss
, sizeof(oss
)))
988 #if !defined(__alpha__)
989 /* Alpha has its own versions with special arguments. */
992 sys_sigprocmask(int how
, old_sigset_t
*set
, old_sigset_t
*oset
)
995 old_sigset_t old_set
, new_set
;
999 if (copy_from_user(&new_set
, set
, sizeof(*set
)))
1001 new_set
&= ~(sigmask(SIGKILL
)|sigmask(SIGSTOP
));
1003 spin_lock_irq(¤t
->sigmask_lock
);
1004 old_set
= current
->blocked
.sig
[0];
1012 sigaddsetmask(¤t
->blocked
, new_set
);
1015 sigdelsetmask(¤t
->blocked
, new_set
);
1018 current
->blocked
.sig
[0] = new_set
;
1022 recalc_sigpending(current
);
1023 spin_unlock_irq(¤t
->sigmask_lock
);
1029 old_set
= current
->blocked
.sig
[0];
1032 if (copy_to_user(oset
, &old_set
, sizeof(*oset
)))
1041 sys_sigpending(old_sigset_t
*set
)
1044 old_sigset_t pending
;
1046 spin_lock_irq(¤t
->sigmask_lock
);
1047 pending
= current
->blocked
.sig
[0] & current
->signal
.sig
[0];
1048 spin_unlock_irq(¤t
->sigmask_lock
);
1051 if (!copy_to_user(set
, &pending
, sizeof(*set
)))
1058 sys_rt_sigaction(int sig
, const struct sigaction
*act
, struct sigaction
*oact
,
1061 struct k_sigaction new_sa
, old_sa
;
1064 /* XXX: Don't preclude handling different sized sigset_t's. */
1065 if (sigsetsize
!= sizeof(sigset_t
))
1069 if (copy_from_user(&new_sa
.sa
, act
, sizeof(new_sa
.sa
)))
1073 ret
= do_sigaction(sig
, act
? &new_sa
: NULL
, oact
? &old_sa
: NULL
);
1076 if (copy_to_user(oact
, &old_sa
.sa
, sizeof(old_sa
.sa
)))
1082 #endif /* __sparc__ */
1085 #if !defined(__alpha__) && !defined(__ia64__)
1087 * For backwards compatibility. Functionality superseded by sigprocmask.
1093 return current
->blocked
.sig
[0];
1097 sys_ssetmask(int newmask
)
1101 spin_lock_irq(¤t
->sigmask_lock
);
1102 old
= current
->blocked
.sig
[0];
1104 siginitset(¤t
->blocked
, newmask
& ~(sigmask(SIGKILL
)|
1106 recalc_sigpending(current
);
1107 spin_unlock_irq(¤t
->sigmask_lock
);
1111 #endif /* !defined(__alpha__) */
1113 #if !defined(__alpha__) && !defined(__ia64__) && !defined(__mips__)
1115 * For backwards compatibility. Functionality superseded by sigaction.
1117 asmlinkage
unsigned long
1118 sys_signal(int sig
, __sighandler_t handler
)
1120 struct k_sigaction new_sa
, old_sa
;
1123 new_sa
.sa
.sa_handler
= handler
;
1124 new_sa
.sa
.sa_flags
= SA_ONESHOT
| SA_NOMASK
;
1126 ret
= do_sigaction(sig
, &new_sa
, &old_sa
);
1128 return ret
? ret
: (unsigned long)old_sa
.sa
.sa_handler
;
1130 #endif /* !alpha && !__ia64__ && !defined(__mips__) */