2 * linux/kernel/signal.c
4 * Copyright (C) 1991, 1992 Linus Torvalds
6 * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson
9 #define __KERNEL_SYSCALLS__
11 #include <linux/config.h>
12 #include <linux/slab.h>
13 #include <linux/module.h>
14 #include <linux/unistd.h>
15 #include <linux/smp_lock.h>
16 #include <linux/init.h>
17 #include <linux/sched.h>
19 #include <linux/tty.h>
20 #include <linux/binfmts.h>
21 #include <asm/param.h>
22 #include <asm/uaccess.h>
23 #include <asm/siginfo.h>
26 * SLAB caches for signal bits.
29 static kmem_cache_t
*sigqueue_cachep
;
31 atomic_t nr_queued_signals
;
32 int max_queued_signals
= 1024;
34 /*********************************************************
36 POSIX thread group signal behavior:
38 ----------------------------------------------------------
39 | | userspace | kernel |
40 ----------------------------------------------------------
41 | SIGHUP | load-balance | kill-all |
42 | SIGINT | load-balance | kill-all |
43 | SIGQUIT | load-balance | kill-all+core |
44 | SIGILL | specific | kill-all+core |
45 | SIGTRAP | specific | kill-all+core |
46 | SIGABRT/SIGIOT | specific | kill-all+core |
47 | SIGBUS | specific | kill-all+core |
48 | SIGFPE | specific | kill-all+core |
49 | SIGKILL | n/a | kill-all |
50 | SIGUSR1 | load-balance | kill-all |
51 | SIGSEGV | specific | kill-all+core |
52 | SIGUSR2 | load-balance | kill-all |
53 | SIGPIPE | specific | kill-all |
54 | SIGALRM | load-balance | kill-all |
55 | SIGTERM | load-balance | kill-all |
56 | SIGCHLD | load-balance | ignore |
57 | SIGCONT | specific | continue-all |
58 | SIGSTOP | n/a | stop-all |
59 | SIGTSTP | load-balance | stop-all |
60 | SIGTTIN | load-balance | stop-all |
61 | SIGTTOU | load-balance | stop-all |
62 | SIGURG | load-balance | ignore |
63 | SIGXCPU | specific | kill-all+core |
64 | SIGXFSZ | specific | kill-all+core |
65 | SIGVTALRM | load-balance | kill-all |
66 | SIGPROF | specific | kill-all |
67 | SIGPOLL/SIGIO | load-balance | kill-all |
68 | SIGSYS/SIGUNUSED | specific | kill-all+core |
69 | SIGSTKFLT | specific | kill-all |
70 | SIGWINCH | load-balance | ignore |
71 | SIGPWR | load-balance | kill-all |
72 | SIGRTMIN-SIGRTMAX | load-balance | kill-all |
73 ----------------------------------------------------------
75 non-POSIX signal thread group behavior:
77 ----------------------------------------------------------
78 | | userspace | kernel |
79 ----------------------------------------------------------
80 | SIGEMT | specific | kill-all+core |
81 ----------------------------------------------------------
84 /* Some systems do not have a SIGSTKFLT and the kernel never
85 * generates such signals anyways.
88 #define M_SIGSTKFLT M(SIGSTKFLT)
94 #define M_SIGEMT M(SIGEMT)
99 #define M(sig) (1UL << (sig))
101 #define SIG_USER_SPECIFIC_MASK (\
102 M(SIGILL) | M(SIGTRAP) | M(SIGABRT) | M(SIGBUS) | \
103 M(SIGFPE) | M(SIGSEGV) | M(SIGPIPE) | M(SIGXFSZ) | \
104 M(SIGPROF) | M(SIGSYS) | M_SIGSTKFLT | M(SIGCONT) | \
107 #define SIG_USER_LOAD_BALANCE_MASK (\
108 M(SIGHUP) | M(SIGINT) | M(SIGQUIT) | M(SIGUSR1) | \
109 M(SIGUSR2) | M(SIGALRM) | M(SIGTERM) | M(SIGCHLD) | \
110 M(SIGURG) | M(SIGVTALRM) | M(SIGPOLL) | M(SIGWINCH) | \
111 M(SIGPWR) | M(SIGTSTP) | M(SIGTTIN) | M(SIGTTOU) )
113 #define SIG_KERNEL_SPECIFIC_MASK (\
114 M(SIGCHLD) | M(SIGURG) | M(SIGWINCH) )
116 #define SIG_KERNEL_BROADCAST_MASK (\
117 M(SIGHUP) | M(SIGINT) | M(SIGQUIT) | M(SIGILL) | \
118 M(SIGTRAP) | M(SIGABRT) | M(SIGBUS) | M(SIGFPE) | \
119 M(SIGKILL) | M(SIGUSR1) | M(SIGSEGV) | M(SIGUSR2) | \
120 M(SIGPIPE) | M(SIGALRM) | M(SIGTERM) | M(SIGXCPU) | \
121 M(SIGXFSZ) | M(SIGVTALRM) | M(SIGPROF) | M(SIGPOLL) | \
122 M(SIGSYS) | M_SIGSTKFLT | M(SIGPWR) | M(SIGCONT) | \
123 M(SIGSTOP) | M(SIGTSTP) | M(SIGTTIN) | M(SIGTTOU) | \
126 #define SIG_KERNEL_ONLY_MASK (\
127 M(SIGKILL) | M(SIGSTOP) )
129 #define SIG_KERNEL_COREDUMP_MASK (\
130 M(SIGQUIT) | M(SIGILL) | M(SIGTRAP) | M(SIGABRT) | \
131 M(SIGFPE) | M(SIGSEGV) | M(SIGBUS) | M(SIGSYS) | \
132 M(SIGXCPU) | M(SIGXFSZ) | M_SIGEMT )
134 #define T(sig, mask) \
135 ((1UL << (sig)) & mask)
137 #define sig_user_specific(sig) \
138 (((sig) < SIGRTMIN) && T(sig, SIG_USER_SPECIFIC_MASK))
139 #define sig_user_load_balance(sig) \
140 (((sig) >= SIGRTMIN) || T(sig, SIG_USER_LOAD_BALANCE_MASK))
141 #define sig_kernel_specific(sig) \
142 (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_SPECIFIC_MASK))
143 #define sig_kernel_broadcast(sig) \
144 (((sig) >= SIGRTMIN) || T(sig, SIG_KERNEL_BROADCAST_MASK))
145 #define sig_kernel_only(sig) \
146 (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_ONLY_MASK))
147 #define sig_kernel_coredump(sig) \
148 (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_COREDUMP_MASK))
150 #define sig_user_defined(t, sig) \
151 (((t)->sig->action[(sig)-1].sa.sa_handler != SIG_DFL) && \
152 ((t)->sig->action[(sig)-1].sa.sa_handler != SIG_IGN))
154 #define sig_ignored(t, sig) \
155 (((sig) != SIGCHLD) && \
156 ((t)->sig->action[(sig)-1].sa.sa_handler == SIG_IGN))
159 __send_sig_info(int sig
, struct siginfo
*info
, struct task_struct
*p
);
161 #define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
163 void recalc_sigpending_tsk(struct task_struct
*t
)
165 if (PENDING(&t
->pending
, &t
->blocked
) ||
166 PENDING(&t
->sig
->shared_pending
, &t
->blocked
))
167 set_tsk_thread_flag(t
, TIF_SIGPENDING
);
169 clear_tsk_thread_flag(t
, TIF_SIGPENDING
);
172 void recalc_sigpending(void)
174 if (PENDING(¤t
->pending
, ¤t
->blocked
) ||
175 PENDING(¤t
->sig
->shared_pending
, ¤t
->blocked
))
176 set_thread_flag(TIF_SIGPENDING
);
178 clear_thread_flag(TIF_SIGPENDING
);
181 /* Given the mask, find the first available signal that should be serviced. */
184 next_signal(struct sigpending
*pending
, sigset_t
*mask
)
186 unsigned long i
, *s
, *m
, x
;
189 s
= pending
->signal
.sig
;
191 switch (_NSIG_WORDS
) {
193 for (i
= 0; i
< _NSIG_WORDS
; ++i
, ++s
, ++m
)
194 if ((x
= *s
&~ *m
) != 0) {
195 sig
= ffz(~x
) + i
*_NSIG_BPW
+ 1;
200 case 2: if ((x
= s
[0] &~ m
[0]) != 0)
202 else if ((x
= s
[1] &~ m
[1]) != 0)
209 case 1: if ((x
= *s
&~ *m
) != 0)
217 static void flush_sigqueue(struct sigpending
*queue
)
219 struct sigqueue
*q
, *n
;
221 sigemptyset(&queue
->signal
);
224 queue
->tail
= &queue
->head
;
228 kmem_cache_free(sigqueue_cachep
, q
);
229 atomic_dec(&nr_queued_signals
);
235 * Flush all pending signals for a task.
239 flush_signals(struct task_struct
*t
)
241 clear_tsk_thread_flag(t
,TIF_SIGPENDING
);
242 flush_sigqueue(&t
->pending
);
246 * This function expects the tasklist_lock write-locked.
248 void __exit_sighand(struct task_struct
*tsk
)
250 struct signal_struct
* sig
= tsk
->sig
;
254 if (!atomic_read(&sig
->count
))
256 spin_lock(&sig
->siglock
);
257 if (atomic_dec_and_test(&sig
->count
)) {
258 if (tsk
== sig
->curr_target
)
259 sig
->curr_target
= next_thread(tsk
);
261 spin_unlock(&sig
->siglock
);
262 flush_sigqueue(&sig
->shared_pending
);
263 kmem_cache_free(sigact_cachep
, sig
);
266 * If there is any task waiting for the group exit
269 if (sig
->group_exit_task
&& atomic_read(&sig
->count
) <= 2) {
270 wake_up_process(sig
->group_exit_task
);
271 sig
->group_exit_task
= NULL
;
273 if (tsk
== sig
->curr_target
)
274 sig
->curr_target
= next_thread(tsk
);
276 spin_unlock(&sig
->siglock
);
278 clear_tsk_thread_flag(tsk
,TIF_SIGPENDING
);
279 flush_sigqueue(&tsk
->pending
);
282 void exit_sighand(struct task_struct
*tsk
)
284 write_lock_irq(&tasklist_lock
);
286 write_unlock_irq(&tasklist_lock
);
290 * Flush all handlers for a task.
294 flush_signal_handlers(struct task_struct
*t
)
297 struct k_sigaction
*ka
= &t
->sig
->action
[0];
298 for (i
= _NSIG
; i
!= 0 ; i
--) {
299 if (ka
->sa
.sa_handler
!= SIG_IGN
)
300 ka
->sa
.sa_handler
= SIG_DFL
;
302 sigemptyset(&ka
->sa
.sa_mask
);
308 * sig_exit - cause the current task to exit due to a signal.
312 sig_exit(int sig
, int exit_code
, struct siginfo
*info
)
314 sigaddset(¤t
->pending
.signal
, sig
);
316 current
->flags
|= PF_SIGNALED
;
318 if (current
->sig
->group_exit
)
319 exit_code
= current
->sig
->group_exit_code
;
325 /* Notify the system that a driver wants to block all signals for this
326 * process, and wants to be notified if any signals at all were to be
327 * sent/acted upon. If the notifier routine returns non-zero, then the
328 * signal will be acted upon after all. If the notifier routine returns 0,
329 * then then signal will be blocked. Only one block per process is
330 * allowed. priv is a pointer to private data that the notifier routine
331 * can use to determine if the signal should be blocked or not. */
334 block_all_signals(int (*notifier
)(void *priv
), void *priv
, sigset_t
*mask
)
338 spin_lock_irqsave(¤t
->sig
->siglock
, flags
);
339 current
->notifier_mask
= mask
;
340 current
->notifier_data
= priv
;
341 current
->notifier
= notifier
;
342 spin_unlock_irqrestore(¤t
->sig
->siglock
, flags
);
345 /* Notify the system that blocking has ended. */
348 unblock_all_signals(void)
352 spin_lock_irqsave(¤t
->sig
->siglock
, flags
);
353 current
->notifier
= NULL
;
354 current
->notifier_data
= NULL
;
356 spin_unlock_irqrestore(¤t
->sig
->siglock
, flags
);
359 static inline int collect_signal(int sig
, struct sigpending
*list
, siginfo_t
*info
)
361 if (sigismember(&list
->signal
, sig
)) {
362 /* Collect the siginfo appropriate to this signal. */
363 struct sigqueue
*q
, **pp
;
365 while ((q
= *pp
) != NULL
) {
366 if (q
->info
.si_signo
== sig
)
371 /* Ok, it wasn't in the queue. This must be
372 a fast-pathed signal or we must have been
373 out of queue space. So zero out the info.
375 sigdelset(&list
->signal
, sig
);
376 info
->si_signo
= sig
;
384 if ((*pp
= q
->next
) == NULL
)
387 /* Copy the sigqueue information and free the queue entry */
388 copy_siginfo(info
, &q
->info
);
389 kmem_cache_free(sigqueue_cachep
,q
);
390 atomic_dec(&nr_queued_signals
);
392 /* Non-RT signals can exist multiple times.. */
393 if (sig
>= SIGRTMIN
) {
394 while ((q
= *pp
) != NULL
) {
395 if (q
->info
.si_signo
== sig
)
401 sigdelset(&list
->signal
, sig
);
408 static int __dequeue_signal(struct sigpending
*pending
, sigset_t
*mask
,
413 sig
= next_signal(pending
, mask
);
415 if (current
->notifier
) {
416 if (sigismember(current
->notifier_mask
, sig
)) {
417 if (!(current
->notifier
)(current
->notifier_data
)) {
418 clear_thread_flag(TIF_SIGPENDING
);
424 if (!collect_signal(sig
, pending
, info
))
427 /* XXX: Once POSIX.1b timers are in, if si_code == SI_TIMER,
428 we need to xchg out the timer overrun values. */
436 * Dequeue a signal and return the element to the caller, which is
437 * expected to free it.
439 * All callers have to hold the siglock.
441 int dequeue_signal(sigset_t
*mask
, siginfo_t
*info
)
444 * Here we handle shared pending signals. To implement the full
445 * semantics we need to unqueue and resend them. It will likely
446 * get into our own pending queue.
448 if (current
->sig
->shared_pending
.head
) {
449 int signr
= __dequeue_signal(¤t
->sig
->shared_pending
, mask
, info
);
451 __send_sig_info(signr
, info
, current
);
453 return __dequeue_signal(¤t
->pending
, mask
, info
);
456 static int rm_from_queue(int sig
, struct sigpending
*s
)
458 struct sigqueue
*q
, **pp
;
460 if (!sigismember(&s
->signal
, sig
))
463 sigdelset(&s
->signal
, sig
);
467 while ((q
= *pp
) != NULL
) {
468 if (q
->info
.si_signo
== sig
) {
469 if ((*pp
= q
->next
) == NULL
)
471 kmem_cache_free(sigqueue_cachep
,q
);
472 atomic_dec(&nr_queued_signals
);
481 * Remove signal sig from t->pending.
482 * Returns 1 if sig was found.
484 * All callers must be holding the siglock.
486 static int rm_sig_from_queue(int sig
, struct task_struct
*t
)
488 return rm_from_queue(sig
, &t
->pending
);
492 * Bad permissions for sending the signal
494 static inline int bad_signal(int sig
, struct siginfo
*info
, struct task_struct
*t
)
496 return (!info
|| ((unsigned long)info
!= 1 &&
497 (unsigned long)info
!= 2 && SI_FROMUSER(info
)))
498 && ((sig
!= SIGCONT
) || (current
->session
!= t
->session
))
499 && (current
->euid
^ t
->suid
) && (current
->euid
^ t
->uid
)
500 && (current
->uid
^ t
->suid
) && (current
->uid
^ t
->uid
)
501 && !capable(CAP_KILL
);
506 * < 0 : global action (kill - spread to all non-blocked threads)
510 static int signal_type(int sig
, struct signal_struct
*signals
)
512 unsigned long handler
;
517 handler
= (unsigned long) signals
->action
[sig
-1].sa
.sa_handler
;
521 /* "Ignore" handler.. Illogical, but that has an implicit handler for SIGCHLD */
523 return sig
== SIGCHLD
;
525 /* Default handler. Normally lethal, but.. */
529 case SIGCONT
: case SIGWINCH
:
530 case SIGCHLD
: case SIGURG
:
533 /* Implicit behaviour */
534 case SIGTSTP
: case SIGTTIN
: case SIGTTOU
:
537 /* Implicit actions (kill or do special stuff) */
545 * Determine whether a signal should be posted or not.
547 * Signals with SIG_IGN can be ignored, except for the
548 * special case of a SIGCHLD.
550 * Some signals with SIG_DFL default to a non-action.
552 static int ignored_signal(int sig
, struct task_struct
*t
)
554 /* Don't ignore traced or blocked signals */
555 if ((t
->ptrace
& PT_PTRACED
) || sigismember(&t
->blocked
, sig
))
558 return signal_type(sig
, t
->sig
) == 0;
562 * Handle TASK_STOPPED cases etc implicit behaviour
563 * of certain magical signals.
565 * SIGKILL gets spread out to every thread.
567 static void handle_stop_signal(int sig
, struct task_struct
*t
)
570 case SIGKILL
: case SIGCONT
:
571 /* Wake up the process if stopped. */
572 if (t
->state
== TASK_STOPPED
)
575 rm_sig_from_queue(SIGSTOP
, t
);
576 rm_sig_from_queue(SIGTSTP
, t
);
577 rm_sig_from_queue(SIGTTOU
, t
);
578 rm_sig_from_queue(SIGTTIN
, t
);
581 case SIGSTOP
: case SIGTSTP
:
582 case SIGTTIN
: case SIGTTOU
:
583 /* If we're stopping again, cancel SIGCONT */
584 rm_sig_from_queue(SIGCONT
, t
);
589 static int send_signal(int sig
, struct siginfo
*info
, struct sigpending
*signals
)
591 struct sigqueue
* q
= NULL
;
594 * fast-pathed signals for kernel-internal things like SIGSTOP
597 if ((unsigned long)info
== 2)
600 /* Real-time signals must be queued if sent by sigqueue, or
601 some other real-time mechanism. It is implementation
602 defined whether kill() does so. We attempt to do so, on
603 the principle of least surprise, but since kill is not
604 allowed to fail with EAGAIN when low on memory we just
605 make sure at least one signal gets delivered and don't
606 pass on the info struct. */
608 if (atomic_read(&nr_queued_signals
) < max_queued_signals
)
609 q
= kmem_cache_alloc(sigqueue_cachep
, GFP_ATOMIC
);
612 atomic_inc(&nr_queued_signals
);
615 signals
->tail
= &q
->next
;
616 switch ((unsigned long) info
) {
618 q
->info
.si_signo
= sig
;
619 q
->info
.si_errno
= 0;
620 q
->info
.si_code
= SI_USER
;
621 q
->info
.si_pid
= current
->pid
;
622 q
->info
.si_uid
= current
->uid
;
625 q
->info
.si_signo
= sig
;
626 q
->info
.si_errno
= 0;
627 q
->info
.si_code
= SI_KERNEL
;
632 copy_siginfo(&q
->info
, info
);
635 } else if (sig
>= SIGRTMIN
&& info
&& (unsigned long)info
!= 1
636 && info
->si_code
!= SI_USER
)
638 * Queue overflow, abort. We may abort if the signal was rt
639 * and sent by user using something other than kill().
644 sigaddset(&signals
->signal
, sig
);
649 * Tell a process that it has a new active signal..
651 * NOTE! we rely on the previous spin_lock to
652 * lock interrupts for us! We can only be called with
653 * "siglock" held, and the local interrupt must
654 * have been disabled when that got acquired!
656 * No need to set need_resched since signal event passing
657 * goes through ->blocked
659 inline void signal_wake_up(struct task_struct
*t
)
661 set_tsk_thread_flag(t
,TIF_SIGPENDING
);
664 * If the task is running on a different CPU
665 * force a reschedule on the other CPU to make
666 * it notice the new signal quickly.
668 * The code below is a tad loose and might occasionally
669 * kick the wrong CPU if we catch the process in the
670 * process of changing - but no harm is done by that
671 * other than doing an extra (lightweight) IPI interrupt.
673 if (t
->state
== TASK_RUNNING
)
675 if (t
->state
& TASK_INTERRUPTIBLE
) {
681 static int deliver_signal(int sig
, struct siginfo
*info
, struct task_struct
*t
)
683 int retval
= send_signal(sig
, info
, &t
->pending
);
685 if (!retval
&& !sigismember(&t
->blocked
, sig
))
692 specific_send_sig_info(int sig
, struct siginfo
*info
, struct task_struct
*t
, int shared
)
696 if (!irqs_disabled())
699 if (!spin_is_locked(&t
->sig
->siglock
))
703 if (sig
< 0 || sig
> _NSIG
)
705 /* The somewhat baroque permissions check... */
707 if (bad_signal(sig
, info
, t
))
709 ret
= security_ops
->task_kill(t
, info
, sig
);
713 /* The null signal is a permissions and process existence probe.
714 No signal is actually delivered. Same goes for zombies. */
719 handle_stop_signal(sig
, t
);
721 /* Optimize away the signal, if it's a signal that can be
722 handled immediately (ie non-blocked and untraced) and
723 that is ignored (either explicitly or by default). */
725 if (ignored_signal(sig
, t
))
728 #define LEGACY_QUEUE(sigptr, sig) \
729 (((sig) < SIGRTMIN) && sigismember(&(sigptr)->signal, (sig)))
732 /* Support queueing exactly one non-rt signal, so that we
733 can get more detailed information about the cause of
735 if (LEGACY_QUEUE(&t
->pending
, sig
))
738 ret
= deliver_signal(sig
, info
, t
);
740 if (LEGACY_QUEUE(&t
->sig
->shared_pending
, sig
))
742 ret
= send_signal(sig
, info
, &t
->sig
->shared_pending
);
749 * Force a signal that the process can't ignore: if necessary
750 * we unblock the signal and change any SIG_IGN to SIG_DFL.
754 force_sig_info(int sig
, struct siginfo
*info
, struct task_struct
*t
)
756 unsigned long int flags
;
759 spin_lock_irqsave(&t
->sig
->siglock
, flags
);
760 if (t
->sig
->action
[sig
-1].sa
.sa_handler
== SIG_IGN
)
761 t
->sig
->action
[sig
-1].sa
.sa_handler
= SIG_DFL
;
762 sigdelset(&t
->blocked
, sig
);
763 recalc_sigpending_tsk(t
);
764 ret
= __send_sig_info(sig
, info
, t
);
765 spin_unlock_irqrestore(&t
->sig
->siglock
, flags
);
771 specific_force_sig_info(int sig
, struct task_struct
*t
)
776 if (t
->sig
->action
[sig
-1].sa
.sa_handler
== SIG_IGN
)
777 t
->sig
->action
[sig
-1].sa
.sa_handler
= SIG_DFL
;
778 sigdelset(&t
->blocked
, sig
);
779 recalc_sigpending_tsk(t
);
781 return specific_send_sig_info(sig
, (void *)2, t
, 0);
784 #define can_take_signal(p, sig) \
785 (((unsigned long) p->sig->action[sig-1].sa.sa_handler > 1) && \
786 !sigismember(&p->blocked, sig) && (task_curr(p) || !signal_pending(p)))
789 int load_balance_thread_group(struct task_struct
*p
, int sig
,
790 struct siginfo
*info
)
792 struct task_struct
*tmp
;
796 * if the specified thread is not blocking this signal
799 if (can_take_signal(p
, sig
))
800 return specific_send_sig_info(sig
, info
, p
, 0);
803 * Otherwise try to find a suitable thread.
804 * If no such thread is found then deliver to
805 * the original thread.
808 tmp
= p
->sig
->curr_target
;
810 if (!tmp
|| tmp
->tgid
!= p
->tgid
)
811 /* restart balancing at this thread */
812 p
->sig
->curr_target
= p
;
815 if (thread_group_empty(p
))
817 if (!tmp
|| tmp
->tgid
!= p
->tgid
)
821 * Do not send signals that are ignored or blocked,
822 * or to not-running threads that are overworked:
824 if (!can_take_signal(tmp
, sig
)) {
825 tmp
= next_thread(tmp
);
826 p
->sig
->curr_target
= tmp
;
831 ret
= specific_send_sig_info(sig
, info
, tmp
, 0);
835 * No suitable thread was found - put the signal
836 * into the shared-pending queue.
838 return specific_send_sig_info(sig
, info
, p
, 1);
841 int __broadcast_thread_group(struct task_struct
*p
, int sig
)
843 struct task_struct
*tmp
;
848 for_each_task_pid(p
->tgid
, PIDTYPE_TGID
, tmp
, l
, pid
)
849 err
= specific_force_sig_info(sig
, tmp
);
854 struct task_struct
* find_unblocked_thread(struct task_struct
*p
, int signr
)
856 struct task_struct
*tmp
;
860 for_each_task_pid(p
->tgid
, PIDTYPE_TGID
, tmp
, l
, pid
)
861 if (!sigismember(&tmp
->blocked
, signr
))
867 __send_sig_info(int sig
, struct siginfo
*info
, struct task_struct
*p
)
869 struct task_struct
*t
;
873 if (!spin_is_locked(&p
->sig
->siglock
))
876 /* not a thread group - normal signal behavior */
877 if (thread_group_empty(p
) || !sig
)
880 if (sig_user_defined(p
, sig
)) {
881 if (sig_user_specific(sig
))
883 if (sig_user_load_balance(sig
)) {
884 ret
= load_balance_thread_group(p
, sig
, info
);
888 /* must not happen */
891 /* optimize away ignored signals: */
892 if (sig_ignored(p
, sig
))
895 if (sig_kernel_specific(sig
))
898 /* Does any of the threads unblock the signal? */
899 t
= find_unblocked_thread(p
, sig
);
901 ret
= specific_send_sig_info(sig
, info
, p
, 1);
904 if (sigismember(&t
->real_blocked
,sig
)) {
905 ret
= specific_send_sig_info(sig
, info
, t
, 0);
908 if (sig_kernel_broadcast(sig
) || sig_kernel_coredump(sig
)) {
909 ret
= __broadcast_thread_group(p
, sig
);
913 /* must not happen */
916 ret
= specific_send_sig_info(sig
, info
, p
, 0);
922 send_sig_info(int sig
, struct siginfo
*info
, struct task_struct
*p
)
927 spin_lock_irqsave(&p
->sig
->siglock
, flags
);
928 ret
= __send_sig_info(sig
, info
, p
);
929 spin_unlock_irqrestore(&p
->sig
->siglock
, flags
);
935 * kill_pg_info() sends a signal to a process group: this is what the tty
936 * control characters do (^C, ^Z etc)
939 int __kill_pg_info(int sig
, struct siginfo
*info
, pid_t pgrp
)
941 struct task_struct
*p
;
944 int err
, retval
= -ESRCH
;
949 for_each_task_pid(pgrp
, PIDTYPE_PGID
, p
, l
, pid
) {
950 err
= send_sig_info(sig
, info
, p
);
958 kill_pg_info(int sig
, struct siginfo
*info
, pid_t pgrp
)
962 read_lock(&tasklist_lock
);
963 retval
= __kill_pg_info(sig
, info
, pgrp
);
964 read_unlock(&tasklist_lock
);
970 * kill_sl_info() sends a signal to the session leader: this is used
971 * to send SIGHUP to the controlling process of a terminal when
972 * the connection is lost.
977 kill_sl_info(int sig
, struct siginfo
*info
, pid_t sid
)
979 int err
, retval
= -EINVAL
;
982 struct task_struct
*p
;
988 read_lock(&tasklist_lock
);
989 for_each_task_pid(sid
, PIDTYPE_SID
, p
, l
, pid
) {
992 err
= send_sig_info(sig
, info
, p
);
996 read_unlock(&tasklist_lock
);
1002 kill_proc_info(int sig
, struct siginfo
*info
, pid_t pid
)
1005 struct task_struct
*p
;
1007 read_lock(&tasklist_lock
);
1008 p
= find_task_by_pid(pid
);
1011 error
= send_sig_info(sig
, info
, p
);
1012 read_unlock(&tasklist_lock
);
1018 * kill_something_info() interprets pid in interesting ways just like kill(2).
1020 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1021 * is probably wrong. Should make it like BSD or SYSV.
1024 static int kill_something_info(int sig
, struct siginfo
*info
, int pid
)
1027 return kill_pg_info(sig
, info
, current
->pgrp
);
1028 } else if (pid
== -1) {
1029 int retval
= 0, count
= 0;
1030 struct task_struct
* p
;
1032 read_lock(&tasklist_lock
);
1033 for_each_process(p
) {
1034 if (p
->pid
> 1 && p
!= current
) {
1035 int err
= send_sig_info(sig
, info
, p
);
1041 read_unlock(&tasklist_lock
);
1042 return count
? retval
: -ESRCH
;
1043 } else if (pid
< 0) {
1044 return kill_pg_info(sig
, info
, -pid
);
1046 return kill_proc_info(sig
, info
, pid
);
/*
 * These are for backward compatibility with the rest of the kernel source.
 * Non-zero 'priv' marks the signal as kernel-internal (privileged).
 */
int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, (void*)(long)(priv != 0), p);
}
/* Compatibility wrapper: force a kernel-internal signal on @p. */
void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, (void*)1L, p);
}
1067 kill_pg(pid_t pgrp
, int sig
, int priv
)
1069 return kill_pg_info(sig
, (void *)(long)(priv
!= 0), pgrp
);
1073 kill_sl(pid_t sess
, int sig
, int priv
)
1075 return kill_sl_info(sig
, (void *)(long)(priv
!= 0), sess
);
1079 kill_proc(pid_t pid
, int sig
, int priv
)
1081 return kill_proc_info(sig
, (void *)(long)(priv
!= 0), pid
);
1085 * Joy. Or not. Pthread wants us to wake up every thread
1086 * in our parent group.
1088 static inline void __wake_up_parent(struct task_struct
*p
)
1090 struct task_struct
*parent
= p
->parent
, *tsk
= parent
;
1093 * Fortunately this is not necessary for thread groups:
1095 if (p
->tgid
== tsk
->tgid
) {
1096 wake_up_interruptible(&tsk
->wait_chldexit
);
1101 wake_up_interruptible(&tsk
->wait_chldexit
);
1102 tsk
= next_thread(tsk
);
1103 if (tsk
->sig
!= parent
->sig
)
1105 } while (tsk
!= parent
);
1109 * Let a parent know about a status change of a child.
1112 void do_notify_parent(struct task_struct
*tsk
, int sig
)
1114 struct siginfo info
;
1115 unsigned long flags
;
1121 info
.si_signo
= sig
;
1123 info
.si_pid
= tsk
->pid
;
1124 info
.si_uid
= tsk
->uid
;
1126 /* FIXME: find out whether or not this is supposed to be c*time. */
1127 info
.si_utime
= tsk
->utime
;
1128 info
.si_stime
= tsk
->stime
;
1130 status
= tsk
->exit_code
& 0x7f;
1131 why
= SI_KERNEL
; /* shouldn't happen */
1132 switch (tsk
->state
) {
1134 /* FIXME -- can we deduce CLD_TRAPPED or CLD_CONTINUED? */
1135 if (tsk
->ptrace
& PT_PTRACED
)
1142 if (tsk
->exit_code
& 0x80)
1144 else if (tsk
->exit_code
& 0x7f)
1148 status
= tsk
->exit_code
>> 8;
1153 info
.si_status
= status
;
1155 spin_lock_irqsave(&tsk
->parent
->sig
->siglock
, flags
);
1156 __send_sig_info(sig
, &info
, tsk
->parent
);
1157 __wake_up_parent(tsk
);
1158 spin_unlock_irqrestore(&tsk
->parent
->sig
->siglock
, flags
);
1163 * We need the tasklist lock because it's the only
1164 * thing that protects out "parent" pointer.
1166 * exit.c calls "do_notify_parent()" directly, because
1167 * it already has the tasklist lock.
1170 notify_parent(struct task_struct
*tsk
, int sig
)
1173 read_lock(&tasklist_lock
);
1174 do_notify_parent(tsk
, sig
);
1175 read_unlock(&tasklist_lock
);
1179 #ifndef HAVE_ARCH_GET_SIGNAL_TO_DELIVER
1181 int get_signal_to_deliver(siginfo_t
*info
, struct pt_regs
*regs
)
1183 sigset_t
*mask
= ¤t
->blocked
;
1186 unsigned long signr
= 0;
1187 struct k_sigaction
*ka
;
1189 spin_lock_irq(¤t
->sig
->siglock
);
1190 signr
= dequeue_signal(mask
, info
);
1191 spin_unlock_irq(¤t
->sig
->siglock
);
1196 if ((current
->ptrace
& PT_PTRACED
) && signr
!= SIGKILL
) {
1197 /* Let the debugger run. */
1198 current
->exit_code
= signr
;
1199 set_current_state(TASK_STOPPED
);
1200 notify_parent(current
, SIGCHLD
);
1203 /* We're back. Did the debugger cancel the sig? */
1204 signr
= current
->exit_code
;
1207 current
->exit_code
= 0;
1209 /* The debugger continued. Ignore SIGSTOP. */
1210 if (signr
== SIGSTOP
)
1213 /* Update the siginfo structure. Is this good? */
1214 if (signr
!= info
->si_signo
) {
1215 info
->si_signo
= signr
;
1217 info
->si_code
= SI_USER
;
1218 info
->si_pid
= current
->parent
->pid
;
1219 info
->si_uid
= current
->parent
->uid
;
1222 /* If the (new) signal is now blocked, requeue it. */
1223 if (sigismember(¤t
->blocked
, signr
)) {
1224 send_sig_info(signr
, info
, current
);
1229 ka
= ¤t
->sig
->action
[signr
-1];
1230 if (ka
->sa
.sa_handler
== SIG_IGN
) {
1231 if (signr
!= SIGCHLD
)
1233 /* Check for SIGCHLD: it's special. */
1234 while (sys_wait4(-1, NULL
, WNOHANG
, NULL
) > 0)
1239 if (ka
->sa
.sa_handler
== SIG_DFL
) {
1240 int exit_code
= signr
;
1242 /* Init gets no signals it doesn't want. */
1243 if (current
->pid
== 1)
1247 case SIGCONT
: case SIGCHLD
: case SIGWINCH
: case SIGURG
:
1250 case SIGTSTP
: case SIGTTIN
: case SIGTTOU
:
1251 if (is_orphaned_pgrp(current
->pgrp
))
1256 struct signal_struct
*sig
;
1257 set_current_state(TASK_STOPPED
);
1258 current
->exit_code
= signr
;
1259 sig
= current
->parent
->sig
;
1260 if (sig
&& !(sig
->action
[SIGCHLD
-1].sa
.sa_flags
& SA_NOCLDSTOP
))
1261 notify_parent(current
, SIGCHLD
);
1266 case SIGQUIT
: case SIGILL
: case SIGTRAP
:
1267 case SIGABRT
: case SIGFPE
: case SIGSEGV
:
1268 case SIGBUS
: case SIGSYS
: case SIGXCPU
: case SIGXFSZ
:
1269 if (do_coredump(signr
, regs
))
1274 sig_exit(signr
, exit_code
, info
);
1285 EXPORT_SYMBOL(recalc_sigpending
);
1286 EXPORT_SYMBOL_GPL(dequeue_signal
);
1287 EXPORT_SYMBOL(flush_signals
);
1288 EXPORT_SYMBOL(force_sig
);
1289 EXPORT_SYMBOL(force_sig_info
);
1290 EXPORT_SYMBOL(kill_pg
);
1291 EXPORT_SYMBOL(kill_pg_info
);
1292 EXPORT_SYMBOL(kill_proc
);
1293 EXPORT_SYMBOL(kill_proc_info
);
1294 EXPORT_SYMBOL(kill_sl
);
1295 EXPORT_SYMBOL(kill_sl_info
);
1296 EXPORT_SYMBOL(notify_parent
);
1297 EXPORT_SYMBOL(send_sig
);
1298 EXPORT_SYMBOL(send_sig_info
);
1299 EXPORT_SYMBOL(block_all_signals
);
1300 EXPORT_SYMBOL(unblock_all_signals
);
1304 * System call entry points.
1308 * We don't need to get the kernel lock - this is all local to this
1309 * particular thread.. (and that's good, because this is _heavily_
1310 * used by various programs)
1314 sys_rt_sigprocmask(int how
, sigset_t
*set
, sigset_t
*oset
, size_t sigsetsize
)
1316 int error
= -EINVAL
;
1317 sigset_t old_set
, new_set
;
1319 /* XXX: Don't preclude handling different sized sigset_t's. */
1320 if (sigsetsize
!= sizeof(sigset_t
))
1325 if (copy_from_user(&new_set
, set
, sizeof(*set
)))
1327 sigdelsetmask(&new_set
, sigmask(SIGKILL
)|sigmask(SIGSTOP
));
1329 spin_lock_irq(¤t
->sig
->siglock
);
1330 old_set
= current
->blocked
;
1338 sigorsets(&new_set
, &old_set
, &new_set
);
1341 signandsets(&new_set
, &old_set
, &new_set
);
1347 current
->blocked
= new_set
;
1348 recalc_sigpending();
1349 spin_unlock_irq(¤t
->sig
->siglock
);
1355 spin_lock_irq(¤t
->sig
->siglock
);
1356 old_set
= current
->blocked
;
1357 spin_unlock_irq(¤t
->sig
->siglock
);
1361 if (copy_to_user(oset
, &old_set
, sizeof(*oset
)))
1369 long do_sigpending(void *set
, unsigned long sigsetsize
)
1371 long error
= -EINVAL
;
1374 if (sigsetsize
> sizeof(sigset_t
))
1377 spin_lock_irq(¤t
->sig
->siglock
);
1378 sigandsets(&pending
, ¤t
->blocked
, ¤t
->pending
.signal
);
1379 spin_unlock_irq(¤t
->sig
->siglock
);
1382 if (!copy_to_user(set
, &pending
, sigsetsize
))
1389 sys_rt_sigpending(sigset_t
*set
, size_t sigsetsize
)
1391 return do_sigpending(set
, sigsetsize
);
1394 #ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
1396 int copy_siginfo_to_user(siginfo_t
*to
, siginfo_t
*from
)
1400 if (!access_ok (VERIFY_WRITE
, to
, sizeof(siginfo_t
)))
1402 if (from
->si_code
< 0)
1403 return __copy_to_user(to
, from
, sizeof(siginfo_t
))
1406 * If you change siginfo_t structure, please be sure
1407 * this code is fixed accordingly.
1408 * It should never copy any pad contained in the structure
1409 * to avoid security leaks, but must copy the generic
1410 * 3 ints plus the relevant union member.
1412 err
= __put_user(from
->si_signo
, &to
->si_signo
);
1413 err
|= __put_user(from
->si_errno
, &to
->si_errno
);
1414 err
|= __put_user((short)from
->si_code
, &to
->si_code
);
1415 switch (from
->si_code
& __SI_MASK
) {
1417 err
|= __put_user(from
->si_pid
, &to
->si_pid
);
1418 err
|= __put_user(from
->si_uid
, &to
->si_uid
);
1421 err
|= __put_user(from
->si_timer1
, &to
->si_timer1
);
1422 err
|= __put_user(from
->si_timer2
, &to
->si_timer2
);
1425 err
|= __put_user(from
->si_band
, &to
->si_band
);
1426 err
|= __put_user(from
->si_fd
, &to
->si_fd
);
1429 err
|= __put_user(from
->si_addr
, &to
->si_addr
);
1432 err
|= __put_user(from
->si_pid
, &to
->si_pid
);
1433 err
|= __put_user(from
->si_uid
, &to
->si_uid
);
1434 err
|= __put_user(from
->si_status
, &to
->si_status
);
1435 err
|= __put_user(from
->si_utime
, &to
->si_utime
);
1436 err
|= __put_user(from
->si_stime
, &to
->si_stime
);
1438 case __SI_RT
: /* This is not generated by the kernel as of now. */
1439 err
|= __put_user(from
->si_pid
, &to
->si_pid
);
1440 err
|= __put_user(from
->si_uid
, &to
->si_uid
);
1441 err
|= __put_user(from
->si_int
, &to
->si_int
);
1442 err
|= __put_user(from
->si_ptr
, &to
->si_ptr
);
1444 default: /* this is just in case for now ... */
1445 err
|= __put_user(from
->si_pid
, &to
->si_pid
);
1446 err
|= __put_user(from
->si_uid
, &to
->si_uid
);
1455 sys_rt_sigtimedwait(const sigset_t
*uthese
, siginfo_t
*uinfo
,
1456 const struct timespec
*uts
, size_t sigsetsize
)
1464 /* XXX: Don't preclude handling different sized sigset_t's. */
1465 if (sigsetsize
!= sizeof(sigset_t
))
1468 if (copy_from_user(&these
, uthese
, sizeof(these
)))
1472 * Invert the set of allowed signals to get those we
1475 sigdelsetmask(&these
, sigmask(SIGKILL
)|sigmask(SIGSTOP
));
1479 if (copy_from_user(&ts
, uts
, sizeof(ts
)))
1481 if (ts
.tv_nsec
>= 1000000000L || ts
.tv_nsec
< 0
1486 spin_lock_irq(¤t
->sig
->siglock
);
1487 sig
= dequeue_signal(&these
, &info
);
1489 timeout
= MAX_SCHEDULE_TIMEOUT
;
1491 timeout
= (timespec_to_jiffies(&ts
)
1492 + (ts
.tv_sec
|| ts
.tv_nsec
));
1495 /* None ready -- temporarily unblock those we're
1496 * interested while we are sleeping in so that we'll
1497 * be awakened when they arrive. */
1498 current
->real_blocked
= current
->blocked
;
1499 sigandsets(¤t
->blocked
, ¤t
->blocked
, &these
);
1500 recalc_sigpending();
1501 spin_unlock_irq(¤t
->sig
->siglock
);
1503 current
->state
= TASK_INTERRUPTIBLE
;
1504 timeout
= schedule_timeout(timeout
);
1506 spin_lock_irq(¤t
->sig
->siglock
);
1507 sig
= dequeue_signal(&these
, &info
);
1508 current
->blocked
= current
->real_blocked
;
1509 siginitset(¤t
->real_blocked
, 0);
1510 recalc_sigpending();
1513 spin_unlock_irq(¤t
->sig
->siglock
);
1518 if (copy_siginfo_to_user(uinfo
, &info
))
1531 sys_kill(int pid
, int sig
)
1533 struct siginfo info
;
1535 info
.si_signo
= sig
;
1537 info
.si_code
= SI_USER
;
1538 info
.si_pid
= current
->pid
;
1539 info
.si_uid
= current
->uid
;
1541 return kill_something_info(sig
, &info
, pid
);
1545 * Send a signal to only one task, even if it's a CLONE_THREAD task.
1548 sys_tkill(int pid
, int sig
)
1550 struct siginfo info
;
1552 struct task_struct
*p
;
1554 /* This is only valid for single tasks */
1558 info
.si_signo
= sig
;
1560 info
.si_code
= SI_TKILL
;
1561 info
.si_pid
= current
->pid
;
1562 info
.si_uid
= current
->uid
;
1564 read_lock(&tasklist_lock
);
1565 p
= find_task_by_pid(pid
);
1568 spin_lock_irq(&p
->sig
->siglock
);
1569 error
= specific_send_sig_info(sig
, &info
, p
, 0);
1570 spin_unlock_irq(&p
->sig
->siglock
);
1572 read_unlock(&tasklist_lock
);
1577 sys_rt_sigqueueinfo(int pid
, int sig
, siginfo_t
*uinfo
)
1581 if (copy_from_user(&info
, uinfo
, sizeof(siginfo_t
)))
1584 /* Not even root can pretend to send signals from the kernel.
1585 Nor can they impersonate a kill(), which adds source info. */
1586 if (info
.si_code
>= 0)
1588 info
.si_signo
= sig
;
1590 /* POSIX.1b doesn't mention process groups. */
1591 return kill_proc_info(sig
, &info
, pid
);
1595 do_sigaction(int sig
, const struct k_sigaction
*act
, struct k_sigaction
*oact
)
1597 struct k_sigaction
*k
;
1599 if (sig
< 1 || sig
> _NSIG
|| (act
&& sig_kernel_only(sig
)))
1602 k
= ¤t
->sig
->action
[sig
-1];
1604 spin_lock_irq(¤t
->sig
->siglock
);
1611 sigdelsetmask(&k
->sa
.sa_mask
, sigmask(SIGKILL
) | sigmask(SIGSTOP
));
1615 * "Setting a signal action to SIG_IGN for a signal that is
1616 * pending shall cause the pending signal to be discarded,
1617 * whether or not it is blocked."
1619 * "Setting a signal action to SIG_DFL for a signal that is
1620 * pending and whose default action is to ignore the signal
1621 * (for example, SIGCHLD), shall cause the pending signal to
1622 * be discarded, whether or not it is blocked"
1624 * Note the silly behaviour of SIGCHLD: SIG_IGN means that the
1625 * signal isn't actually ignored, but does automatic child
1626 * reaping, while SIG_DFL is explicitly said by POSIX to force
1627 * the signal to be ignored.
1630 if (k
->sa
.sa_handler
== SIG_IGN
1631 || (k
->sa
.sa_handler
== SIG_DFL
1632 && (sig
== SIGCONT
||
1636 if (rm_sig_from_queue(sig
, current
))
1637 recalc_sigpending();
1641 spin_unlock_irq(¤t
->sig
->siglock
);
1646 do_sigaltstack (const stack_t
*uss
, stack_t
*uoss
, unsigned long sp
)
1652 oss
.ss_sp
= (void *) current
->sas_ss_sp
;
1653 oss
.ss_size
= current
->sas_ss_size
;
1654 oss
.ss_flags
= sas_ss_flags(sp
);
1663 if (verify_area(VERIFY_READ
, uss
, sizeof(*uss
))
1664 || __get_user(ss_sp
, &uss
->ss_sp
)
1665 || __get_user(ss_flags
, &uss
->ss_flags
)
1666 || __get_user(ss_size
, &uss
->ss_size
))
1670 if (on_sig_stack (sp
))
1676 * Note - this code used to test ss_flags incorrectly
1677 * old code may have been written using ss_flags==0
1678 * to mean ss_flags==SS_ONSTACK (as this was the only
1679 * way that worked) - this fix preserves that older
1682 if (ss_flags
!= SS_DISABLE
&& ss_flags
!= SS_ONSTACK
&& ss_flags
!= 0)
1685 if (ss_flags
== SS_DISABLE
) {
1690 if (ss_size
< MINSIGSTKSZ
)
1694 current
->sas_ss_sp
= (unsigned long) ss_sp
;
1695 current
->sas_ss_size
= ss_size
;
1700 if (copy_to_user(uoss
, &oss
, sizeof(oss
)))
1710 sys_sigpending(old_sigset_t
*set
)
1712 return do_sigpending(set
, sizeof(*set
));
1715 #if !defined(__alpha__)
1716 /* Alpha has its own versions with special arguments. */
1719 sys_sigprocmask(int how
, old_sigset_t
*set
, old_sigset_t
*oset
)
1722 old_sigset_t old_set
, new_set
;
1726 if (copy_from_user(&new_set
, set
, sizeof(*set
)))
1728 new_set
&= ~(sigmask(SIGKILL
)|sigmask(SIGSTOP
));
1730 spin_lock_irq(¤t
->sig
->siglock
);
1731 old_set
= current
->blocked
.sig
[0];
1739 sigaddsetmask(¤t
->blocked
, new_set
);
1742 sigdelsetmask(¤t
->blocked
, new_set
);
1745 current
->blocked
.sig
[0] = new_set
;
1749 recalc_sigpending();
1750 spin_unlock_irq(¤t
->sig
->siglock
);
1756 old_set
= current
->blocked
.sig
[0];
1759 if (copy_to_user(oset
, &old_set
, sizeof(*oset
)))
1769 sys_rt_sigaction(int sig
, const struct sigaction
*act
, struct sigaction
*oact
,
1772 struct k_sigaction new_sa
, old_sa
;
1775 /* XXX: Don't preclude handling different sized sigset_t's. */
1776 if (sigsetsize
!= sizeof(sigset_t
))
1780 if (copy_from_user(&new_sa
.sa
, act
, sizeof(new_sa
.sa
)))
1784 ret
= do_sigaction(sig
, act
? &new_sa
: NULL
, oact
? &old_sa
: NULL
);
1787 if (copy_to_user(oact
, &old_sa
.sa
, sizeof(old_sa
.sa
)))
1793 #endif /* __sparc__ */
1796 #if !defined(__alpha__) && !defined(__ia64__) && !defined(__arm__)
1798 * For backwards compatibility. Functionality superseded by sigprocmask.
1804 return current
->blocked
.sig
[0];
1808 sys_ssetmask(int newmask
)
1812 spin_lock_irq(¤t
->sig
->siglock
);
1813 old
= current
->blocked
.sig
[0];
1815 siginitset(¤t
->blocked
, newmask
& ~(sigmask(SIGKILL
)|
1817 recalc_sigpending();
1818 spin_unlock_irq(¤t
->sig
->siglock
);
1822 #endif /* !defined(__alpha__) */
1824 #if !defined(__alpha__) && !defined(__ia64__) && !defined(__mips__) && \
1827 * For backwards compatibility. Functionality superseded by sigaction.
1829 asmlinkage
unsigned long
1830 sys_signal(int sig
, __sighandler_t handler
)
1832 struct k_sigaction new_sa
, old_sa
;
1835 new_sa
.sa
.sa_handler
= handler
;
1836 new_sa
.sa
.sa_flags
= SA_ONESHOT
| SA_NOMASK
;
1838 ret
= do_sigaction(sig
, &new_sa
, &old_sa
);
1840 return ret
? ret
: (unsigned long)old_sa
.sa
.sa_handler
;
1842 #endif /* !alpha && !__ia64__ && !defined(__mips__) && !defined(__arm__) */
1844 #ifndef HAVE_ARCH_SYS_PAUSE
1849 current
->state
= TASK_INTERRUPTIBLE
;
1851 return -ERESTARTNOHAND
;
1854 #endif /* HAVE_ARCH_SYS_PAUSE */
1856 void __init
signals_init(void)
1859 kmem_cache_create("sigqueue",
1860 sizeof(struct sigqueue
),
1861 __alignof__(struct sigqueue
),
1863 if (!sigqueue_cachep
)
1864 panic("signals_init(): cannot create sigqueue SLAB cache");