/*
 * linux/kernel/signal.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson
 *
 * 2003-06-02 Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>

#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include "audit.h"	/* audit_signal_info() */
/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

static int __sig_ignored(struct task_struct *t, int sig)
{
	void __user *handler;

	/* Is it explicitly or implicitly ignored? */

	handler = t->sighand->action[sig - 1].sa.sa_handler;
	return handler == SIG_IGN ||
		(handler == SIG_DFL && sig_kernel_ignore(sig));
}

static int sig_ignored(struct task_struct *t, int sig)
{
	/*
	 * Tracers always want to know about signals..
	 */
	if (t->ptrace & PT_PTRACED)
		return 0;

	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return 0;

	return __sig_ignored(t, sig);
}
/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
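
/*
 * Worked example (illustrative only): with _NSIG_WORDS == 1, pending
 * sig[0] = 0x102 (bit 1 = SIGINT, bit 8 = SIGKILL) and blocked
 * sig[0] = 0x002 (SIGINT blocked) give
 *
 *	ready = 0x102 &~ 0x002 = 0x100,
 *
 * which is nonzero: SIGKILL is still deliverable, so the task counts
 * as having pending signals even though SIGINT is blocked.
 */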
static int recalc_sigpending_tsk(struct task_struct *t)
{
	if (t->signal->group_stop_count > 0 ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return 1;
	}
	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here, and only callers who know they should do.
	 */
	return 0;
}

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current, the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current))
		clear_thread_flag(TIF_SIGPENDING);
}
/* Given the mask, find the first available signal that should be serviced. */

int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;
	switch (_NSIG_WORDS) {
	default:
		for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
			if ((x = *s &~ *m) != 0) {
				sig = ffz(~x) + i*_NSIG_BPW + 1;
				break;
			}
		break;

	case 2: if ((x = s[0] &~ m[0]) != 0)
			sig = 1;
		else if ((x = s[1] &~ m[1]) != 0)
			sig = _NSIG_BPW + 1;
		else
			break;
		sig += ffz(~x);
		break;

	case 1: if ((x = *s &~ *m) != 0)
			sig = ffz(~x) + 1;
		break;
	}

	return sig;
}
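
/*
 * Worked example (illustrative only): for _NSIG_WORDS == 1 with
 * *s = 0x24 (bits 2 and 5 set, i.e. SIGQUIT and SIGABRT pending) and
 * *m = 0, we get x = 0x24 and ffz(~x) = 2, the index of the lowest set
 * bit, so next_signal() returns sig = 2 + 1 = 3 (SIGQUIT): the
 * lowest-numbered unblocked pending signal is serviced first.
 */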
static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
					 int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;

	/*
	 * In order to avoid problems with "switch_user()", we want to make
	 * sure that the compiler doesn't re-load "t->user"
	 */
	user = t->user;
	barrier();
	atomic_inc(&user->sigpending);
	if (override_rlimit ||
	    atomic_read(&user->sigpending) <=
			t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	if (unlikely(q == NULL)) {
		atomic_dec(&user->sigpending);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = get_uid(user);
	}
	return(q);
}
static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}

void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue , list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}
/*
 * Flush all pending signals for a task.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}

void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}
void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}

/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}

int unhandled_signal(struct task_struct *tsk, int sig)
{
	if (is_global_init(tsk))
		return 1;
	if (tsk->ptrace & PT_PTRACED)
		return 0;
	return (tsk->sighand->action[sig-1].sa.sa_handler == SIG_IGN) ||
		(tsk->sighand->action[sig-1].sa.sa_handler == SIG_DFL);
}
/* Notify the system that a driver wants to block all signals for this
 * process, and wants to be notified if any signals at all were to be
 * sent/acted upon.  If the notifier routine returns non-zero, then the
 * signal will be acted upon after all.  If the notifier routine returns 0,
 * then the signal will be blocked.  Only one block per process is
 * allowed.  priv is a pointer to private data that the notifier routine
 * can use to determine if the signal should be blocked or not.
 */
void
block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier_mask = mask;
	current->notifier_data = priv;
	current->notifier = notifier;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}

/* Notify the system that blocking has ended. */

void
unblock_all_signals(void)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier = NULL;
	current->notifier_data = NULL;
	current->notifier_mask = NULL;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
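
/*
 * Minimal usage sketch (hypothetical driver code, not an in-tree
 * example):
 *
 *	static int my_notifier(void *priv)
 *	{
 *		struct my_dev *dev = priv;
 *		return dev->allow_signals;	// 0: block, nonzero: deliver
 *	}
 *
 *	sigfillset(&mask);
 *	block_all_signals(my_notifier, dev, &mask);
 *	... critical section ...
 *	unblock_all_signals();
 */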
static int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
	struct sigqueue *q, *first = NULL;
	int still_pending = 0;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first) {
				still_pending = 1;
				break;
			}
			first = q;
		}
	}
	if (first) {
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);
		__sigqueue_free(first);
		if (!still_pending)
			sigdelset(&list->signal, sig);
	} else {
		/* Ok, it wasn't in the queue.  This must be
		   a fast-pathed signal or we must have been
		   out of queue space.  So zero out the info.
		 */
		sigdelset(&list->signal, sig);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = 0;
		info->si_pid = 0;
		info->si_uid = 0;
	}
	return 1;
}
static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			siginfo_t *info)
{
	int sig = next_signal(pending, mask);

	if (sig) {
		if (current->notifier) {
			if (sigismember(current->notifier_mask, sig)) {
				if (!(current->notifier)(current->notifier_data)) {
					clear_thread_flag(TIF_SIGPENDING);
					return 0;
				}
			}
		}

		if (!collect_signal(sig, pending, info))
			sig = 0;
	}

	return sig;
}
/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info);
		/*
		 * itimer signal ?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavy loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr.tv64 != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT))
			tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
	}
	if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		do_schedule_next_timer(info);
		spin_lock(&tsk->sighand->siglock);
	}
	return signr;
}
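
/*
 * Caller pattern (a sketch, not a specific call site): the siglock
 * must be held across the dequeue, e.g.
 *
 *	spin_lock_irq(&current->sighand->siglock);
 *	signr = dequeue_signal(current, &current->blocked, &info);
 *	spin_unlock_irq(&current->sighand->siglock);
 */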
/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up(struct task_struct *t, int resume)
{
	unsigned int mask;

	set_tsk_thread_flag(t, TIF_SIGPENDING);

	/*
	 * For SIGKILL, we want to wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	mask = TASK_INTERRUPTIBLE;
	if (resume)
		mask |= TASK_WAKEKILL;
	if (!wake_up_state(t, mask))
		kick_process(t);
}
/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 *
 * This version takes a sigset mask and looks at all signals,
 * not just those in the first mask word.
 */
static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return 0;

	signandsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}
/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int rm_from_queue(unsigned long mask, struct sigpending *s)
{
	struct sigqueue *q, *n;

	if (!sigtestsetmask(&s->signal, mask))
		return 0;

	sigdelsetmask(&s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (q->info.si_signo < SIGRTMIN &&
		    (mask & sigmask(q->info.si_signo))) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}
/*
 * Bad permissions for sending the signal
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (info != SEND_SIG_NOINFO && (is_si_special(info) || SI_FROMKERNEL(info)))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if ((current->euid ^ t->suid) && (current->euid ^ t->uid) &&
	    (current->uid ^ t->suid) && (current->uid ^ t->uid) &&
	    !capable(CAP_KILL)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, 0);
}

/* forward decl */
static void do_notify_parent_cldstop(struct task_struct *tsk, int why);
/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals. The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static int prepare_signal(int sig, struct task_struct *p)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	if (unlikely(signal->flags & SIGNAL_GROUP_EXIT)) {
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		rm_from_queue(sigmask(SIGCONT), &signal->shared_pending);
		t = p;
		do {
			rm_from_queue(sigmask(SIGCONT), &t->pending);
		} while_each_thread(p, t);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues,
		 * and wake all threads.
		 */
		rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending);
		t = p;
		do {
			unsigned int state;
			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
			/*
			 * If there is a handler for SIGCONT, we must make
			 * sure that no thread returns to user mode before
			 * we post the signal, in case it was the only
			 * thread eligible to run the signal handler--then
			 * it must not do anything between resuming and
			 * running the handler.  With the TIF_SIGPENDING
			 * flag set, the thread will pause and acquire the
			 * siglock that we hold now and until we've queued
			 * the pending signal.
			 *
			 * Wake up the stopped thread _after_ setting
			 * TIF_SIGPENDING
			 */
			state = __TASK_STOPPED;
			if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
				set_tsk_thread_flag(t, TIF_SIGPENDING);
				state |= TASK_INTERRUPTIBLE;
			}
			wake_up_state(t, state);
		} while_each_thread(p, t);

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from finish_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal_to_deliver().
			 */
			signal->flags = why | SIGNAL_STOP_CONTINUED;
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		} else {
			/*
			 * We are not stopped, but there could be a stop
			 * signal in the middle of being processed after
			 * being removed from the queue.  Clear that too.
			 */
			signal->flags &= ~SIGNAL_STOP_DEQUEUED;
		}
	}

	return !sig_ignored(p, sig);
}
/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline int wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return 0;
	if (p->flags & PF_EXITING)
		return 0;
	if (sig == SIGKILL)
		return 1;
	if (task_is_stopped_or_traced(p))
		return 0;
	return task_curr(p) || !signal_pending(p);
}
static void complete_signal(int sig, struct task_struct *p, int group)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if (!group || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
}
static inline int legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}
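
/*
 * Illustration (not from the source): non-real-time signals do not
 * queue.  If SIGUSR1 is already pending on the chosen queue, a second
 * SIGUSR1 is dropped by this check, so the receiver observes a single
 * delivery.  Real-time signals (>= SIGRTMIN) bypass it and queue up.
 */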
static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group)
{
	struct sigpending *pending;
	struct sigqueue *q;

	assert_spin_locked(&t->sighand->siglock);
	if (!prepare_signal(sig, t))
		return 0;

	pending = group ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	if (legacy_queue(pending, sig))
		return 0;
	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if (info == SEND_SIG_FORCED)
		goto out_set;

	/* Real-time signals must be queued if sent by sigqueue, or
	   some other real-time mechanism.  It is implementation
	   defined whether kill() does so.  We attempt to do so, on
	   the principle of least surprise, but since kill is not
	   allowed to fail with EAGAIN when low on memory we just
	   make sure at least one signal gets delivered and don't
	   pass on the info struct.  */

	q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
					     (is_si_special(info) ||
					      info->si_code >= 0)));
	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_pid_vnr(current);
			q->info.si_uid = current->uid;
			break;
		case (unsigned long) SEND_SIG_PRIV:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			break;
		}
	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER)
		/*
		 * Queue overflow, abort.  We may abort if the signal was rt
		 * and sent by user using something other than kill().
		 */
			return -EAGAIN;
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
	return 0;
}
int print_fatal_signals;

static void print_fatal_signal(struct pt_regs *regs, int signr)
{
	printk("%s/%d: potentially unexpected fatal signal %d.\n",
		current->comm, task_pid_nr(current), signr);

#if defined(__i386__) && !defined(__arch_um__)
	printk("code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			__get_user(insn, (unsigned char *)(regs->ip + i));
			printk("%02x ", insn);
		}
	}
#endif
	printk("\n");
	show_regs(regs);
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option (&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);
int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, 1);
}

static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	return send_signal(sig, info, t, 0);
}
/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	if (action->sa.sa_handler == SIG_DFL)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}

void
force_sig_specific(int sig, struct task_struct *t)
{
	force_sig_info(sig, SEND_SIG_FORCED, t);
}
/*
 * Nuke all other threads in the group.
 */
void zap_other_threads(struct task_struct *p)
{
	struct task_struct *t;

	p->signal->group_stop_count = 0;

	for (t = next_thread(p); t != p; t = next_thread(t)) {
		/*
		 * Don't bother with already dead threads
		 */
		if (t->exit_state)
			continue;

		/* SIGKILL will be handled before any pending SIGSTOP */
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}
}

int __fatal_signal_pending(struct task_struct *tsk)
{
	return sigismember(&tsk->pending.signal, SIGKILL);
}
EXPORT_SYMBOL(__fatal_signal_pending);
struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == tsk->sighand))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}
	rcu_read_unlock();

	return sighand;
}
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	unsigned long flags;
	int ret;

	ret = check_kill_permission(sig, info, p);

	if (!ret && sig) {
		ret = -ESRCH;
		if (lock_task_sighand(p, &flags)) {
			ret = __group_send_sig_info(sig, info, p);
			unlock_task_sighand(p, &flags);
		}
	}

	return ret;
}
/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 */

int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}
int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	rcu_read_lock();
retry:
	p = pid_task(pid, PIDTYPE_PID);
	if (p) {
		error = group_send_sig_info(sig, info, p);
		if (unlikely(error == -ESRCH))
			/*
			 * The task was unhashed in between, try again.
			 * If it is dead, pid_task() will return NULL,
			 * if we race with de_thread() it will find the
			 * new leader.
			 */
			goto retry;
	}
	rcu_read_unlock();

	return error;
}

int
kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;
	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}
/* like kill_pid_info(), but doesn't use uid/euid of "current" */
int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
		      uid_t uid, uid_t euid, u32 secid)
{
	int ret = -EINVAL;
	struct task_struct *p;

	if (!valid_signal(sig))
		return ret;

	read_lock(&tasklist_lock);
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
	    && (euid != p->suid) && (euid != p->uid)
	    && (uid != p->suid) && (uid != p->uid)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, info, sig, secid);
	if (ret)
		goto out_unlock;
	if (sig && p->sighand) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		ret = __group_send_sig_info(sig, info, p);
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
out_unlock:
	read_unlock(&tasklist_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);
/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct siginfo *info, int pid)
{
	int ret;

	if (pid > 0) {
		rcu_read_lock();
		ret = kill_pid_info(sig, info, find_vpid(pid));
		rcu_read_unlock();
		return ret;
	}

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct * p;

		for_each_process(p) {
			if (p->pid > 1 && !same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}
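
/*
 * pid interpretation, mirroring kill(2) (a summary of the code above):
 *
 *	pid >  0	signal the single process <pid>
 *	pid == 0	signal the caller's process group
 *	pid == -1	signal every process except init and ourselves
 *	pid < -1	signal process group -pid
 *
 * e.g. kill(-5, SIGTERM) from user space arrives here with pid == -5
 * and signals process group 5.
 */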
/*
 * These are for backward compatibility with the rest of the kernel source.
 */

/*
 * The caller must ensure the task can't exit.
 */
int
send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;
	unsigned long flags;

	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	spin_lock_irqsave(&p->sighand->siglock, flags);
	ret = specific_send_sig_info(sig, info, p);
	spin_unlock_irqrestore(&p->sighand->siglock, flags);

	return ret;
}
#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
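
/*
 * For example (illustrative), send_sig(SIGTERM, p, 1) below expands to
 * send_sig_info(SIGTERM, SEND_SIG_PRIV, p): a kernel-originated signal
 * carrying no detailed siginfo.
 */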
int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}

void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, SEND_SIG_PRIV, p);
}

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
int
force_sigsegv(int sig, struct task_struct *p)
{
	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
	return 0;
}
int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);

int
kill_proc(pid_t pid, int sig, int priv)
{
	int ret;

	rcu_read_lock();
	ret = kill_pid_info(sig, __si_special(priv), find_pid(pid));
	rcu_read_unlock();
	return ret;
}
/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of Posix Timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */

struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q;

	if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0)))
		q->flags |= SIGQUEUE_PREALLOC;
	return(q);
}

void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}

int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	unsigned long flags;
	int ret;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	if (!likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	if (!prepare_signal(sig, t))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued, just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		goto out;
	}

	signalfd_notify(t, sig);
	pending = group ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
out:
	unlock_task_sighand(t, &flags);
ret:
	return ret;
}
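
/*
 * Typical lifecycle (a sketch of how the posix-timers code uses this
 * interface; see kernel/posix-timers.c for the real call sites):
 *
 *	q = sigqueue_alloc();			// at timer_create()
 *	...
 *	send_sigqueue(q, target, group);	// at each timer expiry
 *	...
 *	sigqueue_free(q);			// at timer_delete()
 *
 * Preallocating at creation time lets timer_create() fail with EAGAIN
 * instead of silently losing an expiration notification later.
 */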
/*
 * Wake up any threads in the parent blocked in wait* syscalls.
 */
static inline void __wake_up_parent(struct task_struct *p,
				    struct task_struct *parent)
{
	wake_up_interruptible_sync(&parent->signal->wait_chldexit);
}

/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 */

void do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead.  */
	BUG_ON(task_is_stopped_or_traced(tsk));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * we are under tasklist_lock here so our parent is tied to
	 * us and cannot exit and release its namespace.
	 *
	 * the only thing it can do is switch its nsproxy with sys_unshare,
	 * but unsharing pid namespaces is not allowed, so we'll always
	 * see the relevant namespace
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg, this is not
	 * correct to rely on this
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
	rcu_read_unlock();

	info.si_uid = tsk->uid;

	/* FIXME: find out whether or not this is supposed to be c*time. */
	info.si_utime = cputime_to_jiffies(cputime_add(tsk->utime,
						       tsk->signal->utime));
	info.si_stime = cputime_to_jiffies(cputime_add(tsk->stime,
						       tsk->signal->stime));

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		tsk->exit_signal = -1;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	if (valid_signal(sig) && sig > 0)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);
}
static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
{
	struct siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;

	if (tsk->ptrace & PT_PTRACED)
		parent = tsk->parent;
	else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/*
	 * see comment in do_notify_parent() about the following 3 lines
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
	rcu_read_unlock();

	info.si_uid = tsk->uid;

	/* FIXME: find out whether or not this is supposed to be c*time. */
	info.si_utime = cputime_to_jiffies(tsk->utime);
	info.si_stime = cputime_to_jiffies(tsk->stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}
static inline int may_ptrace_stop(void)
{
	if (!likely(current->ptrace & PT_PTRACED))
		return 0;
	/*
	 * Are we in the middle of do_coredump?
	 * If so and our tracer is also part of the coredump stopping
	 * is a deadlock situation, and pointless because our tracer
	 * is dead so don't allow us to stop.
	 * If SIGKILL was already sent before the caller unlocked
	 * ->siglock we must see ->core_waiters != 0. Otherwise it
	 * is safe to enter schedule().
	 */
	if (unlikely(current->mm->core_waiters) &&
	    unlikely(current->mm == current->parent->mm))
		return 0;

	return 1;
}

/*
 * Return nonzero if there is a SIGKILL that should be waking us up.
 * Called with the siglock held.
 */
static int sigkill_pending(struct task_struct *tsk)
{
	return ((sigismember(&tsk->pending.signal, SIGKILL) ||
		 sigismember(&tsk->signal->shared_pending.signal, SIGKILL)) &&
		!unlikely(sigismember(&tsk->blocked, SIGKILL)));
}
/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer
 * is gone, we keep current->exit_code unless clear_code.
 */
static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info)
{
	int killed = 0;

	if (arch_ptrace_stop_needed(exit_code, info)) {
		/*
		 * The arch code has something special to do before a
		 * ptrace stop.  This is allowed to block, e.g. for faults
		 * on user stack pages.  We can't keep the siglock while
		 * calling arch_ptrace_stop, so we must release it now.
		 * To preserve proper semantics, we must do this before
		 * any signal bookkeeping like checking group_stop_count.
		 * Meanwhile, a SIGKILL could come in before we retake the
		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
		 * So after regaining the lock, we must check for SIGKILL.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		arch_ptrace_stop(exit_code, info);
		spin_lock_irq(&current->sighand->siglock);
		killed = sigkill_pending(current);
	}

	/*
	 * If there is a group stop in progress,
	 * we must participate in the bookkeeping.
	 */
	if (current->signal->group_stop_count > 0)
		--current->signal->group_stop_count;

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/* Let the debugger run.  */
	__set_current_state(TASK_TRACED);
	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);
	if (!unlikely(killed) && may_ptrace_stop()) {
		do_notify_parent_cldstop(current, CLD_TRAPPED);
		read_unlock(&tasklist_lock);
		schedule();
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't drop the lock yet, another tracer may come.
		 */
		__set_current_state(TASK_RUNNING);
		if (clear_code)
			current->exit_code = 0;
		read_unlock(&tasklist_lock);
	}

	/*
	 * While in TASK_TRACED, we were considered "frozen enough".
	 * Now that we woke up, it's crucial if we're supposed to be
	 * frozen that we freeze now before running anything substantial.
	 */
	try_to_freeze();

	/*
	 * We are back.  Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 * This sets TIF_SIGPENDING, but never clears it.
	 */
	recalc_sigpending_tsk(current);
}
void ptrace_notify(int exit_code)
{
	siginfo_t info;

	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);

	memset(&info, 0, sizeof info);
	info.si_signo = SIGTRAP;
	info.si_code = exit_code;
	info.si_pid = task_pid_vnr(current);
	info.si_uid = current->uid;

	/* Let the debugger run.  */
	spin_lock_irq(&current->sighand->siglock);
	ptrace_stop(exit_code, 1, &info);
	spin_unlock_irq(&current->sighand->siglock);
}
static void
finish_stop(int stop_count)
{
	/*
	 * If there are no other threads in the group, or if there is
	 * a group stop in progress and we are the last to stop,
	 * report to the parent.  When ptraced, every thread reports itself.
	 */
	if (stop_count == 0 || (current->ptrace & PT_PTRACED)) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current, CLD_STOPPED);
		read_unlock(&tasklist_lock);
	}

	do {
		schedule();
	} while (try_to_freeze());
	/*
	 * Now we don't run again until continued.
	 */
	current->exit_code = 0;
}
/*
 * This performs the stopping for SIGSTOP and other stop signals.
 * We have to stop all threads in the thread group.
 * Returns nonzero if we've actually stopped and released the siglock.
 * Returns zero if we didn't stop and still hold the siglock.
 */
static int do_signal_stop(int signr)
{
	struct signal_struct *sig = current->signal;
	int stop_count;

	if (sig->group_stop_count > 0) {
		/*
		 * There is a group stop in progress.  We don't need to
		 * start another one.
		 */
		stop_count = --sig->group_stop_count;
	} else {
		struct task_struct *t;

		if (unlikely((sig->flags & (SIGNAL_STOP_DEQUEUED | SIGNAL_UNKILLABLE))
					 != SIGNAL_STOP_DEQUEUED) ||
		    unlikely(signal_group_exit(sig)))
			return 0;
		/*
		 * There is no group stop already in progress.
		 * We must initiate one now.
		 */
		sig->group_exit_code = signr;

		stop_count = 0;
		for (t = next_thread(current); t != current; t = next_thread(t))
			/*
			 * Setting state to TASK_STOPPED for a group
			 * stop is always done with the siglock held,
			 * so this check has no races.
			 */
			if (!(t->flags & PF_EXITING) &&
			    !task_is_stopped_or_traced(t)) {
				stop_count++;
				signal_wake_up(t, 0);
			}
		sig->group_stop_count = stop_count;
	}

	if (stop_count == 0)
		sig->flags = SIGNAL_STOP_STOPPED;
	current->exit_code = sig->group_exit_code;
	__set_current_state(TASK_STOPPED);

	spin_unlock_irq(&current->sighand->siglock);
	finish_stop(stop_count);
	return 1;
}
static int ptrace_signal(int signr, siginfo_t *info,
			 struct pt_regs *regs, void *cookie)
{
	if (!(current->ptrace & PT_PTRACED))
		return signr;

	ptrace_signal_deliver(regs, cookie);

	/* Let the debugger run.  */
	ptrace_stop(signr, 0, info);

	/* We're back.  Did the debugger cancel the sig?  */
	signr = current->exit_code;
	if (signr == 0)
		return signr;

	current->exit_code = 0;

	/* Update the siginfo structure if the signal has
	   changed.  If the debugger wanted something
	   specific in the siginfo structure then it should
	   have updated *info via PTRACE_SETSIGINFO.  */
	if (signr != info->si_signo) {
		info->si_signo = signr;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = task_pid_vnr(current->parent);
		info->si_uid = current->parent->uid;
	}

	/* If the (new) signal is now blocked, requeue it.  */
	if (sigismember(&current->blocked, signr)) {
		specific_send_sig_info(signr, info, current);
		signr = 0;
	}

	return signr;
}
int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
			  struct pt_regs *regs, void *cookie)
{
	struct sighand_struct *sighand = current->sighand;
	struct signal_struct *signal = current->signal;
	int signr;

relock:
	/*
	 * We'll jump back here after any time we were stopped in TASK_STOPPED.
	 * While in TASK_STOPPED, we were considered "frozen enough".
	 * Now that we woke up, it's crucial if we're supposed to be
	 * frozen that we freeze now before running anything substantial.
	 */
	try_to_freeze();

	spin_lock_irq(&sighand->siglock);
	/*
	 * Every stopped thread goes here after wakeup. Check to see if
	 * we should notify the parent, prepare_signal(SIGCONT) encodes
	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
	 */
	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
		int why = (signal->flags & SIGNAL_STOP_CONTINUED)
				? CLD_CONTINUED : CLD_STOPPED;
		signal->flags &= ~SIGNAL_CLD_MASK;
		spin_unlock_irq(&sighand->siglock);

		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current->group_leader, why);
		read_unlock(&tasklist_lock);
		goto relock;
	}

	for (;;) {
		struct k_sigaction *ka;

		if (unlikely(signal->group_stop_count > 0) &&
		    do_signal_stop(0))
			goto relock;

		signr = dequeue_signal(current, &current->blocked, info);
		if (!signr)
			break; /* will return 0 */

		if (signr != SIGKILL) {
			signr = ptrace_signal(signr, info, regs, cookie);
			if (!signr)
				continue;
		}

		ka = &sighand->action[signr-1];
		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
			continue;
		if (ka->sa.sa_handler != SIG_DFL) {
			/* Run the handler.  */
			*return_ka = *ka;

			if (ka->sa.sa_flags & SA_ONESHOT)
				ka->sa.sa_handler = SIG_DFL;

			break; /* will return non-zero "signr" value */
		}

		/*
		 * Now we are doing the default action for this signal.
		 */
		if (sig_kernel_ignore(signr)) /* Default is nothing. */
			continue;

		/*
		 * Global init gets no signals it doesn't want.
		 */
		if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
		    !signal_group_exit(signal))
			continue;

		if (sig_kernel_stop(signr)) {
			/*
			 * The default action is to stop all threads in
			 * the thread group.  The job control signals
			 * do nothing in an orphaned pgrp, but SIGSTOP
			 * always works.  Note that siglock needs to be
			 * dropped during the call to is_orphaned_pgrp()
			 * because of lock ordering with tasklist_lock.
			 * This allows an intervening SIGCONT to be posted.
			 * We need to check for that and bail out if necessary.
			 */
			if (signr != SIGSTOP) {
				spin_unlock_irq(&sighand->siglock);

				/* signals can be posted during this window */

				if (is_current_pgrp_orphaned())
					goto relock;

				spin_lock_irq(&sighand->siglock);
			}

			if (likely(do_signal_stop(signr))) {
				/* It released the siglock.  */
				goto relock;
			}

			/*
			 * We didn't actually stop, due to a race
			 * with SIGCONT or something like that.
			 */
			continue;
		}

		spin_unlock_irq(&sighand->siglock);

		/*
		 * Anything else is fatal, maybe with a core dump.
		 */
		current->flags |= PF_SIGNALED;

		if (sig_kernel_coredump(signr)) {
			if (print_fatal_signals)
				print_fatal_signal(regs, signr);
			/*
			 * If it was able to dump core, this kills all
			 * other threads in the group and synchronizes with
			 * their demise.  If we lost the race with another
			 * thread getting here, it set group_exit_code
			 * first and our do_group_exit call below will use
			 * that value and ignore the one we pass it.
			 */
			do_coredump((long)signr, signr, regs);
		}

		/*
		 * Death signals, no core dump.
		 */
		do_group_exit(signr);
		/* NOTREACHED */
	}
	spin_unlock_irq(&sighand->siglock);
	return signr;
}
void exit_signals(struct task_struct *tsk)
{
	int group_stop = 0;
	struct task_struct *t;

	if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
		tsk->flags |= PF_EXITING;
		return;
	}

	spin_lock_irq(&tsk->sighand->siglock);
	/*
	 * From now this task is not visible for group-wide signals,
	 * see wants_signal(), do_signal_stop().
	 */
	tsk->flags |= PF_EXITING;
	if (!signal_pending(tsk))
		goto out;

	/* It could be that __group_complete_signal() choose us to
	 * notify about group-wide signal. Another thread should be
	 * woken now to take the signal since we will not.
	 */
	for (t = tsk; (t = next_thread(t)) != tsk; )
		if (!signal_pending(t) && !(t->flags & PF_EXITING))
			recalc_sigpending_and_wake(t);

	if (unlikely(tsk->signal->group_stop_count) &&
			!--tsk->signal->group_stop_count) {
		tsk->signal->flags = SIGNAL_STOP_STOPPED;
		group_stop = 1;
	}
out:
	spin_unlock_irq(&tsk->sighand->siglock);

	if (unlikely(group_stop)) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(tsk, CLD_STOPPED);
		read_unlock(&tasklist_lock);
	}
}
EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL_GPL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(kill_proc);
EXPORT_SYMBOL(ptrace_notify);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
EXPORT_SYMBOL(sigprocmask);
EXPORT_SYMBOL(block_all_signals);
EXPORT_SYMBOL(unblock_all_signals);
/*
 * System call entry points.
 */

asmlinkage long sys_restart_syscall(void)
{
	struct restart_block *restart = &current_thread_info()->restart_block;
	return restart->fn(restart);
}

long do_no_restart_syscall(struct restart_block *param)
{
	return -EINTR;
}

/*
 * We don't need to get the kernel lock - this is all local to this
 * particular thread.. (and that's good, because this is _heavily_
 * used by various programs)
 */

/*
 * This is also useful for kernel threads that want to temporarily
 * (or permanently) block certain signals.
 *
 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
 * interface happily blocks "unblockable" signals like SIGKILL
 * and friends.
 */
int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
{
	int error;

	spin_lock_irq(&current->sighand->siglock);
	if (oldset)
		*oldset = current->blocked;

	error = 0;
	switch (how) {
	case SIG_BLOCK:
		sigorsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_UNBLOCK:
		signandsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_SETMASK:
		current->blocked = *set;
		break;
	default:
		error = -EINVAL;
	}
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	return error;
}
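
/*
 * Example (a sketch, not a real call site): a kernel thread that wants
 * to temporarily ignore everything but SIGKILL could do
 *
 *	sigset_t blocked;
 *	siginitsetinv(&blocked, sigmask(SIGKILL));
 *	sigprocmask(SIG_BLOCK, &blocked, NULL);
 *
 * Note that, unlike the user-space interface, this function would
 * happily block SIGKILL too if asked.
 */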
asmlinkage long
sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize)
{
	int error = -EINVAL;
	sigset_t old_set, new_set;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, &old_set);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked;
		spin_unlock_irq(&current->sighand->siglock);

	set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}
long do_sigpending(void __user *set, unsigned long sigsetsize)
{
	long error = -EINVAL;
	sigset_t pending;

	if (sigsetsize > sizeof(sigset_t))
		goto out;

	spin_lock_irq(&current->sighand->siglock);
	sigorsets(&pending, &current->pending.signal,
		  &current->signal->shared_pending.signal);
	spin_unlock_irq(&current->sighand->siglock);

	/* Outside the lock because only this thread touches it.  */
	sigandsets(&pending, &current->blocked, &pending);

	error = -EFAULT;
	if (!copy_to_user(set, &pending, sigsetsize))
		error = 0;

out:
	return error;
}

asmlinkage long
sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize)
{
	return do_sigpending(set, sigsetsize);
}
#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER

int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
{
	int err;

	if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
		return -EFAULT;
	if (from->si_code < 0)
		return __copy_to_user(to, from, sizeof(siginfo_t))
			? -EFAULT : 0;
	/*
	 * If you change siginfo_t structure, please be sure
	 * this code is fixed accordingly.
	 * Please remember to update the signalfd_copyinfo() function
	 * inside fs/signalfd.c too, in case siginfo_t changes.
	 * It should never copy any pad contained in the structure
	 * to avoid security leaks, but must copy the generic
	 * 3 ints plus the relevant union member.
	 */
	err = __put_user(from->si_signo, &to->si_signo);
	err |= __put_user(from->si_errno, &to->si_errno);
	err |= __put_user((short)from->si_code, &to->si_code);
	switch (from->si_code & __SI_MASK) {
	case __SI_KILL:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	case __SI_TIMER:
		err |= __put_user(from->si_tid, &to->si_tid);
		err |= __put_user(from->si_overrun, &to->si_overrun);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	case __SI_POLL:
		err |= __put_user(from->si_band, &to->si_band);
		err |= __put_user(from->si_fd, &to->si_fd);
		break;
	case __SI_FAULT:
		err |= __put_user(from->si_addr, &to->si_addr);
#ifdef __ARCH_SI_TRAPNO
		err |= __put_user(from->si_trapno, &to->si_trapno);
#endif
		break;
	case __SI_CHLD:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_status, &to->si_status);
		err |= __put_user(from->si_utime, &to->si_utime);
		err |= __put_user(from->si_stime, &to->si_stime);
		break;
	case __SI_RT: /* This is not generated by the kernel as of now. */
	case __SI_MESGQ: /* But this is */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	default: /* this is just in case for now ... */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	}
	return err;
}

#endif
asmlinkage long
sys_rt_sigtimedwait(const sigset_t __user *uthese,
		    siginfo_t __user *uinfo,
		    const struct timespec __user *uts,
		    size_t sigsetsize)
{
	int ret, sig;
	sigset_t these;
	struct timespec ts;
	siginfo_t info;
	long timeout = 0;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&these, uthese, sizeof(these)))
		return -EFAULT;

	/*
	 * Invert the set of allowed signals to get those we
	 * want to block.
	 */
	sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
	signotset(&these);

	if (uts) {
		if (copy_from_user(&ts, uts, sizeof(ts)))
			return -EFAULT;
		if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
		    || ts.tv_sec < 0)
			return -EINVAL;
	}

	spin_lock_irq(&current->sighand->siglock);
	sig = dequeue_signal(current, &these, &info);
	if (!sig) {
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (uts)
			timeout = (timespec_to_jiffies(&ts)
				   + (ts.tv_sec || ts.tv_nsec));

		if (timeout) {
			/* None ready -- temporarily unblock those we're
			 * interested while we are sleeping in so that we'll
			 * be awakened when they arrive.  */
			current->real_blocked = current->blocked;
			sigandsets(&current->blocked, &current->blocked, &these);
			recalc_sigpending();
			spin_unlock_irq(&current->sighand->siglock);

			timeout = schedule_timeout_interruptible(timeout);

			spin_lock_irq(&current->sighand->siglock);
			sig = dequeue_signal(current, &these, &info);
			current->blocked = current->real_blocked;
			siginitset(&current->real_blocked, 0);
			recalc_sigpending();
		}
	}
	spin_unlock_irq(&current->sighand->siglock);

	if (sig) {
		ret = sig;
		if (uinfo) {
			if (copy_siginfo_to_user(uinfo, &info))
				ret = -EFAULT;
		}
	} else {
		ret = -EAGAIN;
		if (timeout)
			ret = -EINTR;
	}

	return ret;
}
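
/*
 * Note on the timeout arithmetic above (explanatory, not from the
 * source): timespec_to_jiffies() truncates, so the extra
 * "+ (ts.tv_sec || ts.tv_nsec)" adds one jiffy whenever a nonzero
 * timeout was requested, guaranteeing we never sleep for less than
 * the caller asked.  A zero timespec leaves timeout == 0 and we poll.
 */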
asmlinkage long
sys_kill(int pid, int sig)
{
	struct siginfo info;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_USER;
	info.si_pid = task_tgid_vnr(current);
	info.si_uid = current->uid;

	return kill_something_info(sig, &info, pid);
}
static int do_tkill(int tgid, int pid, int sig)
{
	int error;
	struct siginfo info;
	struct task_struct *p;
	unsigned long flags;

	error = -ESRCH;
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_TKILL;
	info.si_pid = task_tgid_vnr(current);
	info.si_uid = current->uid;

	rcu_read_lock();
	p = find_task_by_vpid(pid);
	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
		error = check_kill_permission(sig, &info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe.  No signal is actually delivered.
		 *
		 * If lock_task_sighand() fails we pretend the task dies
		 * after receiving the signal. The window is tiny, and the
		 * signal is private anyway.
		 */
		if (!error && sig && lock_task_sighand(p, &flags)) {
			error = specific_send_sig_info(sig, &info, p);
			unlock_task_sighand(p, &flags);
		}
	}
	rcu_read_unlock();

	return error;
}

/**
 *  sys_tgkill - send signal to one specific thread
 *  @tgid: the thread group ID of the thread
 *  @pid: the PID of the thread
 *  @sig: signal to be sent
 *
 *  This syscall also checks the @tgid and returns -ESRCH even if the PID
 *  exists but no longer belongs to the target process. This method
 *  solves the problem of threads exiting and PIDs getting reused.
 */
asmlinkage long sys_tgkill(int tgid, int pid, int sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	return do_tkill(tgid, pid, sig);
}

/*
 *  Send a signal to only one task, even if it's a CLONE_THREAD task.
 */
asmlinkage long
sys_tkill(int pid, int sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0)
		return -EINVAL;

	return do_tkill(0, pid, sig);
}
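
/*
 * User-space view (illustrative): glibc exposes these as
 *
 *	syscall(SYS_tgkill, tgid, tid, sig);
 *	syscall(SYS_tkill, tid, sig);	// legacy, racy wrt PID reuse
 *
 * tgkill is preferred precisely because the tgid check above turns a
 * reused PID into -ESRCH instead of signalling the wrong process.
 */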
asmlinkage long
sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
{
	siginfo_t info;

	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;

	/* Not even root can pretend to send signals from the kernel.
	   Nor can they impersonate a kill(), which adds source info.  */
	if (info.si_code >= 0)
		return -EPERM;
	info.si_signo = sig;

	/* POSIX.1b doesn't mention process groups.  */
	return kill_proc_info(sig, &info, pid);
}
int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
{
	struct task_struct *t = current;
	struct k_sigaction *k;
	sigset_t mask;

	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
		return -EINVAL;

	k = &t->sighand->action[sig-1];

	spin_lock_irq(&current->sighand->siglock);
	if (oact)
		*oact = *k;

	if (act) {
		sigdelsetmask(&act->sa.sa_mask,
			      sigmask(SIGKILL) | sigmask(SIGSTOP));
		*k = *act;
		/*
		 * POSIX 3.3.1.3:
		 *  "Setting a signal action to SIG_IGN for a signal that is
		 *   pending shall cause the pending signal to be discarded,
		 *   whether or not it is blocked."
		 *
		 *  "Setting a signal action to SIG_DFL for a signal that is
		 *   pending and whose default action is to ignore the signal
		 *   (for example, SIGCHLD), shall cause the pending signal to
		 *   be discarded, whether or not it is blocked"
		 */
		if (__sig_ignored(t, sig)) {
			sigemptyset(&mask);
			sigaddset(&mask, sig);
			rm_from_queue_full(&mask, &t->signal->shared_pending);
			do {
				rm_from_queue_full(&mask, &t->pending);
				t = next_thread(t);
			} while (t != current);
		}
	}

	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}
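
/*
 * Illustration of the POSIX rule above (not from the source): if a
 * process has a pending but blocked SIGCHLD and a thread installs
 * SIG_IGN for SIGCHLD via sigaction(), the rm_from_queue_full() calls
 * discard the pending instance from the shared queue and from every
 * thread, so unblocking SIGCHLD afterwards delivers nothing.
 */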
int
do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
{
	stack_t oss;
	int error;

	if (uoss) {
		oss.ss_sp = (void __user *) current->sas_ss_sp;
		oss.ss_size = current->sas_ss_size;
		oss.ss_flags = sas_ss_flags(sp);
	}

	if (uss) {
		void __user *ss_sp;
		size_t ss_size;
		int ss_flags;

		error = -EFAULT;
		if (!access_ok(VERIFY_READ, uss, sizeof(*uss))
		    || __get_user(ss_sp, &uss->ss_sp)
		    || __get_user(ss_flags, &uss->ss_flags)
		    || __get_user(ss_size, &uss->ss_size))
			goto out;

		error = -EPERM;
		if (on_sig_stack(sp))
			goto out;

		error = -EINVAL;
		/*
		 * Note - this code used to test ss_flags incorrectly:
		 * old code may have been written using ss_flags==0
		 * to mean ss_flags==SS_ONSTACK (as this was the only
		 * way that worked) - this fix preserves that older
		 * mechanism.
		 */
		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
			goto out;

		if (ss_flags == SS_DISABLE) {
			ss_size = 0;
			ss_sp = NULL;
		} else {
			error = -ENOMEM;
			if (ss_size < MINSIGSTKSZ)
				goto out;
		}

		current->sas_ss_sp = (unsigned long) ss_sp;
		current->sas_ss_size = ss_size;
	}

	if (uoss) {
		error = -EFAULT;
		if (copy_to_user(uoss, &oss, sizeof(oss)))
			goto out;
	}

	error = 0;
out:
	return error;
}
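
/*
 * User-space counterpart (illustrative): this backs sigaltstack(2),
 * typically used as
 *
 *	stack_t ss = {
 *		.ss_sp = malloc(SIGSTKSZ),
 *		.ss_size = SIGSTKSZ,
 *		.ss_flags = 0,
 *	};
 *	sigaltstack(&ss, NULL);
 *
 * The ss_flags quirk documented above means 0 is accepted as a synonym
 * for SS_ONSTACK when installing a stack.
 */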
#ifdef __ARCH_WANT_SYS_SIGPENDING

asmlinkage long
sys_sigpending(old_sigset_t __user *set)
{
	return do_sigpending(set, sizeof(*set));
}

#endif
2415 /* Some platforms have their own version with special arguments others
2416 support only sys_rt_sigprocmask. */
2419 sys_sigprocmask(int how
, old_sigset_t __user
*set
, old_sigset_t __user
*oset
)
2422 old_sigset_t old_set
, new_set
;
2426 if (copy_from_user(&new_set
, set
, sizeof(*set
)))
2428 new_set
&= ~(sigmask(SIGKILL
) | sigmask(SIGSTOP
));
2430 spin_lock_irq(¤t
->sighand
->siglock
);
2431 old_set
= current
->blocked
.sig
[0];
2439 sigaddsetmask(¤t
->blocked
, new_set
);
2442 sigdelsetmask(¤t
->blocked
, new_set
);
2445 current
->blocked
.sig
[0] = new_set
;
2449 recalc_sigpending();
2450 spin_unlock_irq(¤t
->sighand
->siglock
);
2456 old_set
= current
->blocked
.sig
[0];
2459 if (copy_to_user(oset
, &old_set
, sizeof(*oset
)))
2466 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
#ifdef __ARCH_WANT_SYS_RT_SIGACTION
asmlinkage long
sys_rt_sigaction(int sig,
		 const struct sigaction __user *act,
		 struct sigaction __user *oact,
		 size_t sigsetsize)
{
	struct k_sigaction new_sa, old_sa;
	int ret = -EINVAL;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (act) {
		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);

	if (!ret && oact) {
		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
			return -EFAULT;
	}
out:
	return ret;
}
#endif /* __ARCH_WANT_SYS_RT_SIGACTION */
#ifdef __ARCH_WANT_SYS_SGETMASK

/*
 * For backwards compatibility.  Functionality superseded by sigprocmask.
 */
asmlinkage long
sys_sgetmask(void)
{
	/* SMP safe */
	return current->blocked.sig[0];
}

asmlinkage long
sys_ssetmask(int newmask)
{
	int old;

	spin_lock_irq(&current->sighand->siglock);
	old = current->blocked.sig[0];

	siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
						  sigmask(SIGSTOP)));
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	return old;
}
#endif /* __ARCH_WANT_SGETMASK */
#ifdef __ARCH_WANT_SYS_SIGNAL
/*
 * For backwards compatibility.  Functionality superseded by sigaction.
 */
asmlinkage unsigned long
sys_signal(int sig, __sighandler_t handler)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	new_sa.sa.sa_handler = handler;
	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
	sigemptyset(&new_sa.sa.sa_mask);

	ret = do_sigaction(sig, &new_sa, &old_sa);

	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
#endif /* __ARCH_WANT_SYS_SIGNAL */
#ifdef __ARCH_WANT_SYS_PAUSE

asmlinkage long
sys_pause(void)
{
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	return -ERESTARTNOHAND;
}

#endif
#ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize)
{
	sigset_t newset;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset, unewset, sizeof(newset)))
		return -EFAULT;
	sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));

	spin_lock_irq(&current->sighand->siglock);
	current->saved_sigmask = current->blocked;
	current->blocked = newset;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	current->state = TASK_INTERRUPTIBLE;
	schedule();
	set_restore_sigmask();
	return -ERESTARTNOHAND;
}
#endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
__attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}

void __init signals_init(void)
{
	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
}