/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <trace/sched.h>

#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include "audit.h"	/* audit_signal_info() */
/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;
static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}
static int sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
		(handler == SIG_DFL && sig_kernel_ignore(sig));
}
static int sig_ignored(struct task_struct *t, int sig)
{
	void __user *handler;

	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return 0;

	handler = sig_handler(t, sig);
	if (!sig_handler_ignored(handler, sig))
		return 0;

	/*
	 * Tracers may want to know about even ignored signals.
	 */
	return !tracehook_consider_ignored_signal(t, sig, handler);
}
/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
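
/*
 * Illustrative expansion (not part of the original source): for a task t,
 *
 *	PENDING(&t->pending, &t->blocked)
 *
 * expands to has_pending_signals(&t->pending.signal, &t->blocked) and is
 * non-zero whenever at least one signal bit is set in the private pending
 * set that is not also set in t->blocked.
 */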
static int recalc_sigpending_tsk(struct task_struct *t)
{
	if (t->signal->group_stop_count > 0 ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return 1;
	}
	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here, and only callers who know they should do.
	 */
	return 0;
}

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current, the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
	if (unlikely(tracehook_force_sigpending()))
		set_thread_flag(TIF_SIGPENDING);
	else if (!recalc_sigpending_tsk(current) && !freezing(current))
		clear_thread_flag(TIF_SIGPENDING);
}
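
/*
 * Sketch of the usual pattern (as used by sigprocmask() and
 * sys_rt_sigsuspend() later in this file): the blocked mask is only
 * changed with the siglock held, and recalc_sigpending() is called
 * before dropping the lock so TIF_SIGPENDING stays consistent with
 * the new mask:
 *
 *	spin_lock_irq(&current->sighand->siglock);
 *	current->blocked = newset;
 *	recalc_sigpending();
 *	spin_unlock_irq(&current->sighand->siglock);
 */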
/* Given the mask, find the first available signal that should be serviced. */

int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;
	switch (_NSIG_WORDS) {
	default:
		for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
			if ((x = *s &~ *m) != 0) {
				sig = ffz(~x) + i*_NSIG_BPW + 1;
				break;
			}
		break;

	case 2: if ((x = s[0] &~ m[0]) != 0)
			sig = 1;
		else if ((x = s[1] &~ m[1]) != 0)
			sig = _NSIG_BPW + 1;
		else
			break;
		sig += ffz(~x);
		break;

	case 1: if ((x = *s &~ *m) != 0)
			sig = ffz(~x) + 1;
		break;
	}

	return sig;
}
static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
					 int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;

	/*
	 * In order to avoid problems with "switch_user()", we want to make
	 * sure that the compiler doesn't re-load "t->user"
	 */
	user = t->user;
	barrier();
	atomic_inc(&user->sigpending);
	if (override_rlimit ||
	    atomic_read(&user->sigpending) <=
			t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	if (unlikely(q == NULL)) {
		atomic_dec(&user->sigpending);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = get_uid(user);
	}
	return q;
}
static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}
void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}
/*
 * Flush all pending signals for a task.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}
void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}
void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}
/*
 * Flush all handlers for a task.
 */
void flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}
int unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
	if (is_global_init(tsk))
		return 1;
	if (handler != SIG_IGN && handler != SIG_DFL)
		return 0;
	return !tracehook_consider_fatal_signal(tsk, sig, handler);
}
/* Notify the system that a driver wants to block all signals for this
 * process, and wants to be notified if any signals at all were to be
 * sent/acted upon.  If the notifier routine returns non-zero, then the
 * signal will be acted upon after all.  If the notifier routine returns 0,
 * then the signal will be blocked.  Only one block per process is
 * allowed.  priv is a pointer to private data that the notifier routine
 * can use to determine if the signal should be blocked or not.
 */
void
block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier_mask = mask;
	current->notifier_data = priv;
	current->notifier = notifier;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
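
/*
 * Illustrative use of the notifier interface by a hypothetical driver
 * (my_notifier and my_data are not part of this file): while the block
 * is in place, the notifier is consulted before any signal in the mask
 * is acted upon; returning 0 keeps the signal blocked, non-zero lets it
 * through after all.
 *
 *	static int my_notifier(void *priv)
 *	{
 *		struct my_data *d = priv;
 *		return d->allow_signals;
 *	}
 *
 *	sigset_t mask;
 *	sigfillset(&mask);
 *	block_all_signals(my_notifier, &my_data, &mask);
 *	...
 *	unblock_all_signals();
 */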
/* Notify the system that blocking has ended. */

void
unblock_all_signals(void)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier = NULL;
	current->notifier_data = NULL;
	current->notifier_mask = NULL;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);
		__sigqueue_free(first);
	} else {
		/* Ok, it wasn't in the queue.  This must be
		   a fast-pathed signal or we must have been
		   out of queue space.  So zero out the info.
		 */
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = 0;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}
static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			siginfo_t *info)
{
	int sig = next_signal(pending, mask);

	if (sig) {
		if (current->notifier) {
			if (sigismember(current->notifier_mask, sig)) {
				if (!(current->notifier)(current->notifier_data)) {
					clear_thread_flag(TIF_SIGPENDING);
					return 0;
				}
			}
		}

		collect_signal(sig, pending, info);
	}

	return sig;
}
/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info);
		/*
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavy loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr.tv64 != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
	}
	if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		do_schedule_next_timer(info);
		spin_lock(&tsk->sighand->siglock);
	}
	return signr;
}
/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up(struct task_struct *t, int resume)
{
	unsigned int mask;

	set_tsk_thread_flag(t, TIF_SIGPENDING);

	/*
	 * For SIGKILL, we want to wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	mask = TASK_INTERRUPTIBLE;
	if (resume)
		mask |= TASK_WAKEKILL;
	if (!wake_up_state(t, mask))
		kick_process(t);
}
/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 *
 * This version takes a sigset mask and looks at all signals,
 * not just those in the first mask word.
 */
static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return 0;

	signandsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}
/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int rm_from_queue(unsigned long mask, struct sigpending *s)
{
	struct sigqueue *q, *n;

	if (!sigtestsetmask(&s->signal, mask))
		return 0;

	sigdelsetmask(&s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (q->info.si_signo < SIGRTMIN &&
		    (mask & sigmask(q->info.si_signo))) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}
/*
 * Bad permissions for sending the signal
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (info != SEND_SIG_NOINFO && (is_si_special(info) || SI_FROMKERNEL(info)))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if ((current->euid ^ t->suid) && (current->euid ^ t->uid) &&
	    (current->uid  ^ t->suid) && (current->uid  ^ t->uid) &&
	    !capable(CAP_KILL)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, 0);
}
/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals. The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static int prepare_signal(int sig, struct task_struct *p)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	if (unlikely(signal->flags & SIGNAL_GROUP_EXIT)) {
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		rm_from_queue(sigmask(SIGCONT), &signal->shared_pending);
		t = p;
		do {
			rm_from_queue(sigmask(SIGCONT), &t->pending);
		} while_each_thread(p, t);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues,
		 * and wake all threads.
		 */
		rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending);
		t = p;
		do {
			unsigned int state;
			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
			/*
			 * If there is a handler for SIGCONT, we must make
			 * sure that no thread returns to user mode before
			 * we post the signal, in case it was the only
			 * thread eligible to run the signal handler--then
			 * it must not do anything between resuming and
			 * running the handler.  With the TIF_SIGPENDING
			 * flag set, the thread will pause and acquire the
			 * siglock that we hold now and until we've queued
			 * the pending signal.
			 *
			 * Wake up the stopped thread _after_ setting
			 * TIF_SIGPENDING
			 */
			state = __TASK_STOPPED;
			if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
				set_tsk_thread_flag(t, TIF_SIGPENDING);
				state |= TASK_INTERRUPTIBLE;
			}
			wake_up_state(t, state);
		} while_each_thread(p, t);

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from finish_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal_to_deliver().
			 */
			signal->flags = why | SIGNAL_STOP_CONTINUED;
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		} else {
			/*
			 * We are not stopped, but there could be a stop
			 * signal in the middle of being processed after
			 * being removed from the queue.  Clear that too.
			 */
			signal->flags &= ~SIGNAL_STOP_DEQUEUED;
		}
	}

	return !sig_ignored(p, sig);
}
/*
 * Test if P wants to take SIG. After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG. Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals. Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline int wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return 0;
	if (p->flags & PF_EXITING)
		return 0;
	if (sig == SIGKILL)
		return 1;
	if (task_is_stopped_or_traced(p))
		return 0;
	return task_curr(p) || !signal_pending(p);
}

static void complete_signal(int sig, struct task_struct *p, int group)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if (!group || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL ||
	     !tracehook_consider_fatal_signal(t, sig, SIG_DFL))) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}
static inline int legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}
, struct siginfo
*info
, struct task_struct
*t
,
804 struct sigpending
*pending
;
807 trace_sched_signal_send(sig
, t
);
809 assert_spin_locked(&t
->sighand
->siglock
);
810 if (!prepare_signal(sig
, t
))
813 pending
= group
? &t
->signal
->shared_pending
: &t
->pending
;
815 * Short-circuit ignored signals and support queuing
816 * exactly one non-rt signal, so that we can get more
817 * detailed information about the cause of the signal.
819 if (legacy_queue(pending
, sig
))
822 * fast-pathed signals for kernel-internal things like SIGSTOP
825 if (info
== SEND_SIG_FORCED
)
828 /* Real-time signals must be queued if sent by sigqueue, or
829 some other real-time mechanism. It is implementation
830 defined whether kill() does so. We attempt to do so, on
831 the principle of least surprise, but since kill is not
832 allowed to fail with EAGAIN when low on memory we just
833 make sure at least one signal gets delivered and don't
834 pass on the info struct. */
836 q
= __sigqueue_alloc(t
, GFP_ATOMIC
, (sig
< SIGRTMIN
&&
837 (is_si_special(info
) ||
838 info
->si_code
>= 0)));
840 list_add_tail(&q
->list
, &pending
->list
);
841 switch ((unsigned long) info
) {
842 case (unsigned long) SEND_SIG_NOINFO
:
843 q
->info
.si_signo
= sig
;
844 q
->info
.si_errno
= 0;
845 q
->info
.si_code
= SI_USER
;
846 q
->info
.si_pid
= task_pid_vnr(current
);
847 q
->info
.si_uid
= current
->uid
;
849 case (unsigned long) SEND_SIG_PRIV
:
850 q
->info
.si_signo
= sig
;
851 q
->info
.si_errno
= 0;
852 q
->info
.si_code
= SI_KERNEL
;
857 copy_siginfo(&q
->info
, info
);
860 } else if (!is_si_special(info
)) {
861 if (sig
>= SIGRTMIN
&& info
->si_code
!= SI_USER
)
863 * Queue overflow, abort. We may abort if the signal was rt
864 * and sent by user using something other than kill().
870 signalfd_notify(t
, sig
);
871 sigaddset(&pending
->signal
, sig
);
872 complete_signal(sig
, t
, group
);
int print_fatal_signals;

static void print_fatal_signal(struct pt_regs *regs, int signr)
{
	printk("%s/%d: potentially unexpected fatal signal %d.\n",
		current->comm, task_pid_nr(current), signr);

#if defined(__i386__) && !defined(__arch_um__)
	printk("code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			__get_user(insn, (unsigned char *)(regs->ip + i));
			printk("%02x ", insn);
		}
	}
#endif
	printk("\n");
	show_regs(regs);
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option (&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);
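
/*
 * Example: booting with "print-fatal-signals=1" on the kernel command
 * line sets print_fatal_signals above, so get_signal_to_deliver() below
 * calls print_fatal_signal() before a core-dumping signal kills the task.
 */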
int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, 1);
}

static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	return send_signal(sig, info, t, 0);
}
/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	if (action->sa.sa_handler == SIG_DFL)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}
void
force_sig_specific(int sig, struct task_struct *t)
{
	force_sig_info(sig, SEND_SIG_FORCED, t);
}
/*
 * Nuke all other threads in the group.
 */
void zap_other_threads(struct task_struct *p)
{
	struct task_struct *t;

	p->signal->group_stop_count = 0;

	for (t = next_thread(p); t != p; t = next_thread(t)) {
		/*
		 * Don't bother with already dead threads
		 */
		if (t->exit_state)
			continue;

		/* SIGKILL will be handled before any pending SIGSTOP */
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}
}
int __fatal_signal_pending(struct task_struct *tsk)
{
	return sigismember(&tsk->pending.signal, SIGKILL);
}
EXPORT_SYMBOL(__fatal_signal_pending);
struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == tsk->sighand))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}
	rcu_read_unlock();

	return sighand;
}
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	unsigned long flags;
	int ret;

	ret = check_kill_permission(sig, info, p);

	if (!ret && sig) {
		ret = -ESRCH;
		if (lock_task_sighand(p, &flags)) {
			ret = __group_send_sig_info(sig, info, p);
			unlock_task_sighand(p, &flags);
		}
	}

	return ret;
}
/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 */

int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}
int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	rcu_read_lock();
retry:
	p = pid_task(pid, PIDTYPE_PID);
	if (p) {
		error = group_send_sig_info(sig, info, p);
		if (unlikely(error == -ESRCH))
			/*
			 * The task was unhashed in between, try again.
			 * If it is dead, pid_task() will return NULL,
			 * if we race with de_thread() it will find the
			 * new leader.
			 */
			goto retry;
	}
	rcu_read_unlock();

	return error;
}

int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;
	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}
/* like kill_pid_info(), but doesn't use uid/euid of "current" */
int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
		      uid_t uid, uid_t euid, u32 secid)
{
	int ret = -EINVAL;
	struct task_struct *p;

	if (!valid_signal(sig))
		return ret;

	read_lock(&tasklist_lock);
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
	    && (euid != p->suid) && (euid != p->uid)
	    && (uid != p->suid) && (uid != p->uid)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, info, sig, secid);
	if (ret)
		goto out_unlock;
	if (sig && p->sighand) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		ret = __group_send_sig_info(sig, info, p);
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
out_unlock:
	read_unlock(&tasklist_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);
/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0) {
		rcu_read_lock();
		ret = kill_pid_info(sig, info, find_vpid(pid));
		rcu_read_unlock();
		return ret;
	}

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct * p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}
/*
 * These are for backward compatibility with the rest of the kernel source.
 */

/*
 * The caller must ensure the task can't exit.
 */
int
send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;
	unsigned long flags;

	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	spin_lock_irqsave(&p->sighand->siglock, flags);
	ret = specific_send_sig_info(sig, info, p);
	spin_unlock_irqrestore(&p->sighand->siglock, flags);

	return ret;
}

#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}

void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, SEND_SIG_PRIV, p);
}
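
/*
 * Illustrative usage (not from this file): architecture fault handlers
 * typically deliver synchronous signals through these helpers, e.g.
 *
 *	force_sig(SIGSEGV, current);
 *
 * or, with extra fault information filled into a siginfo:
 *
 *	struct siginfo info;
 *	info.si_signo = SIGSEGV;
 *	info.si_errno = 0;
 *	info.si_code  = SEGV_MAPERR;
 *	info.si_addr  = (void __user *)address;
 *	force_sig_info(SIGSEGV, &info, current);
 */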
/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
int
force_sigsegv(int sig, struct task_struct *p)
{
	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
	return 0;
}
int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);
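
/*
 * Illustrative callers (not from this file): kernel code that holds a
 * struct pid reference can signal the corresponding process or process
 * group directly, e.g.
 *
 *	kill_pid(task_pid(p), SIGTERM, 1);
 *	kill_pgrp(task_pgrp(current), SIGHUP, 1);
 *
 * The priv argument selects SEND_SIG_PRIV instead of SEND_SIG_NOINFO
 * via __si_special() above, marking the signal as kernel-generated.
 */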
/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of Posix Timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */

struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q;

	if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0)))
		q->flags |= SIGQUEUE_PREALLOC;
	return q;
}

void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}
int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	unsigned long flags;
	int ret;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	if (!likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	if (!prepare_signal(sig, t))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		goto out;
	}
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = group ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
out:
	unlock_task_sighand(t, &flags);
ret:
	return ret;
}
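
/*
 * Sketch of how the POSIX timer code is expected to use the preallocated
 * path described above (field names approximate, for illustration only):
 * the sigqueue is allocated once at timer_create() time, so delivery at
 * expiry cannot fail with -EAGAIN.
 *
 *	timer->sigq = sigqueue_alloc();			(at timer_create())
 *	...
 *	timer->sigq->info.si_signo = sig;
 *	timer->sigq->info.si_code  = SI_TIMER;
 *	send_sigqueue(timer->sigq, target_task, shared);	(at expiry)
 *	...
 *	sigqueue_free(timer->sigq);			(at timer_delete())
 */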
/*
 * Wake up any threads in the parent blocked in wait* syscalls.
 */
static inline void __wake_up_parent(struct task_struct *p,
				    struct task_struct *parent)
{
	wake_up_interruptible_sync(&parent->signal->wait_chldexit);
}
/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns -1 if our parent ignored us and so we've switched to
 * self-reaping, or else @sig.
 */
int do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;
	struct task_cputime cputime;
	int ret = sig;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead.  */
	BUG_ON(task_is_stopped_or_traced(tsk));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * we are under tasklist_lock here so our parent is tied to
	 * us and cannot exit and release its namespace.
	 *
	 * the only thing it can do is switch its nsproxy with sys_unshare,
	 * but unsharing pid namespaces is not allowed, so we'll always
	 * see the relevant namespace
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg, it is not
	 * correct to rely on this
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
	rcu_read_unlock();

	info.si_uid = tsk->uid;

	thread_group_cputime(tsk, &cputime);
	info.si_utime = cputime_to_jiffies(cputime.utime);
	info.si_stime = cputime_to_jiffies(cputime.stime);

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		ret = tsk->exit_signal = -1;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = -1;
	}
	if (valid_signal(sig) && sig > 0)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);

	return ret;
}
static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
{
	struct siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;

	if (tsk->ptrace & PT_PTRACED)
		parent = tsk->parent;
	else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/*
	 * see comment in do_notify_parent() about the following 3 lines
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
	rcu_read_unlock();

	info.si_uid = tsk->uid;

	info.si_utime = cputime_to_clock_t(tsk->utime);
	info.si_stime = cputime_to_clock_t(tsk->stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}
static inline int may_ptrace_stop(void)
{
	if (!likely(current->ptrace & PT_PTRACED))
		return 0;
	/*
	 * Are we in the middle of do_coredump?
	 * If so and our tracer is also part of the coredump stopping
	 * is a deadlock situation, and pointless because our tracer
	 * is dead so don't allow us to stop.
	 * If SIGKILL was already sent before the caller unlocked
	 * ->siglock we must see ->core_state != NULL. Otherwise it
	 * is safe to enter schedule().
	 */
	if (unlikely(current->mm->core_state) &&
	    unlikely(current->mm == current->parent->mm))
		return 0;

	return 1;
}
/*
 * Return nonzero if there is a SIGKILL that should be waking us up.
 * Called with the siglock held.
 */
static int sigkill_pending(struct task_struct *tsk)
{
	return	sigismember(&tsk->pending.signal, SIGKILL) ||
		sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
}
/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 *
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer
 * is gone, we keep current->exit_code unless clear_code.
 */
static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info)
{
	if (arch_ptrace_stop_needed(exit_code, info)) {
		/*
		 * The arch code has something special to do before a
		 * ptrace stop.  This is allowed to block, e.g. for faults
		 * on user stack pages.  We can't keep the siglock while
		 * calling arch_ptrace_stop, so we must release it now.
		 * To preserve proper semantics, we must do this before
		 * any signal bookkeeping like checking group_stop_count.
		 * Meanwhile, a SIGKILL could come in before we retake the
		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
		 * So after regaining the lock, we must check for SIGKILL.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		arch_ptrace_stop(exit_code, info);
		spin_lock_irq(&current->sighand->siglock);
		if (sigkill_pending(current))
			return;
	}

	/*
	 * If there is a group stop in progress,
	 * we must participate in the bookkeeping.
	 */
	if (current->signal->group_stop_count > 0)
		--current->signal->group_stop_count;

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/* Let the debugger run.  */
	__set_current_state(TASK_TRACED);
	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);
	if (may_ptrace_stop()) {
		do_notify_parent_cldstop(current, CLD_TRAPPED);
		read_unlock(&tasklist_lock);
		schedule();
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't drop the lock yet, another tracer may come.
		 */
		__set_current_state(TASK_RUNNING);
		if (clear_code)
			current->exit_code = 0;
		read_unlock(&tasklist_lock);
	}

	/*
	 * While in TASK_TRACED, we were considered "frozen enough".
	 * Now that we woke up, it's crucial if we're supposed to be
	 * frozen that we freeze now before running anything substantial.
	 */
	try_to_freeze();

	/*
	 * We are back.  Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 * This sets TIF_SIGPENDING, but never clears it.
	 */
	recalc_sigpending_tsk(current);
}
void ptrace_notify(int exit_code)
{
	siginfo_t info;

	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);

	memset(&info, 0, sizeof info);
	info.si_signo = SIGTRAP;
	info.si_code = exit_code;
	info.si_pid = task_pid_vnr(current);
	info.si_uid = current->uid;

	/* Let the debugger run.  */
	spin_lock_irq(&current->sighand->siglock);
	ptrace_stop(exit_code, 1, &info);
	spin_unlock_irq(&current->sighand->siglock);
}
static void
finish_stop(int stop_count)
{
	/*
	 * If there are no other threads in the group, or if there is
	 * a group stop in progress and we are the last to stop,
	 * report to the parent.  When ptraced, every thread reports itself.
	 */
	if (tracehook_notify_jctl(stop_count == 0, CLD_STOPPED)) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current, CLD_STOPPED);
		read_unlock(&tasklist_lock);
	}

	do {
		schedule();
	} while (try_to_freeze());
	/*
	 * Now we don't run again until continued.
	 */
	current->exit_code = 0;
}
/*
 * This performs the stopping for SIGSTOP and other stop signals.
 * We have to stop all threads in the thread group.
 * Returns nonzero if we've actually stopped and released the siglock.
 * Returns zero if we didn't stop and still hold the siglock.
 */
static int do_signal_stop(int signr)
{
	struct signal_struct *sig = current->signal;
	int stop_count;

	if (sig->group_stop_count > 0) {
		/*
		 * There is a group stop in progress.  We don't need to
		 * start another one.
		 */
		stop_count = --sig->group_stop_count;
	} else {
		struct task_struct *t;

		if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) ||
		    unlikely(signal_group_exit(sig)))
			return 0;
		/*
		 * There is no group stop already in progress.
		 * We must initiate one now.
		 */
		sig->group_exit_code = signr;

		stop_count = 0;
		for (t = next_thread(current); t != current; t = next_thread(t))
			/*
			 * Setting state to TASK_STOPPED for a group
			 * stop is always done with the siglock held,
			 * so this check has no races.
			 */
			if (!(t->flags & PF_EXITING) &&
			    !task_is_stopped_or_traced(t)) {
				stop_count++;
				signal_wake_up(t, 0);
			}
		sig->group_stop_count = stop_count;
	}

	if (stop_count == 0)
		sig->flags = SIGNAL_STOP_STOPPED;
	current->exit_code = sig->group_exit_code;
	__set_current_state(TASK_STOPPED);

	spin_unlock_irq(&current->sighand->siglock);
	finish_stop(stop_count);
	return 1;
}
static int ptrace_signal(int signr, siginfo_t *info,
			 struct pt_regs *regs, void *cookie)
{
	if (!(current->ptrace & PT_PTRACED))
		return signr;

	ptrace_signal_deliver(regs, cookie);

	/* Let the debugger run.  */
	ptrace_stop(signr, 0, info);

	/* We're back.  Did the debugger cancel the sig?  */
	signr = current->exit_code;
	if (signr == 0)
		return signr;

	current->exit_code = 0;

	/* Update the siginfo structure if the signal has
	   changed.  If the debugger wanted something
	   specific in the siginfo structure then it should
	   have updated *info via PTRACE_SETSIGINFO.  */
	if (signr != info->si_signo) {
		info->si_signo = signr;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = task_pid_vnr(current->parent);
		info->si_uid = current->parent->uid;
	}

	/* If the (new) signal is now blocked, requeue it.  */
	if (sigismember(&current->blocked, signr)) {
		specific_send_sig_info(signr, info, current);
		signr = 0;
	}

	return signr;
}
int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
			  struct pt_regs *regs, void *cookie)
{
	struct sighand_struct *sighand = current->sighand;
	struct signal_struct *signal = current->signal;
	int signr;

relock:
	/*
	 * We'll jump back here after any time we were stopped in TASK_STOPPED.
	 * While in TASK_STOPPED, we were considered "frozen enough".
	 * Now that we woke up, it's crucial if we're supposed to be
	 * frozen that we freeze now before running anything substantial.
	 */
	try_to_freeze();

	spin_lock_irq(&sighand->siglock);
	/*
	 * Every stopped thread goes here after wakeup. Check to see if
	 * we should notify the parent, prepare_signal(SIGCONT) encodes
	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
	 */
	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
		int why = (signal->flags & SIGNAL_STOP_CONTINUED)
				? CLD_CONTINUED : CLD_STOPPED;
		signal->flags &= ~SIGNAL_CLD_MASK;

		spin_unlock_irq(&sighand->siglock);

		if (unlikely(!tracehook_notify_jctl(1, why)))
			goto relock;

		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current->group_leader, why);
		read_unlock(&tasklist_lock);
		goto relock;
	}

	for (;;) {
		struct k_sigaction *ka;

		if (unlikely(signal->group_stop_count > 0) &&
		    do_signal_stop(0))
			goto relock;

		/*
		 * Tracing can induce an artificial signal and choose sigaction.
		 * The return value in @signr determines the default action,
		 * but @info->si_signo is the signal number we will report.
		 */
		signr = tracehook_get_signal(current, regs, info, return_ka);
		if (unlikely(signr < 0))
			goto relock;
		if (unlikely(signr != 0))
			ka = return_ka;
		else {
			signr = dequeue_signal(current, &current->blocked,
					       info);

			if (!signr)
				break; /* will return 0 */

			if (signr != SIGKILL) {
				signr = ptrace_signal(signr, info,
						      regs, cookie);
				if (!signr)
					continue;
			}

			ka = &sighand->action[signr-1];
		}

		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
			continue;
		if (ka->sa.sa_handler != SIG_DFL) {
			/* Run the handler.  */
			*return_ka = *ka;

			if (ka->sa.sa_flags & SA_ONESHOT)
				ka->sa.sa_handler = SIG_DFL;

			break; /* will return non-zero "signr" value */
		}

		/*
		 * Now we are doing the default action for this signal.
		 */
		if (sig_kernel_ignore(signr)) /* Default is nothing. */
			continue;

		/*
		 * Global init gets no signals it doesn't want.
		 */
		if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
		    !signal_group_exit(signal))
			continue;

		if (sig_kernel_stop(signr)) {
			/*
			 * The default action is to stop all threads in
			 * the thread group.  The job control signals
			 * do nothing in an orphaned pgrp, but SIGSTOP
			 * always works.  Note that siglock needs to be
			 * dropped during the call to is_orphaned_pgrp()
			 * because of lock ordering with tasklist_lock.
			 * This allows an intervening SIGCONT to be posted.
			 * We need to check for that and bail out if necessary.
			 */
			if (signr != SIGSTOP) {
				spin_unlock_irq(&sighand->siglock);

				/* signals can be posted during this window */

				if (is_current_pgrp_orphaned())
					goto relock;

				spin_lock_irq(&sighand->siglock);
			}

			if (likely(do_signal_stop(info->si_signo))) {
				/* It released the siglock.  */
				goto relock;
			}

			/*
			 * We didn't actually stop, due to a race
			 * with SIGCONT or something like that.
			 */
			continue;
		}

		spin_unlock_irq(&sighand->siglock);

		/*
		 * Anything else is fatal, maybe with a core dump.
		 */
		current->flags |= PF_SIGNALED;

		if (sig_kernel_coredump(signr)) {
			if (print_fatal_signals)
				print_fatal_signal(regs, info->si_signo);
			/*
			 * If it was able to dump core, this kills all
			 * other threads in the group and synchronizes with
			 * their demise.  If we lost the race with another
			 * thread getting here, it set group_exit_code
			 * first and our do_group_exit call below will use
			 * that value and ignore the one we pass it.
			 */
			do_coredump(info->si_signo, info->si_signo, regs);
		}

		/*
		 * Death signals, no core dump.
		 */
		do_group_exit(info->si_signo);
		/* NOTREACHED */
	}
	spin_unlock_irq(&sighand->siglock);
	return signr;
}
void exit_signals(struct task_struct *tsk)
{
	int group_stop = 0;
	struct task_struct *t;

	if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
		tsk->flags |= PF_EXITING;
		return;
	}

	spin_lock_irq(&tsk->sighand->siglock);
	/*
	 * From now this task is not visible for group-wide signals,
	 * see wants_signal(), do_signal_stop().
	 */
	tsk->flags |= PF_EXITING;
	if (!signal_pending(tsk))
		goto out;

	/* It could be that __group_complete_signal() chose us to
	 * notify about a group-wide signal. Another thread should be
	 * woken now to take the signal since we will not.
	 */
	for (t = tsk; (t = next_thread(t)) != tsk; )
		if (!signal_pending(t) && !(t->flags & PF_EXITING))
			recalc_sigpending_and_wake(t);

	if (unlikely(tsk->signal->group_stop_count) &&
			!--tsk->signal->group_stop_count) {
		tsk->signal->flags = SIGNAL_STOP_STOPPED;
		group_stop = 1;
	}
out:
	spin_unlock_irq(&tsk->sighand->siglock);

	if (unlikely(group_stop) && tracehook_notify_jctl(1, CLD_STOPPED)) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(tsk, CLD_STOPPED);
		read_unlock(&tasklist_lock);
	}
}
EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL_GPL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
EXPORT_SYMBOL(sigprocmask);
EXPORT_SYMBOL(block_all_signals);
EXPORT_SYMBOL(unblock_all_signals);
/*
 * System call entry points.
 */

asmlinkage long sys_restart_syscall(void)
{
	struct restart_block *restart = &current_thread_info()->restart_block;
	return restart->fn(restart);
}

long do_no_restart_syscall(struct restart_block *param)
{
	return -EINTR;
}
/*
 * We don't need to get the kernel lock - this is all local to this
 * particular thread.. (and that's good, because this is _heavily_
 * used by various programs)
 */

/*
 * This is also useful for kernel threads that want to temporarily
 * (or permanently) block certain signals.
 *
 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
 * interface happily blocks "unblockable" signals like SIGKILL
 * and friends.
 */
int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
{
	int error;

	spin_lock_irq(&current->sighand->siglock);
	if (oldset)
		*oldset = current->blocked;

	error = 0;
	switch (how) {
	case SIG_BLOCK:
		sigorsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_UNBLOCK:
		signandsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_SETMASK:
		current->blocked = *set;
		break;
	default:
		error = -EINVAL;
	}
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	return error;
}
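
/*
 * Illustrative in-kernel caller (hypothetical, not from this file): a
 * kernel thread that wants to see SIGKILL but keep everything else
 * blocked can drive this interface directly, e.g.
 *
 *	sigset_t unblock;
 *
 *	siginitset(&unblock, sigmask(SIGKILL));
 *	sigprocmask(SIG_UNBLOCK, &unblock, NULL);
 *
 * Unlike the syscall paths, nothing here prevents blocking SIGKILL or
 * SIGSTOP.
 */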
asmlinkage long
sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize)
{
	int error = -EINVAL;
	sigset_t old_set, new_set;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, &old_set);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked;
		spin_unlock_irq(&current->sighand->siglock);

	set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}
long do_sigpending(void __user *set, unsigned long sigsetsize)
{
	long error = -EINVAL;
	sigset_t pending;

	if (sigsetsize > sizeof(sigset_t))
		goto out;

	spin_lock_irq(&current->sighand->siglock);
	sigorsets(&pending, &current->pending.signal,
		  &current->signal->shared_pending.signal);
	spin_unlock_irq(&current->sighand->siglock);

	/* Outside the lock because only this thread touches it.  */
	sigandsets(&pending, &current->blocked, &pending);

	error = -EFAULT;
	if (!copy_to_user(set, &pending, sigsetsize))
		error = 0;

out:
	return error;
}

asmlinkage long
sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize)
{
	return do_sigpending(set, sigsetsize);
}
#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER

int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
{
	int err;

	if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
		return -EFAULT;
	if (from->si_code < 0)
		return __copy_to_user(to, from, sizeof(siginfo_t))
			? -EFAULT : 0;
	/*
	 * If you change siginfo_t structure, please be sure
	 * this code is fixed accordingly.
	 * Please remember to update the signalfd_copyinfo() function
	 * inside fs/signalfd.c too, in case siginfo_t changes.
	 * It should never copy any pad contained in the structure
	 * to avoid security leaks, but must copy the generic
	 * 3 ints plus the relevant union member.
	 */
	err = __put_user(from->si_signo, &to->si_signo);
	err |= __put_user(from->si_errno, &to->si_errno);
	err |= __put_user((short)from->si_code, &to->si_code);
	switch (from->si_code & __SI_MASK) {
	case __SI_KILL:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	case __SI_TIMER:
		err |= __put_user(from->si_tid, &to->si_tid);
		err |= __put_user(from->si_overrun, &to->si_overrun);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	case __SI_POLL:
		err |= __put_user(from->si_band, &to->si_band);
		err |= __put_user(from->si_fd, &to->si_fd);
		break;
	case __SI_FAULT:
		err |= __put_user(from->si_addr, &to->si_addr);
#ifdef __ARCH_SI_TRAPNO
		err |= __put_user(from->si_trapno, &to->si_trapno);
#endif
		break;
	case __SI_CHLD:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_status, &to->si_status);
		err |= __put_user(from->si_utime, &to->si_utime);
		err |= __put_user(from->si_stime, &to->si_stime);
		break;
	case __SI_RT: /* This is not generated by the kernel as of now. */
	case __SI_MESGQ: /* But this is */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	default: /* this is just in case for now ... */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	}
	return err;
}

#endif
asmlinkage long
sys_rt_sigtimedwait(const sigset_t __user *uthese,
		    siginfo_t __user *uinfo,
		    const struct timespec __user *uts,
		    size_t sigsetsize)
{
	int ret, sig;
	sigset_t these;
	struct timespec ts;
	siginfo_t info;
	long timeout = 0;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&these, uthese, sizeof(these)))
		return -EFAULT;

	/*
	 * Invert the set of allowed signals to get those we
	 * want to block.
	 */
	sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
	signotset(&these);

	if (uts) {
		if (copy_from_user(&ts, uts, sizeof(ts)))
			return -EFAULT;
		if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
		    || ts.tv_sec < 0)
			return -EINVAL;
	}

	spin_lock_irq(&current->sighand->siglock);
	sig = dequeue_signal(current, &these, &info);
	if (!sig) {
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (uts)
			timeout = (timespec_to_jiffies(&ts)
				   + (ts.tv_sec || ts.tv_nsec));

		if (timeout) {
			/* None ready -- temporarily unblock those we're
			 * interested while we are sleeping in so that we'll
			 * be awakened when they arrive.  */
			current->real_blocked = current->blocked;
			sigandsets(&current->blocked, &current->blocked, &these);
			recalc_sigpending();
			spin_unlock_irq(&current->sighand->siglock);

			timeout = schedule_timeout_interruptible(timeout);

			spin_lock_irq(&current->sighand->siglock);
			sig = dequeue_signal(current, &these, &info);
			current->blocked = current->real_blocked;
			siginitset(&current->real_blocked, 0);
			recalc_sigpending();
		}
	}
	spin_unlock_irq(&current->sighand->siglock);

	if (sig) {
		ret = sig;
		if (uinfo) {
			if (copy_siginfo_to_user(uinfo, &info))
				ret = -EFAULT;
		}
	} else {
		ret = -EAGAIN;
		if (timeout)
			ret = -EINTR;
	}

	return ret;
}
asmlinkage long
sys_kill(pid_t pid, int sig)
{
	struct siginfo info;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_USER;
	info.si_pid = task_tgid_vnr(current);
	info.si_uid = current->uid;

	return kill_something_info(sig, &info, pid);
}
static int do_tkill(pid_t tgid, pid_t pid, int sig)
{
	int error;
	struct siginfo info;
	struct task_struct *p;
	unsigned long flags;

	error = -ESRCH;
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_TKILL;
	info.si_pid = task_tgid_vnr(current);
	info.si_uid = current->uid;

	rcu_read_lock();
	p = find_task_by_vpid(pid);
	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
		error = check_kill_permission(sig, &info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe.  No signal is actually delivered.
		 *
		 * If lock_task_sighand() fails we pretend the task dies
		 * after receiving the signal. The window is tiny, and the
		 * signal is private anyway.
		 */
		if (!error && sig && lock_task_sighand(p, &flags)) {
			error = specific_send_sig_info(sig, &info, p);
			unlock_task_sighand(p, &flags);
		}
	}
	rcu_read_unlock();

	return error;
}
/**
 *  sys_tgkill - send signal to one specific thread
 *  @tgid: the thread group ID of the thread
 *  @pid: the PID of the thread
 *  @sig: signal to be sent
 *
 *  This syscall also checks the @tgid and returns -ESRCH even if the PID
 *  exists but no longer belongs to the target process. This method solves
 *  the problem of threads exiting and PIDs getting reused.
 */
asmlinkage long sys_tgkill(pid_t tgid, pid_t pid, int sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	return do_tkill(tgid, pid, sig);
}

/*
 *  Send a signal to only one task, even if it's a CLONE_THREAD task.
 */
asmlinkage long
sys_tkill(pid_t pid, int sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0)
		return -EINVAL;

	return do_tkill(0, pid, sig);
}
asmlinkage long
sys_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t __user *uinfo)
{
	siginfo_t info;

	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;

	/* Not even root can pretend to send signals from the kernel.
	   Nor can they impersonate a kill(), which adds source info.  */
	if (info.si_code >= 0)
		return -EPERM;
	info.si_signo = sig;

	/* POSIX.1b doesn't mention process groups.  */
	return kill_proc_info(sig, &info, pid);
}
int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
{
	struct task_struct *t = current;
	struct k_sigaction *k;
	sigset_t mask;

	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
		return -EINVAL;

	k = &t->sighand->action[sig-1];

	spin_lock_irq(&current->sighand->siglock);
	if (oact)
		*oact = *k;

	if (act) {
		sigdelsetmask(&act->sa.sa_mask,
			      sigmask(SIGKILL) | sigmask(SIGSTOP));
		*k = *act;
		/*
		 * POSIX 3.3.1.3:
		 *  "Setting a signal action to SIG_IGN for a signal that is
		 *   pending shall cause the pending signal to be discarded,
		 *   whether or not it is blocked."
		 *
		 *  "Setting a signal action to SIG_DFL for a signal that is
		 *   pending and whose default action is to ignore the signal
		 *   (for example, SIGCHLD), shall cause the pending signal to
		 *   be discarded, whether or not it is blocked"
		 */
		if (sig_handler_ignored(sig_handler(t, sig), sig)) {
			sigemptyset(&mask);
			sigaddset(&mask, sig);
			rm_from_queue_full(&mask, &t->signal->shared_pending);
			do {
				rm_from_queue_full(&mask, &t->pending);
				t = next_thread(t);
			} while (t != current);
		}
	}

	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}
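
/*
 * Example: sys_signal() further below drives this function with a
 * k_sigaction built on the stack:
 *
 *	struct k_sigaction new_sa, old_sa;
 *
 *	new_sa.sa.sa_handler = handler;
 *	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
 *	sigemptyset(&new_sa.sa.sa_mask);
 *	ret = do_sigaction(sig, &new_sa, &old_sa);
 */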
int
do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
{
	stack_t oss;
	int error;

	if (uoss) {
		oss.ss_sp = (void __user *) current->sas_ss_sp;
		oss.ss_size = current->sas_ss_size;
		oss.ss_flags = sas_ss_flags(sp);
	}

	if (uss) {
		void __user *ss_sp;
		size_t ss_size;
		int ss_flags;

		error = -EFAULT;
		if (!access_ok(VERIFY_READ, uss, sizeof(*uss))
		    || __get_user(ss_sp, &uss->ss_sp)
		    || __get_user(ss_flags, &uss->ss_flags)
		    || __get_user(ss_size, &uss->ss_size))
			goto out;

		error = -EPERM;
		if (on_sig_stack(sp))
			goto out;

		error = -EINVAL;
		/*
		 * Note - this code used to test ss_flags incorrectly
		 *  	  old code may have been written using ss_flags==0
		 *	  to mean ss_flags==SS_ONSTACK (as this was the only
		 *	  way that worked) - this fix preserves that older
		 *	  mechanism
		 */
		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
			goto out;

		if (ss_flags == SS_DISABLE) {
			ss_size = 0;
			ss_sp = NULL;
		} else {
			error = -ENOMEM;
			if (ss_size < MINSIGSTKSZ)
				goto out;
		}

		current->sas_ss_sp = (unsigned long) ss_sp;
		current->sas_ss_size = ss_size;
	}

	if (uoss) {
		error = -EFAULT;
		if (copy_to_user(uoss, &oss, sizeof(oss)))
			goto out;
	}

	error = 0;
out:
	return error;
}
#ifdef __ARCH_WANT_SYS_SIGPENDING

asmlinkage long
sys_sigpending(old_sigset_t __user *set)
{
	return do_sigpending(set, sizeof(*set));
}

#endif
#ifdef __ARCH_WANT_SYS_SIGPROCMASK
/* Some platforms have their own version with special arguments others
   support only sys_rt_sigprocmask.  */

asmlinkage long
sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset)
{
	int error;
	old_sigset_t old_set, new_set;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));

		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked.sig[0];

		error = 0;
		switch (how) {
		default:
			error = -EINVAL;
			break;
		case SIG_BLOCK:
			sigaddsetmask(&current->blocked, new_set);
			break;
		case SIG_UNBLOCK:
			sigdelsetmask(&current->blocked, new_set);
			break;
		case SIG_SETMASK:
			current->blocked.sig[0] = new_set;
			break;
		}

		recalc_sigpending();
		spin_unlock_irq(&current->sighand->siglock);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		old_set = current->blocked.sig[0];
	set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}
#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
#ifdef __ARCH_WANT_SYS_RT_SIGACTION
asmlinkage long
sys_rt_sigaction(int sig,
		 const struct sigaction __user *act,
		 struct sigaction __user *oact,
		 size_t sigsetsize)
{
	struct k_sigaction new_sa, old_sa;
	int ret = -EINVAL;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (act) {
		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);

	if (!ret && oact) {
		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
			return -EFAULT;
	}
out:
	return ret;
}
#endif /* __ARCH_WANT_SYS_RT_SIGACTION */
#ifdef __ARCH_WANT_SYS_SGETMASK

/*
 * For backwards compatibility.  Functionality superseded by sigprocmask.
 */
asmlinkage long
sys_sgetmask(void)
{
	/* SMP safe */
	return current->blocked.sig[0];
}

asmlinkage long
sys_ssetmask(int newmask)
{
	int old;

	spin_lock_irq(&current->sighand->siglock);
	old = current->blocked.sig[0];

	siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
						  sigmask(SIGSTOP)));
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	return old;
}
#endif /* __ARCH_WANT_SGETMASK */
#ifdef __ARCH_WANT_SYS_SIGNAL
/*
 * For backwards compatibility.  Functionality superseded by sigaction.
 */
asmlinkage unsigned long
sys_signal(int sig, __sighandler_t handler)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	new_sa.sa.sa_handler = handler;
	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
	sigemptyset(&new_sa.sa.sa_mask);

	ret = do_sigaction(sig, &new_sa, &old_sa);

	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
#endif /* __ARCH_WANT_SYS_SIGNAL */
#ifdef __ARCH_WANT_SYS_PAUSE

asmlinkage long
sys_pause(void)
{
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	return -ERESTARTNOHAND;
}

#endif
#ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize)
{
	sigset_t newset;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset, unewset, sizeof(newset)))
		return -EFAULT;
	sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));

	spin_lock_irq(&current->sighand->siglock);
	current->saved_sigmask = current->blocked;
	current->blocked = newset;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	current->state = TASK_INTERRUPTIBLE;
	schedule();
	set_restore_sigmask();
	return -ERESTARTNOHAND;
}
#endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
__attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}

void __init signals_init(void)
{
	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
}