/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include "audit.h"	/* audit_signal_info() */

/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;

static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}

static int sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
		(handler == SIG_DFL && sig_kernel_ignore(sig));
}

static int sig_task_ignored(struct task_struct *t, int sig,
		int from_ancestor_ns)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
			handler == SIG_DFL && !from_ancestor_ns)
		return 1;

	return sig_handler_ignored(handler, sig);
}

static int sig_ignored(struct task_struct *t, int sig, int from_ancestor_ns)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return 0;

	if (!sig_task_ignored(t, sig, from_ancestor_ns))
		return 0;

	/*
	 * Tracers may want to know about even ignored signals.
	 */
	return !tracehook_consider_ignored_signal(t, sig);
}

/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

static int recalc_sigpending_tsk(struct task_struct *t)
{
	if (t->signal->group_stop_count > 0 ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return 1;
	}
	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here, and only callers who know they should do.
	 */
	return 0;
}

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current, the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
	if (unlikely(tracehook_force_sigpending()))
		set_thread_flag(TIF_SIGPENDING);
	else if (!recalc_sigpending_tsk(current) && !freezing(current))
		clear_thread_flag(TIF_SIGPENDING);
}

/* Given the mask, find the first available signal that should be serviced. */

int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;
	switch (_NSIG_WORDS) {
	default:
		for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
			if ((x = *s &~ *m) != 0) {
				sig = ffz(~x) + i*_NSIG_BPW + 1;
				break;
			}
		break;

	case 2: if ((x = s[0] &~ m[0]) != 0)
			sig = 1;
		else if ((x = s[1] &~ m[1]) != 0)
			sig = _NSIG_BPW + 1;
		else
			break;
		sig += ffz(~x);
		break;

	case 1: if ((x = *s &~ *m) != 0)
			sig = ffz(~x) + 1;
		break;
	}

	return sig;
}
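
/*
 * Illustrative sketch (not kernel code): next_signal() finds the lowest
 * pending, non-masked signal by scanning word by word and applying ffz()
 * to the complement, i.e. the index of the lowest set bit.  A minimal
 * userspace analogue of the single-word case, assuming GCC builtins:
 *
 *	unsigned long x = pending & ~masked;
 *	if (x != 0)
 *		sig = __builtin_ctzl(x) + 1;	// ffz(~x) == ctz(x)
 *
 * e.g. x = 0b10100 yields sig = 3, so lower-numbered signals within a
 * word are always serviced first.
 */
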
static inline void print_dropped_signal(int sig)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

	printk(KERN_INFO "%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
				current->comm, current->pid, sig);
}

/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;

	/*
	 * We won't get problems with the target's UID changing under us
	 * because changing it requires RCU be used, and if t != current, the
	 * caller must be holding the RCU readlock (by way of a spinlock) and
	 * we use RCU protection here
	 */
	user = get_uid(__task_cred(t)->user);
	atomic_inc(&user->sigpending);

	if (override_rlimit ||
	    atomic_read(&user->sigpending) <=
			t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur) {
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		atomic_dec(&user->sigpending);
		free_uid(user);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = user;
	}

	return q;
}

static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}

void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for a task.
 */
void __flush_signals(struct task_struct *t)
{
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
}

void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	__flush_signals(t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}

static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}

void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}

void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}

/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}
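
/*
 * Usage note (illustrative, userspace): calling this with
 * force_default == 0 is what gives execve() its POSIX behaviour --
 * caught signals reset to SIG_DFL, while SIG_IGN dispositions survive.
 * A sketch that observes this ("some_handler" is hypothetical, error
 * handling omitted):
 *
 *	#include <signal.h>
 *	#include <unistd.h>
 *
 *	signal(SIGUSR1, some_handler);	// reset to SIG_DFL across exec
 *	signal(SIGUSR2, SIG_IGN);	// still SIG_IGN after exec
 *	execl("/bin/sleep", "sleep", "60", (char *)NULL);
 */
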
int unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
	if (is_global_init(tsk))
		return 1;
	if (handler != SIG_IGN && handler != SIG_DFL)
		return 0;
	return !tracehook_consider_fatal_signal(tsk, sig);
}

/* Notify the system that a driver wants to block all signals for this
 * process, and wants to be notified if any signals at all were to be
 * sent/acted upon.  If the notifier routine returns non-zero, then the
 * signal will be acted upon after all.  If the notifier routine returns 0,
 * then the signal will be blocked.  Only one block per process is
 * allowed.  priv is a pointer to private data that the notifier routine
 * can use to determine if the signal should be blocked or not.
 */
void
block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier_mask = mask;
	current->notifier_data = priv;
	current->notifier = notifier;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}

/* Notify the system that blocking has ended. */

void
unblock_all_signals(void)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier = NULL;
	current->notifier_data = NULL;
	current->notifier_mask = NULL;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}

static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);
		__sigqueue_free(first);
	} else {
		/* Ok, it wasn't in the queue.  This must be
		   a fast-pathed signal or we must have been
		   out of queue space.  So zero out the info.
		 */
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			siginfo_t *info)
{
	int sig = next_signal(pending, mask);

	if (sig) {
		if (current->notifier) {
			if (sigismember(current->notifier_mask, sig)) {
				if (!(current->notifier)(current->notifier_data)) {
					clear_thread_flag(TIF_SIGPENDING);
					return 0;
				}
			}
		}

		collect_signal(sig, pending, info);
	}

	return sig;
}

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info);
		/*
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavy loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr.tv64 != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
	}
	if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		do_schedule_next_timer(info);
		spin_lock(&tsk->sighand->siglock);
	}
	return signr;
}

/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up(struct task_struct *t, int resume)
{
	unsigned int mask;

	set_tsk_thread_flag(t, TIF_SIGPENDING);

	/*
	 * For SIGKILL, we want to wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	mask = TASK_INTERRUPTIBLE;
	if (resume)
		mask |= TASK_WAKEKILL;
	if (!wake_up_state(t, mask))
		kick_process(t);
}

/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 *
 * This version takes a sigset mask and looks at all signals,
 * not just those in the first mask word.
 */
static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return 0;

	signandsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}

/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int rm_from_queue(unsigned long mask, struct sigpending *s)
{
	struct sigqueue *q, *n;

	if (!sigtestsetmask(&s->signal, mask))
		return 0;

	sigdelsetmask(&s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (q->info.si_signo < SIGRTMIN &&
		    (mask & sigmask(q->info.si_signo))) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}

/*
 * Bad permissions for sending the signal
 * - the caller must hold at least the RCU read lock
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	const struct cred *cred = current_cred(), *tcred;
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (info != SEND_SIG_NOINFO && (is_si_special(info) || SI_FROMKERNEL(info)))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	tcred = __task_cred(t);
	if ((cred->euid ^ tcred->suid) &&
	    (cred->euid ^ tcred->uid) &&
	    (cred->uid  ^ tcred->suid) &&
	    (cred->uid  ^ tcred->uid) &&
	    !capable(CAP_KILL)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, 0);
}

/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals. The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static int prepare_signal(int sig, struct task_struct *p, int from_ancestor_ns)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	if (unlikely(signal->flags & SIGNAL_GROUP_EXIT)) {
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		rm_from_queue(sigmask(SIGCONT), &signal->shared_pending);
		t = p;
		do {
			rm_from_queue(sigmask(SIGCONT), &t->pending);
		} while_each_thread(p, t);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues,
		 * and wake all threads.
		 */
		rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending);
		t = p;
		do {
			unsigned int state;
			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
			/*
			 * If there is a handler for SIGCONT, we must make
			 * sure that no thread returns to user mode before
			 * we post the signal, in case it was the only
			 * thread eligible to run the signal handler--then
			 * it must not do anything between resuming and
			 * running the handler.  With the TIF_SIGPENDING
			 * flag set, the thread will pause and acquire the
			 * siglock that we hold now and until we've queued
			 * the pending signal.
			 *
			 * Wake up the stopped thread _after_ setting
			 * TIF_SIGPENDING
			 */
			state = __TASK_STOPPED;
			if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
				set_tsk_thread_flag(t, TIF_SIGPENDING);
				state |= TASK_INTERRUPTIBLE;
			}
			wake_up_state(t, state);
		} while_each_thread(p, t);

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal_to_deliver().
			 */
			signal->flags = why | SIGNAL_STOP_CONTINUED;
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		} else {
			/*
			 * We are not stopped, but there could be a stop
			 * signal in the middle of being processed after
			 * being removed from the queue.  Clear that too.
			 */
			signal->flags &= ~SIGNAL_STOP_DEQUEUED;
		}
	}

	return !sig_ignored(p, sig, from_ancestor_ns);
}

/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline int wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return 0;
	if (p->flags & PF_EXITING)
		return 0;
	if (sig == SIGKILL)
		return 1;
	if (task_is_stopped_or_traced(p))
		return 0;
	return task_curr(p) || !signal_pending(p);
}

static void complete_signal(int sig, struct task_struct *p, int group)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if (!group || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL ||
	     !tracehook_consider_fatal_signal(t, sig))) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}

static inline int legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}
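
/*
 * Illustrative note: legacy_queue() is why standard (non-realtime)
 * signals coalesce -- a second SIGUSR1 sent while one is already
 * pending is dropped, whereas realtime signals (>= SIGRTMIN) queue.
 * A userspace sketch of the difference (assumes the receiver has both
 * signals blocked and dequeues them later; error handling omitted):
 *
 *	kill(pid, SIGUSR1); kill(pid, SIGUSR1);	// receiver sees one
 *	sigqueue(pid, SIGRTMIN, (union sigval){ .sival_int = 1 });
 *	sigqueue(pid, SIGRTMIN, (union sigval){ .sival_int = 2 });
 *	// receiver sees both, in order, with their sival_int payloads
 */
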
static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group, int from_ancestor_ns)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;

	trace_signal_generate(sig, info, t);

	assert_spin_locked(&t->sighand->siglock);

	if (!prepare_signal(sig, t, from_ancestor_ns))
		return 0;

	pending = group ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	if (legacy_queue(pending, sig))
		return 0;
	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if (info == SEND_SIG_FORCED)
		goto out_set;

	/* Real-time signals must be queued if sent by sigqueue, or
	   some other real-time mechanism.  It is implementation
	   defined whether kill() does so.  We attempt to do so, on
	   the principle of least surprise, but since kill is not
	   allowed to fail with EAGAIN when low on memory we just
	   make sure at least one signal gets delivered and don't
	   pass on the info struct.  */

	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
		override_rlimit);
	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			q->info.si_uid = current_uid();
			break;
		case (unsigned long) SEND_SIG_PRIV:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			if (from_ancestor_ns)
				q->info.si_pid = 0;
			break;
		}
	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER) {
			/*
			 * Queue overflow, abort.  We may abort if the
			 * signal was rt and sent by user using something
			 * other than kill().
			 */
			trace_signal_overflow_fail(sig, group, info);
			return -EAGAIN;
		} else {
			/*
			 * This is a silent loss of information.  We still
			 * send the signal, but the *info bits are lost.
			 */
			trace_signal_lose_info(sig, group, info);
		}
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
	return 0;
}

static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group)
{
	int from_ancestor_ns = 0;

#ifdef CONFIG_PID_NS
	if (!is_si_special(info) && SI_FROMUSER(info) &&
			task_pid_nr_ns(current, task_active_pid_ns(t)) <= 0)
		from_ancestor_ns = 1;
#endif

	return __send_signal(sig, info, t, group, from_ancestor_ns);
}

static void print_fatal_signal(struct pt_regs *regs, int signr)
{
	printk("%s/%d: potentially unexpected fatal signal %d.\n",
		current->comm, task_pid_nr(current), signr);

#if defined(__i386__) && !defined(__arch_um__)
	printk("code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			__get_user(insn, (unsigned char *)(regs->ip + i));
			printk("%02x ", insn);
		}
	}
#endif
	printk("\n");
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option (&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);

int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, 1);
}

static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	return send_signal(sig, info, t, 0);
}

int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
			bool group)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal(sig, info, p, group);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}

/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	if (action->sa.sa_handler == SIG_DFL)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}

void
force_sig_specific(int sig, struct task_struct *t)
{
	force_sig_info(sig, SEND_SIG_FORCED, t);
}

/*
 * Nuke all other threads in the group.
 */
void zap_other_threads(struct task_struct *p)
{
	struct task_struct *t;

	p->signal->group_stop_count = 0;

	for (t = next_thread(p); t != p; t = next_thread(t)) {
		/*
		 * Don't bother with already dead threads
		 */
		if (t->exit_state)
			continue;

		/* SIGKILL will be handled before any pending SIGSTOP */
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}
}

struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == tsk->sighand))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}
	rcu_read_unlock();

	return sighand;
}

/*
 * send signal info to all the members of a group
 * - the caller must hold the RCU read lock at least
 */
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret = check_kill_permission(sig, info, p);

	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, true);

	return ret;
}

/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}

int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	rcu_read_lock();
retry:
	p = pid_task(pid, PIDTYPE_PID);
	if (p) {
		error = group_send_sig_info(sig, info, p);
		if (unlikely(error == -ESRCH))
			/*
			 * The task was unhashed in between, try again.
			 * If it is dead, pid_task() will return NULL,
			 * if we race with de_thread() it will find the
			 * new leader.
			 */
			goto retry;
	}
	rcu_read_unlock();

	return error;
}

int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;
	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}

/* like kill_pid_info(), but doesn't use uid/euid of "current" */
int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
		      uid_t uid, uid_t euid, u32 secid)
{
	int ret = -EINVAL;
	struct task_struct *p;
	const struct cred *pcred;

	if (!valid_signal(sig))
		return ret;

	read_lock(&tasklist_lock);
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	pcred = __task_cred(p);
	if ((info == SEND_SIG_NOINFO ||
	     (!is_si_special(info) && SI_FROMUSER(info))) &&
	    euid != pcred->suid && euid != pcred->uid &&
	    uid  != pcred->suid && uid  != pcred->uid) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, info, sig, secid);
	if (ret)
		goto out_unlock;
	if (sig && p->sighand) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		ret = __send_signal(sig, info, p, 1, 0);
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
out_unlock:
	read_unlock(&tasklist_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);

/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0) {
		rcu_read_lock();
		ret = kill_pid_info(sig, info, find_vpid(pid));
		rcu_read_unlock();
		return ret;
	}

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct * p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}

/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int
send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	return do_send_sig_info(sig, info, p, false);
}

#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}

void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, SEND_SIG_PRIV, p);
}

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
int
force_sigsegv(int sig, struct task_struct *p)
{
	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
	return 0;
}

int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);
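
/*
 * Usage note (illustrative): kill_pgrp() is the in-kernel counterpart
 * of what the tty layer and shells trigger for job control.  From
 * userspace the same effect comes from a negative pid argument, e.g.:
 *
 *	kill(-pgid, SIGTSTP);	// stop every member of process group pgid
 *	killpg(pgid, SIGCONT);	// equivalent BSD-style wrapper
 */
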
/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */
struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);

	if (q)
		q->flags |= SIGQUEUE_PREALLOC;

	return q;
}

void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}

int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	unsigned long flags;
	int ret;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	if (!likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	if (!prepare_signal(sig, t, 0))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		goto out;
	}
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = group ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
out:
	unlock_task_sighand(t, &flags);
ret:
	return ret;
}
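
/*
 * Illustrative userspace sketch: the preallocated-sigqueue path above is
 * what POSIX timers rely on.  The sigqueue is allocated in timer_create(),
 * so a later expiry can never fail with EAGAIN; repeat expirations of a
 * still-queued entry only bump si_overrun.  Error handling omitted:
 *
 *	#include <signal.h>
 *	#include <time.h>
 *
 *	struct sigevent sev = {
 *		.sigev_notify = SIGEV_SIGNAL,
 *		.sigev_signo  = SIGRTMIN,
 *	};
 *	timer_t timerid;
 *
 *	timer_create(CLOCK_MONOTONIC, &sev, &timerid); // allocates the sigqueue
 *	timer_settime(timerid, 0, &(struct itimerspec){
 *		.it_value    = { .tv_sec = 1 },
 *		.it_interval = { .tv_sec = 1 },
 *	}, NULL);
 */
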
/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns -1 if our parent ignored us and so we've switched to
 * self-reaping, or else @sig.
 */
int do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;
	int ret = sig;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead.  */
	BUG_ON(task_is_stopped_or_traced(tsk));

	BUG_ON(!task_ptrace(tsk) &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * we are under tasklist_lock here so our parent is tied to
	 * us and cannot exit and release its namespace.
	 *
	 * the only thing it can do is switch its nsproxy with sys_unshare,
	 * but unsharing pid namespaces is not allowed, so we'll always
	 * see the relevant namespace
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg, this is not
	 * correct to rely on this
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
	info.si_uid = __task_cred(tsk)->uid;
	rcu_read_unlock();

	info.si_utime = cputime_to_clock_t(cputime_add(tsk->utime,
				tsk->signal->utime));
	info.si_stime = cputime_to_clock_t(cputime_add(tsk->stime,
				tsk->signal->stime));

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!task_ptrace(tsk) && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		ret = tsk->exit_signal = -1;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = -1;
	}
	if (valid_signal(sig) && sig > 0)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);

	return ret;
}

static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
{
	struct siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;

	if (task_ptrace(tsk))
		parent = tsk->parent;
	else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/*
	 * see comment in do_notify_parent() about the following 3 lines
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, parent->nsproxy->pid_ns);
	info.si_uid = __task_cred(tsk)->uid;
	rcu_read_unlock();

	info.si_utime = cputime_to_clock_t(tsk->utime);
	info.si_stime = cputime_to_clock_t(tsk->stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}

static inline int may_ptrace_stop(void)
{
	if (!likely(task_ptrace(current)))
		return 0;
	/*
	 * Are we in the middle of do_coredump?
	 * If so and our tracer is also part of the coredump stopping
	 * is a deadlock situation, and pointless because our tracer
	 * is dead so don't allow us to stop.
	 * If SIGKILL was already sent before the caller unlocked
	 * ->siglock we must see ->core_state != NULL. Otherwise it
	 * is safe to enter schedule().
	 */
	if (unlikely(current->mm->core_state) &&
	    unlikely(current->mm == current->parent->mm))
		return 0;

	return 1;
}

/*
 * Return nonzero if there is a SIGKILL that should be waking us up.
 * Called with the siglock held.
 */
static int sigkill_pending(struct task_struct *tsk)
{
	return	sigismember(&tsk->pending.signal, SIGKILL) ||
		sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
}

/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer
 * is gone, we keep current->exit_code unless clear_code.
 */
static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info)
{
	if (arch_ptrace_stop_needed(exit_code, info)) {
		/*
		 * The arch code has something special to do before a
		 * ptrace stop.  This is allowed to block, e.g. for faults
		 * on user stack pages.  We can't keep the siglock while
		 * calling arch_ptrace_stop, so we must release it now.
		 * To preserve proper semantics, we must do this before
		 * any signal bookkeeping like checking group_stop_count.
		 * Meanwhile, a SIGKILL could come in before we retake the
		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
		 * So after regaining the lock, we must check for SIGKILL.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		arch_ptrace_stop(exit_code, info);
		spin_lock_irq(&current->sighand->siglock);
		if (sigkill_pending(current))
			return;
	}

	/*
	 * If there is a group stop in progress,
	 * we must participate in the bookkeeping.
	 */
	if (current->signal->group_stop_count > 0)
		--current->signal->group_stop_count;

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/* Let the debugger run.  */
	__set_current_state(TASK_TRACED);
	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);
	if (may_ptrace_stop()) {
		do_notify_parent_cldstop(current, CLD_TRAPPED);
		/*
		 * Don't want to allow preemption here, because
		 * sys_ptrace() needs this task to be inactive.
		 *
		 * XXX: implement read_unlock_no_resched().
		 */
		preempt_disable();
		read_unlock(&tasklist_lock);
		preempt_enable_no_resched();
		schedule();
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't drop the lock yet, another tracer may come.
		 */
		__set_current_state(TASK_RUNNING);
		if (clear_code)
			current->exit_code = 0;
		read_unlock(&tasklist_lock);
	}

	/*
	 * While in TASK_TRACED, we were considered "frozen enough".
	 * Now that we woke up, it's crucial if we're supposed to be
	 * frozen that we freeze now before running anything substantial.
	 */
	try_to_freeze();

	/*
	 * We are back.  Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 * This sets TIF_SIGPENDING, but never clears it.
	 */
	recalc_sigpending_tsk(current);
}

void ptrace_notify(int exit_code)
{
	siginfo_t info;

	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);

	memset(&info, 0, sizeof info);
	info.si_signo = SIGTRAP;
	info.si_code = exit_code;
	info.si_pid = task_pid_vnr(current);
	info.si_uid = current_uid();

	/* Let the debugger run.  */
	spin_lock_irq(&current->sighand->siglock);
	ptrace_stop(exit_code, 1, &info);
	spin_unlock_irq(&current->sighand->siglock);
}

/*
 * This performs the stopping for SIGSTOP and other stop signals.
 * We have to stop all threads in the thread group.
 * Returns nonzero if we've actually stopped and released the siglock.
 * Returns zero if we didn't stop and still hold the siglock.
 */
static int do_signal_stop(int signr)
{
	struct signal_struct *sig = current->signal;
	int notify;

	if (!sig->group_stop_count) {
		struct task_struct *t;

		if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) ||
		    unlikely(signal_group_exit(sig)))
			return 0;
		/*
		 * There is no group stop already in progress.
		 * We must initiate one now.
		 */
		sig->group_exit_code = signr;

		sig->group_stop_count = 1;
		for (t = next_thread(current); t != current; t = next_thread(t))
			/*
			 * Setting state to TASK_STOPPED for a group
			 * stop is always done with the siglock held,
			 * so this check has no races.
			 */
			if (!(t->flags & PF_EXITING) &&
			    !task_is_stopped_or_traced(t)) {
				sig->group_stop_count++;
				signal_wake_up(t, 0);
			}
	}
	/*
	 * If there are no other threads in the group, or if there is
	 * a group stop in progress and we are the last to stop, report
	 * to the parent.  When ptraced, every thread reports itself.
	 */
	notify = sig->group_stop_count == 1 ? CLD_STOPPED : 0;
	notify = tracehook_notify_jctl(notify, CLD_STOPPED);
	/*
	 * tracehook_notify_jctl() can drop and reacquire siglock, so
	 * we keep ->group_stop_count != 0 before the call. If SIGCONT
	 * or SIGKILL comes in between, ->group_stop_count == 0.
	 */
	if (sig->group_stop_count) {
		if (!--sig->group_stop_count)
			sig->flags = SIGNAL_STOP_STOPPED;
		current->exit_code = sig->group_exit_code;
		__set_current_state(TASK_STOPPED);
	}
	spin_unlock_irq(&current->sighand->siglock);

	if (notify) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current, notify);
		read_unlock(&tasklist_lock);
	}

	/* Now we don't run again until woken by SIGCONT or SIGKILL */
	do {
		schedule();
	} while (try_to_freeze());

	tracehook_finish_jctl();
	current->exit_code = 0;

	return 1;
}

static int ptrace_signal(int signr, siginfo_t *info,
			 struct pt_regs *regs, void *cookie)
{
	if (!task_ptrace(current))
		return signr;

	ptrace_signal_deliver(regs, cookie);

	/* Let the debugger run.  */
	ptrace_stop(signr, 0, info);

	/* We're back.  Did the debugger cancel the sig?  */
	signr = current->exit_code;
	if (signr == 0)
		return signr;

	current->exit_code = 0;

	/* Update the siginfo structure if the signal has
	   changed.  If the debugger wanted something
	   specific in the siginfo structure then it should
	   have updated *info via PTRACE_SETSIGINFO.  */
	if (signr != info->si_signo) {
		info->si_signo = signr;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = task_pid_vnr(current->parent);
		info->si_uid = task_uid(current->parent);
	}

	/* If the (new) signal is now blocked, requeue it.  */
	if (sigismember(&current->blocked, signr)) {
		specific_send_sig_info(signr, info, current);
		signr = 0;
	}

	return signr;
}

*info
, struct k_sigaction
*return_ka
,
1801 struct pt_regs
*regs
, void *cookie
)
1803 struct sighand_struct
*sighand
= current
->sighand
;
1804 struct signal_struct
*signal
= current
->signal
;
1809 * We'll jump back here after any time we were stopped in TASK_STOPPED.
1810 * While in TASK_STOPPED, we were considered "frozen enough".
1811 * Now that we woke up, it's crucial if we're supposed to be
1812 * frozen that we freeze now before running anything substantial.
1816 spin_lock_irq(&sighand
->siglock
);
1818 * Every stopped thread goes here after wakeup. Check to see if
1819 * we should notify the parent, prepare_signal(SIGCONT) encodes
1820 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
1822 if (unlikely(signal
->flags
& SIGNAL_CLD_MASK
)) {
1823 int why
= (signal
->flags
& SIGNAL_STOP_CONTINUED
)
1824 ? CLD_CONTINUED
: CLD_STOPPED
;
1825 signal
->flags
&= ~SIGNAL_CLD_MASK
;
1827 why
= tracehook_notify_jctl(why
, CLD_CONTINUED
);
1828 spin_unlock_irq(&sighand
->siglock
);
1831 read_lock(&tasklist_lock
);
1832 do_notify_parent_cldstop(current
->group_leader
, why
);
1833 read_unlock(&tasklist_lock
);
1839 struct k_sigaction
*ka
;
1841 if (unlikely(signal
->group_stop_count
> 0) &&
1846 * Tracing can induce an artifical signal and choose sigaction.
1847 * The return value in @signr determines the default action,
1848 * but @info->si_signo is the signal number we will report.
1850 signr
= tracehook_get_signal(current
, regs
, info
, return_ka
);
1851 if (unlikely(signr
< 0))
1853 if (unlikely(signr
!= 0))
1856 signr
= dequeue_signal(current
, ¤t
->blocked
,
1860 break; /* will return 0 */
1862 if (signr
!= SIGKILL
) {
1863 signr
= ptrace_signal(signr
, info
,
1869 ka
= &sighand
->action
[signr
-1];
1872 /* Trace actually delivered signals. */
1873 trace_signal_deliver(signr
, info
, ka
);
1875 if (ka
->sa
.sa_handler
== SIG_IGN
) /* Do nothing. */
1877 if (ka
->sa
.sa_handler
!= SIG_DFL
) {
1878 /* Run the handler. */
1881 if (ka
->sa
.sa_flags
& SA_ONESHOT
)
1882 ka
->sa
.sa_handler
= SIG_DFL
;
1884 break; /* will return non-zero "signr" value */
1888 * Now we are doing the default action for this signal.
1890 if (sig_kernel_ignore(signr
)) /* Default is nothing. */
1894 * Global init gets no signals it doesn't want.
1895 * Container-init gets no signals it doesn't want from same
1898 * Note that if global/container-init sees a sig_kernel_only()
1899 * signal here, the signal must have been generated internally
1900 * or must have come from an ancestor namespace. In either
1901 * case, the signal cannot be dropped.
1903 if (unlikely(signal
->flags
& SIGNAL_UNKILLABLE
) &&
1904 !sig_kernel_only(signr
))
1907 if (sig_kernel_stop(signr
)) {
1909 * The default action is to stop all threads in
1910 * the thread group. The job control signals
1911 * do nothing in an orphaned pgrp, but SIGSTOP
1912 * always works. Note that siglock needs to be
1913 * dropped during the call to is_orphaned_pgrp()
1914 * because of lock ordering with tasklist_lock.
1915 * This allows an intervening SIGCONT to be posted.
1916 * We need to check for that and bail out if necessary.
1918 if (signr
!= SIGSTOP
) {
1919 spin_unlock_irq(&sighand
->siglock
);
1921 /* signals can be posted during this window */
1923 if (is_current_pgrp_orphaned())
1926 spin_lock_irq(&sighand
->siglock
);
1929 if (likely(do_signal_stop(info
->si_signo
))) {
1930 /* It released the siglock. */
1935 * We didn't actually stop, due to a race
1936 * with SIGCONT or something like that.
1941 spin_unlock_irq(&sighand
->siglock
);
1944 * Anything else is fatal, maybe with a core dump.
1946 current
->flags
|= PF_SIGNALED
;
1948 if (sig_kernel_coredump(signr
)) {
1949 if (print_fatal_signals
)
1950 print_fatal_signal(regs
, info
->si_signo
);
1952 * If it was able to dump core, this kills all
1953 * other threads in the group and synchronizes with
1954 * their demise. If we lost the race with another
1955 * thread getting here, it set group_exit_code
1956 * first and our do_group_exit call below will use
1957 * that value and ignore the one we pass it.
1959 do_coredump(info
->si_signo
, info
->si_signo
, regs
);
1963 * Death signals, no core dump.
1965 do_group_exit(info
->si_signo
);
1968 spin_unlock_irq(&sighand
->siglock
);
void exit_signals(struct task_struct *tsk)
{
	int group_stop = 0;
	struct task_struct *t;

	if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
		tsk->flags |= PF_EXITING;
		return;
	}

	spin_lock_irq(&tsk->sighand->siglock);
	/*
	 * From now this task is not visible for group-wide signals,
	 * see wants_signal(), do_signal_stop().
	 */
	tsk->flags |= PF_EXITING;
	if (!signal_pending(tsk))
		goto out;

	/* It could be that __group_complete_signal() chose us to
	 * notify about group-wide signal. Another thread should be
	 * woken now to take the signal since we will not.
	 */
	for (t = tsk; (t = next_thread(t)) != tsk; )
		if (!signal_pending(t) && !(t->flags & PF_EXITING))
			recalc_sigpending_and_wake(t);

	if (unlikely(tsk->signal->group_stop_count) &&
			!--tsk->signal->group_stop_count) {
		tsk->signal->flags = SIGNAL_STOP_STOPPED;
		group_stop = tracehook_notify_jctl(CLD_STOPPED, CLD_STOPPED);
	}
out:
	spin_unlock_irq(&tsk->sighand->siglock);

	if (unlikely(group_stop)) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(tsk, group_stop);
		read_unlock(&tasklist_lock);
	}
}

EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL_GPL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
EXPORT_SYMBOL(sigprocmask);
EXPORT_SYMBOL(block_all_signals);
EXPORT_SYMBOL(unblock_all_signals);

/*
 * System call entry points.
 */

SYSCALL_DEFINE0(restart_syscall)
{
	struct restart_block *restart = &current_thread_info()->restart_block;
	return restart->fn(restart);
}

long do_no_restart_syscall(struct restart_block *param)
{
	return -EINTR;
}

/*
 * We don't need to get the kernel lock - this is all local to this
 * particular thread.. (and that's good, because this is _heavily_
 * used by various programs)
 */

/*
 * This is also useful for kernel threads that want to temporarily
 * (or permanently) block certain signals.
 *
 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
 * interface happily blocks "unblockable" signals like SIGKILL
 * and friends.
 */
int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
{
	int error;

	spin_lock_irq(&current->sighand->siglock);
	if (oldset)
		*oldset = current->blocked;

	error = 0;
	switch (how) {
	case SIG_BLOCK:
		sigorsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_UNBLOCK:
		signandsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_SETMASK:
		current->blocked = *set;
		break;
	default:
		error = -EINVAL;
		break;
	}
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	return error;
}
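
/*
 * Usage note (illustrative): the userspace counterpart is sigprocmask(2)
 * (or pthread_sigmask(3) in threaded programs), which lands in
 * sys_rt_sigprocmask() below and, unlike this kernel-internal helper,
 * cannot block SIGKILL/SIGSTOP.  A typical critical-section sketch:
 *
 *	sigset_t set, oldset;
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGINT);
 *	sigprocmask(SIG_BLOCK, &set, &oldset);	// SIGINT now stays pending
 *	...					// critical section
 *	sigprocmask(SIG_SETMASK, &oldset, NULL);
 */
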
SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, set,
		sigset_t __user *, oset, size_t, sigsetsize)
{
	int error = -EINVAL;
	sigset_t old_set, new_set;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, &old_set);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked;
		spin_unlock_irq(&current->sighand->siglock);

set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}

long do_sigpending(void __user *set, unsigned long sigsetsize)
{
	long error = -EINVAL;
	sigset_t pending;

	if (sigsetsize > sizeof(sigset_t))
		goto out;

	spin_lock_irq(&current->sighand->siglock);
	sigorsets(&pending, &current->pending.signal,
		  &current->signal->shared_pending.signal);
	spin_unlock_irq(&current->sighand->siglock);

	/* Outside the lock because only this thread touches it.  */
	sigandsets(&pending, &current->blocked, &pending);

	error = -EFAULT;
	if (!copy_to_user(set, &pending, sigsetsize))
		error = 0;

out:
	return error;
}

, sigset_t __user
*, set
, size_t, sigsetsize
)
2144 return do_sigpending(set
, sigsetsize
);
#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER

int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
{
	int err;

	if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
		return -EFAULT;
	if (from->si_code < 0)
		return __copy_to_user(to, from, sizeof(siginfo_t))
			? -EFAULT : 0;
	/*
	 * If you change siginfo_t structure, please be sure
	 * this code is fixed accordingly.
	 * Please remember to update the signalfd_copyinfo() function
	 * inside fs/signalfd.c too, in case siginfo_t changes.
	 * It should never copy any pad contained in the structure
	 * to avoid security leaks, but must copy the generic
	 * 3 ints plus the relevant union member.
	 */
	err = __put_user(from->si_signo, &to->si_signo);
	err |= __put_user(from->si_errno, &to->si_errno);
	err |= __put_user((short)from->si_code, &to->si_code);
	switch (from->si_code & __SI_MASK) {
	case __SI_KILL:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	case __SI_TIMER:
		err |= __put_user(from->si_tid, &to->si_tid);
		err |= __put_user(from->si_overrun, &to->si_overrun);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	case __SI_POLL:
		err |= __put_user(from->si_band, &to->si_band);
		err |= __put_user(from->si_fd, &to->si_fd);
		break;
	case __SI_FAULT:
		err |= __put_user(from->si_addr, &to->si_addr);
#ifdef __ARCH_SI_TRAPNO
		err |= __put_user(from->si_trapno, &to->si_trapno);
#endif
		break;
	case __SI_CHLD:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_status, &to->si_status);
		err |= __put_user(from->si_utime, &to->si_utime);
		err |= __put_user(from->si_stime, &to->si_stime);
		break;
	case __SI_RT: /* This is not generated by the kernel as of now. */
	case __SI_MESGQ: /* But this is */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	default: /* this is just in case for now ... */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	}
	return err;
}

#endif

SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
		siginfo_t __user *, uinfo, const struct timespec __user *, uts,
		size_t, sigsetsize)
{
	int ret, sig;
	sigset_t these;
	struct timespec ts;
	siginfo_t info;
	long timeout = 0;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&these, uthese, sizeof(these)))
		return -EFAULT;

	/*
	 * Invert the set of allowed signals to get those we
	 * want to block.
	 */
	sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
	signotset(&these);

	if (uts) {
		if (copy_from_user(&ts, uts, sizeof(ts)))
			return -EFAULT;
		if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
		    || ts.tv_sec < 0)
			return -EINVAL;
	}

	spin_lock_irq(&current->sighand->siglock);
	sig = dequeue_signal(current, &these, &info);
	if (!sig) {
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (uts)
			timeout = (timespec_to_jiffies(&ts)
				   + (ts.tv_sec || ts.tv_nsec));

		if (timeout) {
			/* None ready -- temporarily unblock those we're
			 * interested in while we are sleeping, so that we'll
			 * be awakened when they arrive.  */
			current->real_blocked = current->blocked;
			sigandsets(&current->blocked, &current->blocked, &these);
			recalc_sigpending();
			spin_unlock_irq(&current->sighand->siglock);

			timeout = schedule_timeout_interruptible(timeout);

			spin_lock_irq(&current->sighand->siglock);
			sig = dequeue_signal(current, &these, &info);
			current->blocked = current->real_blocked;
			siginitset(&current->real_blocked, 0);
			recalc_sigpending();
		}
	}
	spin_unlock_irq(&current->sighand->siglock);

	if (sig) {
		ret = sig;
		if (uinfo) {
			if (copy_siginfo_to_user(uinfo, &info))
				ret = -EFAULT;
		}
	} else {
		ret = -EAGAIN;
		if (timeout)
			ret = -EINTR;
	}

	return ret;
}
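
/*
 * Usage note (illustrative): userspace reaches this through
 * sigtimedwait(3)/sigwaitinfo(3).  The awaited signal should already be
 * blocked, otherwise it may be delivered to a handler instead of being
 * dequeued here.  Sketch, error handling omitted:
 *
 *	sigset_t set;
 *	siginfo_t si;
 *	struct timespec ts = { .tv_sec = 5 };
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	if (sigtimedwait(&set, &si, &ts) == SIGUSR1)
 *		...	// si.si_pid identifies the sender
 */
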
SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
{
	struct siginfo info;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_USER;
	info.si_pid = task_tgid_vnr(current);
	info.si_uid = current_uid();

	return kill_something_info(sig, &info, pid);
}
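
/*
 * Usage note (illustrative): the pid argument selects the target set,
 * matching kill(2):
 *
 *	kill(1234, SIGTERM);	// pid  >  0: one process
 *	kill(0, SIGTERM);	// pid ==  0: caller's process group
 *	kill(-1234, SIGTERM);	// pid < -1: process group 1234
 *	kill(-1, SIGTERM);	// pid == -1: everything we may signal
 */
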
static int
do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
{
	struct task_struct *p;
	int error = -ESRCH;

	rcu_read_lock();
	p = find_task_by_vpid(pid);
	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
		error = check_kill_permission(sig, info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe.  No signal is actually delivered.
		 */
		if (!error && sig) {
			error = do_send_sig_info(sig, info, p, false);
			/*
			 * If lock_task_sighand() failed we pretend the task
			 * dies after receiving the signal. The window is tiny,
			 * and the signal is private anyway.
			 */
			if (unlikely(error == -ESRCH))
				error = 0;
		}
	}
	rcu_read_unlock();

	return error;
}

static int do_tkill(pid_t tgid, pid_t pid, int sig)
{
	struct siginfo info;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_TKILL;
	info.si_pid = task_tgid_vnr(current);
	info.si_uid = current_uid();

	return do_send_specific(tgid, pid, sig, &info);
}
/**
 *  sys_tgkill - send signal to one specific thread
 *  @tgid: the thread group ID of the thread
 *  @pid: the PID of the thread
 *  @sig: signal to be sent
 *
 *  This syscall also checks the @tgid and returns -ESRCH even if the PID
 *  exists but no longer belongs to the target process.  This solves the
 *  problem of threads exiting and their PIDs getting reused.
 */
SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	return do_tkill(tgid, pid, sig);
}
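
/*
 * Illustrative sketch (userspace, not part of this file): many libc
 * versions expose no wrapper for tgkill, so callers typically go
 * through syscall(2).  tgid and tid are hypothetical values obtained
 * from getpid()/gettid():
 *
 *	#include <signal.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	syscall(SYS_tgkill, tgid, tid, SIGUSR1);
 */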
/*
 *  Send a signal to only one task, even if it's a CLONE_THREAD task.
 */
SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0)
		return -EINVAL;

	return do_tkill(0, pid, sig);
}
SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
		siginfo_t __user *, uinfo)
{
	siginfo_t info;

	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;

	/* Not even root can pretend to send signals from the kernel.
	 * Nor can they impersonate a kill(), which adds source info. */
	if (info.si_code >= 0)
		return -EPERM;
	info.si_signo = sig;

	/* POSIX.1b doesn't mention process groups. */
	return kill_proc_info(sig, &info, pid);
}
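
/*
 * Illustrative sketch (userspace, not part of this file): sigqueue(3)
 * is the usual route into rt_sigqueueinfo().  libc builds a siginfo
 * with si_code = SI_QUEUE, which is negative and therefore passes the
 * si_code >= 0 check above.  target_pid is hypothetical:
 *
 *	#include <signal.h>
 *
 *	union sigval val = { .sival_int = 42 };
 *	sigqueue(target_pid, SIGRTMIN, val);	// receiver reads si_value
 */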
long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	/* Not even root can pretend to send signals from the kernel.
	 * Nor can they impersonate a kill(), which adds source info. */
	if (info->si_code >= 0)
		return -EPERM;
	info->si_signo = sig;

	return do_send_specific(tgid, pid, sig, info);
}
SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
		siginfo_t __user *, uinfo)
{
	siginfo_t info;

	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;

	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
}
int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
{
	struct task_struct *t = current;
	struct k_sigaction *k;
	sigset_t mask;

	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
		return -EINVAL;

	k = &t->sighand->action[sig-1];

	spin_lock_irq(&current->sighand->siglock);
	if (oact)
		*oact = *k;

	if (act) {
		sigdelsetmask(&act->sa.sa_mask,
			      sigmask(SIGKILL) | sigmask(SIGSTOP));
		*k = *act;
		/*
		 * POSIX 3.3.1.3:
		 *  "Setting a signal action to SIG_IGN for a signal that is
		 *   pending shall cause the pending signal to be discarded,
		 *   whether or not it is blocked."
		 *
		 *  "Setting a signal action to SIG_DFL for a signal that is
		 *   pending and whose default action is to ignore the signal
		 *   (for example, SIGCHLD), shall cause the pending signal to
		 *   be discarded, whether or not it is blocked"
		 */
		if (sig_handler_ignored(sig_handler(t, sig), sig)) {
			sigemptyset(&mask);
			sigaddset(&mask, sig);
			rm_from_queue_full(&mask, &t->signal->shared_pending);
			do {
				rm_from_queue_full(&mask, &t->pending);
				t = next_thread(t);
			} while (t != current);
		}
	}

	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}
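
/*
 * Illustrative sketch (userspace, not part of this file) of the POSIX
 * discard rule implemented above: a signal that is pending but blocked
 * vanishes once its action becomes SIG_IGN:
 *
 *	#include <signal.h>
 *
 *	sigset_t set, pend;
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	raise(SIGUSR1);			// now pending and blocked
 *	signal(SIGUSR1, SIG_IGN);	// rm_from_queue_full() discards it
 *	sigpending(&pend);		// SIGUSR1 is no longer a member
 */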
int
do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
{
	stack_t oss;
	int error;

	if (uoss) {
		oss.ss_sp = (void __user *) current->sas_ss_sp;
		oss.ss_size = current->sas_ss_size;
		oss.ss_flags = sas_ss_flags(sp);
	}

	if (uss) {
		void __user *ss_sp;
		size_t ss_size;
		int ss_flags;

		error = -EFAULT;
		if (!access_ok(VERIFY_READ, uss, sizeof(*uss)))
			goto out;
		error = __get_user(ss_sp, &uss->ss_sp) |
			__get_user(ss_flags, &uss->ss_flags) |
			__get_user(ss_size, &uss->ss_size);
		if (error)
			goto out;

		error = -EPERM;
		if (on_sig_stack(sp))
			goto out;

		error = -EINVAL;
		/*
		 * Note - this code used to test ss_flags incorrectly;
		 * old code may have been written using ss_flags==0
		 * to mean ss_flags==SS_ONSTACK (as this was the only
		 * way that worked) - this fix preserves that older
		 * mechanism.
		 */
		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
			goto out;

		if (ss_flags == SS_DISABLE) {
			ss_size = 0;
			ss_sp = NULL;
		} else {
			error = -ENOMEM;
			if (ss_size < MINSIGSTKSZ)
				goto out;
		}

		current->sas_ss_sp = (unsigned long) ss_sp;
		current->sas_ss_size = ss_size;
	}

	error = 0;
	if (uoss) {
		error = -EFAULT;
		if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
			goto out;
		error = __put_user(oss.ss_sp, &uoss->ss_sp) |
			__put_user(oss.ss_size, &uoss->ss_size) |
			__put_user(oss.ss_flags, &uoss->ss_flags);
	}

out:
	return error;
}
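
/*
 * Illustrative sketch (userspace, not part of this file): registering
 * an alternate stack so a SIGSEGV handler can still run after the main
 * stack overflows.  SA_ONSTACK makes delivery switch to the area
 * registered here; segv_handler is hypothetical:
 *
 *	#include <signal.h>
 *	#include <stdlib.h>
 *
 *	stack_t ss = {
 *		.ss_sp = malloc(SIGSTKSZ),
 *		.ss_size = SIGSTKSZ,	// must be >= MINSIGSTKSZ
 *		.ss_flags = 0,
 *	};
 *	sigaltstack(&ss, NULL);
 *
 *	struct sigaction sa = { .sa_handler = segv_handler,
 *				.sa_flags = SA_ONSTACK };
 *	sigemptyset(&sa.sa_mask);
 *	sigaction(SIGSEGV, &sa, NULL);
 */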
#ifdef __ARCH_WANT_SYS_SIGPENDING

SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
{
	return do_sigpending(set, sizeof(*set));
}

#endif
#ifdef __ARCH_WANT_SYS_SIGPROCMASK
/* Some platforms have their own version with special arguments;
 * others support only sys_rt_sigprocmask. */

SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, set,
		old_sigset_t __user *, oset)
{
	int error;
	old_sigset_t old_set, new_set;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));

		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked.sig[0];

		error = 0;
		switch (how) {
		default:
			error = -EINVAL;
			break;
		case SIG_BLOCK:
			sigaddsetmask(&current->blocked, new_set);
			break;
		case SIG_UNBLOCK:
			sigdelsetmask(&current->blocked, new_set);
			break;
		case SIG_SETMASK:
			current->blocked.sig[0] = new_set;
			break;
		}

		recalc_sigpending();
		spin_unlock_irq(&current->sighand->siglock);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		old_set = current->blocked.sig[0];
	set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}
#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
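
/*
 * Illustrative sketch (userspace, not part of this file): the three
 * "how" cases above map directly onto the familiar API:
 *
 *	#include <signal.h>
 *
 *	sigset_t set, old;
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGINT);
 *	sigprocmask(SIG_BLOCK, &set, &old);	// sigaddsetmask() path
 *	// ... critical section, SIGINT stays pending ...
 *	sigprocmask(SIG_SETMASK, &old, NULL);	// restore the saved mask
 */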
#ifdef __ARCH_WANT_SYS_RT_SIGACTION
SYSCALL_DEFINE4(rt_sigaction, int, sig,
		const struct sigaction __user *, act,
		struct sigaction __user *, oact,
		size_t, sigsetsize)
{
	struct k_sigaction new_sa, old_sa;
	int ret = -EINVAL;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (act) {
		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);

	if (!ret && oact) {
		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
			return -EFAULT;
	}
out:
	return ret;
}
#endif /* __ARCH_WANT_SYS_RT_SIGACTION */
#ifdef __ARCH_WANT_SYS_SGETMASK

/*
 * For backwards compatibility.  Functionality superseded by sigprocmask.
 */
SYSCALL_DEFINE0(sgetmask)
{
	return current->blocked.sig[0];
}

SYSCALL_DEFINE1(ssetmask, int, newmask)
{
	int old;

	spin_lock_irq(&current->sighand->siglock);
	old = current->blocked.sig[0];

	siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
						  sigmask(SIGSTOP)));
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	return old;
}
#endif /* __ARCH_WANT_SYS_SGETMASK */
#ifdef __ARCH_WANT_SYS_SIGNAL
/*
 * For backwards compatibility.  Functionality superseded by sigaction.
 */
SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	new_sa.sa.sa_handler = handler;
	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
	sigemptyset(&new_sa.sa.sa_mask);

	ret = do_sigaction(sig, &new_sa, &old_sa);

	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
#endif /* __ARCH_WANT_SYS_SIGNAL */
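
/*
 * Illustrative sketch (userspace, not part of this file): because the
 * action is installed with SA_ONESHOT | SA_NOMASK, signal(2) has
 * System V semantics: the disposition resets to SIG_DFL on delivery,
 * so portable code re-installs the handler from within itself:
 *
 *	#include <signal.h>
 *
 *	static void on_usr1(int sig)
 *	{
 *		signal(SIGUSR1, on_usr1);	// re-arm the one-shot action
 *	}
 *
 *	signal(SIGUSR1, on_usr1);	// e.g. in main()
 */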
#ifdef __ARCH_WANT_SYS_PAUSE

SYSCALL_DEFINE0(pause)
{
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	return -ERESTARTNOHAND;
}

#endif
#ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
{
	sigset_t newset;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset, unewset, sizeof(newset)))
		return -EFAULT;
	sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));

	spin_lock_irq(&current->sighand->siglock);
	current->saved_sigmask = current->blocked;
	current->blocked = newset;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	current->state = TASK_INTERRUPTIBLE;
	schedule();
	set_restore_sigmask();
	return -ERESTARTNOHAND;
}
#endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
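
/*
 * Illustrative sketch (userspace, not part of this file): the
 * saved_sigmask handling above is what makes the classic race-free
 * wait pattern work; "flag" is a hypothetical volatile sig_atomic_t
 * set by a signal handler:
 *
 *	#include <signal.h>
 *
 *	sigset_t block, waitmask;
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &block, &waitmask);	// close the race window
 *	while (!flag)
 *		sigsuspend(&waitmask);	// atomically unblock and sleep
 *	// the original mask is restored on return, as implemented above
 */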
__attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}

void __init signals_init(void)
{
	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
}