/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/uprobes.h>
#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include <asm/cacheflush.h>
#include "audit.h"	/* audit_signal_info() */
/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;
static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}

static int sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
		(handler == SIG_DFL && sig_kernel_ignore(sig));
}
static int sig_task_ignored(struct task_struct *t, int sig, bool force)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
			handler == SIG_DFL && !force)
		return 1;

	return sig_handler_ignored(handler, sig);
}
static int sig_ignored(struct task_struct *t, int sig, bool force)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return 0;

	if (!sig_task_ignored(t, sig, force))
		return 0;

	/*
	 * Tracers may want to know about even ignored signals.
	 */
	return !t->ptrace;
}
/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
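
/* PENDING(p, b): true if any signal pending in set @p is not blocked by @b. */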
static int recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->jobctl & JOBCTL_PENDING_MASK) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return 1;
	}
	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here, and only callers who know they should
	 * do so.
	 */
	return 0;
}

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current; the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current))
		clear_thread_flag(TIF_SIGPENDING);
}
/* Given the mask, find the first available signal that should be serviced. */

#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))

int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		/* ffz(~x) is the index of the lowest set bit; +1 maps to a
		 * 1-based signal number. */
		sig = ffz(~x) + 1;
		return sig;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}
static inline void print_dropped_signal(int sig)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

	printk(KERN_INFO "%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
				current->comm, current->pid, sig);
}
/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask on @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING.  If a stop signo is being set, the existing signo is
 * cleared.  If @task is already being killed or exiting, this function
 * becomes a noop.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if made a noop because @task was dying.
 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned int mask)
{
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
		return false;

	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;

	task->jobctl |= mask;
	return true;
}
/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer.  Note that we don't need any further
 * locking.  @task->siglock guarantees that @task->parent points to the
 * ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
{
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
	}
}
/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, other
 * STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned int mask)
{
	BUG_ON(mask & ~JOBCTL_PENDING_MASK);

	if (mask & JOBCTL_STOP_PENDING)
		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

	task->jobctl &= ~mask;

	if (!(task->jobctl & JOBCTL_PENDING_MASK))
		task_clear_jobctl_trapping(task);
}
/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 * stop, the appropriate %SIGNAL_* flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop.  Read comment in do_signal_stop() for details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		sig->flags = SIGNAL_STOP_STOPPED;
		return true;
	}
	return false;
}
/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;

	/*
	 * Protect access to @t credentials. This can go away when all
	 * callers hold rcu read lock.
	 */
	rcu_read_lock();
	user = get_uid(__task_cred(t)->user);
	atomic_inc(&user->sigpending);
	rcu_read_unlock();

	if (override_rlimit ||
	    atomic_read(&user->sigpending) <=
			task_rlimit(t, RLIMIT_SIGPENDING)) {
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		atomic_dec(&user->sigpending);
		free_uid(user);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = user;
	}

	return q;
}
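
/*
 * Free a sigqueue entry allocated above, dropping the per-user
 * pending-signal accounting.  Preallocated entries (POSIX timers) are
 * owned by their timer and are deliberately left alone here.
 */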
static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}
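
/* Drop every queued entry on @queue and clear its pending mask. */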
void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue , list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}
/*
 * Flush all pending signals for a task.
 */
void __flush_signals(struct task_struct *t)
{
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
}

void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	__flush_signals(t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}

void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}
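/*
 * Mark every signal as ignored for @t and discard anything already
 * pending; typically used when setting up kernel threads that must not
 * be disturbed by signals.
 */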
void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}
/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}
int unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;

	if (is_global_init(tsk))
		return 1;
	if (handler != SIG_IGN && handler != SIG_DFL)
		return 0;
	/* if ptraced, let the tracer determine */
	return !tsk->ptrace;
}
/*
 * Notify the system that a driver wants to block all signals for this
 * process, and wants to be notified if any signals at all were to be
 * sent/acted upon.  If the notifier routine returns non-zero, then the
 * signal will be acted upon after all.  If the notifier routine returns 0,
 * then the signal will be blocked.  Only one block per process is
 * allowed.  priv is a pointer to private data that the notifier routine
 * can use to determine if the signal should be blocked or not.
 */
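/*
 * Illustrative usage (a sketch only; my_notifier and my_data are
 * hypothetical names, not code from this file):
 *
 *	block_all_signals(my_notifier, my_data, &mask);
 *	...
 *	unblock_all_signals();
 *
 * my_notifier(my_data) is then consulted for each signal in mask and
 * lets a signal through by returning non-zero.
 */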
void
block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier_mask = mask;
	current->notifier_data = priv;
	current->notifier = notifier;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
/* Notify the system that blocking has ended. */

void
unblock_all_signals(void)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier = NULL;
	current->notifier_data = NULL;
	recalc_sigpending();
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
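
/*
 * Dequeue the siginfo for @sig from @list; @sig is cleared from the
 * pending mask only when no second instance remains queued.
 */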
static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);
		__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue.  This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space.  So zero out the info.
		 */
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}
static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			siginfo_t *info)
{
	int sig = next_signal(pending, mask);

	if (sig) {
		if (current->notifier) {
			if (sigismember(current->notifier_mask, sig)) {
				if (!(current->notifier)(current->notifier_data)) {
					clear_thread_flag(TIF_SIGPENDING);
					return 0;
				}
			}
		}

		collect_signal(sig, pending, info);
	}

	return sig;
}
/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info);
		/*
		 * itimer signal ?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self-restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavily loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr.tv64 != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}
	if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		do_schedule_next_timer(info);
		spin_lock(&tsk->sighand->siglock);
	}
	return signr;
}
/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up(struct task_struct *t, int resume)
{
	unsigned int mask;

	set_tsk_thread_flag(t, TIF_SIGPENDING);

	/*
	 * For SIGKILL, we want to wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	mask = TASK_INTERRUPTIBLE;
	if (resume)
		mask |= TASK_WAKEKILL;
	if (!wake_up_state(t, mask))
		kick_process(t);
}
/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 *
 * This version takes a sigset mask and looks at all signals,
 * not just those in the first mask word.
 */
static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return 0;

	sigandnsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}
/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int rm_from_queue(unsigned long mask, struct sigpending *s)
{
	struct sigqueue *q, *n;

	if (!sigtestsetmask(&s->signal, mask))
		return 0;

	sigdelsetmask(&s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (q->info.si_signo < SIGRTMIN &&
		    (mask & sigmask(q->info.si_signo))) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}
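
/*
 * The SEND_SIG_* sentinels (NOINFO/PRIV/FORCED) are tiny constants
 * rather than real siginfo pointers; anything at or below
 * SEND_SIG_FORCED is one of them.
 */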
static inline int is_si_special(const struct siginfo *info)
{
	return info <= SEND_SIG_FORCED;
}

static inline bool si_fromuser(const struct siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}
/*
 * called with RCU read lock from check_kill_permission()
 */
static int kill_ok_by_cred(struct task_struct *t)
{
	const struct cred *cred = current_cred();
	const struct cred *tcred = __task_cred(t);

	if (uid_eq(cred->euid, tcred->suid) ||
	    uid_eq(cred->euid, tcred->uid)  ||
	    uid_eq(cred->uid,  tcred->suid) ||
	    uid_eq(cred->uid,  tcred->uid))
		return 1;

	if (ns_capable(tcred->user_ns, CAP_KILL))
		return 1;

	return 0;
}
/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, 0);
}
/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules a sticky ptrace trap which is cleared on the
 * next TRAP_STOP to notify ptracer of an event.  @t must have been seized
 * by ptracer.
 *
 * If @t is running, STOP trap will be taken.  If trapped for STOP and
 * ptracer is listening for events, tracee is woken up so that it can
 * re-trap for the new event.  If trapped otherwise, STOP trap will be
 * eventually taken without returning to userland after the existing traps
 * are finished by PTRACE_CONT.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
static void ptrace_trap_notify(struct task_struct *t)
{
	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
	assert_spin_locked(&t->sighand->siglock);

	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
	signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}
/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals. The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static int prepare_signal(int sig, struct task_struct *p, bool force)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	if (unlikely(signal->flags & SIGNAL_GROUP_EXIT)) {
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		rm_from_queue(sigmask(SIGCONT), &signal->shared_pending);
		t = p;
		do {
			rm_from_queue(sigmask(SIGCONT), &t->pending);
		} while_each_thread(p, t);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending);
		t = p;
		do {
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
			if (likely(!(t->ptrace & PT_SEIZED)))
				wake_up_state(t, __TASK_STOPPED);
			else
				ptrace_trap_notify(t);
		} while_each_thread(p, t);

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal_to_deliver().
			 */
			signal->flags = why | SIGNAL_STOP_CONTINUED;
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	return !sig_ignored(p, sig, force);
}
/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline int wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return 0;
	if (p->flags & PF_EXITING)
		return 0;
	if (sig == SIGKILL)
		return 1;
	if (task_is_stopped_or_traced(p))
		return 0;
	return task_curr(p) || !signal_pending(p);
}
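
/*
 * A signal has just been queued for @p (thread-private or group-shared,
 * depending on @group).  Pick a thread to service it and wake it up,
 * short-circuiting to a whole-group takedown when the signal is fatal
 * and not handled as a coredump.
 */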
static void complete_signal(int sig, struct task_struct *p, int group)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if (!group || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !t->ptrace)) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}
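
/*
 * Legacy (non-realtime) signals coalesce: a second instance is dropped
 * when the signal is already pending in this set.
 */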
static inline int legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}
#ifdef CONFIG_USER_NS
static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
{
	if (current_user_ns() == task_cred_xxx(t, user_ns))
		return;

	if (SI_FROMKERNEL(info))
		return;

	rcu_read_lock();
	info->si_uid = from_kuid_munged(task_cred_xxx(t, user_ns),
					make_kuid(current_user_ns(), info->si_uid));
	rcu_read_unlock();
}
#else
static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
{
	return;
}
#endif
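
/*
 * Queue @sig for @t.  The caller holds ->siglock; @group selects the
 * group-shared vs thread-private pending set, and @from_ancestor_ns
 * flags a sender from an ancestor pid namespace so si_pid, which is
 * meaningless to the target, can be zeroed.
 */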
static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group, int from_ancestor_ns)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;
	int ret = 0, result;

	assert_spin_locked(&t->sighand->siglock);

	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t,
			from_ancestor_ns || (info == SEND_SIG_FORCED)))
		goto ret;

	pending = group ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	result = TRACE_SIGNAL_ALREADY_PENDING;
	if (legacy_queue(pending, sig))
		goto ret;

	result = TRACE_SIGNAL_DELIVERED;
	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if (info == SEND_SIG_FORCED)
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism.  It is implementation
	 * defined whether kill() does so.  We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
		override_rlimit);
	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			q->info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
			break;
		case (unsigned long) SEND_SIG_PRIV:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			if (from_ancestor_ns)
				q->info.si_pid = 0;
			break;
		}

		userns_fixup_signal_uid(&q->info, t);

	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER) {
			/*
			 * Queue overflow, abort.  We may abort if the
			 * signal was rt and sent by user using something
			 * other than kill().
			 */
			result = TRACE_SIGNAL_OVERFLOW_FAIL;
			ret = -EAGAIN;
			goto ret;
		} else {
			/*
			 * This is a silent loss of information.  We still
			 * send the signal, but the *info bits are lost.
			 */
			result = TRACE_SIGNAL_LOSE_INFO;
		}
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
ret:
	trace_signal_generate(sig, info, t, group, result);
	return ret;
}
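
/*
 * Wrapper around __send_signal() that computes @from_ancestor_ns: true
 * when the sender is from user space but has no pid in the target's pid
 * namespace (i.e. it lives in an ancestor namespace).
 */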
static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group)
{
	int from_ancestor_ns = 0;

#ifdef CONFIG_PID_NS
	from_ancestor_ns = si_fromuser(info) &&
			   !task_pid_nr_ns(current, task_active_pid_ns(t));
#endif

	return __send_signal(sig, info, t, group, from_ancestor_ns);
}
static void print_fatal_signal(struct pt_regs *regs, int signr)
{
	printk("%s/%d: potentially unexpected fatal signal %d.\n",
		current->comm, task_pid_nr(current), signr);

#if defined(__i386__) && !defined(__arch_um__)
	printk("code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			printk("%02x ", insn);
		}
	}
#endif
	printk("\n");
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}
static int __init setup_print_fatal_signals(char *str)
{
	get_option (&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);
int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, 1);
}

static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	return send_signal(sig, info, t, 0);
}
int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
			bool group)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal(sig, info, p, group);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}
/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	if (action->sa.sa_handler == SIG_DFL)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}
/*
 * Nuke all other threads in the group.
 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t = p;
	int count = 0;

	p->signal->group_stop_count = 0;

	while_each_thread(p, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}
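
/*
 * Take @tsk->sighand->siglock while guarding against the sighand being
 * detached and freed under us (it is RCU-protected): retry until the
 * pointer is stable under the lock, or return NULL if the task has
 * already released its sighand.
 */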
struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	for (;;) {
		local_irq_save(*flags);
		rcu_read_lock();
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL)) {
			rcu_read_unlock();
			local_irq_restore(*flags);
			break;
		}

		spin_lock(&sighand->siglock);
		if (likely(sighand == tsk->sighand)) {
			rcu_read_unlock();
			break;
		}
		spin_unlock(&sighand->siglock);
		rcu_read_unlock();
		local_irq_restore(*flags);
	}

	return sighand;
}
/*
 * send signal info to all the members of a group
 */
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;

	rcu_read_lock();
	ret = check_kill_permission(sig, info, p);
	rcu_read_unlock();

	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, true);

	return ret;
}
/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}
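
/*
 * Send @sig to the process identified by @pid, retrying the lookup if
 * the thread group leader was unhashed under us (see the de_thread()
 * race noted below).
 */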
int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	rcu_read_lock();
retry:
	p = pid_task(pid, PIDTYPE_PID);
	if (p) {
		error = group_send_sig_info(sig, info, p);
		if (unlikely(error == -ESRCH))
			/*
			 * The task was unhashed in between, try again.
			 * If it is dead, pid_task() will return NULL,
			 * if we race with de_thread() it will find the
			 * new leader.
			 */
			goto retry;
	}
	rcu_read_unlock();

	return error;
}
int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;
	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}
static int kill_as_cred_perm(const struct cred *cred,
			     struct task_struct *target)
{
	const struct cred *pcred = __task_cred(target);
	if (!uid_eq(cred->euid, pcred->suid) && !uid_eq(cred->euid, pcred->uid) &&
	    !uid_eq(cred->uid,  pcred->suid) && !uid_eq(cred->uid,  pcred->uid))
		return 0;
	return 1;
}
/* like kill_pid_info(), but doesn't use uid/euid of "current" */
int kill_pid_info_as_cred(int sig, struct siginfo *info, struct pid *pid,
			 const struct cred *cred, u32 secid)
{
	int ret = -EINVAL;
	struct task_struct *p;
	unsigned long flags;

	if (!valid_signal(sig))
		return ret;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if (si_fromuser(info) && !kill_as_cred_perm(cred, p)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, info, sig, secid);
	if (ret)
		goto out_unlock;

	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal(sig, info, p, 1, 0);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_info_as_cred);
/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0) {
		rcu_read_lock();
		ret = kill_pid_info(sig, info, find_vpid(pid));
		rcu_read_unlock();
		return ret;
	}

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct * p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}
/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	return do_send_sig_info(sig, info, p, false);
}
#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}

void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, SEND_SIG_PRIV, p);
}
/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
int
force_sigsegv(int sig, struct task_struct *p)
{
	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
	return 0;
}
int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);
/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX Timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */
struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);

	if (q)
		q->flags |= SIGQUEUE_PREALLOC;

	return q;
}
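
/*
 * Give back a preallocated sigqueue.  If the entry is still queued we
 * only strip SIGQUEUE_PREALLOC and let the normal dequeue path free it.
 */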
void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}
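
/*
 * Deliver a preallocated sigqueue (used by POSIX timer expiry).  If the
 * entry is already queued, only the overrun count is bumped.
 */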
int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	unsigned long flags;
	int ret, result;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	if (!likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, false))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		result = TRACE_SIGNAL_ALREADY_PENDING;
		goto out;
	}
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = group ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
	result = TRACE_SIGNAL_DELIVERED;
out:
	trace_signal_generate(sig, &q->info, t, group, result);
	unlock_task_sighand(t, &flags);
ret:
	return ret;
}
/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns true if our parent ignored us and so we've switched to
 * self-reaping.
 */
bool do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;
	bool autoreap = false;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead.  */
	BUG_ON(task_is_stopped_or_traced(tsk));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	if (sig != SIGCHLD) {
		/*
		 * This is only possible if parent == real_parent.
		 * Check if it has changed security domain.
		 */
		if (tsk->parent_exec_id != tsk->parent->self_exec_id)
			sig = SIGCHLD;
	}

	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * We are under tasklist_lock here so our parent is tied to
	 * us and cannot change.
	 *
	 * task_active_pid_ns will always return the same pid namespace
	 * until a task passes through release_task.
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg it is not
	 * correct to rely on this.
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
				       task_uid(tsk));
	rcu_read_unlock();

	info.si_utime = cputime_to_clock_t(tsk->utime + tsk->signal->utime);
	info.si_stime = cputime_to_clock_t(tsk->stime + tsk->signal->stime);

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		autoreap = true;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	if (valid_signal(sig) && sig)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);

	return autoreap;
}
/**
 * do_notify_parent_cldstop - notify parent of stopped/continued state change
 * @tsk: task reporting the state change
 * @for_ptracer: the notification is for ptracer
 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
 *
 * Notify @tsk's parent that the stopped/continued state has changed.  If
 * @for_ptracer is %false, @tsk's group leader notifies its real parent.
 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
 *
 * CONTEXT:
 * Must be called with tasklist_lock at least read locked.
 */
static void do_notify_parent_cldstop(struct task_struct *tsk,
				     bool for_ptracer, int why)
{
	struct siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;

	if (for_ptracer) {
		parent = tsk->parent;
	} else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/*
	 * see comment in do_notify_parent() about the following 4 lines
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, parent->nsproxy->pid_ns);
	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
	rcu_read_unlock();

	info.si_utime = cputime_to_clock_t(tsk->utime);
	info.si_stime = cputime_to_clock_t(tsk->stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}
static inline int may_ptrace_stop(void)
{
	if (!likely(current->ptrace))
		return 0;
	/*
	 * Are we in the middle of do_coredump?
	 * If so, and our tracer is also part of the coredump, stopping
	 * is a deadlock situation, and pointless because our tracer
	 * is dead, so don't allow us to stop.
	 * If SIGKILL was already sent before the caller unlocked
	 * ->siglock we must see ->core_state != NULL. Otherwise it
	 * is safe to enter schedule().
	 */
	if (unlikely(current->mm->core_state) &&
	    unlikely(current->mm == current->parent->mm))
		return 0;

	return 1;
}
/*
 * Return non-zero if there is a SIGKILL that should be waking us up.
 * Called with the siglock held.
 */
static int sigkill_pending(struct task_struct *tsk)
{
	return	sigismember(&tsk->pending.signal, SIGKILL) ||
		sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
}
/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer
 * is gone, we keep current->exit_code unless clear_code.
 */
static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
	__releases(&current->sighand->siglock)
	__acquires(&current->sighand->siglock)
{
	bool gstop_done = false;

	if (arch_ptrace_stop_needed(exit_code, info)) {
		/*
		 * The arch code has something special to do before a
		 * ptrace stop.  This is allowed to block, e.g. for faults
		 * on user stack pages.  We can't keep the siglock while
		 * calling arch_ptrace_stop, so we must release it now.
		 * To preserve proper semantics, we must do this before
		 * any signal bookkeeping like checking group_stop_count.
		 * Meanwhile, a SIGKILL could come in before we retake the
		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
		 * So after regaining the lock, we must check for SIGKILL.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		arch_ptrace_stop(exit_code, info);
		spin_lock_irq(&current->sighand->siglock);
		if (sigkill_pending(current))
			return;
	}

	/*
	 * We're committing to trapping.  TRACED should be visible before
	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
	 * Also, transition to TRACED and updates to ->jobctl should be
	 * atomic with respect to siglock and should be done after the arch
	 * hook as siglock is released and regrabbed across it.
	 */
	set_current_state(TASK_TRACED);

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/*
	 * If @why is CLD_STOPPED, we're trapping to participate in a group
	 * stop.  Do the bookkeeping.  Note that if SIGCONT was delivered
	 * across siglock relocks since INTERRUPT was scheduled, PENDING
	 * could be clear now.  We act as if SIGCONT is received after
	 * TASK_TRACED is entered - ignore it.
	 */
	if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
		gstop_done = task_participate_group_stop(current);

	/* any trap clears pending STOP trap, STOP trap clears NOTIFY */
	task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
	if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
		task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);

	/* entering a trap, clear TRAPPING */
	task_clear_jobctl_trapping(current);

	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);
	if (may_ptrace_stop()) {
		/*
		 * Notify parents of the stop.
		 *
		 * While ptraced, there are two parents - the ptracer and
		 * the real_parent of the group_leader.  The ptracer should
		 * know about every stop while the real parent is only
		 * interested in the completion of group stop.  The states
		 * for the two don't interact with each other.  Notify
		 * separately unless they're gonna be duplicates.
		 */
		do_notify_parent_cldstop(current, true, why);
		if (gstop_done && ptrace_reparented(current))
			do_notify_parent_cldstop(current, false, why);

		/*
		 * Don't want to allow preemption here, because
		 * sys_ptrace() needs this task to be inactive.
		 *
		 * XXX: implement read_unlock_no_resched().
		 */
		preempt_disable();
		read_unlock(&tasklist_lock);
		preempt_enable_no_resched();
		freezable_schedule();
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't drop the lock yet, another tracer may come.
		 *
		 * If @gstop_done, the ptracer went away between group stop
		 * completion and here.  During detach, it would have set
		 * JOBCTL_STOP_PENDING on us and we'll re-enter
		 * TASK_STOPPED in do_signal_stop() on return, so notifying
		 * the real parent of the group stop completion is enough.
		 */
		if (gstop_done)
			do_notify_parent_cldstop(current, false, why);

		__set_current_state(TASK_RUNNING);
		if (clear_code)
			current->exit_code = 0;
		read_unlock(&tasklist_lock);
	}

	/*
	 * We are back.  Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/* LISTENING can be set only during STOP traps, clear it */
	current->jobctl &= ~JOBCTL_LISTENING;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 * This sets TIF_SIGPENDING, but never clears it.
	 */
	recalc_sigpending_tsk(current);
}
static void ptrace_do_notify(int signr, int exit_code, int why)
{
	siginfo_t info;

	memset(&info, 0, sizeof info);
	info.si_signo = signr;
	info.si_code = exit_code;
	info.si_pid = task_pid_vnr(current);
	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());

	/* Let the debugger run.  */
	ptrace_stop(exit_code, why, 1, &info);
}
void ptrace_notify(int exit_code)
{
	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
	if (unlikely(current->task_works))
		task_work_run();

	spin_lock_irq(&current->sighand->siglock);
	ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
	spin_unlock_irq(&current->sighand->siglock);
}
1977 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
1978 * @signr: signr causing group stop if initiating
1980 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
1981 * and participate in it. If already set, participate in the existing
1982 * group stop. If participated in a group stop (and thus slept), %true is
1983 * returned with siglock released.
1985 * If ptraced, this function doesn't handle stop itself. Instead,
1986 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
1987 * untouched. The caller must ensure that INTERRUPT trap handling takes
1988 * places afterwards.
1990 * CONTEXT:
1991 * Must be called with @current->sighand->siglock held, which is released
1992 * on %true return.
1994 * RETURNS:
1995 * %false if group stop is already cancelled or ptrace trap is scheduled.
1996 * %true if participated in group stop.
1998 static bool do_signal_stop(int signr)
1999 __releases(&current->sighand->siglock)
2001 struct signal_struct *sig = current->signal;
2003 if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
2004 unsigned int gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
2005 struct task_struct *t;
2007 /* signr will be recorded in task->jobctl for retries */
2008 WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
2010 if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
2011 unlikely(signal_group_exit(sig)))
2012 return false;
2014 * There is no group stop already in progress. We must
2015 * initiate one now.
2017 * While ptraced, a task may be resumed while group stop is
2018 * still in effect and then receive a stop signal and
2019 * initiate another group stop. This deviates from the
2020 * usual behavior as two consecutive stop signals can't
2021 * cause two group stops when !ptraced. That is why we
2022 * also check !task_is_stopped(t) below.
2024 * The condition can be distinguished by testing whether
2025 * SIGNAL_STOP_STOPPED is already set. Don't generate
2026 * group_exit_code in such case.
2028 * This is not necessary for SIGNAL_STOP_CONTINUED because
2029 * an intervening stop signal is required to cause two
2030 * continued events regardless of ptrace.
2032 if (!(sig->flags & SIGNAL_STOP_STOPPED))
2033 sig->group_exit_code = signr;
2035 sig->group_stop_count = 0;
2037 if (task_set_jobctl_pending(current, signr | gstop))
2038 sig->group_stop_count++;
2040 for (t = next_thread(current); t != current;
2041 t = next_thread(t)) {
2043 * Setting state to TASK_STOPPED for a group
2044 * stop is always done with the siglock held,
2045 * so this check has no races.
2047 if (!task_is_stopped(t) &&
2048 task_set_jobctl_pending(t, signr | gstop)) {
2049 sig->group_stop_count++;
2050 if (likely(!(t->ptrace & PT_SEIZED)))
2051 signal_wake_up(t, 0);
2052 else
2053 ptrace_trap_notify(t);
2058 if (likely(!current->ptrace)) {
2059 int notify = 0;
2062 * If there are no other threads in the group, or if there
2063 * is a group stop in progress and we are the last to stop,
2064 * report to the parent.
2066 if (task_participate_group_stop(current))
2067 notify = CLD_STOPPED;
2069 __set_current_state(TASK_STOPPED);
2070 spin_unlock_irq(&current->sighand->siglock);
2073 * Notify the parent of the group stop completion. Because
2074 * we're not holding either the siglock or tasklist_lock
2075 * here, a ptracer may attach in between; however, this is for
2076 * group stop and should always be delivered to the real
2077 * parent of the group leader. The new ptracer will get
2078 * its notification when this task transitions into
2079 * TASK_TRACED.
2081 if (notify) {
2082 read_lock(&tasklist_lock);
2083 do_notify_parent_cldstop(current, false, notify);
2084 read_unlock(&tasklist_lock);
2087 /* Now we don't run again until woken by SIGCONT or SIGKILL */
2088 freezable_schedule();
2089 return true;
2090 } else {
2092 * While ptraced, group stop is handled by STOP trap.
2093 * Schedule it and let the caller deal with it.
2095 task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
2096 return false;
2101 * do_jobctl_trap - take care of ptrace jobctl traps
2103 * When PT_SEIZED, it's used for both group stop and explicit
2104 * SEIZE/INTERRUPT traps. Both generate PTRACE_EVENT_STOP trap with
2105 * accompanying siginfo. If stopped, lower eight bits of exit_code contain
2106 * the stop signal; otherwise, %SIGTRAP.
2108 * When !PT_SEIZED, it's used only for group stop trap with stop signal
2109 * number as exit_code and no siginfo.
2111 * CONTEXT:
2112 * Must be called with @current->sighand->siglock held, which may be
2113 * released and re-acquired before returning with intervening sleep.
2115 static void do_jobctl_trap(void)
2117 struct signal_struct *signal = current->signal;
2118 int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2120 if (current->ptrace & PT_SEIZED) {
2121 if (!signal->group_stop_count &&
2122 !(signal->flags & SIGNAL_STOP_STOPPED))
2123 signr = SIGTRAP;
2124 WARN_ON_ONCE(!signr);
2125 ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2126 CLD_STOPPED);
2127 } else {
2128 WARN_ON_ONCE(!signr);
2129 ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2130 current->exit_code = 0;
2134 static int ptrace_signal(int signr, siginfo_t *info,
2135 struct pt_regs *regs, void *cookie)
2137 ptrace_signal_deliver(regs, cookie);
2139 * We do not check sig_kernel_stop(signr) but set this marker
2140 * unconditionally because we do not know whether the debugger will
2141 * change signr. This flag has no meaning unless we are going
2142 * to stop after return from ptrace_stop(). In this case it will
2143 * be checked in do_signal_stop(); we should only stop if it was
2144 * not cleared by SIGCONT while we were sleeping. See also the
2145 * comment in dequeue_signal().
2147 current->jobctl |= JOBCTL_STOP_DEQUEUED;
2148 ptrace_stop(signr, CLD_TRAPPED, 0, info);
2150 /* We're back. Did the debugger cancel the sig? */
2151 signr = current->exit_code;
2152 if (signr == 0)
2153 return signr;
2155 current->exit_code = 0;
2158 * Update the siginfo structure if the signal has
2159 * changed. If the debugger wanted something
2160 * specific in the siginfo structure then it should
2161 * have updated *info via PTRACE_SETSIGINFO.
2163 if (signr != info->si_signo) {
2164 info->si_signo = signr;
2165 info->si_errno = 0;
2166 info->si_code = SI_USER;
2167 rcu_read_lock();
2168 info->si_pid = task_pid_vnr(current->parent);
2169 info->si_uid = from_kuid_munged(current_user_ns(),
2170 task_uid(current->parent));
2171 rcu_read_unlock();
2174 /* If the (new) signal is now blocked, requeue it. */
2175 if (sigismember(&current->blocked, signr)) {
2176 specific_send_sig_info(signr, info, current);
2177 signr = 0;
2180 return signr;
2183 int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
2184 struct pt_regs *regs, void *cookie)
2186 struct sighand_struct *sighand = current->sighand;
2187 struct signal_struct *signal = current->signal;
2188 int signr;
2190 if (unlikely(current->task_works))
2191 task_work_run();
2193 if (unlikely(uprobe_deny_signal()))
2194 return 0;
2197 * Do this once, we can't return to user-mode if freezing() == T.
2198 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2199 * thus do not need another check after return.
2201 try_to_freeze();
2203 relock:
2204 spin_lock_irq(&sighand->siglock);
2206 * Every stopped thread goes here after wakeup. Check to see if
2207 * we should notify the parent; prepare_signal(SIGCONT) encodes
2208 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2210 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2211 int why;
2213 if (signal->flags & SIGNAL_CLD_CONTINUED)
2214 why = CLD_CONTINUED;
2215 else
2216 why = CLD_STOPPED;
2218 signal->flags &= ~SIGNAL_CLD_MASK;
2220 spin_unlock_irq(&sighand->siglock);
2223 * Notify the parent that we're continuing. This event is
2224 * always per-process and doesn't make a whole lot of sense
2225 * for ptracers, who shouldn't consume the state via
2226 * wait(2) either, but, for backward compatibility, notify
2227 * the ptracer of the group leader too unless it's going to be
2228 * a duplicate.
2230 read_lock(&tasklist_lock);
2231 do_notify_parent_cldstop(current, false, why);
2233 if (ptrace_reparented(current->group_leader))
2234 do_notify_parent_cldstop(current->group_leader,
2235 true, why);
2236 read_unlock(&tasklist_lock);
2238 goto relock;
2241 for (;;) {
2242 struct k_sigaction *ka;
2244 if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2245 do_signal_stop(0))
2246 goto relock;
2248 if (unlikely(current->jobctl & JOBCTL_TRAP_MASK)) {
2249 do_jobctl_trap();
2250 spin_unlock_irq(&sighand->siglock);
2251 goto relock;
2254 signr = dequeue_signal(current, &current->blocked, info);
2256 if (!signr)
2257 break; /* will return 0 */
2259 if (unlikely(current->ptrace) && signr != SIGKILL) {
2260 signr = ptrace_signal(signr, info,
2261 regs, cookie);
2262 if (!signr)
2263 continue;
2266 ka = &sighand->action[signr-1];
2268 /* Trace actually delivered signals. */
2269 trace_signal_deliver(signr, info, ka);
2271 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
2272 continue;
2273 if (ka->sa.sa_handler != SIG_DFL) {
2274 /* Run the handler. */
2275 *return_ka = *ka;
2277 if (ka->sa.sa_flags & SA_ONESHOT)
2278 ka->sa.sa_handler = SIG_DFL;
2280 break; /* will return non-zero "signr" value */
2284 * Now we are doing the default action for this signal.
2286 if (sig_kernel_ignore(signr)) /* Default is nothing. */
2287 continue;
2290 * Global init gets no signals it doesn't want.
2291 * Container-init gets no signals it doesn't want from the same
2292 * container.
2294 * Note that if global/container-init sees a sig_kernel_only()
2295 * signal here, the signal must have been generated internally
2296 * or must have come from an ancestor namespace. In either
2297 * case, the signal cannot be dropped.
2299 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2300 !sig_kernel_only(signr))
2301 continue;
2303 if (sig_kernel_stop(signr)) {
2305 * The default action is to stop all threads in
2306 * the thread group. The job control signals
2307 * do nothing in an orphaned pgrp, but SIGSTOP
2308 * always works. Note that siglock needs to be
2309 * dropped during the call to is_orphaned_pgrp()
2310 * because of lock ordering with tasklist_lock.
2311 * This allows an intervening SIGCONT to be posted.
2312 * We need to check for that and bail out if necessary.
2314 if (signr != SIGSTOP) {
2315 spin_unlock_irq(&sighand->siglock);
2317 /* signals can be posted during this window */
2319 if (is_current_pgrp_orphaned())
2320 goto relock;
2322 spin_lock_irq(&sighand->siglock);
2325 if (likely(do_signal_stop(info->si_signo))) {
2326 /* It released the siglock. */
2327 goto relock;
2331 * We didn't actually stop, due to a race
2332 * with SIGCONT or something like that.
2334 continue;
2337 spin_unlock_irq(&sighand->siglock);
2340 * Anything else is fatal, maybe with a core dump.
2342 current->flags |= PF_SIGNALED;
2344 if (sig_kernel_coredump(signr)) {
2345 if (print_fatal_signals)
2346 print_fatal_signal(regs, info->si_signo);
2348 * If it was able to dump core, this kills all
2349 * other threads in the group and synchronizes with
2350 * their demise. If we lost the race with another
2351 * thread getting here, it set group_exit_code
2352 * first and our do_group_exit call below will use
2353 * that value and ignore the one we pass it.
2355 do_coredump(info, regs);
2359 * Death signals, no core dump.
2361 do_group_exit(info->si_signo);
2362 /* NOTREACHED */
2364 spin_unlock_irq(&sighand->siglock);
2365 return signr;
2369 * signal_delivered - report that a signal was delivered
2370 * @sig: number of signal being delivered
2371 * @info: siginfo_t of signal being delivered
2372 * @ka: sigaction setting that chose the handler
2373 * @regs: user register state
2374 * @stepping: nonzero if debugger single-step or block-step in use
2376 * This function should be called when a signal has successfully been
2377 * delivered. It updates the blocked signals accordingly (@ka->sa.sa_mask
2378 * is always blocked, and the signal itself is blocked unless %SA_NODEFER
2379 * is set in @ka->sa.sa_flags). Tracing is notified.
2381 void signal_delivered(int sig, siginfo_t *info, struct k_sigaction *ka,
2382 struct pt_regs *regs, int stepping)
2384 sigset_t blocked;
2386 /* A signal was successfully delivered, and the
2387 saved sigmask was stored on the signal frame,
2388 and will be restored by sigreturn. So we can
2389 simply clear the restore sigmask flag. */
2390 clear_restore_sigmask();
2392 sigorsets(&blocked, &current->blocked, &ka->sa.sa_mask);
2393 if (!(ka->sa.sa_flags & SA_NODEFER))
2394 sigaddset(&blocked, sig);
2395 set_current_blocked(&blocked);
2396 tracehook_signal_handler(sig, info, ka, regs, stepping);
2400 * It could be that complete_signal() picked us to notify about the
2401 * group-wide signal. Other threads should be notified now to take
2402 * the shared signals in @which since we will not.
2404 static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2406 sigset_t retarget;
2407 struct task_struct *t;
2409 sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2410 if (sigisemptyset(&retarget))
2411 return;
2413 t = tsk;
2414 while_each_thread(tsk, t) {
2415 if (t->flags & PF_EXITING)
2416 continue;
2418 if (!has_pending_signals(&retarget, &t->blocked))
2419 continue;
2420 /* Remove the signals this thread can handle. */
2421 sigandsets(&retarget, &retarget, &t->blocked);
2423 if (!signal_pending(t))
2424 signal_wake_up(t, 0);
2426 if (sigisemptyset(&retarget))
2427 break;
2431 void exit_signals(struct task_struct *tsk)
2433 int group_stop = 0;
2434 sigset_t unblocked;
2437 * @tsk is about to have PF_EXITING set - lock out users which
2438 * expect stable threadgroup.
2440 threadgroup_change_begin(tsk);
2442 if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2443 tsk->flags |= PF_EXITING;
2444 threadgroup_change_end(tsk);
2445 return;
2448 spin_lock_irq(&tsk->sighand->siglock);
2450 * From now this task is not visible for group-wide signals,
2451 * see wants_signal(), do_signal_stop().
2453 tsk->flags |= PF_EXITING;
2455 threadgroup_change_end(tsk);
2457 if (!signal_pending(tsk))
2458 goto out;
2460 unblocked = tsk->blocked;
2461 signotset(&unblocked);
2462 retarget_shared_pending(tsk, &unblocked);
2464 if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
2465 task_participate_group_stop(tsk))
2466 group_stop = CLD_STOPPED;
2467 out:
2468 spin_unlock_irq(&tsk->sighand->siglock);
2471 * If group stop has completed, deliver the notification. This
2472 * should always go to the real parent of the group leader.
2474 if (unlikely(group_stop)) {
2475 read_lock(&tasklist_lock);
2476 do_notify_parent_cldstop(tsk, false, group_stop);
2477 read_unlock(&tasklist_lock);
2481 EXPORT_SYMBOL(recalc_sigpending);
2482 EXPORT_SYMBOL_GPL(dequeue_signal);
2483 EXPORT_SYMBOL(flush_signals);
2484 EXPORT_SYMBOL(force_sig);
2485 EXPORT_SYMBOL(send_sig);
2486 EXPORT_SYMBOL(send_sig_info);
2487 EXPORT_SYMBOL(sigprocmask);
2488 EXPORT_SYMBOL(block_all_signals);
2489 EXPORT_SYMBOL(unblock_all_signals);
2493 * System call entry points.
2497 * sys_restart_syscall - restart a system call
2499 SYSCALL_DEFINE0(restart_syscall)
2501 struct restart_block *restart = &current_thread_info()->restart_block;
2502 return restart->fn(restart);
2505 long do_no_restart_syscall(struct restart_block *param)
2507 return -EINTR;
2510 static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
2512 if (signal_pending(tsk) && !thread_group_empty(tsk)) {
2513 sigset_t newblocked;
2514 /* A set of now blocked but previously unblocked signals. */
2515 sigandnsets(&newblocked, newset, &current->blocked);
2516 retarget_shared_pending(tsk, &newblocked);
2518 tsk->blocked = *newset;
2519 recalc_sigpending();
2523 * set_current_blocked - change current->blocked mask
2524 * @newset: new mask
2526 * It is wrong to change ->blocked directly, this helper should be used
2527 * to ensure the process can't miss a shared signal we are going to block.
2529 void set_current_blocked(sigset_t *newset)
2531 struct task_struct *tsk = current;
2532 sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
2533 spin_lock_irq(&tsk->sighand->siglock);
2534 __set_task_blocked(tsk, newset);
2535 spin_unlock_irq(&tsk->sighand->siglock);
2538 void __set_current_blocked(const sigset_t *newset)
2540 struct task_struct *tsk = current;
2542 spin_lock_irq(&tsk->sighand->siglock);
2543 __set_task_blocked(tsk, newset);
2544 spin_unlock_irq(&tsk->sighand->siglock);
2548 * This is also useful for kernel threads that want to temporarily
2549 * (or permanently) block certain signals.
2551 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2552 * interface happily blocks "unblockable" signals like SIGKILL
2553 * and friends.
2555 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2557 struct task_struct *tsk = current;
2558 sigset_t newset;
2560 /* Lockless, only current can change ->blocked, never from irq */
2561 if (oldset)
2562 *oldset = tsk->blocked;
2564 switch (how) {
2565 case SIG_BLOCK:
2566 sigorsets(&newset, &tsk->blocked, set);
2567 break;
2568 case SIG_UNBLOCK:
2569 sigandnsets(&newset, &tsk->blocked, set);
2570 break;
2571 case SIG_SETMASK:
2572 newset = *set;
2573 break;
2574 default:
2575 return -EINVAL;
2578 __set_current_blocked(&newset);
2579 return 0;
2583 * sys_rt_sigprocmask - change the list of currently blocked signals
2584 * @how: whether to add, remove, or set signals
2585 * @nset: signals to add, remove, or set (if non-null)
2586 * @oset: previous value of signal mask if non-null
2587 * @sigsetsize: size of sigset_t type
2589 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
2590 sigset_t __user *, oset, size_t, sigsetsize)
2592 sigset_t old_set, new_set;
2593 int error;
2595 /* XXX: Don't preclude handling different sized sigset_t's. */
2596 if (sigsetsize != sizeof(sigset_t))
2597 return -EINVAL;
2599 old_set = current->blocked;
2601 if (nset) {
2602 if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
2603 return -EFAULT;
2604 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2606 error = sigprocmask(how, &new_set, NULL);
2607 if (error)
2608 return error;
2611 if (oset) {
2612 if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
2613 return -EFAULT;
2616 return 0;
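/*
 * Editor's example: a minimal user-space sketch of how the two entry
 * points above are normally reached, via the glibc sigprocmask()
 * wrapper (which calls rt_sigprocmask). Illustrative only, not part
 * of kernel/signal.c; assumes a POSIX libc.
 */
#if 0 /* user-space example */
#include <signal.h>
#include <stdio.h>

int main(void)
{
	sigset_t set, old;

	sigemptyset(&set);
	sigaddset(&set, SIGINT);

	/* SIG_BLOCK: or the new set into ->blocked, as above. */
	if (sigprocmask(SIG_BLOCK, &set, &old) == -1) {
		perror("sigprocmask");
		return 1;
	}

	/* ... critical section: SIGINT stays pending, not delivered ... */

	/* Restore the saved mask; a pending SIGINT is delivered now. */
	sigprocmask(SIG_SETMASK, &old, NULL);
	return 0;
}
#endif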
2619 long do_sigpending(void __user *set, unsigned long sigsetsize)
2621 long error = -EINVAL;
2622 sigset_t pending;
2624 if (sigsetsize > sizeof(sigset_t))
2625 goto out;
2627 spin_lock_irq(&current->sighand->siglock);
2628 sigorsets(&pending, &current->pending.signal,
2629 &current->signal->shared_pending.signal);
2630 spin_unlock_irq(&current->sighand->siglock);
2632 /* Outside the lock because only this thread touches it. */
2633 sigandsets(&pending, &current->blocked, &pending);
2635 error = -EFAULT;
2636 if (!copy_to_user(set, &pending, sigsetsize))
2637 error = 0;
2639 out:
2640 return error;
2644 * sys_rt_sigpending - examine a pending signal that has been raised
2645 * while blocked
2646 * @set: stores pending signals
2647 * @sigsetsize: size of sigset_t type or larger
2649 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, set, size_t, sigsetsize)
2651 return do_sigpending(set, sigsetsize);
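/*
 * Editor's example: a hedged user-space sketch of do_sigpending()'s
 * contract, using the glibc sigpending() wrapper: a signal raised
 * while blocked shows up in the pending set. Illustrative only.
 */
#if 0 /* user-space example */
#include <signal.h>
#include <stdio.h>

int main(void)
{
	sigset_t block, pend;

	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	sigprocmask(SIG_BLOCK, &block, NULL);

	raise(SIGUSR1);		/* queued on ->pending while blocked */

	if (sigpending(&pend) == 0 && sigismember(&pend, SIGUSR1))
		printf("SIGUSR1 is pending, as expected\n");
	return 0;
}
#endif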
2654 #ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2656 int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
2658 int err;
2660 if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
2661 return -EFAULT;
2662 if (from->si_code < 0)
2663 return __copy_to_user(to, from, sizeof(siginfo_t))
2664 ? -EFAULT : 0;
2666 * If you change siginfo_t structure, please be sure
2667 * this code is fixed accordingly.
2668 * Please remember to update the signalfd_copyinfo() function
2669 * inside fs/signalfd.c too, in case siginfo_t changes.
2670 * It should never copy any pad contained in the structure
2671 * to avoid security leaks, but must copy the generic
2672 * 3 ints plus the relevant union member.
2674 err = __put_user(from->si_signo, &to->si_signo);
2675 err |= __put_user(from->si_errno, &to->si_errno);
2676 err |= __put_user((short)from->si_code, &to->si_code);
2677 switch (from->si_code & __SI_MASK) {
2678 case __SI_KILL:
2679 err |= __put_user(from->si_pid, &to->si_pid);
2680 err |= __put_user(from->si_uid, &to->si_uid);
2681 break;
2682 case __SI_TIMER:
2683 err |= __put_user(from->si_tid, &to->si_tid);
2684 err |= __put_user(from->si_overrun, &to->si_overrun);
2685 err |= __put_user(from->si_ptr, &to->si_ptr);
2686 break;
2687 case __SI_POLL:
2688 err |= __put_user(from->si_band, &to->si_band);
2689 err |= __put_user(from->si_fd, &to->si_fd);
2690 break;
2691 case __SI_FAULT:
2692 err |= __put_user(from->si_addr, &to->si_addr);
2693 #ifdef __ARCH_SI_TRAPNO
2694 err |= __put_user(from->si_trapno, &to->si_trapno);
2695 #endif
2696 #ifdef BUS_MCEERR_AO
2698 * Other callers might not initialize the si_lsb field,
2699 * so check explicitly for the right codes here.
2701 if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)
2702 err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
2703 #endif
2704 break;
2705 case __SI_CHLD:
2706 err |= __put_user(from->si_pid, &to->si_pid);
2707 err |= __put_user(from->si_uid, &to->si_uid);
2708 err |= __put_user(from->si_status, &to->si_status);
2709 err |= __put_user(from->si_utime, &to->si_utime);
2710 err |= __put_user(from->si_stime, &to->si_stime);
2711 break;
2712 case __SI_RT: /* This is not generated by the kernel as of now. */
2713 case __SI_MESGQ: /* But this is */
2714 err |= __put_user(from->si_pid, &to->si_pid);
2715 err |= __put_user(from->si_uid, &to->si_uid);
2716 err |= __put_user(from->si_ptr, &to->si_ptr);
2717 break;
2718 #ifdef __ARCH_SIGSYS
2719 case __SI_SYS:
2720 err |= __put_user(from->si_call_addr, &to->si_call_addr);
2721 err |= __put_user(from->si_syscall, &to->si_syscall);
2722 err |= __put_user(from->si_arch, &to->si_arch);
2723 break;
2724 #endif
2725 default: /* this is just in case for now ... */
2726 err |= __put_user(from->si_pid, &to->si_pid);
2727 err |= __put_user(from->si_uid, &to->si_uid);
2728 break;
2730 return err;
2733 #endif
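/*
 * Editor's example: what the per-si_code union copy above looks like
 * from user space. For a fault signal (__SI_FAULT), the member that
 * reaches the handler is si_addr. A sketch only; fprintf() is not
 * async-signal-safe, but the handler exits immediately.
 */
#if 0 /* user-space example */
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void on_segv(int sig, siginfo_t *info, void *ctx)
{
	(void)ctx;
	fprintf(stderr, "SIGSEGV at address %p\n", info->si_addr);
	_exit(1);
}

int main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = on_segv;
	sa.sa_flags = SA_SIGINFO;	/* request the full siginfo_t */
	sigaction(SIGSEGV, &sa, NULL);

	*(volatile int *)0 = 0;		/* deliberately fault */
	return 0;
}
#endif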
2736 * do_sigtimedwait - wait for queued signals specified in @which
2737 * @which: queued signals to wait for
2738 * @info: if non-null, the signal's siginfo is returned here
2739 * @ts: upper bound on process time suspension
2741 int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
2742 const struct timespec *ts)
2744 struct task_struct *tsk = current;
2745 long timeout = MAX_SCHEDULE_TIMEOUT;
2746 sigset_t mask = *which;
2747 int sig;
2749 if (ts) {
2750 if (!timespec_valid(ts))
2751 return -EINVAL;
2752 timeout = timespec_to_jiffies(ts);
2754 * We can be close to the next tick; add another one
2755 * to ensure we will wait at least the time asked for.
2757 if (ts->tv_sec || ts->tv_nsec)
2758 timeout++;
2762 * Invert the set of allowed signals to get those we want to block.
2764 sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
2765 signotset(&mask);
2767 spin_lock_irq(&tsk->sighand->siglock);
2768 sig = dequeue_signal(tsk, &mask, info);
2769 if (!sig && timeout) {
2771 * None ready; temporarily unblock those we're interested in
2772 * while we are sleeping so that we'll be awakened when
2773 * they arrive. Unblocking is always fine; we can avoid
2774 * set_current_blocked().
2776 tsk->real_blocked = tsk->blocked;
2777 sigandsets(&tsk->blocked, &tsk->blocked, &mask);
2778 recalc_sigpending();
2779 spin_unlock_irq(&tsk->sighand->siglock);
2781 timeout = schedule_timeout_interruptible(timeout);
2783 spin_lock_irq(&tsk->sighand->siglock);
2784 __set_task_blocked(tsk, &tsk->real_blocked);
2785 siginitset(&tsk->real_blocked, 0);
2786 sig = dequeue_signal(tsk, &mask, info);
2788 spin_unlock_irq(&tsk->sighand->siglock);
2790 if (sig)
2791 return sig;
2792 return timeout ? -EINTR : -EAGAIN;
2796 * sys_rt_sigtimedwait - synchronously wait for queued signals specified
2797 * in @uthese
2798 * @uthese: queued signals to wait for
2799 * @uinfo: if non-null, the signal's siginfo is returned here
2800 * @uts: upper bound on process time suspension
2801 * @sigsetsize: size of sigset_t type
2803 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
2804 siginfo_t __user *, uinfo, const struct timespec __user *, uts,
2805 size_t, sigsetsize)
2807 sigset_t these;
2808 struct timespec ts;
2809 siginfo_t info;
2810 int ret;
2812 /* XXX: Don't preclude handling different sized sigset_t's. */
2813 if (sigsetsize != sizeof(sigset_t))
2814 return -EINVAL;
2816 if (copy_from_user(&these, uthese, sizeof(these)))
2817 return -EFAULT;
2819 if (uts) {
2820 if (copy_from_user(&ts, uts, sizeof(ts)))
2821 return -EFAULT;
2824 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
2826 if (ret > 0 && uinfo) {
2827 if (copy_siginfo_to_user(uinfo, &info))
2828 ret = -EFAULT;
2831 return ret;
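/*
 * Editor's example: driving do_sigtimedwait() via the glibc
 * sigtimedwait() wrapper. The waited-for signals must be blocked
 * first, otherwise they are delivered the usual way. Illustrative
 * sketch only; errno is EAGAIN on timeout, matching the return above.
 */
#if 0 /* user-space example */
#include <signal.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	sigset_t set;
	siginfo_t info;
	struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
	int sig;

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);

	sig = sigtimedwait(&set, &info, &ts);
	if (sig == SIGUSR1)
		printf("got SIGUSR1 from pid %d\n", (int)info.si_pid);
	else
		perror("sigtimedwait");		/* EAGAIN after ~2s */
	return 0;
}
#endif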
2835 * sys_kill - send a signal to a process
2836 * @pid: the PID of the process
2837 * @sig: signal to be sent
2839 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
2841 struct siginfo info;
2843 info.si_signo = sig;
2844 info.si_errno = 0;
2845 info.si_code = SI_USER;
2846 info.si_pid = task_tgid_vnr(current);
2847 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2849 return kill_something_info(sig, &info, pid);
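/*
 * Editor's example: the "null signal" probe described in
 * do_send_specific() below, from user space. kill(pid, 0) runs the
 * permission and existence checks without delivering anything.
 * Illustrative sketch only.
 */
#if 0 /* user-space example */
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static int process_exists(pid_t pid)
{
	if (kill(pid, 0) == 0)
		return 1;
	return errno == EPERM;	/* exists, but we may not signal it */
}

int main(void)
{
	printf("self exists: %d\n", process_exists(getpid()));
	return 0;
}
#endif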
2852 static int
2853 do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
2855 struct task_struct *p;
2856 int error = -ESRCH;
2858 rcu_read_lock();
2859 p = find_task_by_vpid(pid);
2860 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
2861 error = check_kill_permission(sig, info, p);
2863 * The null signal is a permissions and process existence
2864 * probe. No signal is actually delivered.
2866 if (!error && sig) {
2867 error = do_send_sig_info(sig, info, p, false);
2869 * If lock_task_sighand() failed we pretend the task
2870 * dies after receiving the signal. The window is tiny,
2871 * and the signal is private anyway.
2873 if (unlikely(error == -ESRCH))
2874 error = 0;
2877 rcu_read_unlock();
2879 return error;
2882 static int do_tkill(pid_t tgid, pid_t pid, int sig)
2884 struct siginfo info;
2886 info.si_signo = sig;
2887 info.si_errno = 0;
2888 info.si_code = SI_TKILL;
2889 info.si_pid = task_tgid_vnr(current);
2890 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2892 return do_send_specific(tgid, pid, sig, &info);
2896 * sys_tgkill - send signal to one specific thread
2897 * @tgid: the thread group ID of the thread
2898 * @pid: the PID of the thread
2899 * @sig: signal to be sent
2901 * This syscall also checks the @tgid and returns -ESRCH even if the PID
2902 * exists but no longer belongs to the target process. This
2903 * method solves the problem of threads exiting and PIDs getting reused.
2905 SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
2907 /* This is only valid for single tasks */
2908 if (pid <= 0 || tgid <= 0)
2909 return -EINVAL;
2911 return do_tkill(tgid, pid, sig);
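/*
 * Editor's example: calling tgkill directly with syscall(2), since
 * glibc historically exposed no wrapper. Using signal 0 makes this a
 * pure existence check, exercising the tgid/pid validation above.
 * Illustrative sketch only.
 */
#if 0 /* user-space example */
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	pid_t tgid = getpid();
	pid_t tid = syscall(SYS_gettid);	/* main thread: tid == tgid */

	if (syscall(SYS_tgkill, tgid, tid, 0) == -1)
		perror("tgkill");
	else
		printf("thread %d in group %d exists\n", (int)tid, (int)tgid);
	return 0;
}
#endif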
2915 * sys_tkill - send signal to one specific task
2916 * @pid: the PID of the task
2917 * @sig: signal to be sent
2919 * Send a signal to only one task, even if it's a CLONE_THREAD task.
2921 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
2923 /* This is only valid for single tasks */
2924 if (pid <= 0)
2925 return -EINVAL;
2927 return do_tkill(0, pid, sig);
2931 * sys_rt_sigqueueinfo - send signal information to a process
2932 * @pid: the PID of the thread
2933 * @sig: signal to be sent
2934 * @uinfo: signal info to be sent
2936 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
2937 siginfo_t __user *, uinfo)
2939 siginfo_t info;
2941 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2942 return -EFAULT;
2944 /* Not even root can pretend to send signals from the kernel.
2945 * Nor can they impersonate a kill()/tgkill(), which adds source info.
2947 if (info.si_code >= 0 || info.si_code == SI_TKILL) {
2948 /* We used to allow any < 0 si_code */
2949 WARN_ON_ONCE(info.si_code < 0);
2950 return -EPERM;
2952 info.si_signo = sig;
2954 /* POSIX.1b doesn't mention process groups. */
2955 return kill_proc_info(sig, &info, pid);
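/*
 * Editor's example: the usual route into rt_sigqueueinfo is glibc's
 * sigqueue(), which fills si_code with SI_QUEUE (< 0) and therefore
 * passes the "can't fake kernel siginfo" check above. Sketch only;
 * printf() in a handler is not strictly async-signal-safe.
 */
#if 0 /* user-space example */
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void on_usr1(int sig, siginfo_t *info, void *ctx)
{
	(void)ctx;
	printf("sig %d, payload %d\n", sig, info->si_value.sival_int);
}

int main(void)
{
	struct sigaction sa;
	union sigval val = { .sival_int = 42 };

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = on_usr1;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGUSR1, &sa, NULL);

	if (sigqueue(getpid(), SIGUSR1, val) == -1)
		perror("sigqueue");
	return 0;
}
#endif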
2958 long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
2960 /* This is only valid for single tasks */
2961 if (pid <= 0 || tgid <= 0)
2962 return -EINVAL;
2964 /* Not even root can pretend to send signals from the kernel.
2965 * Nor can they impersonate a kill()/tgkill(), which adds source info.
2967 if (info->si_code >= 0 || info->si_code == SI_TKILL) {
2968 /* We used to allow any < 0 si_code */
2969 WARN_ON_ONCE(info->si_code < 0);
2970 return -EPERM;
2972 info->si_signo = sig;
2974 return do_send_specific(tgid, pid, sig, info);
2977 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
2978 siginfo_t __user *, uinfo)
2980 siginfo_t info;
2982 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2983 return -EFAULT;
2985 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
2988 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
2990 struct task_struct *t = current;
2991 struct k_sigaction *k;
2992 sigset_t mask;
2994 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
2995 return -EINVAL;
2997 k = &t->sighand->action[sig-1];
2999 spin_lock_irq(&current->sighand->siglock);
3000 if (oact)
3001 *oact = *k;
3003 if (act) {
3004 sigdelsetmask(&act->sa.sa_mask,
3005 sigmask(SIGKILL) | sigmask(SIGSTOP));
3006 *k = *act;
3008 * POSIX 3.3.1.3:
3009 * "Setting a signal action to SIG_IGN for a signal that is
3010 * pending shall cause the pending signal to be discarded,
3011 * whether or not it is blocked."
3013 * "Setting a signal action to SIG_DFL for a signal that is
3014 * pending and whose default action is to ignore the signal
3015 * (for example, SIGCHLD), shall cause the pending signal to
3016 * be discarded, whether or not it is blocked"
3018 if (sig_handler_ignored(sig_handler(t, sig), sig)) {
3019 sigemptyset(&mask);
3020 sigaddset(&mask, sig);
3021 rm_from_queue_full(&mask, &t->signal->shared_pending);
3022 do {
3023 rm_from_queue_full(&mask, &t->pending);
3024 t = next_thread(t);
3025 } while (t != current);
3029 spin_unlock_irq(&current->sighand->siglock);
3030 return 0;
3033 int
3034 do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
3036 stack_t oss;
3037 int error;
3039 oss.ss_sp = (void __user *) current->sas_ss_sp;
3040 oss.ss_size = current->sas_ss_size;
3041 oss.ss_flags = sas_ss_flags(sp);
3043 if (uss) {
3044 void __user *ss_sp;
3045 size_t ss_size;
3046 int ss_flags;
3048 error = -EFAULT;
3049 if (!access_ok(VERIFY_READ, uss, sizeof(*uss)))
3050 goto out;
3051 error = __get_user(ss_sp, &uss->ss_sp) |
3052 __get_user(ss_flags, &uss->ss_flags) |
3053 __get_user(ss_size, &uss->ss_size);
3054 if (error)
3055 goto out;
3057 error = -EPERM;
3058 if (on_sig_stack(sp))
3059 goto out;
3061 error = -EINVAL;
3063 * Note - this code used to test ss_flags incorrectly:
3064 * old code may have been written using ss_flags==0
3065 * to mean ss_flags==SS_ONSTACK (as this was the only
3066 * way that worked) - this fix preserves that older
3067 * mechanism.
3069 if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
3070 goto out;
3072 if (ss_flags == SS_DISABLE) {
3073 ss_size = 0;
3074 ss_sp = NULL;
3075 } else {
3076 error = -ENOMEM;
3077 if (ss_size < MINSIGSTKSZ)
3078 goto out;
3081 current->sas_ss_sp = (unsigned long) ss_sp;
3082 current->sas_ss_size = ss_size;
3085 error = 0;
3086 if (uoss) {
3087 error = -EFAULT;
3088 if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
3089 goto out;
3090 error = __put_user(oss.ss_sp, &uoss->ss_sp) |
3091 __put_user(oss.ss_size, &uoss->ss_size) |
3092 __put_user(oss.ss_flags, &uoss->ss_flags);
3095 out:
3096 return error;
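/*
 * Editor's example: establishing an alternate signal stack with the
 * sigaltstack() wrapper. ss_size must be at least MINSIGSTKSZ (the
 * -ENOMEM check above); ss_flags of 0 is the accepted legacy spelling
 * of SS_ONSTACK, per the comment above. Illustrative sketch only.
 */
#if 0 /* user-space example */
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	stack_t ss;

	ss.ss_sp = malloc(SIGSTKSZ);
	ss.ss_size = SIGSTKSZ;
	ss.ss_flags = 0;
	if (ss.ss_sp == NULL || sigaltstack(&ss, NULL) == -1) {
		perror("sigaltstack");
		return 1;
	}

	/* A handler installed with SA_ONSTACK now runs on ss.ss_sp,
	 * which is how a SIGSEGV from stack overflow can be caught. */
	return 0;
}
#endif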
3099 #ifdef __ARCH_WANT_SYS_SIGPENDING
3102 * sys_sigpending - examine pending signals
3103 * @set: where the mask of pending signals is returned
3105 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
3107 return do_sigpending(set, sizeof(*set));
3110 #endif
3112 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
3114 * sys_sigprocmask - examine and change blocked signals
3115 * @how: whether to add, remove, or set signals
3116 * @nset: signals to add or remove (if non-null)
3117 * @oset: previous value of signal mask if non-null
3119 * Some platforms have their own version with special arguments;
3120 * others support only sys_rt_sigprocmask.
3123 SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
3124 old_sigset_t __user *, oset)
3126 old_sigset_t old_set, new_set;
3127 sigset_t new_blocked;
3129 old_set = current->blocked.sig[0];
3131 if (nset) {
3132 if (copy_from_user(&new_set, nset, sizeof(*nset)))
3133 return -EFAULT;
3134 new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));
3136 new_blocked = current->blocked;
3138 switch (how) {
3139 case SIG_BLOCK:
3140 sigaddsetmask(&new_blocked, new_set);
3141 break;
3142 case SIG_UNBLOCK:
3143 sigdelsetmask(&new_blocked, new_set);
3144 break;
3145 case SIG_SETMASK:
3146 new_blocked.sig[0] = new_set;
3147 break;
3148 default:
3149 return -EINVAL;
3152 __set_current_blocked(&new_blocked);
3155 if (oset) {
3156 if (copy_to_user(oset, &old_set, sizeof(*oset)))
3157 return -EFAULT;
3160 return 0;
3162 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
3164 #ifdef __ARCH_WANT_SYS_RT_SIGACTION
3166 * sys_rt_sigaction - alter an action taken by a process
3167 * @sig: signal to be sent
3168 * @act: new sigaction
3169 * @oact: used to save the previous sigaction
3170 * @sigsetsize: size of sigset_t type
3172 SYSCALL_DEFINE4(rt_sigaction, int, sig,
3173 const struct sigaction __user *, act,
3174 struct sigaction __user *, oact,
3175 size_t, sigsetsize)
3177 struct k_sigaction new_sa, old_sa;
3178 int ret = -EINVAL;
3180 /* XXX: Don't preclude handling different sized sigset_t's. */
3181 if (sigsetsize != sizeof(sigset_t))
3182 goto out;
3184 if (act) {
3185 if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
3186 return -EFAULT;
3189 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
3191 if (!ret && oact) {
3192 if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
3193 return -EFAULT;
3195 out:
3196 return ret;
3198 #endif /* __ARCH_WANT_SYS_RT_SIGACTION */
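/*
 * Editor's example: the POSIX 3.3.1.3 behavior implemented in
 * do_sigaction() above, observed from user space: setting SIG_IGN
 * discards a pending (even blocked) instance of the signal.
 * Illustrative sketch only.
 */
#if 0 /* user-space example */
#include <signal.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	sigset_t set, pend;
	struct sigaction sa;

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);
	raise(SIGUSR1);			/* pending and blocked */

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = SIG_IGN;
	sigaction(SIGUSR1, &sa, NULL);	/* rm_from_queue_full() runs */

	sigpending(&pend);
	printf("still pending? %s\n",
	       sigismember(&pend, SIGUSR1) ? "yes" : "no (discarded)");
	return 0;
}
#endif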
3200 #ifdef __ARCH_WANT_SYS_SGETMASK
3203 * For backwards compatibility. Functionality superseded by sigprocmask.
3205 SYSCALL_DEFINE0(sgetmask)
3207 /* SMP safe */
3208 return current->blocked.sig[0];
3211 SYSCALL_DEFINE1(ssetmask, int, newmask)
3213 int old = current->blocked.sig[0];
3214 sigset_t newset;
3215 siginitset(&newset, newmask & ~(sigmask(SIGKILL) | sigmask(SIGSTOP)));
3216 set_current_blocked(&newset);
3218 return old;
3220 #endif /* __ARCH_WANT_SYS_SGETMASK */
3222 #ifdef __ARCH_WANT_SYS_SIGNAL
3224 * For backwards compatibility. Functionality superseded by sigaction.
3226 SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
3228 struct k_sigaction new_sa, old_sa;
3229 int ret;
3231 new_sa.sa.sa_handler = handler;
3232 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
3233 sigemptyset(&new_sa.sa.sa_mask);
3235 ret = do_sigaction(sig, &new_sa, &old_sa);
3237 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
3239 #endif /* __ARCH_WANT_SYS_SIGNAL */
3241 #ifdef __ARCH_WANT_SYS_PAUSE
3243 SYSCALL_DEFINE0(pause)
3245 while (!signal_pending(current)) {
3246 current->state = TASK_INTERRUPTIBLE;
3247 schedule();
3249 return -ERESTARTNOHAND;
3252 #endif
3254 int sigsuspend(sigset_t *set)
3256 current->saved_sigmask = current->blocked;
3257 set_current_blocked(set);
3259 current->state = TASK_INTERRUPTIBLE;
3260 schedule();
3261 set_restore_sigmask();
3262 return -ERESTARTNOHAND;
3265 #ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
3267 * sys_rt_sigsuspend - replace the signal mask with the @unewset
3268 * value until a signal is received
3269 * @unewset: new signal mask value
3270 * @sigsetsize: size of sigset_t type
3272 SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
3274 sigset_t newset;
3276 /* XXX: Don't preclude handling different sized sigset_t's. */
3277 if (sigsetsize != sizeof(sigset_t))
3278 return -EINVAL;
3280 if (copy_from_user(&newset, unewset, sizeof(newset)))
3281 return -EFAULT;
3282 return sigsuspend(&newset);
3284 #endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
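/*
 * Editor's example: the classic sigsuspend() pattern that the
 * saved_sigmask/set_restore_sigmask() dance above supports: block the
 * signal, test a flag, then atomically unblock and sleep, with the
 * original mask restored on return. Illustrative sketch only.
 */
#if 0 /* user-space example */
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t got_usr1;

static void on_usr1(int sig)
{
	(void)sig;
	got_usr1 = 1;
}

int main(void)
{
	sigset_t block, wait_mask;

	signal(SIGUSR1, on_usr1);

	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	sigprocmask(SIG_BLOCK, &block, &wait_mask);	/* old mask saved */

	printf("pid %d: kill -USR1 me\n", (int)getpid());
	while (!got_usr1)
		sigsuspend(&wait_mask);	/* unblock + sleep, atomically */

	printf("woken by SIGUSR1\n");
	return 0;
}
#endif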
3286 __attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
3288 return NULL;
3291 void __init signals_init(void)
3293 sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
3296 #ifdef CONFIG_KGDB_KDB
3297 #include <linux/kdb.h>
3299 * kdb_send_sig_info - Allows kdb to send signals without exposing
3300 * signal internals. This function checks if the required locks are
3301 * available before calling the main signal code, to avoid kdb
3302 * deadlocks.
3304 void
3305 kdb_send_sig_info(struct task_struct *t, struct siginfo *info)
3307 static struct task_struct *kdb_prev_t;
3308 int sig, new_t;
3309 if (!spin_trylock(&t->sighand->siglock)) {
3310 kdb_printf("Can't do kill command now.\n"
3311 "The sigmask lock is held somewhere else in "
3312 "kernel, try again later\n");
3313 return;
3315 spin_unlock(&t->sighand->siglock);
3316 new_t = kdb_prev_t != t;
3317 kdb_prev_t = t;
3318 if (t->state != TASK_RUNNING && new_t) {
3319 kdb_printf("Process is not RUNNING, sending a signal from "
3320 "kdb risks deadlock\n"
3321 "on the run queue locks. "
3322 "The signal has _not_ been sent.\n"
3323 "Reissue the kill command if you want to risk "
3324 "the deadlock.\n");
3325 return;
3327 sig = info->si_signo;
3328 if (send_sig_info(sig, info, t))
3329 kdb_printf("Fail to deliver Signal %d to process %d.\n",
3330 sig, t->pid);
3331 else
3332 kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
3334 #endif /* CONFIG_KGDB_KDB */