/*
 * linux/kernel/signal.c
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 *
 * 1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 * 2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>

#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include "audit.h"	/* audit_signal_info() */

/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

static int sig_ignored(struct task_struct *t, int sig)
{
	void __user *handler;

	/*
	 * Tracers always want to know about signals..
	 */
	if (t->ptrace & PT_PTRACED)
		return 0;

	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig))
		return 0;

	/* Is it explicitly or implicitly ignored? */
	handler = t->sighand->action[sig-1].sa.sa_handler;
	return handler == SIG_IGN ||
		(handler == SIG_DFL && sig_kernel_ignore(sig));
}

/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

static int recalc_sigpending_tsk(struct task_struct *t)
{
	if (t->signal->group_stop_count > 0 ||
	    (freezing(t)) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return 1;
	}
	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here, and only callers who know they should do.
	 */
	return 0;
}

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current, the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current))
		clear_thread_flag(TIF_SIGPENDING);
}

/* Given the mask, find the first available signal that should be serviced. */

int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;
	switch (_NSIG_WORDS) {
	default:
		for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
			if ((x = *s &~ *m) != 0) {
				sig = ffz(~x) + i*_NSIG_BPW + 1;
				break;
			}
		break;

	case 2: if ((x = s[0] &~ m[0]) != 0)
			sig = 1;
		else if ((x = s[1] &~ m[1]) != 0)
			sig = _NSIG_BPW + 1;
		else
			break;
		sig += ffz(~x);
		break;

	case 1: if ((x = *s &~ *m) != 0)
			sig = ffz(~x) + 1;
		break;
	}

	return sig;
}
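
/*
 * Worked example (illustrative, added for clarity): with SIGINT (2) and
 * SIGTERM (15) pending and SIGINT blocked, pending->signal.sig[0] has bits
 * 1 and 14 set while mask->sig[0] has bit 1 set, so x ends up with only
 * bit 14 set and next_signal() returns ffz(~x) + 1 = 15 -- the
 * lowest-numbered signal that is pending and not blocked.
 */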

static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
					 int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;

	/*
	 * In order to avoid problems with "switch_user()", we want to make
	 * sure that the compiler doesn't re-load "t->user"
	 */
	user = t->user;
	barrier();
	atomic_inc(&user->sigpending);
	if (override_rlimit ||
	    atomic_read(&user->sigpending) <=
			t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	if (unlikely(q == NULL)) {
		atomic_dec(&user->sigpending);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = get_uid(user);
	}
	return(q);
}

static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}

void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for a task.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}

void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}

/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}

/* Notify the system that a driver wants to block all signals for this
 * process, and wants to be notified if any signals at all were to be
 * sent/acted upon.  If the notifier routine returns non-zero, then the
 * signal will be acted upon after all.  If the notifier routine returns 0,
 * then the signal will be blocked.  Only one block per process is
 * allowed.  priv is a pointer to private data that the notifier routine
 * can use to determine if the signal should be blocked or not.  */

void
block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier_mask = mask;
	current->notifier_data = priv;
	current->notifier = notifier;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}

/* Notify the system that blocking has ended. */

void
unblock_all_signals(void)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier = NULL;
	current->notifier_data = NULL;
	recalc_sigpending();
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
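
/*
 * Illustrative usage sketch (hypothetical driver code, not part of this
 * file): a driver that must not be interrupted while it holds a hardware
 * lock could do roughly
 *
 *	static int my_notifier(void *priv)
 *	{
 *		struct my_dev *dev = priv;
 *
 *		return dev->hw_lock_held ? 0 : 1;
 *	}
 *
 *	block_all_signals(my_notifier, dev, &my_sigmask);
 *	... critical section ...
 *	unblock_all_signals();
 *
 * where returning 0 keeps the signal blocked and non-zero lets it through.
 * Only signals that are members of the mask handed to block_all_signals()
 * are routed through the notifier; see the notifier check in
 * __dequeue_signal() below.
 */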
293 static int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
295 struct sigqueue *q, *first = NULL;
296 int still_pending = 0;
298 if (unlikely(!sigismember(&list->signal, sig)))
299 return 0;
302 * Collect the siginfo appropriate to this signal. Check if
303 * there is another siginfo for the same signal.
305 list_for_each_entry(q, &list->list, list) {
306 if (q->info.si_signo == sig) {
307 if (first) {
308 still_pending = 1;
309 break;
311 first = q;
314 if (first) {
315 list_del_init(&first->list);
316 copy_siginfo(info, &first->info);
317 __sigqueue_free(first);
318 if (!still_pending)
319 sigdelset(&list->signal, sig);
320 } else {
322 /* Ok, it wasn't in the queue. This must be
323 a fast-pathed signal or we must have been
324 out of queue space. So zero out the info.
326 sigdelset(&list->signal, sig);
327 info->si_signo = sig;
328 info->si_errno = 0;
329 info->si_code = 0;
330 info->si_pid = 0;
331 info->si_uid = 0;
333 return 1;
336 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
337 siginfo_t *info)
339 int sig = next_signal(pending, mask);
341 if (sig) {
342 if (current->notifier) {
343 if (sigismember(current->notifier_mask, sig)) {
344 if (!(current->notifier)(current->notifier_data)) {
345 clear_thread_flag(TIF_SIGPENDING);
346 return 0;
351 if (!collect_signal(sig, pending, info))
352 sig = 0;
355 return sig;

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	int signr = 0;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	if (tsk == current)
		signr = __dequeue_signal(&tsk->pending, mask, info);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info);
		/*
		 * itimer signal ?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavy loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr.tv64 != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
	}
	if (likely(tsk == current))
		recalc_sigpending();
	if (signr && unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT))
			tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
	}
	if (signr &&
	    ((info->si_code & __SI_MASK) == __SI_TIMER) &&
	    info->si_sys_private) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		do_schedule_next_timer(info);
		spin_lock(&tsk->sighand->siglock);
	}
	return signr;
}
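
/*
 * Typical caller pattern (illustrative sketch, see sys_rt_sigtimedwait()
 * and get_signal_to_deliver() below for the real in-tree users): the
 * siglock must be held around the call, e.g.
 *
 *	spin_lock_irq(&current->sighand->siglock);
 *	signr = dequeue_signal(current, &mask, &info);
 *	spin_unlock_irq(&current->sighand->siglock);
 */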
435 * Tell a process that it has a new active signal..
437 * NOTE! we rely on the previous spin_lock to
438 * lock interrupts for us! We can only be called with
439 * "siglock" held, and the local interrupt must
440 * have been disabled when that got acquired!
442 * No need to set need_resched since signal event passing
443 * goes through ->blocked
445 void signal_wake_up(struct task_struct *t, int resume)
447 unsigned int mask;
449 set_tsk_thread_flag(t, TIF_SIGPENDING);
452 * For SIGKILL, we want to wake it up in the stopped/traced case.
453 * We don't check t->state here because there is a race with it
454 * executing another processor and just now entering stopped state.
455 * By using wake_up_state, we ensure the process will wake up and
456 * handle its death signal.
458 mask = TASK_INTERRUPTIBLE;
459 if (resume)
460 mask |= TASK_STOPPED | TASK_TRACED;
461 if (!wake_up_state(t, mask))
462 kick_process(t);

/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 *
 * This version takes a sigset mask and looks at all signals,
 * not just those in the first mask word.
 */
static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return 0;

	signandsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}
/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int rm_from_queue(unsigned long mask, struct sigpending *s)
{
	struct sigqueue *q, *n;

	if (!sigtestsetmask(&s->signal, mask))
		return 0;

	sigdelsetmask(&s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (q->info.si_signo < SIGRTMIN &&
		    (mask & sigmask(q->info.si_signo))) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}

/*
 * Bad permissions for sending the signal
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	int error = -EINVAL;
	if (!valid_signal(sig))
		return error;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	error = -EPERM;
	if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
	    && ((sig != SIGCONT) ||
		(process_session(current) != process_session(t)))
	    && (current->euid ^ t->suid) && (current->euid ^ t->uid)
	    && (current->uid ^ t->suid) && (current->uid ^ t->uid)
	    && !capable(CAP_KILL))
		return error;

	return security_task_kill(t, info, sig, 0);
}
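
/*
 * In short (summary comment added for clarity): a user-generated signal is
 * allowed if the sender's uid or euid matches the target's uid or suid, if
 * the sender has CAP_KILL, or if the signal is SIGCONT sent within the same
 * session; otherwise the result is -EPERM.  The security module gets the
 * final word via security_task_kill().
 */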
542 /* forward decl */
543 static void do_notify_parent_cldstop(struct task_struct *tsk, int why);
546 * Handle magic process-wide effects of stop/continue signals.
547 * Unlike the signal actions, these happen immediately at signal-generation
548 * time regardless of blocking, ignoring, or handling. This does the
549 * actual continuing for SIGCONT, but not the actual stopping for stop
550 * signals. The process stop is done as a signal action for SIG_DFL.
552 static void handle_stop_signal(int sig, struct task_struct *p)
554 struct task_struct *t;
556 if (p->signal->flags & SIGNAL_GROUP_EXIT)
558 * The process is in the middle of dying already.
560 return;
562 if (sig_kernel_stop(sig)) {
564 * This is a stop signal. Remove SIGCONT from all queues.
566 rm_from_queue(sigmask(SIGCONT), &p->signal->shared_pending);
567 t = p;
568 do {
569 rm_from_queue(sigmask(SIGCONT), &t->pending);
570 t = next_thread(t);
571 } while (t != p);
572 } else if (sig == SIGCONT) {
574 * Remove all stop signals from all queues,
575 * and wake all threads.
577 if (unlikely(p->signal->group_stop_count > 0)) {
579 * There was a group stop in progress. We'll
580 * pretend it finished before we got here. We are
581 * obliged to report it to the parent: if the
582 * SIGSTOP happened "after" this SIGCONT, then it
583 * would have cleared this pending SIGCONT. If it
584 * happened "before" this SIGCONT, then the parent
585 * got the SIGCHLD about the stop finishing before
586 * the continue happened. We do the notification
587 * now, and it's as if the stop had finished and
588 * the SIGCHLD was pending on entry to this kill.
590 p->signal->group_stop_count = 0;
591 p->signal->flags = SIGNAL_STOP_CONTINUED;
592 spin_unlock(&p->sighand->siglock);
593 do_notify_parent_cldstop(p, CLD_STOPPED);
594 spin_lock(&p->sighand->siglock);
596 rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
597 t = p;
598 do {
599 unsigned int state;
600 rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
603 * If there is a handler for SIGCONT, we must make
604 * sure that no thread returns to user mode before
605 * we post the signal, in case it was the only
606 * thread eligible to run the signal handler--then
607 * it must not do anything between resuming and
608 * running the handler. With the TIF_SIGPENDING
609 * flag set, the thread will pause and acquire the
610 * siglock that we hold now and until we've queued
611 * the pending signal.
613 * Wake up the stopped thread _after_ setting
614 * TIF_SIGPENDING
616 state = TASK_STOPPED;
617 if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
618 set_tsk_thread_flag(t, TIF_SIGPENDING);
619 state |= TASK_INTERRUPTIBLE;
621 wake_up_state(t, state);
623 t = next_thread(t);
624 } while (t != p);
626 if (p->signal->flags & SIGNAL_STOP_STOPPED) {
628 * We were in fact stopped, and are now continued.
629 * Notify the parent with CLD_CONTINUED.
631 p->signal->flags = SIGNAL_STOP_CONTINUED;
632 p->signal->group_exit_code = 0;
633 spin_unlock(&p->sighand->siglock);
634 do_notify_parent_cldstop(p, CLD_CONTINUED);
635 spin_lock(&p->sighand->siglock);
636 } else {
638 * We are not stopped, but there could be a stop
639 * signal in the middle of being processed after
640 * being removed from the queue. Clear that too.
642 p->signal->flags = 0;
644 } else if (sig == SIGKILL) {
646 * Make sure that any pending stop signal already dequeued
647 * is undone by the wakeup for SIGKILL.
649 p->signal->flags = 0;
653 static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
654 struct sigpending *signals)
656 struct sigqueue * q = NULL;
657 int ret = 0;
660 * Deliver the signal to listening signalfds. This must be called
661 * with the sighand lock held.
663 signalfd_notify(t, sig);
666 * fast-pathed signals for kernel-internal things like SIGSTOP
667 * or SIGKILL.
669 if (info == SEND_SIG_FORCED)
670 goto out_set;
672 /* Real-time signals must be queued if sent by sigqueue, or
673 some other real-time mechanism. It is implementation
674 defined whether kill() does so. We attempt to do so, on
675 the principle of least surprise, but since kill is not
676 allowed to fail with EAGAIN when low on memory we just
677 make sure at least one signal gets delivered and don't
678 pass on the info struct. */
680 q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
681 (is_si_special(info) ||
682 info->si_code >= 0)));
683 if (q) {
684 list_add_tail(&q->list, &signals->list);
685 switch ((unsigned long) info) {
686 case (unsigned long) SEND_SIG_NOINFO:
687 q->info.si_signo = sig;
688 q->info.si_errno = 0;
689 q->info.si_code = SI_USER;
690 q->info.si_pid = current->pid;
691 q->info.si_uid = current->uid;
692 break;
693 case (unsigned long) SEND_SIG_PRIV:
694 q->info.si_signo = sig;
695 q->info.si_errno = 0;
696 q->info.si_code = SI_KERNEL;
697 q->info.si_pid = 0;
698 q->info.si_uid = 0;
699 break;
700 default:
701 copy_siginfo(&q->info, info);
702 break;
704 } else if (!is_si_special(info)) {
705 if (sig >= SIGRTMIN && info->si_code != SI_USER)
707 * Queue overflow, abort. We may abort if the signal was rt
708 * and sent by user using something other than kill().
710 return -EAGAIN;
713 out_set:
714 sigaddset(&signals->signal, sig);
715 return ret;

#define LEGACY_QUEUE(sigptr, sig) \
	(((sig) < SIGRTMIN) && sigismember(&(sigptr)->signal, (sig)))
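
/*
 * LEGACY_QUEUE() captures classic (non-realtime) semantics: at most one
 * instance of each signal below SIGRTMIN can be pending at a time.  For
 * example (illustrative), if SIGCHLD is already set in the pending mask,
 * a second SIGCHLD is silently dropped by the senders below, whereas
 * realtime signals (>= SIGRTMIN) are queued once per send.
 */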

int print_fatal_signals;

static void print_fatal_signal(struct pt_regs *regs, int signr)
{
	printk("%s/%d: potentially unexpected fatal signal %d.\n",
		current->comm, current->pid, signr);

#ifdef __i386__
	printk("code at %08lx: ", regs->eip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			__get_user(insn, (unsigned char *)(regs->eip + i));
			printk("%02x ", insn);
		}
	}
#endif
	printk("\n");
	show_regs(regs);
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option (&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);
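
/*
 * Usage note (added for clarity): booting with "print-fatal-signals=1" on
 * the kernel command line enables the diagnostic dump above whenever a task
 * is killed by an unhandled fatal signal (see get_signal_to_deliver()).
 */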
753 static int
754 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
756 int ret = 0;
758 BUG_ON(!irqs_disabled());
759 assert_spin_locked(&t->sighand->siglock);
761 /* Short-circuit ignored signals. */
762 if (sig_ignored(t, sig))
763 goto out;
765 /* Support queueing exactly one non-rt signal, so that we
766 can get more detailed information about the cause of
767 the signal. */
768 if (LEGACY_QUEUE(&t->pending, sig))
769 goto out;
771 ret = send_signal(sig, info, t, &t->pending);
772 if (!ret && !sigismember(&t->blocked, sig))
773 signal_wake_up(t, sig == SIGKILL);
774 out:
775 return ret;
779 * Force a signal that the process can't ignore: if necessary
780 * we unblock the signal and change any SIG_IGN to SIG_DFL.
782 * Note: If we unblock the signal, we always reset it to SIG_DFL,
783 * since we do not want to have a signal handler that was blocked
784 * be invoked when user space had explicitly blocked it.
786 * We don't want to have recursive SIGSEGV's etc, for example.
789 force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
791 unsigned long int flags;
792 int ret, blocked, ignored;
793 struct k_sigaction *action;
795 spin_lock_irqsave(&t->sighand->siglock, flags);
796 action = &t->sighand->action[sig-1];
797 ignored = action->sa.sa_handler == SIG_IGN;
798 blocked = sigismember(&t->blocked, sig);
799 if (blocked || ignored) {
800 action->sa.sa_handler = SIG_DFL;
801 if (blocked) {
802 sigdelset(&t->blocked, sig);
803 recalc_sigpending_and_wake(t);
806 ret = specific_send_sig_info(sig, info, t);
807 spin_unlock_irqrestore(&t->sighand->siglock, flags);
809 return ret;
812 void
813 force_sig_specific(int sig, struct task_struct *t)
815 force_sig_info(sig, SEND_SIG_FORCED, t);
819 * Test if P wants to take SIG. After we've checked all threads with this,
820 * it's equivalent to finding no threads not blocking SIG. Any threads not
821 * blocking SIG were ruled out because they are not running and already
822 * have pending signals. Such threads will dequeue from the shared queue
823 * as soon as they're available, so putting the signal on the shared queue
824 * will be equivalent to sending it to one such thread.
826 static inline int wants_signal(int sig, struct task_struct *p)
828 if (sigismember(&p->blocked, sig))
829 return 0;
830 if (p->flags & PF_EXITING)
831 return 0;
832 if (sig == SIGKILL)
833 return 1;
834 if (p->state & (TASK_STOPPED | TASK_TRACED))
835 return 0;
836 return task_curr(p) || !signal_pending(p);
839 static void
840 __group_complete_signal(int sig, struct task_struct *p)
842 struct task_struct *t;
845 * Now find a thread we can wake up to take the signal off the queue.
847 * If the main thread wants the signal, it gets first crack.
848 * Probably the least surprising to the average bear.
850 if (wants_signal(sig, p))
851 t = p;
852 else if (thread_group_empty(p))
854 * There is just one thread and it does not need to be woken.
855 * It will dequeue unblocked signals before it runs again.
857 return;
858 else {
860 * Otherwise try to find a suitable thread.
862 t = p->signal->curr_target;
863 if (t == NULL)
864 /* restart balancing at this thread */
865 t = p->signal->curr_target = p;
867 while (!wants_signal(sig, t)) {
868 t = next_thread(t);
869 if (t == p->signal->curr_target)
871 * No thread needs to be woken.
872 * Any eligible threads will see
873 * the signal in the queue soon.
875 return;
877 p->signal->curr_target = t;
881 * Found a killable thread. If the signal will be fatal,
882 * then start taking the whole group down immediately.
884 if (sig_fatal(p, sig) && !(p->signal->flags & SIGNAL_GROUP_EXIT) &&
885 !sigismember(&t->real_blocked, sig) &&
886 (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
888 * This signal will be fatal to the whole group.
890 if (!sig_kernel_coredump(sig)) {
892 * Start a group exit and wake everybody up.
893 * This way we don't have other threads
894 * running and doing things after a slower
895 * thread has the fatal signal pending.
897 p->signal->flags = SIGNAL_GROUP_EXIT;
898 p->signal->group_exit_code = sig;
899 p->signal->group_stop_count = 0;
900 t = p;
901 do {
902 sigaddset(&t->pending.signal, SIGKILL);
903 signal_wake_up(t, 1);
904 t = next_thread(t);
905 } while (t != p);
906 return;
910 * There will be a core dump. We make all threads other
911 * than the chosen one go into a group stop so that nothing
912 * happens until it gets scheduled, takes the signal off
913 * the shared queue, and does the core dump. This is a
914 * little more complicated than strictly necessary, but it
915 * keeps the signal state that winds up in the core dump
916 * unchanged from the death state, e.g. which thread had
917 * the core-dump signal unblocked.
919 rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
920 rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
921 p->signal->group_stop_count = 0;
922 p->signal->group_exit_task = t;
923 t = p;
924 do {
925 p->signal->group_stop_count++;
926 signal_wake_up(t, 0);
927 t = next_thread(t);
928 } while (t != p);
929 wake_up_process(p->signal->group_exit_task);
930 return;
934 * The signal is already in the shared-pending queue.
935 * Tell the chosen thread to wake up and dequeue it.
937 signal_wake_up(t, sig == SIGKILL);
938 return;
942 __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
944 int ret = 0;
946 assert_spin_locked(&p->sighand->siglock);
947 handle_stop_signal(sig, p);
949 /* Short-circuit ignored signals. */
950 if (sig_ignored(p, sig))
951 return ret;
953 if (LEGACY_QUEUE(&p->signal->shared_pending, sig))
954 /* This is a non-RT signal and we already have one queued. */
955 return ret;
958 * Put this signal on the shared-pending queue, or fail with EAGAIN.
959 * We always use the shared queue for process-wide signals,
960 * to avoid several races.
962 ret = send_signal(sig, info, p, &p->signal->shared_pending);
963 if (unlikely(ret))
964 return ret;
966 __group_complete_signal(sig, p);
967 return 0;
971 * Nuke all other threads in the group.
973 void zap_other_threads(struct task_struct *p)
975 struct task_struct *t;
977 p->signal->flags = SIGNAL_GROUP_EXIT;
978 p->signal->group_stop_count = 0;
980 if (thread_group_empty(p))
981 return;
983 for (t = next_thread(p); t != p; t = next_thread(t)) {
985 * Don't bother with already dead threads
987 if (t->exit_state)
988 continue;
990 /* SIGKILL will be handled before any pending SIGSTOP */
991 sigaddset(&t->pending.signal, SIGKILL);
992 signal_wake_up(t, 1);
997 * Must be called under rcu_read_lock() or with tasklist_lock read-held.
999 struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
1001 struct sighand_struct *sighand;
1003 for (;;) {
1004 sighand = rcu_dereference(tsk->sighand);
1005 if (unlikely(sighand == NULL))
1006 break;
1008 spin_lock_irqsave(&sighand->siglock, *flags);
1009 if (likely(sighand == tsk->sighand))
1010 break;
1011 spin_unlock_irqrestore(&sighand->siglock, *flags);
1014 return sighand;
1017 int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1019 unsigned long flags;
1020 int ret;
1022 ret = check_kill_permission(sig, info, p);
1024 if (!ret && sig) {
1025 ret = -ESRCH;
1026 if (lock_task_sighand(p, &flags)) {
1027 ret = __group_send_sig_info(sig, info, p);
1028 unlock_task_sighand(p, &flags);
1032 return ret;
1036 * kill_pgrp_info() sends a signal to a process group: this is what the tty
1037 * control characters do (^C, ^Z etc)
1040 int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
1042 struct task_struct *p = NULL;
1043 int retval, success;
1045 success = 0;
1046 retval = -ESRCH;
1047 do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
1048 int err = group_send_sig_info(sig, info, p);
1049 success |= !err;
1050 retval = err;
1051 } while_each_pid_task(pgrp, PIDTYPE_PGID, p);
1052 return success ? 0 : retval;
1055 int kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
1057 int retval;
1059 read_lock(&tasklist_lock);
1060 retval = __kill_pgrp_info(sig, info, pgrp);
1061 read_unlock(&tasklist_lock);
1063 return retval;
1066 int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
1068 int error;
1069 struct task_struct *p;
1071 rcu_read_lock();
1072 if (unlikely(sig_needs_tasklist(sig)))
1073 read_lock(&tasklist_lock);
1075 p = pid_task(pid, PIDTYPE_PID);
1076 error = -ESRCH;
1077 if (p)
1078 error = group_send_sig_info(sig, info, p);
1080 if (unlikely(sig_needs_tasklist(sig)))
1081 read_unlock(&tasklist_lock);
1082 rcu_read_unlock();
1083 return error;
1087 kill_proc_info(int sig, struct siginfo *info, pid_t pid)
1089 int error;
1090 rcu_read_lock();
1091 error = kill_pid_info(sig, info, find_pid(pid));
1092 rcu_read_unlock();
1093 return error;
1096 /* like kill_pid_info(), but doesn't use uid/euid of "current" */
1097 int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
1098 uid_t uid, uid_t euid, u32 secid)
1100 int ret = -EINVAL;
1101 struct task_struct *p;
1103 if (!valid_signal(sig))
1104 return ret;
1106 read_lock(&tasklist_lock);
1107 p = pid_task(pid, PIDTYPE_PID);
1108 if (!p) {
1109 ret = -ESRCH;
1110 goto out_unlock;
1112 if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
1113 && (euid != p->suid) && (euid != p->uid)
1114 && (uid != p->suid) && (uid != p->uid)) {
1115 ret = -EPERM;
1116 goto out_unlock;
1118 ret = security_task_kill(p, info, sig, secid);
1119 if (ret)
1120 goto out_unlock;
1121 if (sig && p->sighand) {
1122 unsigned long flags;
1123 spin_lock_irqsave(&p->sighand->siglock, flags);
1124 ret = __group_send_sig_info(sig, info, p);
1125 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1127 out_unlock:
1128 read_unlock(&tasklist_lock);
1129 return ret;
1131 EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);

/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct siginfo *info, int pid)
{
	int ret;
	rcu_read_lock();
	if (!pid) {
		ret = kill_pgrp_info(sig, info, task_pgrp(current));
	} else if (pid == -1) {
		int retval = 0, count = 0;
		struct task_struct *p;

		read_lock(&tasklist_lock);
		for_each_process(p) {
			if (p->pid > 1 && p->tgid != current->tgid) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		read_unlock(&tasklist_lock);
		ret = count ? retval : -ESRCH;
	} else if (pid < 0) {
		ret = kill_pgrp_info(sig, info, find_pid(-pid));
	} else {
		ret = kill_pid_info(sig, info, find_pid(pid));
	}
	rcu_read_unlock();
	return ret;
}
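
/*
 * Summary of the pid argument, mirroring kill(2) (comment added for
 * clarity): pid > 0 signals that process, pid == 0 the caller's process
 * group, pid == -1 every process the caller may signal except init and the
 * caller's own thread group, and pid < -1 the process group -pid.
 */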
1171 * These are for backward compatibility with the rest of the kernel source.
1175 * These two are the most common entry points. They send a signal
1176 * just to the specific thread.
1179 send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1181 int ret;
1182 unsigned long flags;
1185 * Make sure legacy kernel users don't send in bad values
1186 * (normal paths check this in check_kill_permission).
1188 if (!valid_signal(sig))
1189 return -EINVAL;
1192 * We need the tasklist lock even for the specific
1193 * thread case (when we don't need to follow the group
1194 * lists) in order to avoid races with "p->sighand"
1195 * going away or changing from under us.
1197 read_lock(&tasklist_lock);
1198 spin_lock_irqsave(&p->sighand->siglock, flags);
1199 ret = specific_send_sig_info(sig, info, p);
1200 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1201 read_unlock(&tasklist_lock);
1202 return ret;
1205 #define __si_special(priv) \
1206 ((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1209 send_sig(int sig, struct task_struct *p, int priv)
1211 return send_sig_info(sig, __si_special(priv), p);
1215 * This is the entry point for "process-wide" signals.
1216 * They will go to an appropriate thread in the thread group.
1219 send_group_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1221 int ret;
1222 read_lock(&tasklist_lock);
1223 ret = group_send_sig_info(sig, info, p);
1224 read_unlock(&tasklist_lock);
1225 return ret;
1228 void
1229 force_sig(int sig, struct task_struct *p)
1231 force_sig_info(sig, SEND_SIG_PRIV, p);
1235 * When things go south during signal handling, we
1236 * will force a SIGSEGV. And if the signal that caused
1237 * the problem was already a SIGSEGV, we'll want to
1238 * make sure we don't even try to deliver the signal..
1241 force_sigsegv(int sig, struct task_struct *p)
1243 if (sig == SIGSEGV) {
1244 unsigned long flags;
1245 spin_lock_irqsave(&p->sighand->siglock, flags);
1246 p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1247 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1249 force_sig(SIGSEGV, p);
1250 return 0;
1253 int kill_pgrp(struct pid *pid, int sig, int priv)
1255 return kill_pgrp_info(sig, __si_special(priv), pid);
1257 EXPORT_SYMBOL(kill_pgrp);
1259 int kill_pid(struct pid *pid, int sig, int priv)
1261 return kill_pid_info(sig, __si_special(priv), pid);
1263 EXPORT_SYMBOL(kill_pid);
1266 kill_proc(pid_t pid, int sig, int priv)
1268 return kill_proc_info(sig, __si_special(priv), pid);

/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of Posix Timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */

struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q;

	if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0)))
		q->flags |= SIGQUEUE_PREALLOC;
	return(q);
}

void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * If the signal is still pending remove it from the
	 * pending queue.
	 */
	if (unlikely(!list_empty(&q->list))) {
		spinlock_t *lock = &current->sighand->siglock;
		read_lock(&tasklist_lock);
		spin_lock_irqsave(lock, flags);
		if (!list_empty(&q->list))
			list_del_init(&q->list);
		spin_unlock_irqrestore(lock, flags);
		read_unlock(&tasklist_lock);
	}
	q->flags &= ~SIGQUEUE_PREALLOC;
	__sigqueue_free(q);
}
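
/*
 * Illustrative life cycle (a sketch, not code from this file): the POSIX
 * timer code allocates the queue entry up front so that timer expiry never
 * has to fail with -EAGAIN:
 *
 *	q = sigqueue_alloc();			at timer_create() time
 *	send_sigqueue(sig, q, task);		expiry, thread-directed signal
 *	send_group_sigqueue(sig, q, leader);	expiry, process-wide signal
 *	sigqueue_free(q);			at timer_delete() time
 */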
1311 int send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
1313 unsigned long flags;
1314 int ret = 0;
1316 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1319 * The rcu based delayed sighand destroy makes it possible to
1320 * run this without tasklist lock held. The task struct itself
1321 * cannot go away as create_timer did get_task_struct().
1323 * We return -1, when the task is marked exiting, so
1324 * posix_timer_event can redirect it to the group leader
1326 rcu_read_lock();
1328 if (!likely(lock_task_sighand(p, &flags))) {
1329 ret = -1;
1330 goto out_err;
1333 if (unlikely(!list_empty(&q->list))) {
1335 * If an SI_TIMER entry is already queued, just increment
1336 * the overrun count.
1338 BUG_ON(q->info.si_code != SI_TIMER);
1339 q->info.si_overrun++;
1340 goto out;
1342 /* Short-circuit ignored signals. */
1343 if (sig_ignored(p, sig)) {
1344 ret = 1;
1345 goto out;
1348 * Deliver the signal to listening signalfds. This must be called
1349 * with the sighand lock held.
1351 signalfd_notify(p, sig);
1353 list_add_tail(&q->list, &p->pending.list);
1354 sigaddset(&p->pending.signal, sig);
1355 if (!sigismember(&p->blocked, sig))
1356 signal_wake_up(p, sig == SIGKILL);
1358 out:
1359 unlock_task_sighand(p, &flags);
1360 out_err:
1361 rcu_read_unlock();
1363 return ret;
1367 send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
1369 unsigned long flags;
1370 int ret = 0;
1372 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1374 read_lock(&tasklist_lock);
1375 /* Since it_lock is held, p->sighand cannot be NULL. */
1376 spin_lock_irqsave(&p->sighand->siglock, flags);
1377 handle_stop_signal(sig, p);
1379 /* Short-circuit ignored signals. */
1380 if (sig_ignored(p, sig)) {
1381 ret = 1;
1382 goto out;
1385 if (unlikely(!list_empty(&q->list))) {
1387 * If an SI_TIMER entry is already queued, just increment
1388 * the overrun count. Other uses should not try to
1389 * send the signal multiple times.
1391 BUG_ON(q->info.si_code != SI_TIMER);
1392 q->info.si_overrun++;
1393 goto out;
1396 * Deliver the signal to listening signalfds. This must be called
1397 * with the sighand lock held.
1399 signalfd_notify(p, sig);
1402 * Put this signal on the shared-pending queue.
1403 * We always use the shared queue for process-wide signals,
1404 * to avoid several races.
1406 list_add_tail(&q->list, &p->signal->shared_pending.list);
1407 sigaddset(&p->signal->shared_pending.signal, sig);
1409 __group_complete_signal(sig, p);
1410 out:
1411 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1412 read_unlock(&tasklist_lock);
1413 return ret;
1417 * Wake up any threads in the parent blocked in wait* syscalls.
1419 static inline void __wake_up_parent(struct task_struct *p,
1420 struct task_struct *parent)
1422 wake_up_interruptible_sync(&parent->signal->wait_chldexit);
1426 * Let a parent know about the death of a child.
1427 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
1430 void do_notify_parent(struct task_struct *tsk, int sig)
1432 struct siginfo info;
1433 unsigned long flags;
1434 struct sighand_struct *psig;
1436 BUG_ON(sig == -1);
1438 /* do_notify_parent_cldstop should have been called instead. */
1439 BUG_ON(tsk->state & (TASK_STOPPED|TASK_TRACED));
1441 BUG_ON(!tsk->ptrace &&
1442 (tsk->group_leader != tsk || !thread_group_empty(tsk)));
1444 info.si_signo = sig;
1445 info.si_errno = 0;
1446 info.si_pid = tsk->pid;
1447 info.si_uid = tsk->uid;
1449 /* FIXME: find out whether or not this is supposed to be c*time. */
1450 info.si_utime = cputime_to_jiffies(cputime_add(tsk->utime,
1451 tsk->signal->utime));
1452 info.si_stime = cputime_to_jiffies(cputime_add(tsk->stime,
1453 tsk->signal->stime));
1455 info.si_status = tsk->exit_code & 0x7f;
1456 if (tsk->exit_code & 0x80)
1457 info.si_code = CLD_DUMPED;
1458 else if (tsk->exit_code & 0x7f)
1459 info.si_code = CLD_KILLED;
1460 else {
1461 info.si_code = CLD_EXITED;
1462 info.si_status = tsk->exit_code >> 8;
1465 psig = tsk->parent->sighand;
1466 spin_lock_irqsave(&psig->siglock, flags);
1467 if (!tsk->ptrace && sig == SIGCHLD &&
1468 (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
1469 (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
1471 * We are exiting and our parent doesn't care. POSIX.1
1472 * defines special semantics for setting SIGCHLD to SIG_IGN
1473 * or setting the SA_NOCLDWAIT flag: we should be reaped
1474 * automatically and not left for our parent's wait4 call.
1475 * Rather than having the parent do it as a magic kind of
1476 * signal handler, we just set this to tell do_exit that we
1477 * can be cleaned up without becoming a zombie. Note that
1478 * we still call __wake_up_parent in this case, because a
1479 * blocked sys_wait4 might now return -ECHILD.
1481 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
1482 * is implementation-defined: we do (if you don't want
1483 * it, just use SIG_IGN instead).
1485 tsk->exit_signal = -1;
1486 if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
1487 sig = 0;
1489 if (valid_signal(sig) && sig > 0)
1490 __group_send_sig_info(sig, &info, tsk->parent);
1491 __wake_up_parent(tsk, tsk->parent);
1492 spin_unlock_irqrestore(&psig->siglock, flags);
1495 static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
1497 struct siginfo info;
1498 unsigned long flags;
1499 struct task_struct *parent;
1500 struct sighand_struct *sighand;
1502 if (tsk->ptrace & PT_PTRACED)
1503 parent = tsk->parent;
1504 else {
1505 tsk = tsk->group_leader;
1506 parent = tsk->real_parent;
1509 info.si_signo = SIGCHLD;
1510 info.si_errno = 0;
1511 info.si_pid = tsk->pid;
1512 info.si_uid = tsk->uid;
1514 /* FIXME: find out whether or not this is supposed to be c*time. */
1515 info.si_utime = cputime_to_jiffies(tsk->utime);
1516 info.si_stime = cputime_to_jiffies(tsk->stime);
1518 info.si_code = why;
1519 switch (why) {
1520 case CLD_CONTINUED:
1521 info.si_status = SIGCONT;
1522 break;
1523 case CLD_STOPPED:
1524 info.si_status = tsk->signal->group_exit_code & 0x7f;
1525 break;
1526 case CLD_TRAPPED:
1527 info.si_status = tsk->exit_code & 0x7f;
1528 break;
1529 default:
1530 BUG();
1533 sighand = parent->sighand;
1534 spin_lock_irqsave(&sighand->siglock, flags);
1535 if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
1536 !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
1537 __group_send_sig_info(SIGCHLD, &info, parent);
1539 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
1541 __wake_up_parent(tsk, parent);
1542 spin_unlock_irqrestore(&sighand->siglock, flags);
1545 static inline int may_ptrace_stop(void)
1547 if (!likely(current->ptrace & PT_PTRACED))
1548 return 0;
1550 if (unlikely(current->parent == current->real_parent &&
1551 (current->ptrace & PT_ATTACHED)))
1552 return 0;
1554 if (unlikely(current->signal == current->parent->signal) &&
1555 unlikely(current->signal->flags & SIGNAL_GROUP_EXIT))
1556 return 0;
1559 * Are we in the middle of do_coredump?
1560 * If so and our tracer is also part of the coredump stopping
1561 * is a deadlock situation, and pointless because our tracer
1562 * is dead so don't allow us to stop.
1563 * If SIGKILL was already sent before the caller unlocked
1564 * ->siglock we must see ->core_waiters != 0. Otherwise it
1565 * is safe to enter schedule().
1567 if (unlikely(current->mm->core_waiters) &&
1568 unlikely(current->mm == current->parent->mm))
1569 return 0;
1571 return 1;
1575 * This must be called with current->sighand->siglock held.
1577 * This should be the path for all ptrace stops.
1578 * We always set current->last_siginfo while stopped here.
1579 * That makes it a way to test a stopped process for
1580 * being ptrace-stopped vs being job-control-stopped.
1582 * If we actually decide not to stop at all because the tracer is gone,
1583 * we leave nostop_code in current->exit_code.
1585 static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
1588 * If there is a group stop in progress,
1589 * we must participate in the bookkeeping.
1591 if (current->signal->group_stop_count > 0)
1592 --current->signal->group_stop_count;
1594 current->last_siginfo = info;
1595 current->exit_code = exit_code;
1597 /* Let the debugger run. */
1598 set_current_state(TASK_TRACED);
1599 spin_unlock_irq(&current->sighand->siglock);
1600 try_to_freeze();
1601 read_lock(&tasklist_lock);
1602 if (may_ptrace_stop()) {
1603 do_notify_parent_cldstop(current, CLD_TRAPPED);
1604 read_unlock(&tasklist_lock);
1605 schedule();
1606 } else {
1608 * By the time we got the lock, our tracer went away.
1609 * Don't stop here.
1611 read_unlock(&tasklist_lock);
1612 set_current_state(TASK_RUNNING);
1613 current->exit_code = nostop_code;
1617 * We are back. Now reacquire the siglock before touching
1618 * last_siginfo, so that we are sure to have synchronized with
1619 * any signal-sending on another CPU that wants to examine it.
1621 spin_lock_irq(&current->sighand->siglock);
1622 current->last_siginfo = NULL;
1625 * Queued signals ignored us while we were stopped for tracing.
1626 * So check for any that we should take before resuming user mode.
1627 * This sets TIF_SIGPENDING, but never clears it.
1629 recalc_sigpending_tsk(current);
1632 void ptrace_notify(int exit_code)
1634 siginfo_t info;
1636 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
1638 memset(&info, 0, sizeof info);
1639 info.si_signo = SIGTRAP;
1640 info.si_code = exit_code;
1641 info.si_pid = current->pid;
1642 info.si_uid = current->uid;
1644 /* Let the debugger run. */
1645 spin_lock_irq(&current->sighand->siglock);
1646 ptrace_stop(exit_code, 0, &info);
1647 spin_unlock_irq(&current->sighand->siglock);
1650 static void
1651 finish_stop(int stop_count)
1654 * If there are no other threads in the group, or if there is
1655 * a group stop in progress and we are the last to stop,
1656 * report to the parent. When ptraced, every thread reports itself.
1658 if (stop_count == 0 || (current->ptrace & PT_PTRACED)) {
1659 read_lock(&tasklist_lock);
1660 do_notify_parent_cldstop(current, CLD_STOPPED);
1661 read_unlock(&tasklist_lock);
1664 do {
1665 schedule();
1666 } while (try_to_freeze());
1668 * Now we don't run again until continued.
1670 current->exit_code = 0;
1674 * This performs the stopping for SIGSTOP and other stop signals.
1675 * We have to stop all threads in the thread group.
1676 * Returns nonzero if we've actually stopped and released the siglock.
1677 * Returns zero if we didn't stop and still hold the siglock.
1679 static int do_signal_stop(int signr)
1681 struct signal_struct *sig = current->signal;
1682 int stop_count;
1684 if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED))
1685 return 0;
1687 if (sig->group_stop_count > 0) {
1689 * There is a group stop in progress. We don't need to
1690 * start another one.
1692 stop_count = --sig->group_stop_count;
1693 } else {
1695 * There is no group stop already in progress.
1696 * We must initiate one now.
1698 struct task_struct *t;
1700 sig->group_exit_code = signr;
1702 stop_count = 0;
1703 for (t = next_thread(current); t != current; t = next_thread(t))
1705 * Setting state to TASK_STOPPED for a group
1706 * stop is always done with the siglock held,
1707 * so this check has no races.
1709 if (!t->exit_state &&
1710 !(t->state & (TASK_STOPPED|TASK_TRACED))) {
1711 stop_count++;
1712 signal_wake_up(t, 0);
1714 sig->group_stop_count = stop_count;
1717 if (stop_count == 0)
1718 sig->flags = SIGNAL_STOP_STOPPED;
1719 current->exit_code = sig->group_exit_code;
1720 __set_current_state(TASK_STOPPED);
1722 spin_unlock_irq(&current->sighand->siglock);
1723 finish_stop(stop_count);
1724 return 1;
1728 * Do appropriate magic when group_stop_count > 0.
1729 * We return nonzero if we stopped, after releasing the siglock.
1730 * We return zero if we still hold the siglock and should look
1731 * for another signal without checking group_stop_count again.
1733 static int handle_group_stop(void)
1735 int stop_count;
1737 if (current->signal->group_exit_task == current) {
1739 * Group stop is so we can do a core dump,
1740 * We are the initiating thread, so get on with it.
1742 current->signal->group_exit_task = NULL;
1743 return 0;
1746 if (current->signal->flags & SIGNAL_GROUP_EXIT)
1748 * Group stop is so another thread can do a core dump,
1749 * or else we are racing against a death signal.
1750 * Just punt the stop so we can get the next signal.
1752 return 0;
1755 * There is a group stop in progress. We stop
1756 * without any associated signal being in our queue.
1758 stop_count = --current->signal->group_stop_count;
1759 if (stop_count == 0)
1760 current->signal->flags = SIGNAL_STOP_STOPPED;
1761 current->exit_code = current->signal->group_exit_code;
1762 set_current_state(TASK_STOPPED);
1763 spin_unlock_irq(&current->sighand->siglock);
1764 finish_stop(stop_count);
1765 return 1;
1768 int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
1769 struct pt_regs *regs, void *cookie)
1771 sigset_t *mask = &current->blocked;
1772 int signr = 0;
1774 try_to_freeze();
1776 relock:
1777 spin_lock_irq(&current->sighand->siglock);
1778 for (;;) {
1779 struct k_sigaction *ka;
1781 if (unlikely(current->signal->group_stop_count > 0) &&
1782 handle_group_stop())
1783 goto relock;
1785 signr = dequeue_signal(current, mask, info);
1787 if (!signr)
1788 break; /* will return 0 */
1790 if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
1791 ptrace_signal_deliver(regs, cookie);
1793 /* Let the debugger run. */
1794 ptrace_stop(signr, signr, info);
1796 /* We're back. Did the debugger cancel the sig? */
1797 signr = current->exit_code;
1798 if (signr == 0)
1799 continue;
1801 current->exit_code = 0;
1803 /* Update the siginfo structure if the signal has
1804 changed. If the debugger wanted something
1805 specific in the siginfo structure then it should
1806 have updated *info via PTRACE_SETSIGINFO. */
1807 if (signr != info->si_signo) {
1808 info->si_signo = signr;
1809 info->si_errno = 0;
1810 info->si_code = SI_USER;
1811 info->si_pid = current->parent->pid;
1812 info->si_uid = current->parent->uid;
1815 /* If the (new) signal is now blocked, requeue it. */
1816 if (sigismember(&current->blocked, signr)) {
1817 specific_send_sig_info(signr, info, current);
1818 continue;
1822 ka = &current->sighand->action[signr-1];
1823 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
1824 continue;
1825 if (ka->sa.sa_handler != SIG_DFL) {
1826 /* Run the handler. */
1827 *return_ka = *ka;
1829 if (ka->sa.sa_flags & SA_ONESHOT)
1830 ka->sa.sa_handler = SIG_DFL;
1832 break; /* will return non-zero "signr" value */
1836 * Now we are doing the default action for this signal.
1838 if (sig_kernel_ignore(signr)) /* Default is nothing. */
1839 continue;
1842 * Init of a pid space gets no signals it doesn't want from
1843 * within that pid space. It can of course get signals from
1844 * its parent pid space.
1846 if (current == child_reaper(current))
1847 continue;
1849 if (sig_kernel_stop(signr)) {
1851 * The default action is to stop all threads in
1852 * the thread group. The job control signals
1853 * do nothing in an orphaned pgrp, but SIGSTOP
1854 * always works. Note that siglock needs to be
1855 * dropped during the call to is_orphaned_pgrp()
1856 * because of lock ordering with tasklist_lock.
1857 * This allows an intervening SIGCONT to be posted.
1858 * We need to check for that and bail out if necessary.
1860 if (signr != SIGSTOP) {
1861 spin_unlock_irq(&current->sighand->siglock);
1863 /* signals can be posted during this window */
1865 if (is_current_pgrp_orphaned())
1866 goto relock;
1868 spin_lock_irq(&current->sighand->siglock);
1871 if (likely(do_signal_stop(signr))) {
1872 /* It released the siglock. */
1873 goto relock;
1877 * We didn't actually stop, due to a race
1878 * with SIGCONT or something like that.
1880 continue;
1883 spin_unlock_irq(&current->sighand->siglock);
1886 * Anything else is fatal, maybe with a core dump.
1888 current->flags |= PF_SIGNALED;
1889 if ((signr != SIGKILL) && print_fatal_signals)
1890 print_fatal_signal(regs, signr);
1891 if (sig_kernel_coredump(signr)) {
1893 * If it was able to dump core, this kills all
1894 * other threads in the group and synchronizes with
1895 * their demise. If we lost the race with another
1896 * thread getting here, it set group_exit_code
1897 * first and our do_group_exit call below will use
1898 * that value and ignore the one we pass it.
1900 do_coredump((long)signr, signr, regs);
1904 * Death signals, no core dump.
1906 do_group_exit(signr);
1907 /* NOTREACHED */
1909 spin_unlock_irq(&current->sighand->siglock);
1910 return signr;
1913 EXPORT_SYMBOL(recalc_sigpending);
1914 EXPORT_SYMBOL_GPL(dequeue_signal);
1915 EXPORT_SYMBOL(flush_signals);
1916 EXPORT_SYMBOL(force_sig);
1917 EXPORT_SYMBOL(kill_proc);
1918 EXPORT_SYMBOL(ptrace_notify);
1919 EXPORT_SYMBOL(send_sig);
1920 EXPORT_SYMBOL(send_sig_info);
1921 EXPORT_SYMBOL(sigprocmask);
1922 EXPORT_SYMBOL(block_all_signals);
1923 EXPORT_SYMBOL(unblock_all_signals);

/*
 * System call entry points.
 */

asmlinkage long sys_restart_syscall(void)
{
	struct restart_block *restart = &current_thread_info()->restart_block;
	return restart->fn(restart);
}

long do_no_restart_syscall(struct restart_block *param)
{
	return -EINTR;
}

/*
 * We don't need to get the kernel lock - this is all local to this
 * particular thread.. (and that's good, because this is _heavily_
 * used by various programs)
 */

/*
 * This is also useful for kernel threads that want to temporarily
 * (or permanently) block certain signals.
 *
 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
 * interface happily blocks "unblockable" signals like SIGKILL
 * and friends.
 */
int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
{
	int error;

	spin_lock_irq(&current->sighand->siglock);
	if (oldset)
		*oldset = current->blocked;

	error = 0;
	switch (how) {
	case SIG_BLOCK:
		sigorsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_UNBLOCK:
		signandsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_SETMASK:
		current->blocked = *set;
		break;
	default:
		error = -EINVAL;
	}
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	return error;
}
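
/*
 * Illustrative sketch (hypothetical kernel-thread code, not part of this
 * file): a kernel thread that wants to ignore everything except SIGKILL
 * could do
 *
 *	sigset_t blocked;
 *
 *	siginitsetinv(&blocked, sigmask(SIGKILL));
 *	sigprocmask(SIG_SETMASK, &blocked, NULL);
 *
 * As the comment above warns, this kernel-side interface would just as
 * happily block SIGKILL itself if asked to.
 */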
1983 asmlinkage long
1984 sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize)
1986 int error = -EINVAL;
1987 sigset_t old_set, new_set;
1989 /* XXX: Don't preclude handling different sized sigset_t's. */
1990 if (sigsetsize != sizeof(sigset_t))
1991 goto out;
1993 if (set) {
1994 error = -EFAULT;
1995 if (copy_from_user(&new_set, set, sizeof(*set)))
1996 goto out;
1997 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
1999 error = sigprocmask(how, &new_set, &old_set);
2000 if (error)
2001 goto out;
2002 if (oset)
2003 goto set_old;
2004 } else if (oset) {
2005 spin_lock_irq(&current->sighand->siglock);
2006 old_set = current->blocked;
2007 spin_unlock_irq(&current->sighand->siglock);
2009 set_old:
2010 error = -EFAULT;
2011 if (copy_to_user(oset, &old_set, sizeof(*oset)))
2012 goto out;
2014 error = 0;
2015 out:
2016 return error;
2019 long do_sigpending(void __user *set, unsigned long sigsetsize)
2021 long error = -EINVAL;
2022 sigset_t pending;
2024 if (sigsetsize > sizeof(sigset_t))
2025 goto out;
2027 spin_lock_irq(&current->sighand->siglock);
2028 sigorsets(&pending, &current->pending.signal,
2029 &current->signal->shared_pending.signal);
2030 spin_unlock_irq(&current->sighand->siglock);
2032 /* Outside the lock because only this thread touches it. */
2033 sigandsets(&pending, &current->blocked, &pending);
2035 error = -EFAULT;
2036 if (!copy_to_user(set, &pending, sigsetsize))
2037 error = 0;
2039 out:
2040 return error;
2043 asmlinkage long
2044 sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize)
2046 return do_sigpending(set, sigsetsize);
2049 #ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2051 int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
2053 int err;
2055 if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
2056 return -EFAULT;
2057 if (from->si_code < 0)
2058 return __copy_to_user(to, from, sizeof(siginfo_t))
2059 ? -EFAULT : 0;
2061 * If you change siginfo_t structure, please be sure
2062 * this code is fixed accordingly.
2063 * Please remember to update the signalfd_copyinfo() function
2064 * inside fs/signalfd.c too, in case siginfo_t changes.
2065 * It should never copy any pad contained in the structure
2066 * to avoid security leaks, but must copy the generic
2067 * 3 ints plus the relevant union member.
2069 err = __put_user(from->si_signo, &to->si_signo);
2070 err |= __put_user(from->si_errno, &to->si_errno);
2071 err |= __put_user((short)from->si_code, &to->si_code);
2072 switch (from->si_code & __SI_MASK) {
2073 case __SI_KILL:
2074 err |= __put_user(from->si_pid, &to->si_pid);
2075 err |= __put_user(from->si_uid, &to->si_uid);
2076 break;
2077 case __SI_TIMER:
2078 err |= __put_user(from->si_tid, &to->si_tid);
2079 err |= __put_user(from->si_overrun, &to->si_overrun);
2080 err |= __put_user(from->si_ptr, &to->si_ptr);
2081 break;
2082 case __SI_POLL:
2083 err |= __put_user(from->si_band, &to->si_band);
2084 err |= __put_user(from->si_fd, &to->si_fd);
2085 break;
2086 case __SI_FAULT:
2087 err |= __put_user(from->si_addr, &to->si_addr);
2088 #ifdef __ARCH_SI_TRAPNO
2089 err |= __put_user(from->si_trapno, &to->si_trapno);
2090 #endif
2091 break;
2092 case __SI_CHLD:
2093 err |= __put_user(from->si_pid, &to->si_pid);
2094 err |= __put_user(from->si_uid, &to->si_uid);
2095 err |= __put_user(from->si_status, &to->si_status);
2096 err |= __put_user(from->si_utime, &to->si_utime);
2097 err |= __put_user(from->si_stime, &to->si_stime);
2098 break;
2099 case __SI_RT: /* This is not generated by the kernel as of now. */
2100 case __SI_MESGQ: /* But this is */
2101 err |= __put_user(from->si_pid, &to->si_pid);
2102 err |= __put_user(from->si_uid, &to->si_uid);
2103 err |= __put_user(from->si_ptr, &to->si_ptr);
2104 break;
2105 default: /* this is just in case for now ... */
2106 err |= __put_user(from->si_pid, &to->si_pid);
2107 err |= __put_user(from->si_uid, &to->si_uid);
2108 break;
2110 return err;
2113 #endif
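/*
 * Illustration only, not part of this file: a userspace sketch of the
 * receiving end of this copy. With SA_SIGINFO the handler sees exactly
 * the generic fields plus the union member selected by si_code (here
 * the __SI_CHLD layout). The function names are made up for the
 * example, and printf() is used only for illustration (real handlers
 * should stick to async-signal-safe calls).
 *
 *    #include <signal.h>
 *    #include <stdio.h>
 *
 *    static void chld(int sig, siginfo_t *si, void *ctx)
 *    {
 *        // For SIGCHLD, si_pid, si_uid and si_status are valid.
 *        printf("child %d exited, status %d\n",
 *               (int)si->si_pid, si->si_status);
 *    }
 *
 *    static void install(void)
 *    {
 *        struct sigaction sa = { 0 };
 *
 *        sa.sa_sigaction = chld;
 *        sa.sa_flags = SA_SIGINFO;
 *        sigemptyset(&sa.sa_mask);
 *        sigaction(SIGCHLD, &sa, NULL);
 *    }
 */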
2115 asmlinkage long
2116 sys_rt_sigtimedwait(const sigset_t __user *uthese,
2117 siginfo_t __user *uinfo,
2118 const struct timespec __user *uts,
2119 size_t sigsetsize)
2121 int ret, sig;
2122 sigset_t these;
2123 struct timespec ts;
2124 siginfo_t info;
2125 long timeout = 0;
2127 /* XXX: Don't preclude handling different sized sigset_t's. */
2128 if (sigsetsize != sizeof(sigset_t))
2129 return -EINVAL;
2131 if (copy_from_user(&these, uthese, sizeof(these)))
2132 return -EFAULT;
2134 /*
2135 * Invert the set of allowed signals to get those we
2136 * want to block.
2137 */
2138 sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
2139 signotset(&these);
2141 if (uts) {
2142 if (copy_from_user(&ts, uts, sizeof(ts)))
2143 return -EFAULT;
2144 if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
2145 || ts.tv_sec < 0)
2146 return -EINVAL;
2149 spin_lock_irq(&current->sighand->siglock);
2150 sig = dequeue_signal(current, &these, &info);
2151 if (!sig) {
2152 timeout = MAX_SCHEDULE_TIMEOUT;
2153 if (uts)
2154 timeout = (timespec_to_jiffies(&ts)
2155 + (ts.tv_sec || ts.tv_nsec));
2157 if (timeout) {
2158 /* None ready -- temporarily unblock those we're
2159 * interested in while we sleep, so that we'll
2160 * be awakened when they arrive. */
2161 current->real_blocked = current->blocked;
2162 sigandsets(&current->blocked, &current->blocked, &these);
2163 recalc_sigpending();
2164 spin_unlock_irq(&current->sighand->siglock);
2166 timeout = schedule_timeout_interruptible(timeout);
2168 spin_lock_irq(&current->sighand->siglock);
2169 sig = dequeue_signal(current, &these, &info);
2170 current->blocked = current->real_blocked;
2171 siginitset(&current->real_blocked, 0);
2172 recalc_sigpending();
2173 }
2174 }
2175 spin_unlock_irq(&current->sighand->siglock);
2177 if (sig) {
2178 ret = sig;
2179 if (uinfo) {
2180 if (copy_siginfo_to_user(uinfo, &info))
2181 ret = -EFAULT;
2182 }
2183 } else {
2184 ret = -EAGAIN;
2185 if (timeout)
2186 ret = -EINTR;
2187 }
2189 return ret;
2190 }
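/*
 * Illustration only, not part of this file: a minimal userspace sketch
 * (assuming the glibc sigtimedwait() wrapper over this syscall). The
 * caller blocks the signals first, precisely because the code above
 * only unblocks them temporarily while it sleeps.
 *
 *    #include <signal.h>
 *    #include <stdio.h>
 *    #include <time.h>
 *
 *    int main(void)
 *    {
 *        sigset_t set;
 *        siginfo_t info;
 *        struct timespec ts = { 5, 0 };          // wait up to 5 seconds
 *
 *        sigemptyset(&set);
 *        sigaddset(&set, SIGUSR1);
 *        sigprocmask(SIG_BLOCK, &set, NULL);
 *
 *        if (sigtimedwait(&set, &info, &ts) == SIGUSR1)
 *            printf("got SIGUSR1 from pid %d\n", (int)info.si_pid);
 *        else
 *            perror("sigtimedwait");
 *        return 0;
 *    }
 */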
2192 asmlinkage long
2193 sys_kill(int pid, int sig)
2195 struct siginfo info;
2197 info.si_signo = sig;
2198 info.si_errno = 0;
2199 info.si_code = SI_USER;
2200 info.si_pid = current->tgid;
2201 info.si_uid = current->uid;
2203 return kill_something_info(sig, &info, pid);
2206 static int do_tkill(int tgid, int pid, int sig)
2208 int error;
2209 struct siginfo info;
2210 struct task_struct *p;
2212 error = -ESRCH;
2213 info.si_signo = sig;
2214 info.si_errno = 0;
2215 info.si_code = SI_TKILL;
2216 info.si_pid = current->tgid;
2217 info.si_uid = current->uid;
2219 read_lock(&tasklist_lock);
2220 p = find_task_by_pid(pid);
2221 if (p && (tgid <= 0 || p->tgid == tgid)) {
2222 error = check_kill_permission(sig, &info, p);
2223 /*
2224 * The null signal is a permissions and process existence
2225 * probe. No signal is actually delivered.
2226 */
2227 if (!error && sig && p->sighand) {
2228 spin_lock_irq(&p->sighand->siglock);
2229 handle_stop_signal(sig, p);
2230 error = specific_send_sig_info(sig, &info, p);
2231 spin_unlock_irq(&p->sighand->siglock);
2232 }
2233 }
2234 read_unlock(&tasklist_lock);
2236 return error;
2237 }
2239 /**
2240 * sys_tgkill - send signal to one specific thread
2241 * @tgid: the thread group ID of the thread
2242 * @pid: the PID of the thread
2243 * @sig: signal to be sent
2244 *
2245 * This syscall also checks the @tgid and returns -ESRCH even if the PID
2246 * exists but no longer belongs to the target process. This
2247 * method solves the problem of threads exiting and PIDs getting reused.
2248 */
2249 asmlinkage long sys_tgkill(int tgid, int pid, int sig)
2251 /* This is only valid for single tasks */
2252 if (pid <= 0 || tgid <= 0)
2253 return -EINVAL;
2255 return do_tkill(tgid, pid, sig);
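/*
 * Illustration only, not part of this file: userspace usually reaches
 * tgkill() through syscall(2). A signal number of 0 performs only the
 * permission/existence probe described for do_tkill() above. The helper
 * name is made up for the example.
 *
 *    #include <signal.h>
 *    #include <sys/syscall.h>
 *    #include <sys/types.h>
 *    #include <unistd.h>
 *
 *    static int thread_alive(pid_t tgid, pid_t tid)
 *    {
 *        // sig == 0: existence and permission check only
 *        return syscall(SYS_tgkill, tgid, tid, 0) == 0;
 *    }
 */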
2258 /*
2259 * Send a signal to only one task, even if it's a CLONE_THREAD task.
2260 */
2261 asmlinkage long
2262 sys_tkill(int pid, int sig)
2264 /* This is only valid for single tasks */
2265 if (pid <= 0)
2266 return -EINVAL;
2268 return do_tkill(0, pid, sig);
2271 asmlinkage long
2272 sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
2274 siginfo_t info;
2276 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2277 return -EFAULT;
2279 /* Not even root can pretend to send signals from the kernel.
2280 Nor can they impersonate a kill(), which adds source info. */
2281 if (info.si_code >= 0)
2282 return -EPERM;
2283 info.si_signo = sig;
2285 /* POSIX.1b doesn't mention process groups. */
2286 return kill_proc_info(sig, &info, pid);
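/*
 * Illustration only, not part of this file: the usual way into this
 * syscall is glibc's sigqueue(), which builds a siginfo with
 * si_code = SI_QUEUE (negative, so it passes the si_code >= 0 check
 * above) plus a caller-supplied value. The helper name is made up.
 *
 *    #include <signal.h>
 *    #include <sys/types.h>
 *
 *    int notify(pid_t pid)
 *    {
 *        union sigval v;
 *
 *        v.sival_int = 42;               // payload seen as si_value
 *        return sigqueue(pid, SIGRTMIN, v);
 *    }
 */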
2289 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
2291 struct k_sigaction *k;
2292 sigset_t mask;
2294 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
2295 return -EINVAL;
2297 k = &current->sighand->action[sig-1];
2299 spin_lock_irq(&current->sighand->siglock);
2300 if (signal_pending(current)) {
2301 /*
2302 * If there might be a fatal signal pending on multiple
2303 * threads, make sure we take it before changing the action.
2304 */
2305 spin_unlock_irq(&current->sighand->siglock);
2306 return -ERESTARTNOINTR;
2309 if (oact)
2310 *oact = *k;
2312 if (act) {
2313 sigdelsetmask(&act->sa.sa_mask,
2314 sigmask(SIGKILL) | sigmask(SIGSTOP));
2315 *k = *act;
2316 /*
2317 * POSIX 3.3.1.3:
2318 * "Setting a signal action to SIG_IGN for a signal that is
2319 * pending shall cause the pending signal to be discarded,
2320 * whether or not it is blocked."
2321 *
2322 * "Setting a signal action to SIG_DFL for a signal that is
2323 * pending and whose default action is to ignore the signal
2324 * (for example, SIGCHLD), shall cause the pending signal to
2325 * be discarded, whether or not it is blocked"
2326 */
2327 if (act->sa.sa_handler == SIG_IGN ||
2328 (act->sa.sa_handler == SIG_DFL && sig_kernel_ignore(sig))) {
2329 struct task_struct *t = current;
2330 sigemptyset(&mask);
2331 sigaddset(&mask, sig);
2332 rm_from_queue_full(&mask, &t->signal->shared_pending);
2333 do {
2334 rm_from_queue_full(&mask, &t->pending);
2335 recalc_sigpending_and_wake(t);
2336 t = next_thread(t);
2337 } while (t != current);
2338 }
2339 }
2341 spin_unlock_irq(&current->sighand->siglock);
2342 return 0;
2343 }
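/*
 * Illustration only, not part of this file: a userspace sketch of the
 * POSIX 3.3.1.3 rule implemented above - installing SIG_IGN discards a
 * pending instance of the signal even while it is blocked (assuming the
 * libc signal()/sigprocmask() wrappers reach the rt_ syscalls).
 *
 *    #include <signal.h>
 *    #include <stdio.h>
 *
 *    int main(void)
 *    {
 *        sigset_t blk, pend;
 *
 *        sigemptyset(&blk);
 *        sigaddset(&blk, SIGUSR1);
 *        sigprocmask(SIG_BLOCK, &blk, NULL);
 *        raise(SIGUSR1);                 // pending and blocked
 *        signal(SIGUSR1, SIG_IGN);       // discards the pending signal
 *        sigpending(&pend);
 *        printf("still pending: %d\n", sigismember(&pend, SIGUSR1));
 *        return 0;
 *    }
 */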
2345 int
2346 do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
2348 stack_t oss;
2349 int error;
2351 if (uoss) {
2352 oss.ss_sp = (void __user *) current->sas_ss_sp;
2353 oss.ss_size = current->sas_ss_size;
2354 oss.ss_flags = sas_ss_flags(sp);
2357 if (uss) {
2358 void __user *ss_sp;
2359 size_t ss_size;
2360 int ss_flags;
2362 error = -EFAULT;
2363 if (!access_ok(VERIFY_READ, uss, sizeof(*uss))
2364 || __get_user(ss_sp, &uss->ss_sp)
2365 || __get_user(ss_flags, &uss->ss_flags)
2366 || __get_user(ss_size, &uss->ss_size))
2367 goto out;
2369 error = -EPERM;
2370 if (on_sig_stack(sp))
2371 goto out;
2373 error = -EINVAL;
2375 /*
2376 * Note - this code used to test ss_flags incorrectly;
2377 * old code may have been written using ss_flags==0
2378 * to mean ss_flags==SS_ONSTACK (as this was the only
2379 * way that worked), so this fix preserves that older
2380 * mechanism.
2381 */
2382 if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
2383 goto out;
2385 if (ss_flags == SS_DISABLE) {
2386 ss_size = 0;
2387 ss_sp = NULL;
2388 } else {
2389 error = -ENOMEM;
2390 if (ss_size < MINSIGSTKSZ)
2391 goto out;
2392 }
2394 current->sas_ss_sp = (unsigned long) ss_sp;
2395 current->sas_ss_size = ss_size;
2396 }
2398 if (uoss) {
2399 error = -EFAULT;
2400 if (copy_to_user(uoss, &oss, sizeof(oss)))
2401 goto out;
2402 }
2404 error = 0;
2405 out:
2406 return error;
2407 }
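/*
 * Illustration only, not part of this file: a minimal userspace sketch
 * of installing an alternate stack and a handler that runs on it - the
 * SA_ONSTACK path that sas_ss_sp/sas_ss_size and on_sig_stack() support.
 * The function names are made up for the example.
 *
 *    #include <signal.h>
 *    #include <stdlib.h>
 *    #include <unistd.h>
 *
 *    static void on_segv(int sig)
 *    {
 *        _exit(1);                       // runs on the alternate stack
 *    }
 *
 *    static int setup(void)
 *    {
 *        stack_t ss;
 *        struct sigaction sa;
 *
 *        ss.ss_sp = malloc(SIGSTKSZ);
 *        ss.ss_size = SIGSTKSZ;
 *        ss.ss_flags = 0;
 *        if (!ss.ss_sp || sigaltstack(&ss, NULL) < 0)
 *            return -1;
 *
 *        sa.sa_handler = on_segv;
 *        sa.sa_flags = SA_ONSTACK;
 *        sigemptyset(&sa.sa_mask);
 *        return sigaction(SIGSEGV, &sa, NULL);
 *    }
 */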
2409 #ifdef __ARCH_WANT_SYS_SIGPENDING
2411 asmlinkage long
2412 sys_sigpending(old_sigset_t __user *set)
2414 return do_sigpending(set, sizeof(*set));
2417 #endif
2419 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
2420 /* Some platforms have their own version with special arguments;
2421 others support only sys_rt_sigprocmask. */
2423 asmlinkage long
2424 sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset)
2426 int error;
2427 old_sigset_t old_set, new_set;
2429 if (set) {
2430 error = -EFAULT;
2431 if (copy_from_user(&new_set, set, sizeof(*set)))
2432 goto out;
2433 new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));
2435 spin_lock_irq(&current->sighand->siglock);
2436 old_set = current->blocked.sig[0];
2438 error = 0;
2439 switch (how) {
2440 default:
2441 error = -EINVAL;
2442 break;
2443 case SIG_BLOCK:
2444 sigaddsetmask(&current->blocked, new_set);
2445 break;
2446 case SIG_UNBLOCK:
2447 sigdelsetmask(&current->blocked, new_set);
2448 break;
2449 case SIG_SETMASK:
2450 current->blocked.sig[0] = new_set;
2451 break;
2452 }
2454 recalc_sigpending();
2455 spin_unlock_irq(&current->sighand->siglock);
2456 if (error)
2457 goto out;
2458 if (oset)
2459 goto set_old;
2460 } else if (oset) {
2461 old_set = current->blocked.sig[0];
2462 set_old:
2463 error = -EFAULT;
2464 if (copy_to_user(oset, &old_set, sizeof(*oset)))
2465 goto out;
2467 error = 0;
2468 out:
2469 return error;
2470 }
2471 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
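/*
 * Illustration only, not part of this file: the three "how" values
 * dispatched in the switch above, as seen from userspace. Modern
 * programs go through the rt_ variant via libc, but the semantics are
 * the same; the function name is made up for the example.
 *
 *    #include <signal.h>
 *
 *    void mask_demo(void)
 *    {
 *        sigset_t s, old;
 *
 *        sigemptyset(&s);
 *        sigaddset(&s, SIGINT);
 *        sigprocmask(SIG_BLOCK, &s, &old);      // add to the mask
 *        sigprocmask(SIG_UNBLOCK, &s, NULL);    // remove from the mask
 *        sigprocmask(SIG_SETMASK, &old, NULL);  // replace the mask
 *    }
 */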
2473 #ifdef __ARCH_WANT_SYS_RT_SIGACTION
2474 asmlinkage long
2475 sys_rt_sigaction(int sig,
2476 const struct sigaction __user *act,
2477 struct sigaction __user *oact,
2478 size_t sigsetsize)
2480 struct k_sigaction new_sa, old_sa;
2481 int ret = -EINVAL;
2483 /* XXX: Don't preclude handling different sized sigset_t's. */
2484 if (sigsetsize != sizeof(sigset_t))
2485 goto out;
2487 if (act) {
2488 if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
2489 return -EFAULT;
2490 }
2492 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
2494 if (!ret && oact) {
2495 if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
2496 return -EFAULT;
2497 }
2498 out:
2499 return ret;
2500 }
2501 #endif /* __ARCH_WANT_SYS_RT_SIGACTION */
2503 #ifdef __ARCH_WANT_SYS_SGETMASK
2505 /*
2506 * For backwards compatibility. Functionality superseded by sigprocmask.
2507 */
2508 asmlinkage long
2509 sys_sgetmask(void)
2511 /* SMP safe */
2512 return current->blocked.sig[0];
2515 asmlinkage long
2516 sys_ssetmask(int newmask)
2518 int old;
2520 spin_lock_irq(&current->sighand->siglock);
2521 old = current->blocked.sig[0];
2523 siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
2524 sigmask(SIGSTOP)));
2525 recalc_sigpending();
2526 spin_unlock_irq(&current->sighand->siglock);
2528 return old;
2529 }
2530 #endif /* __ARCH_WANT_SYS_SGETMASK */
2532 #ifdef __ARCH_WANT_SYS_SIGNAL
2533 /*
2534 * For backwards compatibility. Functionality superseded by sigaction.
2535 */
2536 asmlinkage unsigned long
2537 sys_signal(int sig, __sighandler_t handler)
2539 struct k_sigaction new_sa, old_sa;
2540 int ret;
2542 new_sa.sa.sa_handler = handler;
2543 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
2544 sigemptyset(&new_sa.sa.sa_mask);
2546 ret = do_sigaction(sig, &new_sa, &old_sa);
2548 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
2549 }
2550 #endif /* __ARCH_WANT_SYS_SIGNAL */
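/*
 * Illustration only, not part of this file: the raw syscall installs the
 * handler with SA_ONESHOT | SA_NOMASK, i.e. historic SysV semantics in
 * which the disposition resets to SIG_DFL after one delivery. Whether a
 * program's signal() call reaches this path depends on its C library
 * (glibc normally maps signal() onto sigaction() with BSD semantics), so
 * this is a sketch of the re-arming pattern old code relied on; the
 * function names are made up.
 *
 *    #include <signal.h>
 *
 *    static void on_usr1(int sig)
 *    {
 *        signal(SIGUSR1, on_usr1);      // re-arm: one-shot semantics
 *    }
 *
 *    void arm(void)
 *    {
 *        signal(SIGUSR1, on_usr1);
 *    }
 */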
2552 #ifdef __ARCH_WANT_SYS_PAUSE
2554 asmlinkage long
2555 sys_pause(void)
2557 current->state = TASK_INTERRUPTIBLE;
2558 schedule();
2559 return -ERESTARTNOHAND;
2560 }
2562 #endif
2564 #ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
2565 asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize)
2567 sigset_t newset;
2569 /* XXX: Don't preclude handling different sized sigset_t's. */
2570 if (sigsetsize != sizeof(sigset_t))
2571 return -EINVAL;
2573 if (copy_from_user(&newset, unewset, sizeof(newset)))
2574 return -EFAULT;
2575 sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));
2577 spin_lock_irq(&current->sighand->siglock);
2578 current->saved_sigmask = current->blocked;
2579 current->blocked = newset;
2580 recalc_sigpending();
2581 spin_unlock_irq(&current->sighand->siglock);
2583 current->state = TASK_INTERRUPTIBLE;
2584 schedule();
2585 set_thread_flag(TIF_RESTORE_SIGMASK);
2586 return -ERESTARTNOHAND;
2587 }
2588 #endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
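/*
 * Illustration only, not part of this file: sigsuspend() (the usual libc
 * entry into this syscall) swaps the blocked mask and sleeps atomically,
 * which closes the window that a separate unblock-then-pause() sequence
 * would leave open. The function name is made up for the example.
 *
 *    #include <signal.h>
 *
 *    static volatile sig_atomic_t got_usr1;
 *
 *    static void on_usr1(int sig)
 *    {
 *        got_usr1 = 1;
 *    }
 *
 *    void wait_for_usr1(void)
 *    {
 *        sigset_t block, suspend;
 *
 *        signal(SIGUSR1, on_usr1);
 *        sigemptyset(&block);
 *        sigaddset(&block, SIGUSR1);
 *        sigprocmask(SIG_BLOCK, &block, &suspend);
 *        sigdelset(&suspend, SIGUSR1);  // wait with SIGUSR1 unblocked
 *        while (!got_usr1)
 *            sigsuspend(&suspend);      // returns -1/EINTR after handler
 *    }
 */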
2590 __attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
2591 {
2592 return NULL;
2593 }
2595 void __init signals_init(void)
2596 {
2597 sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
2598 }