[linux-2.6.22.y-op.git] / kernel / signal.c
blob 5c48ab230f8da2fd5b71c681285a8213d17841d8
1 /*
2 * linux/kernel/signal.c
4 * Copyright (C) 1991, 1992 Linus Torvalds
6 * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson
8 * 2003-06-02 Jim Houston - Concurrent Computer Corp.
9 * Changes to use preallocated sigqueue structures
10 * to allow signals to be sent reliably.
13 #include <linux/slab.h>
14 #include <linux/module.h>
15 #include <linux/init.h>
16 #include <linux/sched.h>
17 #include <linux/fs.h>
18 #include <linux/tty.h>
19 #include <linux/binfmts.h>
20 #include <linux/security.h>
21 #include <linux/syscalls.h>
22 #include <linux/ptrace.h>
23 #include <linux/signal.h>
24 #include <linux/signalfd.h>
25 #include <linux/capability.h>
26 #include <linux/freezer.h>
27 #include <linux/pid_namespace.h>
28 #include <linux/nsproxy.h>
30 #include <asm/param.h>
31 #include <asm/uaccess.h>
32 #include <asm/unistd.h>
33 #include <asm/siginfo.h>
34 #include "audit.h" /* audit_signal_info() */
37 * SLAB caches for signal bits.
40 static struct kmem_cache *sigqueue_cachep;
43 static int sig_ignored(struct task_struct *t, int sig)
45 void __user * handler;
48 * Tracers always want to know about signals..
50 if (t->ptrace & PT_PTRACED)
51 return 0;
54 * Blocked signals are never ignored, since the
55 * signal handler may change by the time it is
56 * unblocked.
58 if (sigismember(&t->blocked, sig))
59 return 0;
61 /* Is it explicitly or implicitly ignored? */
62 handler = t->sighand->action[sig-1].sa.sa_handler;
63 return handler == SIG_IGN ||
64 (handler == SIG_DFL && sig_kernel_ignore(sig));
68 * Re-calculate pending state from the set of locally pending
69 * signals, globally pending signals, and blocked signals.
71 static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
73 unsigned long ready;
74 long i;
76 switch (_NSIG_WORDS) {
77 default:
78 for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
79 ready |= signal->sig[i] &~ blocked->sig[i];
80 break;
82 case 4: ready = signal->sig[3] &~ blocked->sig[3];
83 ready |= signal->sig[2] &~ blocked->sig[2];
84 ready |= signal->sig[1] &~ blocked->sig[1];
85 ready |= signal->sig[0] &~ blocked->sig[0];
86 break;
88 case 2: ready = signal->sig[1] &~ blocked->sig[1];
89 ready |= signal->sig[0] &~ blocked->sig[0];
90 break;
92 case 1: ready = signal->sig[0] &~ blocked->sig[0];
94 return ready != 0;
97 #define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
99 static int recalc_sigpending_tsk(struct task_struct *t)
101 if (t->signal->group_stop_count > 0 ||
102 (freezing(t)) ||
103 PENDING(&t->pending, &t->blocked) ||
104 PENDING(&t->signal->shared_pending, &t->blocked)) {
105 set_tsk_thread_flag(t, TIF_SIGPENDING);
106 return 1;
109 * We must never clear the flag in another thread, or in current
110 * when it's possible the current syscall is returning -ERESTART*.
111 * So we don't clear it here; only callers that know it is safe to do so clear it.
113 return 0;
117 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
118 * This is superfluous when called on current; the wakeup is then a harmless no-op.
120 void recalc_sigpending_and_wake(struct task_struct *t)
122 if (recalc_sigpending_tsk(t))
123 signal_wake_up(t, 0);
126 void recalc_sigpending(void)
128 if (!recalc_sigpending_tsk(current))
129 clear_thread_flag(TIF_SIGPENDING);
133 /* Given the mask, find the first available signal that should be serviced. */
135 int next_signal(struct sigpending *pending, sigset_t *mask)
137 unsigned long i, *s, *m, x;
138 int sig = 0;
140 s = pending->signal.sig;
141 m = mask->sig;
142 switch (_NSIG_WORDS) {
143 default:
144 for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
145 if ((x = *s &~ *m) != 0) {
146 sig = ffz(~x) + i*_NSIG_BPW + 1;
147 break;
149 break;
151 case 2: if ((x = s[0] &~ m[0]) != 0)
152 sig = 1;
153 else if ((x = s[1] &~ m[1]) != 0)
154 sig = _NSIG_BPW + 1;
155 else
156 break;
157 sig += ffz(~x);
158 break;
160 case 1: if ((x = *s &~ *m) != 0)
161 sig = ffz(~x) + 1;
162 break;
165 return sig;
168 static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
169 int override_rlimit)
171 struct sigqueue *q = NULL;
172 struct user_struct *user;
175 * In order to avoid problems with "switch_user()", we want to make
176 * sure that the compiler doesn't re-load "t->user"
178 user = t->user;
179 barrier();
180 atomic_inc(&user->sigpending);
181 if (override_rlimit ||
182 atomic_read(&user->sigpending) <=
183 t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
184 q = kmem_cache_alloc(sigqueue_cachep, flags);
185 if (unlikely(q == NULL)) {
186 atomic_dec(&user->sigpending);
187 } else {
188 INIT_LIST_HEAD(&q->list);
189 q->flags = 0;
190 q->user = get_uid(user);
192 return(q);
195 static void __sigqueue_free(struct sigqueue *q)
197 if (q->flags & SIGQUEUE_PREALLOC)
198 return;
199 atomic_dec(&q->user->sigpending);
200 free_uid(q->user);
201 kmem_cache_free(sigqueue_cachep, q);
204 void flush_sigqueue(struct sigpending *queue)
206 struct sigqueue *q;
208 sigemptyset(&queue->signal);
209 while (!list_empty(&queue->list)) {
210 q = list_entry(queue->list.next, struct sigqueue , list);
211 list_del_init(&q->list);
212 __sigqueue_free(q);
217 * Flush all pending signals for a task.
219 void flush_signals(struct task_struct *t)
221 unsigned long flags;
223 spin_lock_irqsave(&t->sighand->siglock, flags);
224 clear_tsk_thread_flag(t,TIF_SIGPENDING);
225 flush_sigqueue(&t->pending);
226 flush_sigqueue(&t->signal->shared_pending);
227 spin_unlock_irqrestore(&t->sighand->siglock, flags);
230 void ignore_signals(struct task_struct *t)
232 int i;
234 for (i = 0; i < _NSIG; ++i)
235 t->sighand->action[i].sa.sa_handler = SIG_IGN;
237 flush_signals(t);
241 * Flush all handlers for a task.
244 void
245 flush_signal_handlers(struct task_struct *t, int force_default)
247 int i;
248 struct k_sigaction *ka = &t->sighand->action[0];
249 for (i = _NSIG ; i != 0 ; i--) {
250 if (force_default || ka->sa.sa_handler != SIG_IGN)
251 ka->sa.sa_handler = SIG_DFL;
252 ka->sa.sa_flags = 0;
253 sigemptyset(&ka->sa.sa_mask);
254 ka++;
259 /* Notify the system that a driver wants to block all signals for this
260 * process, and wants to be notified if any signals at all were to be
261 * sent/acted upon. If the notifier routine returns non-zero, then the
262 * signal will be acted upon after all. If the notifier routine returns 0,
263 * then the signal will be blocked. Only one block per process is
264 * allowed. priv is a pointer to private data that the notifier routine
265 * can use to determine if the signal should be blocked or not. */
267 void
268 block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
270 unsigned long flags;
272 spin_lock_irqsave(&current->sighand->siglock, flags);
273 current->notifier_mask = mask;
274 current->notifier_data = priv;
275 current->notifier = notifier;
276 spin_unlock_irqrestore(&current->sighand->siglock, flags);
279 /* Notify the system that blocking has ended. */
281 void
282 unblock_all_signals(void)
284 unsigned long flags;
286 spin_lock_irqsave(&current->sighand->siglock, flags);
287 current->notifier = NULL;
288 current->notifier_data = NULL;
289 recalc_sigpending();
290 spin_unlock_irqrestore(&current->sighand->siglock, flags);
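/*
 * Illustrative sketch, not from the kernel sources: roughly how a driver
 * might use the notifier interface above. The names my_notifier,
 * my_critical_section and in_critical are hypothetical.
 */
#if 0
static int my_notifier(void *priv)
{
	int *in_critical = priv;

	/* non-zero: let the signal be acted upon after all */
	return !*in_critical;
}

static void my_critical_section(int *in_critical)
{
	sigset_t mask;

	sigfillset(&mask);		/* consult the notifier for every signal */
	block_all_signals(my_notifier, in_critical, &mask);
	*in_critical = 1;
	/* ... work that must not be disturbed by signal delivery ... */
	*in_critical = 0;
	unblock_all_signals();
}
#endif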
293 static int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
295 struct sigqueue *q, *first = NULL;
296 int still_pending = 0;
298 if (unlikely(!sigismember(&list->signal, sig)))
299 return 0;
302 * Collect the siginfo appropriate to this signal. Check if
303 * there is another siginfo for the same signal.
305 list_for_each_entry(q, &list->list, list) {
306 if (q->info.si_signo == sig) {
307 if (first) {
308 still_pending = 1;
309 break;
311 first = q;
314 if (first) {
315 list_del_init(&first->list);
316 copy_siginfo(info, &first->info);
317 __sigqueue_free(first);
318 if (!still_pending)
319 sigdelset(&list->signal, sig);
320 } else {
322 /* Ok, it wasn't in the queue. This must be
323 a fast-pathed signal or we must have been
324 out of queue space. So zero out the info.
326 sigdelset(&list->signal, sig);
327 info->si_signo = sig;
328 info->si_errno = 0;
329 info->si_code = 0;
330 info->si_pid = 0;
331 info->si_uid = 0;
333 return 1;
336 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
337 siginfo_t *info)
339 int sig = next_signal(pending, mask);
341 if (sig) {
342 if (current->notifier) {
343 if (sigismember(current->notifier_mask, sig)) {
344 if (!(current->notifier)(current->notifier_data)) {
345 clear_thread_flag(TIF_SIGPENDING);
346 return 0;
351 if (!collect_signal(sig, pending, info))
352 sig = 0;
355 return sig;
359 * Dequeue a signal and return the element to the caller, which is
360 * expected to free it.
362 * All callers have to hold the siglock.
364 int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
366 int signr = 0;
368 /* We only dequeue private signals from ourselves, we don't let
369 * signalfd steal them
371 if (likely(tsk == current))
372 signr = __dequeue_signal(&tsk->pending, mask, info);
373 if (!signr) {
374 signr = __dequeue_signal(&tsk->signal->shared_pending,
375 mask, info);
377 * itimer signal ?
379 * itimers are process shared and we restart periodic
380 * itimers in the signal delivery path to prevent DoS
381 * attacks in the high resolution timer case. This is
382 * compliant with the old way of self restarting
383 * itimers, as the SIGALRM is a legacy signal and only
384 * queued once. Changing the restart behaviour to
385 * restart the timer in the signal dequeue path is
386 * reducing the timer noise on heavily loaded !highres
387 * systems too.
389 if (unlikely(signr == SIGALRM)) {
390 struct hrtimer *tmr = &tsk->signal->real_timer;
392 if (!hrtimer_is_queued(tmr) &&
393 tsk->signal->it_real_incr.tv64 != 0) {
394 hrtimer_forward(tmr, tmr->base->get_time(),
395 tsk->signal->it_real_incr);
396 hrtimer_restart(tmr);
400 if (likely(tsk == current))
401 recalc_sigpending();
402 if (signr && unlikely(sig_kernel_stop(signr))) {
404 * Set a marker that we have dequeued a stop signal. Our
405 * caller might release the siglock and then the pending
406 * stop signal it is about to process is no longer in the
407 * pending bitmasks, but must still be cleared by a SIGCONT
408 * (and overruled by a SIGKILL). So those cases clear this
409 * shared flag after we've set it. Note that this flag may
410 * remain set after the signal we return is ignored or
411 * handled. That doesn't matter because its only purpose
412 * is to alert stop-signal processing code when another
413 * processor has come along and cleared the flag.
415 if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT))
416 tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
418 if (signr && likely(tsk == current) &&
419 ((info->si_code & __SI_MASK) == __SI_TIMER) &&
420 info->si_sys_private){
422 * Release the siglock to ensure proper locking order
423 * of timer locks outside of siglocks. Note, we leave
424 * irqs disabled here, since the posix-timers code is
425 * about to disable them again anyway.
427 spin_unlock(&tsk->sighand->siglock);
428 do_schedule_next_timer(info);
429 spin_lock(&tsk->sighand->siglock);
431 return signr;
435 * Tell a process that it has a new active signal..
437 * NOTE! we rely on the previous spin_lock to
438 * lock interrupts for us! We can only be called with
439 * "siglock" held, and the local interrupt must
440 * have been disabled when that got acquired!
442 * No need to set need_resched since signal event passing
443 * goes through ->blocked
445 void signal_wake_up(struct task_struct *t, int resume)
447 unsigned int mask;
449 set_tsk_thread_flag(t, TIF_SIGPENDING);
452 * For SIGKILL, we want to wake it up in the stopped/traced case.
453 * We don't check t->state here because there is a race with it
454 * executing on another processor and just now entering the stopped state.
455 * By using wake_up_state, we ensure the process will wake up and
456 * handle its death signal.
458 mask = TASK_INTERRUPTIBLE;
459 if (resume)
460 mask |= TASK_STOPPED | TASK_TRACED;
461 if (!wake_up_state(t, mask))
462 kick_process(t);
466 * Remove signals in mask from the pending set and queue.
467 * Returns 1 if any signals were found.
469 * All callers must be holding the siglock.
471 * This version takes a sigset mask and looks at all signals,
472 * not just those in the first mask word.
474 static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
476 struct sigqueue *q, *n;
477 sigset_t m;
479 sigandsets(&m, mask, &s->signal);
480 if (sigisemptyset(&m))
481 return 0;
483 signandsets(&s->signal, &s->signal, mask);
484 list_for_each_entry_safe(q, n, &s->list, list) {
485 if (sigismember(mask, q->info.si_signo)) {
486 list_del_init(&q->list);
487 __sigqueue_free(q);
490 return 1;
493 * Remove signals in mask from the pending set and queue.
494 * Returns 1 if any signals were found.
496 * All callers must be holding the siglock.
498 static int rm_from_queue(unsigned long mask, struct sigpending *s)
500 struct sigqueue *q, *n;
502 if (!sigtestsetmask(&s->signal, mask))
503 return 0;
505 sigdelsetmask(&s->signal, mask);
506 list_for_each_entry_safe(q, n, &s->list, list) {
507 if (q->info.si_signo < SIGRTMIN &&
508 (mask & sigmask(q->info.si_signo))) {
509 list_del_init(&q->list);
510 __sigqueue_free(q);
513 return 1;
517 * Bad permissions for sending the signal
519 static int check_kill_permission(int sig, struct siginfo *info,
520 struct task_struct *t)
522 int error = -EINVAL;
523 if (!valid_signal(sig))
524 return error;
526 error = audit_signal_info(sig, t); /* Let audit system see the signal */
527 if (error)
528 return error;
530 error = -EPERM;
531 if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
532 && ((sig != SIGCONT) ||
533 (process_session(current) != process_session(t)))
534 && (current->euid ^ t->suid) && (current->euid ^ t->uid)
535 && (current->uid ^ t->suid) && (current->uid ^ t->uid)
536 && !capable(CAP_KILL))
537 return error;
539 return security_task_kill(t, info, sig, 0);
542 /* forward decl */
543 static void do_notify_parent_cldstop(struct task_struct *tsk, int why);
546 * Handle magic process-wide effects of stop/continue signals.
547 * Unlike the signal actions, these happen immediately at signal-generation
548 * time regardless of blocking, ignoring, or handling. This does the
549 * actual continuing for SIGCONT, but not the actual stopping for stop
550 * signals. The process stop is done as a signal action for SIG_DFL.
552 static void handle_stop_signal(int sig, struct task_struct *p)
554 struct task_struct *t;
556 if (p->signal->flags & SIGNAL_GROUP_EXIT)
558 * The process is in the middle of dying already.
560 return;
562 if (sig_kernel_stop(sig)) {
564 * This is a stop signal. Remove SIGCONT from all queues.
566 rm_from_queue(sigmask(SIGCONT), &p->signal->shared_pending);
567 t = p;
568 do {
569 rm_from_queue(sigmask(SIGCONT), &t->pending);
570 t = next_thread(t);
571 } while (t != p);
572 } else if (sig == SIGCONT) {
574 * Remove all stop signals from all queues,
575 * and wake all threads.
577 if (unlikely(p->signal->group_stop_count > 0)) {
579 * There was a group stop in progress. We'll
580 * pretend it finished before we got here. We are
581 * obliged to report it to the parent: if the
582 * SIGSTOP happened "after" this SIGCONT, then it
583 * would have cleared this pending SIGCONT. If it
584 * happened "before" this SIGCONT, then the parent
585 * got the SIGCHLD about the stop finishing before
586 * the continue happened. We do the notification
587 * now, and it's as if the stop had finished and
588 * the SIGCHLD was pending on entry to this kill.
590 p->signal->group_stop_count = 0;
591 p->signal->flags = SIGNAL_STOP_CONTINUED;
592 spin_unlock(&p->sighand->siglock);
593 do_notify_parent_cldstop(p, CLD_STOPPED);
594 spin_lock(&p->sighand->siglock);
596 rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
597 t = p;
598 do {
599 unsigned int state;
600 rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
603 * If there is a handler for SIGCONT, we must make
604 * sure that no thread returns to user mode before
605 * we post the signal, in case it was the only
606 * thread eligible to run the signal handler--then
607 * it must not do anything between resuming and
608 * running the handler. With the TIF_SIGPENDING
609 * flag set, the thread will pause and acquire the
610 * siglock that we hold now and until we've queued
611 * the pending signal.
613 * Wake up the stopped thread _after_ setting
614 * TIF_SIGPENDING
616 state = TASK_STOPPED;
617 if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
618 set_tsk_thread_flag(t, TIF_SIGPENDING);
619 state |= TASK_INTERRUPTIBLE;
621 wake_up_state(t, state);
623 t = next_thread(t);
624 } while (t != p);
626 if (p->signal->flags & SIGNAL_STOP_STOPPED) {
628 * We were in fact stopped, and are now continued.
629 * Notify the parent with CLD_CONTINUED.
631 p->signal->flags = SIGNAL_STOP_CONTINUED;
632 p->signal->group_exit_code = 0;
633 spin_unlock(&p->sighand->siglock);
634 do_notify_parent_cldstop(p, CLD_CONTINUED);
635 spin_lock(&p->sighand->siglock);
636 } else {
638 * We are not stopped, but there could be a stop
639 * signal in the middle of being processed after
640 * being removed from the queue. Clear that too.
642 p->signal->flags = 0;
644 } else if (sig == SIGKILL) {
646 * Make sure that any pending stop signal already dequeued
647 * is undone by the wakeup for SIGKILL.
649 p->signal->flags = 0;
653 static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
654 struct sigpending *signals)
656 struct sigqueue * q = NULL;
657 int ret = 0;
660 * Deliver the signal to listening signalfds. This must be called
661 * with the sighand lock held.
663 signalfd_notify(t, sig);
666 * fast-pathed signals for kernel-internal things like SIGSTOP
667 * or SIGKILL.
669 if (info == SEND_SIG_FORCED)
670 goto out_set;
672 /* Real-time signals must be queued if sent by sigqueue, or
673 some other real-time mechanism. It is implementation
674 defined whether kill() does so. We attempt to do so, on
675 the principle of least surprise, but since kill is not
676 allowed to fail with EAGAIN when low on memory we just
677 make sure at least one signal gets delivered and don't
678 pass on the info struct. */
680 q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
681 (is_si_special(info) ||
682 info->si_code >= 0)));
683 if (q) {
684 list_add_tail(&q->list, &signals->list);
685 switch ((unsigned long) info) {
686 case (unsigned long) SEND_SIG_NOINFO:
687 q->info.si_signo = sig;
688 q->info.si_errno = 0;
689 q->info.si_code = SI_USER;
690 q->info.si_pid = current->pid;
691 q->info.si_uid = current->uid;
692 break;
693 case (unsigned long) SEND_SIG_PRIV:
694 q->info.si_signo = sig;
695 q->info.si_errno = 0;
696 q->info.si_code = SI_KERNEL;
697 q->info.si_pid = 0;
698 q->info.si_uid = 0;
699 break;
700 default:
701 copy_siginfo(&q->info, info);
702 break;
704 } else if (!is_si_special(info)) {
705 if (sig >= SIGRTMIN && info->si_code != SI_USER)
707 * Queue overflow, abort. We may abort if the signal was rt
708 * and sent by user using something other than kill().
710 return -EAGAIN;
713 out_set:
714 sigaddset(&signals->signal, sig);
715 return ret;
718 #define LEGACY_QUEUE(sigptr, sig) \
719 (((sig) < SIGRTMIN) && sigismember(&(sigptr)->signal, (sig)))
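/*
 * Illustrative sequence, not from the kernel sources: LEGACY_QUEUE() is what
 * makes a second, still-pending non-realtime signal collapse into the first,
 * while realtime signals (>= SIGRTMIN) are each queued. "p" stands for any
 * target task.
 */
#if 0
	send_sig(SIGCHLD, p, 0);	/* queued, pending bit set           */
	send_sig(SIGCHLD, p, 0);	/* coalesced: the bit is already set */
	send_sig(SIGRTMIN + 1, p, 0);	/* queued                            */
	send_sig(SIGRTMIN + 1, p, 0);	/* queued again (realtime semantics) */
#endif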
722 static int
723 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
725 int ret = 0;
727 BUG_ON(!irqs_disabled());
728 assert_spin_locked(&t->sighand->siglock);
730 /* Short-circuit ignored signals. */
731 if (sig_ignored(t, sig))
732 goto out;
734 /* Support queueing exactly one non-rt signal, so that we
735 can get more detailed information about the cause of
736 the signal. */
737 if (LEGACY_QUEUE(&t->pending, sig))
738 goto out;
740 ret = send_signal(sig, info, t, &t->pending);
741 if (!ret && !sigismember(&t->blocked, sig))
742 signal_wake_up(t, sig == SIGKILL);
743 out:
744 return ret;
748 * Force a signal that the process can't ignore: if necessary
749 * we unblock the signal and change any SIG_IGN to SIG_DFL.
751 * Note: If we unblock the signal, we always reset it to SIG_DFL,
752 * since we do not want to have a signal handler that was blocked
753 * be invoked when user space had explicitly blocked it.
755 * We don't want to have recursive SIGSEGV's etc, for example.
758 force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
760 unsigned long int flags;
761 int ret, blocked, ignored;
762 struct k_sigaction *action;
764 spin_lock_irqsave(&t->sighand->siglock, flags);
765 action = &t->sighand->action[sig-1];
766 ignored = action->sa.sa_handler == SIG_IGN;
767 blocked = sigismember(&t->blocked, sig);
768 if (blocked || ignored) {
769 action->sa.sa_handler = SIG_DFL;
770 if (blocked) {
771 sigdelset(&t->blocked, sig);
772 recalc_sigpending_and_wake(t);
775 ret = specific_send_sig_info(sig, info, t);
776 spin_unlock_irqrestore(&t->sighand->siglock, flags);
778 return ret;
781 void
782 force_sig_specific(int sig, struct task_struct *t)
784 force_sig_info(sig, SEND_SIG_FORCED, t);
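/*
 * Illustrative sketch, not from the kernel sources: the typical pattern an
 * architecture fault handler uses to deliver an unignorable, unblockable
 * SIGSEGV through force_sig_info(). "faulting_address" is hypothetical.
 */
#if 0
static void example_deliver_segv(unsigned long faulting_address)
{
	siginfo_t info;

	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_MAPERR;
	info.si_addr = (void __user *)faulting_address;
	force_sig_info(SIGSEGV, &info, current);
}
#endif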
788 * Test if P wants to take SIG. After we've checked all threads with this,
789 * it's equivalent to finding no threads not blocking SIG. Any threads not
790 * blocking SIG were ruled out because they are not running and already
791 * have pending signals. Such threads will dequeue from the shared queue
792 * as soon as they're available, so putting the signal on the shared queue
793 * will be equivalent to sending it to one such thread.
795 static inline int wants_signal(int sig, struct task_struct *p)
797 if (sigismember(&p->blocked, sig))
798 return 0;
799 if (p->flags & PF_EXITING)
800 return 0;
801 if (sig == SIGKILL)
802 return 1;
803 if (p->state & (TASK_STOPPED | TASK_TRACED))
804 return 0;
805 return task_curr(p) || !signal_pending(p);
808 static void
809 __group_complete_signal(int sig, struct task_struct *p)
811 struct task_struct *t;
814 * Now find a thread we can wake up to take the signal off the queue.
816 * If the main thread wants the signal, it gets first crack.
817 * Probably the least surprising to the average bear.
819 if (wants_signal(sig, p))
820 t = p;
821 else if (thread_group_empty(p))
823 * There is just one thread and it does not need to be woken.
824 * It will dequeue unblocked signals before it runs again.
826 return;
827 else {
829 * Otherwise try to find a suitable thread.
831 t = p->signal->curr_target;
832 if (t == NULL)
833 /* restart balancing at this thread */
834 t = p->signal->curr_target = p;
836 while (!wants_signal(sig, t)) {
837 t = next_thread(t);
838 if (t == p->signal->curr_target)
840 * No thread needs to be woken.
841 * Any eligible threads will see
842 * the signal in the queue soon.
844 return;
846 p->signal->curr_target = t;
850 * Found a killable thread. If the signal will be fatal,
851 * then start taking the whole group down immediately.
853 if (sig_fatal(p, sig) && !(p->signal->flags & SIGNAL_GROUP_EXIT) &&
854 !sigismember(&t->real_blocked, sig) &&
855 (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
857 * This signal will be fatal to the whole group.
859 if (!sig_kernel_coredump(sig)) {
861 * Start a group exit and wake everybody up.
862 * This way we don't have other threads
863 * running and doing things after a slower
864 * thread has the fatal signal pending.
866 p->signal->flags = SIGNAL_GROUP_EXIT;
867 p->signal->group_exit_code = sig;
868 p->signal->group_stop_count = 0;
869 t = p;
870 do {
871 sigaddset(&t->pending.signal, SIGKILL);
872 signal_wake_up(t, 1);
873 t = next_thread(t);
874 } while (t != p);
875 return;
879 * There will be a core dump. We make all threads other
880 * than the chosen one go into a group stop so that nothing
881 * happens until it gets scheduled, takes the signal off
882 * the shared queue, and does the core dump. This is a
883 * little more complicated than strictly necessary, but it
884 * keeps the signal state that winds up in the core dump
885 * unchanged from the death state, e.g. which thread had
886 * the core-dump signal unblocked.
888 rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
889 rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
890 p->signal->group_stop_count = 0;
891 p->signal->group_exit_task = t;
892 t = p;
893 do {
894 p->signal->group_stop_count++;
895 signal_wake_up(t, 0);
896 t = next_thread(t);
897 } while (t != p);
898 wake_up_process(p->signal->group_exit_task);
899 return;
903 * The signal is already in the shared-pending queue.
904 * Tell the chosen thread to wake up and dequeue it.
906 signal_wake_up(t, sig == SIGKILL);
907 return;
911 __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
913 int ret = 0;
915 assert_spin_locked(&p->sighand->siglock);
916 handle_stop_signal(sig, p);
918 /* Short-circuit ignored signals. */
919 if (sig_ignored(p, sig))
920 return ret;
922 if (LEGACY_QUEUE(&p->signal->shared_pending, sig))
923 /* This is a non-RT signal and we already have one queued. */
924 return ret;
927 * Put this signal on the shared-pending queue, or fail with EAGAIN.
928 * We always use the shared queue for process-wide signals,
929 * to avoid several races.
931 ret = send_signal(sig, info, p, &p->signal->shared_pending);
932 if (unlikely(ret))
933 return ret;
935 __group_complete_signal(sig, p);
936 return 0;
940 * Nuke all other threads in the group.
942 void zap_other_threads(struct task_struct *p)
944 struct task_struct *t;
946 p->signal->flags = SIGNAL_GROUP_EXIT;
947 p->signal->group_stop_count = 0;
949 if (thread_group_empty(p))
950 return;
952 for (t = next_thread(p); t != p; t = next_thread(t)) {
954 * Don't bother with already dead threads
956 if (t->exit_state)
957 continue;
959 /* SIGKILL will be handled before any pending SIGSTOP */
960 sigaddset(&t->pending.signal, SIGKILL);
961 signal_wake_up(t, 1);
966 * Must be called under rcu_read_lock() or with tasklist_lock read-held.
968 struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
970 struct sighand_struct *sighand;
972 for (;;) {
973 sighand = rcu_dereference(tsk->sighand);
974 if (unlikely(sighand == NULL))
975 break;
977 spin_lock_irqsave(&sighand->siglock, *flags);
978 if (likely(sighand == tsk->sighand))
979 break;
980 spin_unlock_irqrestore(&sighand->siglock, *flags);
983 return sighand;
986 int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
988 unsigned long flags;
989 int ret;
991 ret = check_kill_permission(sig, info, p);
993 if (!ret && sig) {
994 ret = -ESRCH;
995 if (lock_task_sighand(p, &flags)) {
996 ret = __group_send_sig_info(sig, info, p);
997 unlock_task_sighand(p, &flags);
1001 return ret;
1005 * kill_pgrp_info() sends a signal to a process group: this is what the tty
1006 * control characters do (^C, ^Z etc)
1009 int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
1011 struct task_struct *p = NULL;
1012 int retval, success;
1014 success = 0;
1015 retval = -ESRCH;
1016 do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
1017 int err = group_send_sig_info(sig, info, p);
1018 success |= !err;
1019 retval = err;
1020 } while_each_pid_task(pgrp, PIDTYPE_PGID, p);
1021 return success ? 0 : retval;
1024 int kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
1026 int retval;
1028 read_lock(&tasklist_lock);
1029 retval = __kill_pgrp_info(sig, info, pgrp);
1030 read_unlock(&tasklist_lock);
1032 return retval;
1035 int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
1037 int error;
1038 struct task_struct *p;
1040 rcu_read_lock();
1041 if (unlikely(sig_needs_tasklist(sig)))
1042 read_lock(&tasklist_lock);
1044 p = pid_task(pid, PIDTYPE_PID);
1045 error = -ESRCH;
1046 if (p)
1047 error = group_send_sig_info(sig, info, p);
1049 if (unlikely(sig_needs_tasklist(sig)))
1050 read_unlock(&tasklist_lock);
1051 rcu_read_unlock();
1052 return error;
1056 kill_proc_info(int sig, struct siginfo *info, pid_t pid)
1058 int error;
1059 rcu_read_lock();
1060 error = kill_pid_info(sig, info, find_pid(pid));
1061 rcu_read_unlock();
1062 return error;
1065 /* like kill_pid_info(), but doesn't use uid/euid of "current" */
1066 int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
1067 uid_t uid, uid_t euid, u32 secid)
1069 int ret = -EINVAL;
1070 struct task_struct *p;
1072 if (!valid_signal(sig))
1073 return ret;
1075 read_lock(&tasklist_lock);
1076 p = pid_task(pid, PIDTYPE_PID);
1077 if (!p) {
1078 ret = -ESRCH;
1079 goto out_unlock;
1081 if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
1082 && (euid != p->suid) && (euid != p->uid)
1083 && (uid != p->suid) && (uid != p->uid)) {
1084 ret = -EPERM;
1085 goto out_unlock;
1087 ret = security_task_kill(p, info, sig, secid);
1088 if (ret)
1089 goto out_unlock;
1090 if (sig && p->sighand) {
1091 unsigned long flags;
1092 spin_lock_irqsave(&p->sighand->siglock, flags);
1093 ret = __group_send_sig_info(sig, info, p);
1094 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1096 out_unlock:
1097 read_unlock(&tasklist_lock);
1098 return ret;
1100 EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);
1103 * kill_something_info() interprets pid in interesting ways just like kill(2).
1105 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1106 * is probably wrong. Should make it like BSD or SYSV.
1109 static int kill_something_info(int sig, struct siginfo *info, int pid)
1111 int ret;
1112 rcu_read_lock();
1113 if (!pid) {
1114 ret = kill_pgrp_info(sig, info, task_pgrp(current));
1115 } else if (pid == -1) {
1116 int retval = 0, count = 0;
1117 struct task_struct * p;
1119 read_lock(&tasklist_lock);
1120 for_each_process(p) {
1121 if (p->pid > 1 && p->tgid != current->tgid) {
1122 int err = group_send_sig_info(sig, info, p);
1123 ++count;
1124 if (err != -EPERM)
1125 retval = err;
1128 read_unlock(&tasklist_lock);
1129 ret = count ? retval : -ESRCH;
1130 } else if (pid < 0) {
1131 ret = kill_pgrp_info(sig, info, find_pid(-pid));
1132 } else {
1133 ret = kill_pid_info(sig, info, find_pid(pid));
1135 rcu_read_unlock();
1136 return ret;
1140 * These are for backward compatibility with the rest of the kernel source.
1144 * These two are the most common entry points. They send a signal
1145 * just to the specific thread.
1148 send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1150 int ret;
1151 unsigned long flags;
1154 * Make sure legacy kernel users don't send in bad values
1155 * (normal paths check this in check_kill_permission).
1157 if (!valid_signal(sig))
1158 return -EINVAL;
1161 * We need the tasklist lock even for the specific
1162 * thread case (when we don't need to follow the group
1163 * lists) in order to avoid races with "p->sighand"
1164 * going away or changing from under us.
1166 read_lock(&tasklist_lock);
1167 spin_lock_irqsave(&p->sighand->siglock, flags);
1168 ret = specific_send_sig_info(sig, info, p);
1169 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1170 read_unlock(&tasklist_lock);
1171 return ret;
1174 #define __si_special(priv) \
1175 ((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1178 send_sig(int sig, struct task_struct *p, int priv)
1180 return send_sig_info(sig, __si_special(priv), p);
1184 * This is the entry point for "process-wide" signals.
1185 * They will go to an appropriate thread in the thread group.
1188 send_group_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1190 int ret;
1191 read_lock(&tasklist_lock);
1192 ret = group_send_sig_info(sig, info, p);
1193 read_unlock(&tasklist_lock);
1194 return ret;
1197 void
1198 force_sig(int sig, struct task_struct *p)
1200 force_sig_info(sig, SEND_SIG_PRIV, p);
1204 * When things go south during signal handling, we
1205 * will force a SIGSEGV. And if the signal that caused
1206 * the problem was already a SIGSEGV, we'll want to
1207 * make sure we don't even try to deliver the signal..
1210 force_sigsegv(int sig, struct task_struct *p)
1212 if (sig == SIGSEGV) {
1213 unsigned long flags;
1214 spin_lock_irqsave(&p->sighand->siglock, flags);
1215 p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1216 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1218 force_sig(SIGSEGV, p);
1219 return 0;
1222 int kill_pgrp(struct pid *pid, int sig, int priv)
1224 return kill_pgrp_info(sig, __si_special(priv), pid);
1226 EXPORT_SYMBOL(kill_pgrp);
1228 int kill_pid(struct pid *pid, int sig, int priv)
1230 return kill_pid_info(sig, __si_special(priv), pid);
1232 EXPORT_SYMBOL(kill_pid);
1235 kill_proc(pid_t pid, int sig, int priv)
1237 return kill_proc_info(sig, __si_special(priv), pid);
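/*
 * Illustrative sketch, not from the kernel sources: a driver that has
 * recorded a user pid can notify that process with one of the wrappers
 * above. "recorded_pid" is hypothetical; a non-zero "priv" argument makes
 * the signal appear to come from the kernel (SI_KERNEL) rather than a user.
 */
#if 0
static void example_notify_user(pid_t recorded_pid)
{
	kill_proc(recorded_pid, SIGIO, 1);
}
#endif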
1241 * These functions support sending signals using preallocated sigqueue
1242 * structures. This is needed "because realtime applications cannot
1243 * afford to lose notifications of asynchronous events, like timer
1244 * expirations or I/O completions". In the case of POSIX timers
1245 * we allocate the sigqueue structure at timer_create() time. If this
1246 * allocation fails we are able to report the failure to the application
1247 * with an EAGAIN error.
1250 struct sigqueue *sigqueue_alloc(void)
1252 struct sigqueue *q;
1254 if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0)))
1255 q->flags |= SIGQUEUE_PREALLOC;
1256 return(q);
1259 void sigqueue_free(struct sigqueue *q)
1261 unsigned long flags;
1262 spinlock_t *lock = &current->sighand->siglock;
1264 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1266 * If the signal is still pending remove it from the
1267 * pending queue. We must hold ->siglock while testing
1268 * q->list to serialize with collect_signal().
1270 spin_lock_irqsave(lock, flags);
1271 if (!list_empty(&q->list))
1272 list_del_init(&q->list);
1273 spin_unlock_irqrestore(lock, flags);
1275 q->flags &= ~SIGQUEUE_PREALLOC;
1276 __sigqueue_free(q);
1279 int send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
1281 unsigned long flags;
1282 int ret = 0;
1284 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1287 * The rcu based delayed sighand destroy makes it possible to
1288 * run this without tasklist lock held. The task struct itself
1289 * cannot go away as create_timer did get_task_struct().
1291 * We return -1 when the task is marked exiting, so that
1292 * posix_timer_event() can redirect it to the group leader.
1294 rcu_read_lock();
1296 if (!likely(lock_task_sighand(p, &flags))) {
1297 ret = -1;
1298 goto out_err;
1301 if (unlikely(!list_empty(&q->list))) {
1303 * If an SI_TIMER entry is already queued, just increment
1304 * the overrun count.
1306 BUG_ON(q->info.si_code != SI_TIMER);
1307 q->info.si_overrun++;
1308 goto out;
1310 /* Short-circuit ignored signals. */
1311 if (sig_ignored(p, sig)) {
1312 ret = 1;
1313 goto out;
1316 * Deliver the signal to listening signalfds. This must be called
1317 * with the sighand lock held.
1319 signalfd_notify(p, sig);
1321 list_add_tail(&q->list, &p->pending.list);
1322 sigaddset(&p->pending.signal, sig);
1323 if (!sigismember(&p->blocked, sig))
1324 signal_wake_up(p, sig == SIGKILL);
1326 out:
1327 unlock_task_sighand(p, &flags);
1328 out_err:
1329 rcu_read_unlock();
1331 return ret;
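/*
 * Illustrative sequence, not from the kernel sources: the intended life
 * cycle of a preallocated sigqueue as the POSIX timer code uses it. "tmr"
 * and its fields are hypothetical stand-ins for the timer object.
 */
#if 0
	/* at timer_create() time: reserve the entry; failure gives -EAGAIN */
	tmr->sigq = sigqueue_alloc();

	/* at each expiry: queue the notification (or bump si_overrun) */
	send_sigqueue(tmr->sig, tmr->sigq, tmr->target);

	/* at timer_delete() time: give the entry back */
	sigqueue_free(tmr->sigq);
#endif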
1335 send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
1337 unsigned long flags;
1338 int ret = 0;
1340 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1342 read_lock(&tasklist_lock);
1343 /* Since it_lock is held, p->sighand cannot be NULL. */
1344 spin_lock_irqsave(&p->sighand->siglock, flags);
1345 handle_stop_signal(sig, p);
1347 /* Short-circuit ignored signals. */
1348 if (sig_ignored(p, sig)) {
1349 ret = 1;
1350 goto out;
1353 if (unlikely(!list_empty(&q->list))) {
1355 * If an SI_TIMER entry is already queued, just increment
1356 * the overrun count. Other uses should not try to
1357 * send the signal multiple times.
1359 BUG_ON(q->info.si_code != SI_TIMER);
1360 q->info.si_overrun++;
1361 goto out;
1364 * Deliver the signal to listening signalfds. This must be called
1365 * with the sighand lock held.
1367 signalfd_notify(p, sig);
1370 * Put this signal on the shared-pending queue.
1371 * We always use the shared queue for process-wide signals,
1372 * to avoid several races.
1374 list_add_tail(&q->list, &p->signal->shared_pending.list);
1375 sigaddset(&p->signal->shared_pending.signal, sig);
1377 __group_complete_signal(sig, p);
1378 out:
1379 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1380 read_unlock(&tasklist_lock);
1381 return ret;
1385 * Wake up any threads in the parent blocked in wait* syscalls.
1387 static inline void __wake_up_parent(struct task_struct *p,
1388 struct task_struct *parent)
1390 wake_up_interruptible_sync(&parent->signal->wait_chldexit);
1394 * Let a parent know about the death of a child.
1395 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
1398 void do_notify_parent(struct task_struct *tsk, int sig)
1400 struct siginfo info;
1401 unsigned long flags;
1402 struct sighand_struct *psig;
1404 BUG_ON(sig == -1);
1406 /* do_notify_parent_cldstop should have been called instead. */
1407 BUG_ON(tsk->state & (TASK_STOPPED|TASK_TRACED));
1409 BUG_ON(!tsk->ptrace &&
1410 (tsk->group_leader != tsk || !thread_group_empty(tsk)));
1412 info.si_signo = sig;
1413 info.si_errno = 0;
1414 info.si_pid = tsk->pid;
1415 info.si_uid = tsk->uid;
1417 /* FIXME: find out whether or not this is supposed to be c*time. */
1418 info.si_utime = cputime_to_jiffies(cputime_add(tsk->utime,
1419 tsk->signal->utime));
1420 info.si_stime = cputime_to_jiffies(cputime_add(tsk->stime,
1421 tsk->signal->stime));
1423 info.si_status = tsk->exit_code & 0x7f;
1424 if (tsk->exit_code & 0x80)
1425 info.si_code = CLD_DUMPED;
1426 else if (tsk->exit_code & 0x7f)
1427 info.si_code = CLD_KILLED;
1428 else {
1429 info.si_code = CLD_EXITED;
1430 info.si_status = tsk->exit_code >> 8;
1433 psig = tsk->parent->sighand;
1434 spin_lock_irqsave(&psig->siglock, flags);
1435 if (!tsk->ptrace && sig == SIGCHLD &&
1436 (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
1437 (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
1439 * We are exiting and our parent doesn't care. POSIX.1
1440 * defines special semantics for setting SIGCHLD to SIG_IGN
1441 * or setting the SA_NOCLDWAIT flag: we should be reaped
1442 * automatically and not left for our parent's wait4 call.
1443 * Rather than having the parent do it as a magic kind of
1444 * signal handler, we just set this to tell do_exit that we
1445 * can be cleaned up without becoming a zombie. Note that
1446 * we still call __wake_up_parent in this case, because a
1447 * blocked sys_wait4 might now return -ECHILD.
1449 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
1450 * is implementation-defined: we do (if you don't want
1451 * it, just use SIG_IGN instead).
1453 tsk->exit_signal = -1;
1454 if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
1455 sig = 0;
1457 if (valid_signal(sig) && sig > 0)
1458 __group_send_sig_info(sig, &info, tsk->parent);
1459 __wake_up_parent(tsk, tsk->parent);
1460 spin_unlock_irqrestore(&psig->siglock, flags);
1463 static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
1465 struct siginfo info;
1466 unsigned long flags;
1467 struct task_struct *parent;
1468 struct sighand_struct *sighand;
1470 if (tsk->ptrace & PT_PTRACED)
1471 parent = tsk->parent;
1472 else {
1473 tsk = tsk->group_leader;
1474 parent = tsk->real_parent;
1477 info.si_signo = SIGCHLD;
1478 info.si_errno = 0;
1479 info.si_pid = tsk->pid;
1480 info.si_uid = tsk->uid;
1482 /* FIXME: find out whether or not this is supposed to be c*time. */
1483 info.si_utime = cputime_to_jiffies(tsk->utime);
1484 info.si_stime = cputime_to_jiffies(tsk->stime);
1486 info.si_code = why;
1487 switch (why) {
1488 case CLD_CONTINUED:
1489 info.si_status = SIGCONT;
1490 break;
1491 case CLD_STOPPED:
1492 info.si_status = tsk->signal->group_exit_code & 0x7f;
1493 break;
1494 case CLD_TRAPPED:
1495 info.si_status = tsk->exit_code & 0x7f;
1496 break;
1497 default:
1498 BUG();
1501 sighand = parent->sighand;
1502 spin_lock_irqsave(&sighand->siglock, flags);
1503 if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
1504 !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
1505 __group_send_sig_info(SIGCHLD, &info, parent);
1507 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
1509 __wake_up_parent(tsk, parent);
1510 spin_unlock_irqrestore(&sighand->siglock, flags);
1513 static inline int may_ptrace_stop(void)
1515 if (!likely(current->ptrace & PT_PTRACED))
1516 return 0;
1518 if (unlikely(current->parent == current->real_parent &&
1519 (current->ptrace & PT_ATTACHED)))
1520 return 0;
1522 if (unlikely(current->signal == current->parent->signal) &&
1523 unlikely(current->signal->flags & SIGNAL_GROUP_EXIT))
1524 return 0;
1527 * Are we in the middle of do_coredump?
1528 * If so, and our tracer is also part of the coredump, stopping
1529 * is a deadlock situation and pointless because our tracer
1530 * is dead, so don't allow us to stop.
1531 * If SIGKILL was already sent before the caller unlocked
1532 * ->siglock we must see ->core_waiters != 0. Otherwise it
1533 * is safe to enter schedule().
1535 if (unlikely(current->mm->core_waiters) &&
1536 unlikely(current->mm == current->parent->mm))
1537 return 0;
1539 return 1;
1543 * This must be called with current->sighand->siglock held.
1545 * This should be the path for all ptrace stops.
1546 * We always set current->last_siginfo while stopped here.
1547 * That makes it a way to test a stopped process for
1548 * being ptrace-stopped vs being job-control-stopped.
1550 * If we actually decide not to stop at all because the tracer is gone,
1551 * we leave nostop_code in current->exit_code.
1553 static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
1556 * If there is a group stop in progress,
1557 * we must participate in the bookkeeping.
1559 if (current->signal->group_stop_count > 0)
1560 --current->signal->group_stop_count;
1562 current->last_siginfo = info;
1563 current->exit_code = exit_code;
1565 /* Let the debugger run. */
1566 set_current_state(TASK_TRACED);
1567 spin_unlock_irq(&current->sighand->siglock);
1568 try_to_freeze();
1569 read_lock(&tasklist_lock);
1570 if (may_ptrace_stop()) {
1571 do_notify_parent_cldstop(current, CLD_TRAPPED);
1572 read_unlock(&tasklist_lock);
1573 schedule();
1574 } else {
1576 * By the time we got the lock, our tracer went away.
1577 * Don't stop here.
1579 read_unlock(&tasklist_lock);
1580 set_current_state(TASK_RUNNING);
1581 current->exit_code = nostop_code;
1585 * We are back. Now reacquire the siglock before touching
1586 * last_siginfo, so that we are sure to have synchronized with
1587 * any signal-sending on another CPU that wants to examine it.
1589 spin_lock_irq(&current->sighand->siglock);
1590 current->last_siginfo = NULL;
1593 * Queued signals ignored us while we were stopped for tracing.
1594 * So check for any that we should take before resuming user mode.
1595 * This sets TIF_SIGPENDING, but never clears it.
1597 recalc_sigpending_tsk(current);
1600 void ptrace_notify(int exit_code)
1602 siginfo_t info;
1604 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
1606 memset(&info, 0, sizeof info);
1607 info.si_signo = SIGTRAP;
1608 info.si_code = exit_code;
1609 info.si_pid = current->pid;
1610 info.si_uid = current->uid;
1612 /* Let the debugger run. */
1613 spin_lock_irq(&current->sighand->siglock);
1614 ptrace_stop(exit_code, 0, &info);
1615 spin_unlock_irq(&current->sighand->siglock);
1618 static void
1619 finish_stop(int stop_count)
1622 * If there are no other threads in the group, or if there is
1623 * a group stop in progress and we are the last to stop,
1624 * report to the parent. When ptraced, every thread reports itself.
1626 if (stop_count == 0 || (current->ptrace & PT_PTRACED)) {
1627 read_lock(&tasklist_lock);
1628 do_notify_parent_cldstop(current, CLD_STOPPED);
1629 read_unlock(&tasklist_lock);
1632 do {
1633 schedule();
1634 } while (try_to_freeze());
1636 * Now we don't run again until continued.
1638 current->exit_code = 0;
1642 * This performs the stopping for SIGSTOP and other stop signals.
1643 * We have to stop all threads in the thread group.
1644 * Returns nonzero if we've actually stopped and released the siglock.
1645 * Returns zero if we didn't stop and still hold the siglock.
1647 static int do_signal_stop(int signr)
1649 struct signal_struct *sig = current->signal;
1650 int stop_count;
1652 if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED))
1653 return 0;
1655 if (sig->group_stop_count > 0) {
1657 * There is a group stop in progress. We don't need to
1658 * start another one.
1660 stop_count = --sig->group_stop_count;
1661 } else {
1663 * There is no group stop already in progress.
1664 * We must initiate one now.
1666 struct task_struct *t;
1668 sig->group_exit_code = signr;
1670 stop_count = 0;
1671 for (t = next_thread(current); t != current; t = next_thread(t))
1673 * Setting state to TASK_STOPPED for a group
1674 * stop is always done with the siglock held,
1675 * so this check has no races.
1677 if (!t->exit_state &&
1678 !(t->state & (TASK_STOPPED|TASK_TRACED))) {
1679 stop_count++;
1680 signal_wake_up(t, 0);
1682 sig->group_stop_count = stop_count;
1685 if (stop_count == 0)
1686 sig->flags = SIGNAL_STOP_STOPPED;
1687 current->exit_code = sig->group_exit_code;
1688 __set_current_state(TASK_STOPPED);
1690 spin_unlock_irq(&current->sighand->siglock);
1691 finish_stop(stop_count);
1692 return 1;
1696 * Do appropriate magic when group_stop_count > 0.
1697 * We return nonzero if we stopped, after releasing the siglock.
1698 * We return zero if we still hold the siglock and should look
1699 * for another signal without checking group_stop_count again.
1701 static int handle_group_stop(void)
1703 int stop_count;
1705 if (current->signal->group_exit_task == current) {
1707 * Group stop is so we can do a core dump.
1708 * We are the initiating thread, so get on with it.
1710 current->signal->group_exit_task = NULL;
1711 return 0;
1714 if (current->signal->flags & SIGNAL_GROUP_EXIT)
1716 * Group stop is so another thread can do a core dump,
1717 * or else we are racing against a death signal.
1718 * Just punt the stop so we can get the next signal.
1720 return 0;
1723 * There is a group stop in progress. We stop
1724 * without any associated signal being in our queue.
1726 stop_count = --current->signal->group_stop_count;
1727 if (stop_count == 0)
1728 current->signal->flags = SIGNAL_STOP_STOPPED;
1729 current->exit_code = current->signal->group_exit_code;
1730 set_current_state(TASK_STOPPED);
1731 spin_unlock_irq(&current->sighand->siglock);
1732 finish_stop(stop_count);
1733 return 1;
1736 int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
1737 struct pt_regs *regs, void *cookie)
1739 sigset_t *mask = &current->blocked;
1740 int signr = 0;
1742 try_to_freeze();
1744 relock:
1745 spin_lock_irq(&current->sighand->siglock);
1746 for (;;) {
1747 struct k_sigaction *ka;
1749 if (unlikely(current->signal->group_stop_count > 0) &&
1750 handle_group_stop())
1751 goto relock;
1753 signr = dequeue_signal(current, mask, info);
1755 if (!signr)
1756 break; /* will return 0 */
1758 if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
1759 ptrace_signal_deliver(regs, cookie);
1761 /* Let the debugger run. */
1762 ptrace_stop(signr, signr, info);
1764 /* We're back. Did the debugger cancel the sig? */
1765 signr = current->exit_code;
1766 if (signr == 0)
1767 continue;
1769 current->exit_code = 0;
1771 /* Update the siginfo structure if the signal has
1772 changed. If the debugger wanted something
1773 specific in the siginfo structure then it should
1774 have updated *info via PTRACE_SETSIGINFO. */
1775 if (signr != info->si_signo) {
1776 info->si_signo = signr;
1777 info->si_errno = 0;
1778 info->si_code = SI_USER;
1779 info->si_pid = current->parent->pid;
1780 info->si_uid = current->parent->uid;
1783 /* If the (new) signal is now blocked, requeue it. */
1784 if (sigismember(&current->blocked, signr)) {
1785 specific_send_sig_info(signr, info, current);
1786 continue;
1790 ka = &current->sighand->action[signr-1];
1791 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
1792 continue;
1793 if (ka->sa.sa_handler != SIG_DFL) {
1794 /* Run the handler. */
1795 *return_ka = *ka;
1797 if (ka->sa.sa_flags & SA_ONESHOT)
1798 ka->sa.sa_handler = SIG_DFL;
1800 break; /* will return non-zero "signr" value */
1804 * Now we are doing the default action for this signal.
1806 if (sig_kernel_ignore(signr)) /* Default is nothing. */
1807 continue;
1810 * Init of a pid space gets no signals it doesn't want from
1811 * within that pid space. It can of course get signals from
1812 * its parent pid space.
1814 if (current == child_reaper(current))
1815 continue;
1817 if (sig_kernel_stop(signr)) {
1819 * The default action is to stop all threads in
1820 * the thread group. The job control signals
1821 * do nothing in an orphaned pgrp, but SIGSTOP
1822 * always works. Note that siglock needs to be
1823 * dropped during the call to is_orphaned_pgrp()
1824 * because of lock ordering with tasklist_lock.
1825 * This allows an intervening SIGCONT to be posted.
1826 * We need to check for that and bail out if necessary.
1828 if (signr != SIGSTOP) {
1829 spin_unlock_irq(&current->sighand->siglock);
1831 /* signals can be posted during this window */
1833 if (is_current_pgrp_orphaned())
1834 goto relock;
1836 spin_lock_irq(&current->sighand->siglock);
1839 if (likely(do_signal_stop(signr))) {
1840 /* It released the siglock. */
1841 goto relock;
1845 * We didn't actually stop, due to a race
1846 * with SIGCONT or something like that.
1848 continue;
1851 spin_unlock_irq(&current->sighand->siglock);
1854 * Anything else is fatal, maybe with a core dump.
1856 current->flags |= PF_SIGNALED;
1857 if (sig_kernel_coredump(signr)) {
1859 * If it was able to dump core, this kills all
1860 * other threads in the group and synchronizes with
1861 * their demise. If we lost the race with another
1862 * thread getting here, it set group_exit_code
1863 * first and our do_group_exit call below will use
1864 * that value and ignore the one we pass it.
1866 do_coredump((long)signr, signr, regs);
1870 * Death signals, no core dump.
1872 do_group_exit(signr);
1873 /* NOTREACHED */
1875 spin_unlock_irq(&current->sighand->siglock);
1876 return signr;
1879 EXPORT_SYMBOL(recalc_sigpending);
1880 EXPORT_SYMBOL_GPL(dequeue_signal);
1881 EXPORT_SYMBOL(flush_signals);
1882 EXPORT_SYMBOL(force_sig);
1883 EXPORT_SYMBOL(kill_proc);
1884 EXPORT_SYMBOL(ptrace_notify);
1885 EXPORT_SYMBOL(send_sig);
1886 EXPORT_SYMBOL(send_sig_info);
1887 EXPORT_SYMBOL(sigprocmask);
1888 EXPORT_SYMBOL(block_all_signals);
1889 EXPORT_SYMBOL(unblock_all_signals);
1893 * System call entry points.
1896 asmlinkage long sys_restart_syscall(void)
1898 struct restart_block *restart = &current_thread_info()->restart_block;
1899 return restart->fn(restart);
1902 long do_no_restart_syscall(struct restart_block *param)
1904 return -EINTR;
1908 * We don't need to get the kernel lock - this is all local to this
1909 * particular thread. (And that's good, because this is _heavily_
1910 * used by various programs)
1914 * This is also useful for kernel threads that want to temporarily
1915 * (or permanently) block certain signals.
1917 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
1918 * interface happily blocks "unblockable" signals like SIGKILL
1919 * and friends.
1921 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
1923 int error;
1925 spin_lock_irq(&current->sighand->siglock);
1926 if (oldset)
1927 *oldset = current->blocked;
1929 error = 0;
1930 switch (how) {
1931 case SIG_BLOCK:
1932 sigorsets(&current->blocked, &current->blocked, set);
1933 break;
1934 case SIG_UNBLOCK:
1935 signandsets(&current->blocked, &current->blocked, set);
1936 break;
1937 case SIG_SETMASK:
1938 current->blocked = *set;
1939 break;
1940 default:
1941 error = -EINVAL;
1943 recalc_sigpending();
1944 spin_unlock_irq(&current->sighand->siglock);
1946 return error;
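/*
 * Illustrative sketch, not from the kernel sources: a kernel thread that
 * wants to hold off everything except SIGTERM could use the helper above
 * like this. As noted above, this interface will happily block even SIGKILL
 * if asked to, so the mask has to be chosen with care.
 */
#if 0
static void example_block_most_signals(void)
{
	sigset_t all, old;

	sigfillset(&all);
	sigdelset(&all, SIGTERM);	/* keep SIGTERM deliverable */
	sigprocmask(SIG_BLOCK, &all, &old);
	/* ... main loop ... */
	sigprocmask(SIG_SETMASK, &old, NULL);
}
#endif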
1949 asmlinkage long
1950 sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize)
1952 int error = -EINVAL;
1953 sigset_t old_set, new_set;
1955 /* XXX: Don't preclude handling different sized sigset_t's. */
1956 if (sigsetsize != sizeof(sigset_t))
1957 goto out;
1959 if (set) {
1960 error = -EFAULT;
1961 if (copy_from_user(&new_set, set, sizeof(*set)))
1962 goto out;
1963 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
1965 error = sigprocmask(how, &new_set, &old_set);
1966 if (error)
1967 goto out;
1968 if (oset)
1969 goto set_old;
1970 } else if (oset) {
1971 spin_lock_irq(&current->sighand->siglock);
1972 old_set = current->blocked;
1973 spin_unlock_irq(&current->sighand->siglock);
1975 set_old:
1976 error = -EFAULT;
1977 if (copy_to_user(oset, &old_set, sizeof(*oset)))
1978 goto out;
1980 error = 0;
1981 out:
1982 return error;
1985 long do_sigpending(void __user *set, unsigned long sigsetsize)
1987 long error = -EINVAL;
1988 sigset_t pending;
1990 if (sigsetsize > sizeof(sigset_t))
1991 goto out;
1993 spin_lock_irq(&current->sighand->siglock);
1994 sigorsets(&pending, &current->pending.signal,
1995 &current->signal->shared_pending.signal);
1996 spin_unlock_irq(&current->sighand->siglock);
1998 /* Outside the lock because only this thread touches it. */
1999 sigandsets(&pending, &current->blocked, &pending);
2001 error = -EFAULT;
2002 if (!copy_to_user(set, &pending, sigsetsize))
2003 error = 0;
2005 out:
2006 return error;
2009 asmlinkage long
2010 sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize)
2012 return do_sigpending(set, sigsetsize);
2015 #ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2017 int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
2019 int err;
2021 if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
2022 return -EFAULT;
2023 if (from->si_code < 0)
2024 return __copy_to_user(to, from, sizeof(siginfo_t))
2025 ? -EFAULT : 0;
2027 * If you change siginfo_t structure, please be sure
2028 * this code is fixed accordingly.
2029 * Please remember to update the signalfd_copyinfo() function
2030 * inside fs/signalfd.c too, in case siginfo_t changes.
2031 * It should never copy any pad contained in the structure
2032 * to avoid security leaks, but must copy the generic
2033 * 3 ints plus the relevant union member.
2035 err = __put_user(from->si_signo, &to->si_signo);
2036 err |= __put_user(from->si_errno, &to->si_errno);
2037 err |= __put_user((short)from->si_code, &to->si_code);
2038 switch (from->si_code & __SI_MASK) {
2039 case __SI_KILL:
2040 err |= __put_user(from->si_pid, &to->si_pid);
2041 err |= __put_user(from->si_uid, &to->si_uid);
2042 break;
2043 case __SI_TIMER:
2044 err |= __put_user(from->si_tid, &to->si_tid);
2045 err |= __put_user(from->si_overrun, &to->si_overrun);
2046 err |= __put_user(from->si_ptr, &to->si_ptr);
2047 break;
2048 case __SI_POLL:
2049 err |= __put_user(from->si_band, &to->si_band);
2050 err |= __put_user(from->si_fd, &to->si_fd);
2051 break;
2052 case __SI_FAULT:
2053 err |= __put_user(from->si_addr, &to->si_addr);
2054 #ifdef __ARCH_SI_TRAPNO
2055 err |= __put_user(from->si_trapno, &to->si_trapno);
2056 #endif
2057 break;
2058 case __SI_CHLD:
2059 err |= __put_user(from->si_pid, &to->si_pid);
2060 err |= __put_user(from->si_uid, &to->si_uid);
2061 err |= __put_user(from->si_status, &to->si_status);
2062 err |= __put_user(from->si_utime, &to->si_utime);
2063 err |= __put_user(from->si_stime, &to->si_stime);
2064 break;
2065 case __SI_RT: /* This is not generated by the kernel as of now. */
2066 case __SI_MESGQ: /* But this is */
2067 err |= __put_user(from->si_pid, &to->si_pid);
2068 err |= __put_user(from->si_uid, &to->si_uid);
2069 err |= __put_user(from->si_ptr, &to->si_ptr);
2070 break;
2071 default: /* this is just in case for now ... */
2072 err |= __put_user(from->si_pid, &to->si_pid);
2073 err |= __put_user(from->si_uid, &to->si_uid);
2074 break;
2076 return err;
2079 #endif
2081 asmlinkage long
2082 sys_rt_sigtimedwait(const sigset_t __user *uthese,
2083 siginfo_t __user *uinfo,
2084 const struct timespec __user *uts,
2085 size_t sigsetsize)
2087 int ret, sig;
2088 sigset_t these;
2089 struct timespec ts;
2090 siginfo_t info;
2091 long timeout = 0;
2093 /* XXX: Don't preclude handling different sized sigset_t's. */
2094 if (sigsetsize != sizeof(sigset_t))
2095 return -EINVAL;
2097 if (copy_from_user(&these, uthese, sizeof(these)))
2098 return -EFAULT;
2101 * Invert the set of allowed signals to get those we
2102 * want to block.
2104 sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
2105 signotset(&these);
2107 if (uts) {
2108 if (copy_from_user(&ts, uts, sizeof(ts)))
2109 return -EFAULT;
2110 if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
2111 || ts.tv_sec < 0)
2112 return -EINVAL;
2113 }
2115 spin_lock_irq(&current->sighand->siglock);
2116 sig = dequeue_signal(current, &these, &info);
2117 if (!sig) {
2118 timeout = MAX_SCHEDULE_TIMEOUT;
2119 if (uts)
2120 timeout = (timespec_to_jiffies(&ts)
2121 + (ts.tv_sec || ts.tv_nsec));
2123 if (timeout) {
2124 /* None ready -- temporarily unblock those we're
2125 * interested in while we are sleeping, so that we'll
2126 * be awakened when they arrive. */
2127 current->real_blocked = current->blocked;
2128 sigandsets(&current->blocked, &current->blocked, &these);
2129 recalc_sigpending();
2130 spin_unlock_irq(&current->sighand->siglock);
2132 timeout = schedule_timeout_interruptible(timeout);
2134 spin_lock_irq(&current->sighand->siglock);
2135 sig = dequeue_signal(current, &these, &info);
2136 current->blocked = current->real_blocked;
2137 siginitset(&current->real_blocked, 0);
2138 recalc_sigpending();
2139 }
2140 }
2141 spin_unlock_irq(&current->sighand->siglock);
2143 if (sig) {
2144 ret = sig;
2145 if (uinfo) {
2146 if (copy_siginfo_to_user(uinfo, &info))
2147 ret = -EFAULT;
2148 }
2149 } else {
2150 ret = -EAGAIN;
2151 if (timeout)
2152 ret = -EINTR;
2153 }
2155 return ret;
2156 }
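/*
 * Usage sketch (userspace, not part of this file): the common way to drive
 * sys_rt_sigtimedwait() above through the sigtimedwait(2) wrapper. The
 * signal number and the five second timeout are illustrative assumptions.
 *
 *	#include <errno.h>
 *	#include <signal.h>
 *	#include <stdio.h>
 *	#include <time.h>
 *
 *	int main(void)
 *	{
 *		sigset_t set;
 *		siginfo_t info;
 *		struct timespec ts = { 5, 0 };		// 5s timeout
 *
 *		sigemptyset(&set);
 *		sigaddset(&set, SIGUSR1);
 *		sigprocmask(SIG_BLOCK, &set, NULL);	// block before waiting
 *
 *		if (sigtimedwait(&set, &info, &ts) < 0) {
 *			if (errno == EAGAIN)		// the -EAGAIN path above
 *				printf("timed out\n");
 *		} else {
 *			printf("signal %d from pid %d\n",
 *			       info.si_signo, (int)info.si_pid);
 *		}
 *		return 0;
 *	}
 */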
2158 asmlinkage long
2159 sys_kill(int pid, int sig)
2160 {
2161 struct siginfo info;
2163 info.si_signo = sig;
2164 info.si_errno = 0;
2165 info.si_code = SI_USER;
2166 info.si_pid = current->tgid;
2167 info.si_uid = current->uid;
2169 return kill_something_info(sig, &info, pid);
2170 }
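/*
 * Usage sketch (userspace, not part of this file): kill(2) reaches the entry
 * point above, and kill_something_info() dispatches on the pid argument.
 * The pid values shown are illustrative assumptions.
 *
 *	#include <signal.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		// sig == 0 performs only the existence/permission check
 *		if (kill(getpid(), 0) != 0)
 *			return 1;
 *		// kill(0, SIGHUP) would signal the caller's process group,
 *		// kill(-1234, SIGTERM) the process group with ID 1234
 *		return 0;
 *	}
 */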
2172 static int do_tkill(int tgid, int pid, int sig)
2173 {
2174 int error;
2175 struct siginfo info;
2176 struct task_struct *p;
2178 error = -ESRCH;
2179 info.si_signo = sig;
2180 info.si_errno = 0;
2181 info.si_code = SI_TKILL;
2182 info.si_pid = current->tgid;
2183 info.si_uid = current->uid;
2185 read_lock(&tasklist_lock);
2186 p = find_task_by_pid(pid);
2187 if (p && (tgid <= 0 || p->tgid == tgid)) {
2188 error = check_kill_permission(sig, &info, p);
2189 /*
2190 * The null signal is a permissions and process existence
2191 * probe. No signal is actually delivered.
2192 */
2193 if (!error && sig && p->sighand) {
2194 spin_lock_irq(&p->sighand->siglock);
2195 handle_stop_signal(sig, p);
2196 error = specific_send_sig_info(sig, &info, p);
2197 spin_unlock_irq(&p->sighand->siglock);
2198 }
2199 }
2200 read_unlock(&tasklist_lock);
2202 return error;
2203 }
2205 /**
2206 * sys_tgkill - send signal to one specific thread
2207 * @tgid: the thread group ID of the thread
2208 * @pid: the PID of the thread
2209 * @sig: signal to be sent
2210 *
2211 * This syscall also checks the @tgid and returns -ESRCH even if the PID
2212 * exists but no longer belongs to the target process. This method
2213 * solves the problem of threads exiting and their PIDs being reused.
2214 */
2215 asmlinkage long sys_tgkill(int tgid, int pid, int sig)
2216 {
2217 /* This is only valid for single tasks */
2218 if (pid <= 0 || tgid <= 0)
2219 return -EINVAL;
2221 return do_tkill(tgid, pid, sig);
2222 }
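/*
 * Usage sketch (userspace, not part of this file): glibc of this era has no
 * tgkill()/gettid() wrappers, so callers typically issue the raw syscalls.
 * Using signal 0 as a pure existence/permission probe is an illustrative
 * choice.
 *
 *	#define _GNU_SOURCE
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *	#include <signal.h>
 *
 *	int main(void)
 *	{
 *		pid_t tid = syscall(SYS_gettid);
 *
 *		// target exactly one thread of the calling process
 *		return syscall(SYS_tgkill, getpid(), tid, 0) == 0 ? 0 : 1;
 *	}
 */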
2224 /*
2225 * Send a signal to only one task, even if it's a CLONE_THREAD task.
2226 */
2227 asmlinkage long
2228 sys_tkill(int pid, int sig)
2229 {
2230 /* This is only valid for single tasks */
2231 if (pid <= 0)
2232 return -EINVAL;
2234 return do_tkill(0, pid, sig);
2235 }
2237 asmlinkage long
2238 sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
2239 {
2240 siginfo_t info;
2242 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2243 return -EFAULT;
2245 /* Not even root can pretend to send signals from the kernel.
2246 Nor can they impersonate a kill(), which adds source info. */
2247 if (info.si_code >= 0)
2248 return -EPERM;
2249 info.si_signo = sig;
2251 /* POSIX.1b doesn't mention process groups. */
2252 return kill_proc_info(sig, &info, pid);
2253 }
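/*
 * Usage sketch (userspace, not part of this file): the sigqueue(3) wrapper
 * reaches the entry point above and fills in si_code = SI_QUEUE (a negative
 * value), so the "si_code >= 0" check is satisfied. Ignoring SIGUSR1 first
 * and the payload value 42 are illustrative assumptions.
 *
 *	#include <signal.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		union sigval val;
 *
 *		signal(SIGUSR1, SIG_IGN);	// avoid terminating ourselves
 *		val.sival_int = 42;		// payload, delivered via si_value/si_ptr
 *		return sigqueue(getpid(), SIGUSR1, val) == 0 ? 0 : 1;
 *	}
 */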
2255 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
2256 {
2257 struct k_sigaction *k;
2258 sigset_t mask;
2260 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
2261 return -EINVAL;
2263 k = &current->sighand->action[sig-1];
2265 spin_lock_irq(&current->sighand->siglock);
2266 if (signal_pending(current)) {
2267 /*
2268 * If there might be a fatal signal pending on multiple
2269 * threads, make sure we take it before changing the action.
2270 */
2271 spin_unlock_irq(&current->sighand->siglock);
2272 return -ERESTARTNOINTR;
2273 }
2275 if (oact)
2276 *oact = *k;
2278 if (act) {
2279 sigdelsetmask(&act->sa.sa_mask,
2280 sigmask(SIGKILL) | sigmask(SIGSTOP));
2281 *k = *act;
2282 /*
2283 * POSIX 3.3.1.3:
2284 * "Setting a signal action to SIG_IGN for a signal that is
2285 * pending shall cause the pending signal to be discarded,
2286 * whether or not it is blocked."
2288 * "Setting a signal action to SIG_DFL for a signal that is
2289 * pending and whose default action is to ignore the signal
2290 * (for example, SIGCHLD), shall cause the pending signal to
2291 * be discarded, whether or not it is blocked"
2292 */
2293 if (act->sa.sa_handler == SIG_IGN ||
2294 (act->sa.sa_handler == SIG_DFL && sig_kernel_ignore(sig))) {
2295 struct task_struct *t = current;
2296 sigemptyset(&mask);
2297 sigaddset(&mask, sig);
2298 rm_from_queue_full(&mask, &t->signal->shared_pending);
2299 do {
2300 rm_from_queue_full(&mask, &t->pending);
2301 recalc_sigpending_and_wake(t);
2302 t = next_thread(t);
2303 } while (t != current);
2304 }
2305 }
2307 spin_unlock_irq(&current->sighand->siglock);
2308 return 0;
2309 }
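/*
 * Usage sketch (userspace, not part of this file): the POSIX 3.3.1.3
 * behaviour quoted above can be observed directly -- installing SIG_IGN
 * flushes an already-pending (and blocked) signal. The use of SIGUSR1 is
 * an illustrative assumption.
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		sigset_t mask, pending;
 *		struct sigaction sa;
 *
 *		sigemptyset(&mask);
 *		sigaddset(&mask, SIGUSR1);
 *		sigprocmask(SIG_BLOCK, &mask, NULL);
 *		raise(SIGUSR1);			// now pending and blocked
 *
 *		sa.sa_handler = SIG_IGN;
 *		sa.sa_flags = 0;
 *		sigemptyset(&sa.sa_mask);
 *		sigaction(SIGUSR1, &sa, NULL);	// discards the pending signal
 *
 *		sigpending(&pending);
 *		printf("still pending: %d\n", sigismember(&pending, SIGUSR1));
 *		return 0;			// prints "still pending: 0"
 *	}
 */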
2311 int
2312 do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
2313 {
2314 stack_t oss;
2315 int error;
2317 if (uoss) {
2318 oss.ss_sp = (void __user *) current->sas_ss_sp;
2319 oss.ss_size = current->sas_ss_size;
2320 oss.ss_flags = sas_ss_flags(sp);
2321 }
2323 if (uss) {
2324 void __user *ss_sp;
2325 size_t ss_size;
2326 int ss_flags;
2328 error = -EFAULT;
2329 if (!access_ok(VERIFY_READ, uss, sizeof(*uss))
2330 || __get_user(ss_sp, &uss->ss_sp)
2331 || __get_user(ss_flags, &uss->ss_flags)
2332 || __get_user(ss_size, &uss->ss_size))
2333 goto out;
2335 error = -EPERM;
2336 if (on_sig_stack(sp))
2337 goto out;
2339 error = -EINVAL;
2341 /*
2342 * Note - this code used to test ss_flags incorrectly;
2343 * old code may have been written using ss_flags==0
2344 * to mean ss_flags==SS_ONSTACK (as this was the only
2345 * way that worked), so this fix preserves that older
2346 * mechanism.
2347 */
2348 if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
2349 goto out;
2351 if (ss_flags == SS_DISABLE) {
2352 ss_size = 0;
2353 ss_sp = NULL;
2354 } else {
2355 error = -ENOMEM;
2356 if (ss_size < MINSIGSTKSZ)
2357 goto out;
2358 }
2360 current->sas_ss_sp = (unsigned long) ss_sp;
2361 current->sas_ss_size = ss_size;
2362 }
2364 if (uoss) {
2365 error = -EFAULT;
2366 if (copy_to_user(uoss, &oss, sizeof(oss)))
2367 goto out;
2368 }
2370 error = 0;
2371 out:
2372 return error;
2373 }
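/*
 * Usage sketch (userspace, not part of this file): installing an alternate
 * signal stack through sigaltstack(2), which lands in do_sigaltstack()
 * above. ss_flags == 0 (not SS_ONSTACK) is the portable way to enable the
 * stack, matching the compatibility note above. Sizing the stack with
 * SIGSTKSZ is an illustrative choice.
 *
 *	#include <signal.h>
 *	#include <stdlib.h>
 *
 *	int main(void)
 *	{
 *		stack_t ss;
 *
 *		ss.ss_sp = malloc(SIGSTKSZ);
 *		ss.ss_size = SIGSTKSZ;
 *		ss.ss_flags = 0;
 *		if (ss.ss_sp == NULL || sigaltstack(&ss, NULL) < 0)
 *			return 1;
 *		// handlers registered with SA_ONSTACK now run on this stack,
 *		// which is what makes a stack-overflow SIGSEGV handleable
 *		return 0;
 *	}
 */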
2375 #ifdef __ARCH_WANT_SYS_SIGPENDING
2377 asmlinkage long
2378 sys_sigpending(old_sigset_t __user *set)
2379 {
2380 return do_sigpending(set, sizeof(*set));
2381 }
2383 #endif
2385 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
2386 /* Some platforms have their own version with special arguments;
2387 others support only sys_rt_sigprocmask. */
2389 asmlinkage long
2390 sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset)
2391 {
2392 int error;
2393 old_sigset_t old_set, new_set;
2395 if (set) {
2396 error = -EFAULT;
2397 if (copy_from_user(&new_set, set, sizeof(*set)))
2398 goto out;
2399 new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));
2401 spin_lock_irq(&current->sighand->siglock);
2402 old_set = current->blocked.sig[0];
2404 error = 0;
2405 switch (how) {
2406 default:
2407 error = -EINVAL;
2408 break;
2409 case SIG_BLOCK:
2410 sigaddsetmask(&current->blocked, new_set);
2411 break;
2412 case SIG_UNBLOCK:
2413 sigdelsetmask(&current->blocked, new_set);
2414 break;
2415 case SIG_SETMASK:
2416 current->blocked.sig[0] = new_set;
2417 break;
2418 }
2420 recalc_sigpending();
2421 spin_unlock_irq(&current->sighand->siglock);
2422 if (error)
2423 goto out;
2424 if (oset)
2425 goto set_old;
2426 } else if (oset) {
2427 old_set = current->blocked.sig[0];
2428 set_old:
2429 error = -EFAULT;
2430 if (copy_to_user(oset, &old_set, sizeof(*oset)))
2431 goto out;
2432 }
2433 error = 0;
2434 out:
2435 return error;
2436 }
2437 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
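/*
 * Usage sketch (userspace, not part of this file): the classic block/restore
 * pattern that exercises the SIG_BLOCK and SIG_SETMASK cases handled above.
 * Whether the C library routes sigprocmask() through this entry point or
 * through sys_rt_sigprocmask depends on the architecture and library;
 * protecting against SIGINT is an illustrative assumption.
 *
 *	#include <signal.h>
 *
 *	int main(void)
 *	{
 *		sigset_t new_mask, old_mask;
 *
 *		sigemptyset(&new_mask);
 *		sigaddset(&new_mask, SIGINT);
 *		sigprocmask(SIG_BLOCK, &new_mask, &old_mask);	// hold SIGINT off
 *		// ... critical section ...
 *		sigprocmask(SIG_SETMASK, &old_mask, NULL);	// restore old mask
 *		return 0;
 *	}
 */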
2439 #ifdef __ARCH_WANT_SYS_RT_SIGACTION
2440 asmlinkage long
2441 sys_rt_sigaction(int sig,
2442 const struct sigaction __user *act,
2443 struct sigaction __user *oact,
2444 size_t sigsetsize)
2445 {
2446 struct k_sigaction new_sa, old_sa;
2447 int ret = -EINVAL;
2449 /* XXX: Don't preclude handling different sized sigset_t's. */
2450 if (sigsetsize != sizeof(sigset_t))
2451 goto out;
2453 if (act) {
2454 if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
2455 return -EFAULT;
2456 }
2458 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
2460 if (!ret && oact) {
2461 if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
2462 return -EFAULT;
2463 }
2464 out:
2465 return ret;
2466 }
2467 #endif /* __ARCH_WANT_SYS_RT_SIGACTION */
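/*
 * Usage sketch (userspace, not part of this file): registering a handler
 * with sigaction(2), which reaches do_sigaction() via the wrapper above.
 * The handler name, SIGINT, and SA_RESTART are illustrative assumptions.
 *
 *	#include <signal.h>
 *	#include <unistd.h>
 *
 *	static volatile sig_atomic_t got_int;
 *
 *	static void on_int(int sig)
 *	{
 *		(void)sig;
 *		got_int = 1;			// async-signal-safe work only
 *	}
 *
 *	int main(void)
 *	{
 *		struct sigaction sa;
 *
 *		sa.sa_handler = on_int;
 *		sa.sa_flags = SA_RESTART;	// restart interrupted slow syscalls
 *		sigemptyset(&sa.sa_mask);
 *		if (sigaction(SIGINT, &sa, NULL) < 0)
 *			return 1;
 *		while (!got_int)
 *			pause();
 *		return 0;
 *	}
 */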
2469 #ifdef __ARCH_WANT_SYS_SGETMASK
2471 /*
2472 * For backwards compatibility. Functionality superseded by sigprocmask.
2473 */
2474 asmlinkage long
2475 sys_sgetmask(void)
2476 {
2477 /* SMP safe */
2478 return current->blocked.sig[0];
2479 }
2481 asmlinkage long
2482 sys_ssetmask(int newmask)
2483 {
2484 int old;
2486 spin_lock_irq(&current->sighand->siglock);
2487 old = current->blocked.sig[0];
2489 siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
2490 sigmask(SIGSTOP)));
2491 recalc_sigpending();
2492 spin_unlock_irq(&current->sighand->siglock);
2494 return old;
2495 }
2496 #endif /* __ARCH_WANT_SYS_SGETMASK */
2498 #ifdef __ARCH_WANT_SYS_SIGNAL
2499 /*
2500 * For backwards compatibility. Functionality superseded by sigaction.
2501 */
2502 asmlinkage unsigned long
2503 sys_signal(int sig, __sighandler_t handler)
2504 {
2505 struct k_sigaction new_sa, old_sa;
2506 int ret;
2508 new_sa.sa.sa_handler = handler;
2509 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
2510 sigemptyset(&new_sa.sa.sa_mask);
2512 ret = do_sigaction(sig, &new_sa, &old_sa);
2514 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
2515 }
2516 #endif /* __ARCH_WANT_SYS_SIGNAL */
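/*
 * Note with usage sketch (userspace, not part of this file): SA_ONESHOT |
 * SA_NOMASK above gives signal(2) its traditional System V behaviour -- the
 * disposition resets to SIG_DFL after one delivery and the signal is not
 * blocked while its handler runs. The common C library signal() wrapper
 * usually builds BSD semantics on top of sigaction() instead of calling
 * this entry point; robust code re-arms the handler or uses sigaction().
 * The handler name and SIGALRM are illustrative assumptions.
 *
 *	#include <signal.h>
 *	#include <unistd.h>
 *
 *	static void on_alrm(int sig)
 *	{
 *		signal(sig, on_alrm);	// re-arm: one-shot semantics
 *	}
 *
 *	int main(void)
 *	{
 *		signal(SIGALRM, on_alrm);
 *		alarm(1);
 *		pause();
 *		return 0;
 *	}
 */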
2518 #ifdef __ARCH_WANT_SYS_PAUSE
2520 asmlinkage long
2521 sys_pause(void)
2522 {
2523 current->state = TASK_INTERRUPTIBLE;
2524 schedule();
2525 return -ERESTARTNOHAND;
2526 }
2528 #endif
2530 #ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
2531 asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize)
2532 {
2533 sigset_t newset;
2535 /* XXX: Don't preclude handling different sized sigset_t's. */
2536 if (sigsetsize != sizeof(sigset_t))
2537 return -EINVAL;
2539 if (copy_from_user(&newset, unewset, sizeof(newset)))
2540 return -EFAULT;
2541 sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));
2543 spin_lock_irq(&current->sighand->siglock);
2544 current->saved_sigmask = current->blocked;
2545 current->blocked = newset;
2546 recalc_sigpending();
2547 spin_unlock_irq(&current->sighand->siglock);
2549 current->state = TASK_INTERRUPTIBLE;
2550 schedule();
2551 set_thread_flag(TIF_RESTORE_SIGMASK);
2552 return -ERESTARTNOHAND;
2553 }
2554 #endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
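/*
 * Usage sketch (userspace, not part of this file): the classic sigsuspend()
 * wait loop. Like the code above, the call atomically swaps the blocked
 * mask and sleeps, and the original mask is restored on return (the
 * saved_sigmask / TIF_RESTORE_SIGMASK dance). SIGUSR1 and the handler name
 * are illustrative assumptions.
 *
 *	#include <signal.h>
 *
 *	static volatile sig_atomic_t got_usr1;
 *
 *	static void on_usr1(int sig)
 *	{
 *		(void)sig;
 *		got_usr1 = 1;
 *	}
 *
 *	int main(void)
 *	{
 *		sigset_t block, wait_mask;
 *
 *		signal(SIGUSR1, on_usr1);
 *		sigemptyset(&block);
 *		sigaddset(&block, SIGUSR1);
 *		sigprocmask(SIG_BLOCK, &block, &wait_mask);
 *
 *		while (!got_usr1)
 *			sigsuspend(&wait_mask);	// unblock + sleep, atomically
 *		return 0;
 *	}
 */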
2556 __attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
2557 {
2558 return NULL;
2559 }
2561 void __init signals_init(void)
2562 {
2563 sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
2564 }