1 /*
2 * linux/kernel/signal.c
4 * Copyright (C) 1991, 1992 Linus Torvalds
6 * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson
8 * 2003-06-02 Jim Houston - Concurrent Computer Corp.
9 * Changes to use preallocated sigqueue structures
10 * to allow signals to be sent reliably.
13 #include <linux/slab.h>
14 #include <linux/module.h>
15 #include <linux/init.h>
16 #include <linux/sched.h>
17 #include <linux/fs.h>
18 #include <linux/tty.h>
19 #include <linux/binfmts.h>
20 #include <linux/security.h>
21 #include <linux/syscalls.h>
22 #include <linux/ptrace.h>
23 #include <linux/signal.h>
24 #include <linux/signalfd.h>
25 #include <linux/tracehook.h>
26 #include <linux/capability.h>
27 #include <linux/freezer.h>
28 #include <linux/pid_namespace.h>
29 #include <linux/nsproxy.h>
31 #include <asm/param.h>
32 #include <asm/uaccess.h>
33 #include <asm/unistd.h>
34 #include <asm/siginfo.h>
35 #include "audit.h" /* audit_signal_info() */
38 * SLAB caches for signal bits.
41 static struct kmem_cache *sigqueue_cachep;
43 static void __user *sig_handler(struct task_struct *t, int sig)
45 return t->sighand->action[sig - 1].sa.sa_handler;
48 static int sig_handler_ignored(void __user *handler, int sig)
50 /* Is it explicitly or implicitly ignored? */
51 return handler == SIG_IGN ||
52 (handler == SIG_DFL && sig_kernel_ignore(sig));
55 static int sig_ignored(struct task_struct *t, int sig)
57 void __user *handler;
60 * Blocked signals are never ignored, since the
61 * signal handler may change by the time it is
62 * unblocked.
64 if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
65 return 0;
67 handler = sig_handler(t, sig);
68 if (!sig_handler_ignored(handler, sig))
69 return 0;
72 * Tracers may want to know about even ignored signals.
74 return !tracehook_consider_ignored_signal(t, sig, handler);
78 * Re-calculate pending state from the set of locally pending
79 * signals, globally pending signals, and blocked signals.
81 static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
83 unsigned long ready;
84 long i;
86 switch (_NSIG_WORDS) {
87 default:
88 for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
89 ready |= signal->sig[i] &~ blocked->sig[i];
90 break;
92 case 4: ready = signal->sig[3] &~ blocked->sig[3];
93 ready |= signal->sig[2] &~ blocked->sig[2];
94 ready |= signal->sig[1] &~ blocked->sig[1];
95 ready |= signal->sig[0] &~ blocked->sig[0];
96 break;
98 case 2: ready = signal->sig[1] &~ blocked->sig[1];
99 ready |= signal->sig[0] &~ blocked->sig[0];
100 break;
102 case 1: ready = signal->sig[0] &~ blocked->sig[0];
104 return ready != 0;
107 #define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
109 static int recalc_sigpending_tsk(struct task_struct *t)
111 if (t->signal->group_stop_count > 0 ||
112 PENDING(&t->pending, &t->blocked) ||
113 PENDING(&t->signal->shared_pending, &t->blocked)) {
114 set_tsk_thread_flag(t, TIF_SIGPENDING);
115 return 1;
118 * We must never clear the flag in another thread, or in current
119 * when it's possible the current syscall is returning -ERESTART*.
120 * So we don't clear it here, and only callers who know they should do.
122 return 0;
126 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
127 * This is superfluous when called on current; the wakeup is a harmless no-op.
129 void recalc_sigpending_and_wake(struct task_struct *t)
131 if (recalc_sigpending_tsk(t))
132 signal_wake_up(t, 0);
135 void recalc_sigpending(void)
137 if (unlikely(tracehook_force_sigpending()))
138 set_thread_flag(TIF_SIGPENDING);
139 else if (!recalc_sigpending_tsk(current) && !freezing(current))
140 clear_thread_flag(TIF_SIGPENDING);
144 /* Given the mask, find the first available signal that should be serviced. */
146 int next_signal(struct sigpending *pending, sigset_t *mask)
148 unsigned long i, *s, *m, x;
149 int sig = 0;
151 s = pending->signal.sig;
152 m = mask->sig;
153 switch (_NSIG_WORDS) {
154 default:
155 for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
156 if ((x = *s &~ *m) != 0) {
157 sig = ffz(~x) + i*_NSIG_BPW + 1;
158 break;
160 break;
162 case 2: if ((x = s[0] &~ m[0]) != 0)
163 sig = 1;
164 else if ((x = s[1] &~ m[1]) != 0)
165 sig = _NSIG_BPW + 1;
166 else
167 break;
168 sig += ffz(~x);
169 break;
171 case 1: if ((x = *s &~ *m) != 0)
172 sig = ffz(~x) + 1;
173 break;
176 return sig;
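/*
 * Allocate a new sigqueue entry, charged against the task's user via
 * RLIMIT_SIGPENDING unless @override_rlimit is set. Returns NULL when
 * the limit would be exceeded or the slab allocation fails.
 */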
179 static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
180 int override_rlimit)
182 struct sigqueue *q = NULL;
183 struct user_struct *user;
186 * In order to avoid problems with "switch_user()", we want to make
187 * sure that the compiler doesn't re-load "t->user"
189 user = t->user;
190 barrier();
191 atomic_inc(&user->sigpending);
192 if (override_rlimit ||
193 atomic_read(&user->sigpending) <=
194 t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
195 q = kmem_cache_alloc(sigqueue_cachep, flags);
196 if (unlikely(q == NULL)) {
197 atomic_dec(&user->sigpending);
198 } else {
199 INIT_LIST_HEAD(&q->list);
200 q->flags = 0;
201 q->user = get_uid(user);
203 return(q);
206 static void __sigqueue_free(struct sigqueue *q)
208 if (q->flags & SIGQUEUE_PREALLOC)
209 return;
210 atomic_dec(&q->user->sigpending);
211 free_uid(q->user);
212 kmem_cache_free(sigqueue_cachep, q);
215 void flush_sigqueue(struct sigpending *queue)
217 struct sigqueue *q;
219 sigemptyset(&queue->signal);
220 while (!list_empty(&queue->list)) {
221 q = list_entry(queue->list.next, struct sigqueue, list);
222 list_del_init(&q->list);
223 __sigqueue_free(q);
228 * Flush all pending signals for a task.
230 void flush_signals(struct task_struct *t)
232 unsigned long flags;
234 spin_lock_irqsave(&t->sighand->siglock, flags);
235 clear_tsk_thread_flag(t, TIF_SIGPENDING);
236 flush_sigqueue(&t->pending);
237 flush_sigqueue(&t->signal->shared_pending);
238 spin_unlock_irqrestore(&t->sighand->siglock, flags);
241 static void __flush_itimer_signals(struct sigpending *pending)
243 sigset_t signal, retain;
244 struct sigqueue *q, *n;
246 signal = pending->signal;
247 sigemptyset(&retain);
249 list_for_each_entry_safe(q, n, &pending->list, list) {
250 int sig = q->info.si_signo;
252 if (likely(q->info.si_code != SI_TIMER)) {
253 sigaddset(&retain, sig);
254 } else {
255 sigdelset(&signal, sig);
256 list_del_init(&q->list);
257 __sigqueue_free(q);
261 sigorsets(&pending->signal, &signal, &retain);
264 void flush_itimer_signals(void)
266 struct task_struct *tsk = current;
267 unsigned long flags;
269 spin_lock_irqsave(&tsk->sighand->siglock, flags);
270 __flush_itimer_signals(&tsk->pending);
271 __flush_itimer_signals(&tsk->signal->shared_pending);
272 spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
275 void ignore_signals(struct task_struct *t)
277 int i;
279 for (i = 0; i < _NSIG; ++i)
280 t->sighand->action[i].sa.sa_handler = SIG_IGN;
282 flush_signals(t);
286 * Flush all handlers for a task.
289 void
290 flush_signal_handlers(struct task_struct *t, int force_default)
292 int i;
293 struct k_sigaction *ka = &t->sighand->action[0];
294 for (i = _NSIG ; i != 0 ; i--) {
295 if (force_default || ka->sa.sa_handler != SIG_IGN)
296 ka->sa.sa_handler = SIG_DFL;
297 ka->sa.sa_flags = 0;
298 sigemptyset(&ka->sa.sa_mask);
299 ka++;
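/*
 * Return true if @sig would go unhandled by user space: the task is global
 * init, or the handler is SIG_IGN/SIG_DFL and no tracer cares about it.
 */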
303 int unhandled_signal(struct task_struct *tsk, int sig)
305 void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
306 if (is_global_init(tsk))
307 return 1;
308 if (handler != SIG_IGN && handler != SIG_DFL)
309 return 0;
310 return !tracehook_consider_fatal_signal(tsk, sig, handler);
314 /* Notify the system that a driver wants to block all signals for this
315 * process, and wants to be notified if any signals at all were to be
316 * sent/acted upon. If the notifier routine returns non-zero, then the
317 * signal will be acted upon after all. If the notifier routine returns 0,
318 * then the signal will be blocked. Only one block per process is
319 * allowed. priv is a pointer to private data that the notifier routine
320 * can use to determine if the signal should be blocked or not. */
322 void
323 block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
325 unsigned long flags;
327 spin_lock_irqsave(&current->sighand->siglock, flags);
328 current->notifier_mask = mask;
329 current->notifier_data = priv;
330 current->notifier = notifier;
331 spin_unlock_irqrestore(&current->sighand->siglock, flags);
334 /* Notify the system that blocking has ended. */
336 void
337 unblock_all_signals(void)
339 unsigned long flags;
341 spin_lock_irqsave(&current->sighand->siglock, flags);
342 current->notifier = NULL;
343 current->notifier_data = NULL;
344 recalc_sigpending();
345 spin_unlock_irqrestore(&current->sighand->siglock, flags);
348 static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
350 struct sigqueue *q, *first = NULL;
353 * Collect the siginfo appropriate to this signal. Check if
354 * there is another siginfo for the same signal.
356 list_for_each_entry(q, &list->list, list) {
357 if (q->info.si_signo == sig) {
358 if (first)
359 goto still_pending;
360 first = q;
364 sigdelset(&list->signal, sig);
366 if (first) {
367 still_pending:
368 list_del_init(&first->list);
369 copy_siginfo(info, &first->info);
370 __sigqueue_free(first);
371 } else {
372 /* Ok, it wasn't in the queue. This must be
373 a fast-pathed signal or we must have been
374 out of queue space. So zero out the info.
376 info->si_signo = sig;
377 info->si_errno = 0;
378 info->si_code = 0;
379 info->si_pid = 0;
380 info->si_uid = 0;
384 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
385 siginfo_t *info)
387 int sig = next_signal(pending, mask);
389 if (sig) {
390 if (current->notifier) {
391 if (sigismember(current->notifier_mask, sig)) {
392 if (!(current->notifier)(current->notifier_data)) {
393 clear_thread_flag(TIF_SIGPENDING);
394 return 0;
399 collect_signal(sig, pending, info);
402 return sig;
406 * Dequeue a signal and return the element to the caller, which is
407 * expected to free it.
409 * All callers have to hold the siglock.
411 int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
413 int signr;
415 /* We only dequeue private signals from ourselves; we don't let
416 * signalfd steal them
418 signr = __dequeue_signal(&tsk->pending, mask, info);
419 if (!signr) {
420 signr = __dequeue_signal(&tsk->signal->shared_pending,
421 mask, info);
423 * itimer signal ?
425 * itimers are process shared and we restart periodic
426 * itimers in the signal delivery path to prevent DoS
427 * attacks in the high resolution timer case. This is
428 * compliant with the old way of self restarting
429 * itimers, as the SIGALRM is a legacy signal and only
430 * queued once. Changing the restart behaviour to
431 * restart the timer in the signal dequeue path also
432 * reduces the timer noise on heavily loaded !highres
433 * systems.
435 if (unlikely(signr == SIGALRM)) {
436 struct hrtimer *tmr = &tsk->signal->real_timer;
438 if (!hrtimer_is_queued(tmr) &&
439 tsk->signal->it_real_incr.tv64 != 0) {
440 hrtimer_forward(tmr, tmr->base->get_time(),
441 tsk->signal->it_real_incr);
442 hrtimer_restart(tmr);
447 recalc_sigpending();
448 if (!signr)
449 return 0;
451 if (unlikely(sig_kernel_stop(signr))) {
453 * Set a marker that we have dequeued a stop signal. Our
454 * caller might release the siglock and then the pending
455 * stop signal it is about to process is no longer in the
456 * pending bitmasks, but must still be cleared by a SIGCONT
457 * (and overruled by a SIGKILL). So those cases clear this
458 * shared flag after we've set it. Note that this flag may
459 * remain set after the signal we return is ignored or
460 * handled. That doesn't matter because its only purpose
461 * is to alert stop-signal processing code when another
462 * processor has come along and cleared the flag.
464 tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
466 if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
468 * Release the siglock to ensure proper locking order
469 * of timer locks outside of siglocks. Note, we leave
470 * irqs disabled here, since the posix-timers code is
471 * about to disable them again anyway.
473 spin_unlock(&tsk->sighand->siglock);
474 do_schedule_next_timer(info);
475 spin_lock(&tsk->sighand->siglock);
477 return signr;
481 * Tell a process that it has a new active signal..
483 * NOTE! we rely on the previous spin_lock to
484 * lock interrupts for us! We can only be called with
485 * "siglock" held, and the local interrupt must
486 * have been disabled when that got acquired!
488 * No need to set need_resched since signal event passing
489 * goes through ->blocked
491 void signal_wake_up(struct task_struct *t, int resume)
493 unsigned int mask;
495 set_tsk_thread_flag(t, TIF_SIGPENDING);
498 * For SIGKILL, we want to wake it up in the stopped/traced/killable
499 * case. We don't check t->state here because there is a race with it
500 * executing on another processor and just now entering stopped state.
501 * By using wake_up_state, we ensure the process will wake up and
502 * handle its death signal.
504 mask = TASK_INTERRUPTIBLE;
505 if (resume)
506 mask |= TASK_WAKEKILL;
507 if (!wake_up_state(t, mask))
508 kick_process(t);
512 * Remove signals in mask from the pending set and queue.
513 * Returns 1 if any signals were found.
515 * All callers must be holding the siglock.
517 * This version takes a sigset mask and looks at all signals,
518 * not just those in the first mask word.
520 static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
522 struct sigqueue *q, *n;
523 sigset_t m;
525 sigandsets(&m, mask, &s->signal);
526 if (sigisemptyset(&m))
527 return 0;
529 signandsets(&s->signal, &s->signal, mask);
530 list_for_each_entry_safe(q, n, &s->list, list) {
531 if (sigismember(mask, q->info.si_signo)) {
532 list_del_init(&q->list);
533 __sigqueue_free(q);
536 return 1;
539 * Remove signals in mask from the pending set and queue.
540 * Returns 1 if any signals were found.
542 * All callers must be holding the siglock.
544 static int rm_from_queue(unsigned long mask, struct sigpending *s)
546 struct sigqueue *q, *n;
548 if (!sigtestsetmask(&s->signal, mask))
549 return 0;
551 sigdelsetmask(&s->signal, mask);
552 list_for_each_entry_safe(q, n, &s->list, list) {
553 if (q->info.si_signo < SIGRTMIN &&
554 (mask & sigmask(q->info.si_signo))) {
555 list_del_init(&q->list);
556 __sigqueue_free(q);
559 return 1;
563 * Bad permissions for sending the signal
565 static int check_kill_permission(int sig, struct siginfo *info,
566 struct task_struct *t)
568 struct pid *sid;
569 int error;
571 if (!valid_signal(sig))
572 return -EINVAL;
574 if (info != SEND_SIG_NOINFO && (is_si_special(info) || SI_FROMKERNEL(info)))
575 return 0;
577 error = audit_signal_info(sig, t); /* Let audit system see the signal */
578 if (error)
579 return error;
581 if ((current->euid ^ t->suid) && (current->euid ^ t->uid) &&
582 (current->uid ^ t->suid) && (current->uid ^ t->uid) &&
583 !capable(CAP_KILL)) {
584 switch (sig) {
585 case SIGCONT:
586 sid = task_session(t);
588 * We don't return the error if sid == NULL. The
589 * task was unhashed, the caller must notice this.
591 if (!sid || sid == task_session(current))
592 break;
593 default:
594 return -EPERM;
598 return security_task_kill(t, info, sig, 0);
602 * Handle magic process-wide effects of stop/continue signals. Unlike
603 * the signal actions, these happen immediately at signal-generation
604 * time regardless of blocking, ignoring, or handling. This does the
605 * actual continuing for SIGCONT, but not the actual stopping for stop
606 * signals. The process stop is done as a signal action for SIG_DFL.
608 * Returns true if the signal should be actually delivered, otherwise
609 * it should be dropped.
611 static int prepare_signal(int sig, struct task_struct *p)
613 struct signal_struct *signal = p->signal;
614 struct task_struct *t;
616 if (unlikely(signal->flags & SIGNAL_GROUP_EXIT)) {
618 * The process is in the middle of dying, nothing to do.
620 } else if (sig_kernel_stop(sig)) {
622 * This is a stop signal. Remove SIGCONT from all queues.
624 rm_from_queue(sigmask(SIGCONT), &signal->shared_pending);
625 t = p;
626 do {
627 rm_from_queue(sigmask(SIGCONT), &t->pending);
628 } while_each_thread(p, t);
629 } else if (sig == SIGCONT) {
630 unsigned int why;
632 * Remove all stop signals from all queues,
633 * and wake all threads.
635 rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending);
636 t = p;
637 do {
638 unsigned int state;
639 rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
641 * If there is a handler for SIGCONT, we must make
642 * sure that no thread returns to user mode before
643 * we post the signal, in case it was the only
644 * thread eligible to run the signal handler--then
645 * it must not do anything between resuming and
646 * running the handler. With the TIF_SIGPENDING
647 * flag set, the thread will pause and acquire the
648 * siglock that we hold now and until we've queued
649 * the pending signal.
651 * Wake up the stopped thread _after_ setting
652 * TIF_SIGPENDING
654 state = __TASK_STOPPED;
655 if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
656 set_tsk_thread_flag(t, TIF_SIGPENDING);
657 state |= TASK_INTERRUPTIBLE;
659 wake_up_state(t, state);
660 } while_each_thread(p, t);
663 * Notify the parent with CLD_CONTINUED if we were stopped.
665 * If we were in the middle of a group stop, we pretend it
666 * was already finished, and then continued. Since SIGCHLD
667 * doesn't queue we report only CLD_STOPPED, as if the next
668 * CLD_CONTINUED was dropped.
670 why = 0;
671 if (signal->flags & SIGNAL_STOP_STOPPED)
672 why |= SIGNAL_CLD_CONTINUED;
673 else if (signal->group_stop_count)
674 why |= SIGNAL_CLD_STOPPED;
676 if (why) {
678 * The first thread which returns from finish_stop()
679 * will take ->siglock, notice SIGNAL_CLD_MASK, and
680 * notify its parent. See get_signal_to_deliver().
682 signal->flags = why | SIGNAL_STOP_CONTINUED;
683 signal->group_stop_count = 0;
684 signal->group_exit_code = 0;
685 } else {
687 * We are not stopped, but there could be a stop
688 * signal in the middle of being processed after
689 * being removed from the queue. Clear that too.
691 signal->flags &= ~SIGNAL_STOP_DEQUEUED;
695 return !sig_ignored(p, sig);
699 * Test if P wants to take SIG. After we've checked all threads with this,
700 * it's equivalent to finding no threads not blocking SIG. Any threads not
701 * blocking SIG were ruled out because they are not running and already
702 * have pending signals. Such threads will dequeue from the shared queue
703 * as soon as they're available, so putting the signal on the shared queue
704 * will be equivalent to sending it to one such thread.
706 static inline int wants_signal(int sig, struct task_struct *p)
708 if (sigismember(&p->blocked, sig))
709 return 0;
710 if (p->flags & PF_EXITING)
711 return 0;
712 if (sig == SIGKILL)
713 return 1;
714 if (task_is_stopped_or_traced(p))
715 return 0;
716 return task_curr(p) || !signal_pending(p);
719 static void complete_signal(int sig, struct task_struct *p, int group)
721 struct signal_struct *signal = p->signal;
722 struct task_struct *t;
725 * Now find a thread we can wake up to take the signal off the queue.
727 * If the main thread wants the signal, it gets first crack.
728 * Probably the least surprising to the average bear.
730 if (wants_signal(sig, p))
731 t = p;
732 else if (!group || thread_group_empty(p))
734 * There is just one thread and it does not need to be woken.
735 * It will dequeue unblocked signals before it runs again.
737 return;
738 else {
740 * Otherwise try to find a suitable thread.
742 t = signal->curr_target;
743 while (!wants_signal(sig, t)) {
744 t = next_thread(t);
745 if (t == signal->curr_target)
747 * No thread needs to be woken.
748 * Any eligible threads will see
749 * the signal in the queue soon.
751 return;
753 signal->curr_target = t;
757 * Found a killable thread. If the signal will be fatal,
758 * then start taking the whole group down immediately.
760 if (sig_fatal(p, sig) &&
761 !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
762 !sigismember(&t->real_blocked, sig) &&
763 (sig == SIGKILL ||
764 !tracehook_consider_fatal_signal(t, sig, SIG_DFL))) {
766 * This signal will be fatal to the whole group.
768 if (!sig_kernel_coredump(sig)) {
770 * Start a group exit and wake everybody up.
771 * This way we don't have other threads
772 * running and doing things after a slower
773 * thread has the fatal signal pending.
775 signal->flags = SIGNAL_GROUP_EXIT;
776 signal->group_exit_code = sig;
777 signal->group_stop_count = 0;
778 t = p;
779 do {
780 sigaddset(&t->pending.signal, SIGKILL);
781 signal_wake_up(t, 1);
782 } while_each_thread(p, t);
783 return;
788 * The signal is already in the shared-pending queue.
789 * Tell the chosen thread to wake up and dequeue it.
791 signal_wake_up(t, sig == SIGKILL);
792 return;
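/*
 * Non-realtime signals are only ever queued once: if @sig is a legacy
 * signal already pending in @signals, the sender has nothing more to do.
 */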
795 static inline int legacy_queue(struct sigpending *signals, int sig)
797 return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
800 static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
801 int group)
803 struct sigpending *pending;
804 struct sigqueue *q;
806 assert_spin_locked(&t->sighand->siglock);
807 if (!prepare_signal(sig, t))
808 return 0;
810 pending = group ? &t->signal->shared_pending : &t->pending;
812 * Short-circuit ignored signals and support queuing
813 * exactly one non-rt signal, so that we can get more
814 * detailed information about the cause of the signal.
816 if (legacy_queue(pending, sig))
817 return 0;
819 * fast-pathed signals for kernel-internal things like SIGSTOP
820 * or SIGKILL.
822 if (info == SEND_SIG_FORCED)
823 goto out_set;
825 /* Real-time signals must be queued if sent by sigqueue, or
826 some other real-time mechanism. It is implementation
827 defined whether kill() does so. We attempt to do so, on
828 the principle of least surprise, but since kill is not
829 allowed to fail with EAGAIN when low on memory we just
830 make sure at least one signal gets delivered and don't
831 pass on the info struct. */
833 q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
834 (is_si_special(info) ||
835 info->si_code >= 0)));
836 if (q) {
837 list_add_tail(&q->list, &pending->list);
838 switch ((unsigned long) info) {
839 case (unsigned long) SEND_SIG_NOINFO:
840 q->info.si_signo = sig;
841 q->info.si_errno = 0;
842 q->info.si_code = SI_USER;
843 q->info.si_pid = task_pid_vnr(current);
844 q->info.si_uid = current->uid;
845 break;
846 case (unsigned long) SEND_SIG_PRIV:
847 q->info.si_signo = sig;
848 q->info.si_errno = 0;
849 q->info.si_code = SI_KERNEL;
850 q->info.si_pid = 0;
851 q->info.si_uid = 0;
852 break;
853 default:
854 copy_siginfo(&q->info, info);
855 break;
857 } else if (!is_si_special(info)) {
858 if (sig >= SIGRTMIN && info->si_code != SI_USER)
860 * Queue overflow, abort. We may abort if the signal was rt
861 * and sent by user using something other than kill().
863 return -EAGAIN;
866 out_set:
867 signalfd_notify(t, sig);
868 sigaddset(&pending->signal, sig);
869 complete_signal(sig, t, group);
870 return 0;
873 int print_fatal_signals;
875 static void print_fatal_signal(struct pt_regs *regs, int signr)
877 printk("%s/%d: potentially unexpected fatal signal %d.\n",
878 current->comm, task_pid_nr(current), signr);
880 #if defined(__i386__) && !defined(__arch_um__)
881 printk("code at %08lx: ", regs->ip);
883 int i;
884 for (i = 0; i < 16; i++) {
885 unsigned char insn;
887 if (get_user(insn, (unsigned char *)(regs->ip + i)))
888 break;
889 printk("%02x ", insn);
892 #endif
893 printk("\n");
894 show_regs(regs);
897 static int __init setup_print_fatal_signals(char *str)
899 get_option(&str, &print_fatal_signals);
901 return 1;
904 __setup("print-fatal-signals=", setup_print_fatal_signals);
907 __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
909 return send_signal(sig, info, p, 1);
912 static int
913 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
915 return send_signal(sig, info, t, 0);
919 * Force a signal that the process can't ignore: if necessary
920 * we unblock the signal and change any SIG_IGN to SIG_DFL.
922 * Note: If we unblock the signal, we always reset it to SIG_DFL,
923 * since we do not want to have a signal handler that was blocked
924 * be invoked when user space had explicitly blocked it.
926 * We don't want to have recursive SIGSEGV's etc, for example;
927 * that is why we also clear SIGNAL_UNKILLABLE.
930 force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
932 unsigned long int flags;
933 int ret, blocked, ignored;
934 struct k_sigaction *action;
936 spin_lock_irqsave(&t->sighand->siglock, flags);
937 action = &t->sighand->action[sig-1];
938 ignored = action->sa.sa_handler == SIG_IGN;
939 blocked = sigismember(&t->blocked, sig);
940 if (blocked || ignored) {
941 action->sa.sa_handler = SIG_DFL;
942 if (blocked) {
943 sigdelset(&t->blocked, sig);
944 recalc_sigpending_and_wake(t);
947 if (action->sa.sa_handler == SIG_DFL)
948 t->signal->flags &= ~SIGNAL_UNKILLABLE;
949 ret = specific_send_sig_info(sig, info, t);
950 spin_unlock_irqrestore(&t->sighand->siglock, flags);
952 return ret;
955 void
956 force_sig_specific(int sig, struct task_struct *t)
958 force_sig_info(sig, SEND_SIG_FORCED, t);
962 * Nuke all other threads in the group.
964 void zap_other_threads(struct task_struct *p)
966 struct task_struct *t;
968 p->signal->group_stop_count = 0;
970 for (t = next_thread(p); t != p; t = next_thread(t)) {
972 * Don't bother with already dead threads
974 if (t->exit_state)
975 continue;
977 /* SIGKILL will be handled before any pending SIGSTOP */
978 sigaddset(&t->pending.signal, SIGKILL);
979 signal_wake_up(t, 1);
983 int __fatal_signal_pending(struct task_struct *tsk)
985 return sigismember(&tsk->pending.signal, SIGKILL);
987 EXPORT_SYMBOL(__fatal_signal_pending);
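/*
 * Pin down tsk->sighand and acquire its siglock. The sighand may be
 * released or replaced while we look at it, so retry under RCU until the
 * pointer is stable with the lock held. Returns NULL if the task no
 * longer has a sighand.
 */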
989 struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
991 struct sighand_struct *sighand;
993 rcu_read_lock();
994 for (;;) {
995 sighand = rcu_dereference(tsk->sighand);
996 if (unlikely(sighand == NULL))
997 break;
999 spin_lock_irqsave(&sighand->siglock, *flags);
1000 if (likely(sighand == tsk->sighand))
1001 break;
1002 spin_unlock_irqrestore(&sighand->siglock, *flags);
1004 rcu_read_unlock();
1006 return sighand;
1009 int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1011 unsigned long flags;
1012 int ret;
1014 ret = check_kill_permission(sig, info, p);
1016 if (!ret && sig) {
1017 ret = -ESRCH;
1018 if (lock_task_sighand(p, &flags)) {
1019 ret = __group_send_sig_info(sig, info, p);
1020 unlock_task_sighand(p, &flags);
1024 return ret;
1028 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
1029 * control characters do (^C, ^Z etc)
1032 int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
1034 struct task_struct *p = NULL;
1035 int retval, success;
1037 success = 0;
1038 retval = -ESRCH;
1039 do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
1040 int err = group_send_sig_info(sig, info, p);
1041 success |= !err;
1042 retval = err;
1043 } while_each_pid_task(pgrp, PIDTYPE_PGID, p);
1044 return success ? 0 : retval;
1047 int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
1049 int error = -ESRCH;
1050 struct task_struct *p;
1052 rcu_read_lock();
1053 retry:
1054 p = pid_task(pid, PIDTYPE_PID);
1055 if (p) {
1056 error = group_send_sig_info(sig, info, p);
1057 if (unlikely(error == -ESRCH))
1059 * The task was unhashed in between; try again.
1060 * If it is dead, pid_task() will return NULL;
1061 * if we race with de_thread() it will find the
1062 * new leader.
1064 goto retry;
1066 rcu_read_unlock();
1068 return error;
1072 kill_proc_info(int sig, struct siginfo *info, pid_t pid)
1074 int error;
1075 rcu_read_lock();
1076 error = kill_pid_info(sig, info, find_vpid(pid));
1077 rcu_read_unlock();
1078 return error;
1081 /* like kill_pid_info(), but doesn't use uid/euid of "current" */
1082 int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
1083 uid_t uid, uid_t euid, u32 secid)
1085 int ret = -EINVAL;
1086 struct task_struct *p;
1088 if (!valid_signal(sig))
1089 return ret;
1091 read_lock(&tasklist_lock);
1092 p = pid_task(pid, PIDTYPE_PID);
1093 if (!p) {
1094 ret = -ESRCH;
1095 goto out_unlock;
1097 if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
1098 && (euid != p->suid) && (euid != p->uid)
1099 && (uid != p->suid) && (uid != p->uid)) {
1100 ret = -EPERM;
1101 goto out_unlock;
1103 ret = security_task_kill(p, info, sig, secid);
1104 if (ret)
1105 goto out_unlock;
1106 if (sig && p->sighand) {
1107 unsigned long flags;
1108 spin_lock_irqsave(&p->sighand->siglock, flags);
1109 ret = __group_send_sig_info(sig, info, p);
1110 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1112 out_unlock:
1113 read_unlock(&tasklist_lock);
1114 return ret;
1116 EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);
1119 * kill_something_info() interprets pid in interesting ways just like kill(2).
1121 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1122 * is probably wrong. Should make it like BSD or SYSV.
1125 static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
1127 int ret;
1129 if (pid > 0) {
1130 rcu_read_lock();
1131 ret = kill_pid_info(sig, info, find_vpid(pid));
1132 rcu_read_unlock();
1133 return ret;
1136 read_lock(&tasklist_lock);
1137 if (pid != -1) {
1138 ret = __kill_pgrp_info(sig, info,
1139 pid ? find_vpid(-pid) : task_pgrp(current));
1140 } else {
1141 int retval = 0, count = 0;
1142 struct task_struct * p;
1144 for_each_process(p) {
1145 if (task_pid_vnr(p) > 1 &&
1146 !same_thread_group(p, current)) {
1147 int err = group_send_sig_info(sig, info, p);
1148 ++count;
1149 if (err != -EPERM)
1150 retval = err;
1153 ret = count ? retval : -ESRCH;
1155 read_unlock(&tasklist_lock);
1157 return ret;
1161 * These are for backward compatibility with the rest of the kernel source.
1165 * The caller must ensure the task can't exit.
1168 send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1170 int ret;
1171 unsigned long flags;
1174 * Make sure legacy kernel users don't send in bad values
1175 * (normal paths check this in check_kill_permission).
1177 if (!valid_signal(sig))
1178 return -EINVAL;
1180 spin_lock_irqsave(&p->sighand->siglock, flags);
1181 ret = specific_send_sig_info(sig, info, p);
1182 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1183 return ret;
1186 #define __si_special(priv) \
1187 ((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1190 send_sig(int sig, struct task_struct *p, int priv)
1192 return send_sig_info(sig, __si_special(priv), p);
1195 void
1196 force_sig(int sig, struct task_struct *p)
1198 force_sig_info(sig, SEND_SIG_PRIV, p);
1202 * When things go south during signal handling, we
1203 * will force a SIGSEGV. And if the signal that caused
1204 * the problem was already a SIGSEGV, we'll want to
1205 * make sure we don't even try to deliver the signal..
1208 force_sigsegv(int sig, struct task_struct *p)
1210 if (sig == SIGSEGV) {
1211 unsigned long flags;
1212 spin_lock_irqsave(&p->sighand->siglock, flags);
1213 p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1214 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1216 force_sig(SIGSEGV, p);
1217 return 0;
1220 int kill_pgrp(struct pid *pid, int sig, int priv)
1222 int ret;
1224 read_lock(&tasklist_lock);
1225 ret = __kill_pgrp_info(sig, __si_special(priv), pid);
1226 read_unlock(&tasklist_lock);
1228 return ret;
1230 EXPORT_SYMBOL(kill_pgrp);
1232 int kill_pid(struct pid *pid, int sig, int priv)
1234 return kill_pid_info(sig, __si_special(priv), pid);
1236 EXPORT_SYMBOL(kill_pid);
1239 * These functions support sending signals using preallocated sigqueue
1240 * structures. This is needed "because realtime applications cannot
1241 * afford to lose notifications of asynchronous events, like timer
1242 * expirations or I/O completions". In the case of Posix Timers
1243 * we allocate the sigqueue structure from the timer_create. If this
1244 * allocation fails we are able to report the failure to the application
1245 * with an EAGAIN error.
1248 struct sigqueue *sigqueue_alloc(void)
1250 struct sigqueue *q;
1252 if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0)))
1253 q->flags |= SIGQUEUE_PREALLOC;
1254 return(q);
1257 void sigqueue_free(struct sigqueue *q)
1259 unsigned long flags;
1260 spinlock_t *lock = &current->sighand->siglock;
1262 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1264 * We must hold ->siglock while testing q->list
1265 * to serialize with collect_signal() or with
1266 * __exit_signal()->flush_sigqueue().
1268 spin_lock_irqsave(lock, flags);
1269 q->flags &= ~SIGQUEUE_PREALLOC;
1271 * If it is queued it will be freed when dequeued,
1272 * like the "regular" sigqueue.
1274 if (!list_empty(&q->list))
1275 q = NULL;
1276 spin_unlock_irqrestore(lock, flags);
1278 if (q)
1279 __sigqueue_free(q);
1282 int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
1284 int sig = q->info.si_signo;
1285 struct sigpending *pending;
1286 unsigned long flags;
1287 int ret;
1289 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1291 ret = -1;
1292 if (!likely(lock_task_sighand(t, &flags)))
1293 goto ret;
1295 ret = 1; /* the signal is ignored */
1296 if (!prepare_signal(sig, t))
1297 goto out;
1299 ret = 0;
1300 if (unlikely(!list_empty(&q->list))) {
1302 * If an SI_TIMER entry is already queued, just increment
1303 * the overrun count.
1305 BUG_ON(q->info.si_code != SI_TIMER);
1306 q->info.si_overrun++;
1307 goto out;
1309 q->info.si_overrun = 0;
1311 signalfd_notify(t, sig);
1312 pending = group ? &t->signal->shared_pending : &t->pending;
1313 list_add_tail(&q->list, &pending->list);
1314 sigaddset(&pending->signal, sig);
1315 complete_signal(sig, t, group);
1316 out:
1317 unlock_task_sighand(t, &flags);
1318 ret:
1319 return ret;
1323 * Wake up any threads in the parent blocked in wait* syscalls.
1325 static inline void __wake_up_parent(struct task_struct *p,
1326 struct task_struct *parent)
1328 wake_up_interruptible_sync(&parent->signal->wait_chldexit);
1332 * Let a parent know about the death of a child.
1333 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
1335 * Returns -1 if our parent ignored us and so we've switched to
1336 * self-reaping, or else @sig.
1338 int do_notify_parent(struct task_struct *tsk, int sig)
1340 struct siginfo info;
1341 unsigned long flags;
1342 struct sighand_struct *psig;
1343 int ret = sig;
1345 BUG_ON(sig == -1);
1347 /* do_notify_parent_cldstop should have been called instead. */
1348 BUG_ON(task_is_stopped_or_traced(tsk));
1350 BUG_ON(!tsk->ptrace &&
1351 (tsk->group_leader != tsk || !thread_group_empty(tsk)));
1353 info.si_signo = sig;
1354 info.si_errno = 0;
1356 * we are under tasklist_lock here so our parent is tied to
1357 * us and cannot exit and release its namespace.
1359 * the only thing it can do is switch its nsproxy with sys_unshare,
1360 * but unsharing pid namespaces is not allowed, so we'll always
1361 * see the relevant namespace
1363 * write_lock() currently calls preempt_disable() which is the
1364 * same as rcu_read_lock(), but according to Oleg, it is not
1365 * correct to rely on this
1367 rcu_read_lock();
1368 info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
1369 rcu_read_unlock();
1371 info.si_uid = tsk->uid;
1373 info.si_utime = cputime_to_clock_t(cputime_add(tsk->utime,
1374 tsk->signal->utime));
1375 info.si_stime = cputime_to_clock_t(cputime_add(tsk->stime,
1376 tsk->signal->stime));
1378 info.si_status = tsk->exit_code & 0x7f;
1379 if (tsk->exit_code & 0x80)
1380 info.si_code = CLD_DUMPED;
1381 else if (tsk->exit_code & 0x7f)
1382 info.si_code = CLD_KILLED;
1383 else {
1384 info.si_code = CLD_EXITED;
1385 info.si_status = tsk->exit_code >> 8;
1388 psig = tsk->parent->sighand;
1389 spin_lock_irqsave(&psig->siglock, flags);
1390 if (!tsk->ptrace && sig == SIGCHLD &&
1391 (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
1392 (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
1394 * We are exiting and our parent doesn't care. POSIX.1
1395 * defines special semantics for setting SIGCHLD to SIG_IGN
1396 * or setting the SA_NOCLDWAIT flag: we should be reaped
1397 * automatically and not left for our parent's wait4 call.
1398 * Rather than having the parent do it as a magic kind of
1399 * signal handler, we just set this to tell do_exit that we
1400 * can be cleaned up without becoming a zombie. Note that
1401 * we still call __wake_up_parent in this case, because a
1402 * blocked sys_wait4 might now return -ECHILD.
1404 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
1405 * is implementation-defined: we do (if you don't want
1406 * it, just use SIG_IGN instead).
1408 ret = tsk->exit_signal = -1;
1409 if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
1410 sig = -1;
1412 if (valid_signal(sig) && sig > 0)
1413 __group_send_sig_info(sig, &info, tsk->parent);
1414 __wake_up_parent(tsk, tsk->parent);
1415 spin_unlock_irqrestore(&psig->siglock, flags);
1417 return ret;
1420 static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
1422 struct siginfo info;
1423 unsigned long flags;
1424 struct task_struct *parent;
1425 struct sighand_struct *sighand;
1427 if (tsk->ptrace & PT_PTRACED)
1428 parent = tsk->parent;
1429 else {
1430 tsk = tsk->group_leader;
1431 parent = tsk->real_parent;
1434 info.si_signo = SIGCHLD;
1435 info.si_errno = 0;
1437 * see comment in do_notify_parent() about the following 3 lines
1439 rcu_read_lock();
1440 info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
1441 rcu_read_unlock();
1443 info.si_uid = tsk->uid;
1445 info.si_utime = cputime_to_clock_t(tsk->utime);
1446 info.si_stime = cputime_to_clock_t(tsk->stime);
1448 info.si_code = why;
1449 switch (why) {
1450 case CLD_CONTINUED:
1451 info.si_status = SIGCONT;
1452 break;
1453 case CLD_STOPPED:
1454 info.si_status = tsk->signal->group_exit_code & 0x7f;
1455 break;
1456 case CLD_TRAPPED:
1457 info.si_status = tsk->exit_code & 0x7f;
1458 break;
1459 default:
1460 BUG();
1463 sighand = parent->sighand;
1464 spin_lock_irqsave(&sighand->siglock, flags);
1465 if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
1466 !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
1467 __group_send_sig_info(SIGCHLD, &info, parent);
1469 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
1471 __wake_up_parent(tsk, parent);
1472 spin_unlock_irqrestore(&sighand->siglock, flags);
1475 static inline int may_ptrace_stop(void)
1477 if (!likely(current->ptrace & PT_PTRACED))
1478 return 0;
1480 * Are we in the middle of do_coredump?
1481 * If so and our tracer is also part of the coredump stopping
1482 * is a deadlock situation, and pointless because our tracer
1483 * is dead so don't allow us to stop.
1484 * If SIGKILL was already sent before the caller unlocked
1485 * ->siglock we must see ->core_state != NULL. Otherwise it
1486 * is safe to enter schedule().
1488 if (unlikely(current->mm->core_state) &&
1489 unlikely(current->mm == current->parent->mm))
1490 return 0;
1492 return 1;
1496 * Return nonzero if there is a SIGKILL that should be waking us up.
1497 * Called with the siglock held.
1499 static int sigkill_pending(struct task_struct *tsk)
1501 return sigismember(&tsk->pending.signal, SIGKILL) ||
1502 sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
1506 * This must be called with current->sighand->siglock held.
1508 * This should be the path for all ptrace stops.
1509 * We always set current->last_siginfo while stopped here.
1510 * That makes it a way to test a stopped process for
1511 * being ptrace-stopped vs being job-control-stopped.
1513 * If we actually decide not to stop at all because the tracer
1514 * is gone, we keep current->exit_code unless clear_code.
1516 static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info)
1518 if (arch_ptrace_stop_needed(exit_code, info)) {
1520 * The arch code has something special to do before a
1521 * ptrace stop. This is allowed to block, e.g. for faults
1522 * on user stack pages. We can't keep the siglock while
1523 * calling arch_ptrace_stop, so we must release it now.
1524 * To preserve proper semantics, we must do this before
1525 * any signal bookkeeping like checking group_stop_count.
1526 * Meanwhile, a SIGKILL could come in before we retake the
1527 * siglock. That must prevent us from sleeping in TASK_TRACED.
1528 * So after regaining the lock, we must check for SIGKILL.
1530 spin_unlock_irq(&current->sighand->siglock);
1531 arch_ptrace_stop(exit_code, info);
1532 spin_lock_irq(&current->sighand->siglock);
1533 if (sigkill_pending(current))
1534 return;
1538 * If there is a group stop in progress,
1539 * we must participate in the bookkeeping.
1541 if (current->signal->group_stop_count > 0)
1542 --current->signal->group_stop_count;
1544 current->last_siginfo = info;
1545 current->exit_code = exit_code;
1547 /* Let the debugger run. */
1548 __set_current_state(TASK_TRACED);
1549 spin_unlock_irq(&current->sighand->siglock);
1550 read_lock(&tasklist_lock);
1551 if (may_ptrace_stop()) {
1552 do_notify_parent_cldstop(current, CLD_TRAPPED);
1554 * Don't want to allow preemption here, because
1555 * sys_ptrace() needs this task to be inactive.
1557 * XXX: implement read_unlock_no_resched().
1559 preempt_disable();
1560 read_unlock(&tasklist_lock);
1561 preempt_enable_no_resched();
1562 schedule();
1563 } else {
1565 * By the time we got the lock, our tracer went away.
1566 * Don't drop the lock yet, another tracer may come.
1568 __set_current_state(TASK_RUNNING);
1569 if (clear_code)
1570 current->exit_code = 0;
1571 read_unlock(&tasklist_lock);
1575 * While in TASK_TRACED, we were considered "frozen enough".
1576 * Now that we woke up, it's crucial if we're supposed to be
1577 * frozen that we freeze now before running anything substantial.
1579 try_to_freeze();
1582 * We are back. Now reacquire the siglock before touching
1583 * last_siginfo, so that we are sure to have synchronized with
1584 * any signal-sending on another CPU that wants to examine it.
1586 spin_lock_irq(&current->sighand->siglock);
1587 current->last_siginfo = NULL;
1590 * Queued signals ignored us while we were stopped for tracing.
1591 * So check for any that we should take before resuming user mode.
1592 * This sets TIF_SIGPENDING, but never clears it.
1594 recalc_sigpending_tsk(current);
1597 void ptrace_notify(int exit_code)
1599 siginfo_t info;
1601 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
1603 memset(&info, 0, sizeof info);
1604 info.si_signo = SIGTRAP;
1605 info.si_code = exit_code;
1606 info.si_pid = task_pid_vnr(current);
1607 info.si_uid = current->uid;
1609 /* Let the debugger run. */
1610 spin_lock_irq(&current->sighand->siglock);
1611 ptrace_stop(exit_code, 1, &info);
1612 spin_unlock_irq(&current->sighand->siglock);
1615 static void
1616 finish_stop(int stop_count)
1619 * If there are no other threads in the group, or if there is
1620 * a group stop in progress and we are the last to stop,
1621 * report to the parent. When ptraced, every thread reports itself.
1623 if (tracehook_notify_jctl(stop_count == 0, CLD_STOPPED)) {
1624 read_lock(&tasklist_lock);
1625 do_notify_parent_cldstop(current, CLD_STOPPED);
1626 read_unlock(&tasklist_lock);
1629 do {
1630 schedule();
1631 } while (try_to_freeze());
1633 * Now we don't run again until continued.
1635 current->exit_code = 0;
1639 * This performs the stopping for SIGSTOP and other stop signals.
1640 * We have to stop all threads in the thread group.
1641 * Returns nonzero if we've actually stopped and released the siglock.
1642 * Returns zero if we didn't stop and still hold the siglock.
1644 static int do_signal_stop(int signr)
1646 struct signal_struct *sig = current->signal;
1647 int stop_count;
1649 if (sig->group_stop_count > 0) {
1651 * There is a group stop in progress. We don't need to
1652 * start another one.
1654 stop_count = --sig->group_stop_count;
1655 } else {
1656 struct task_struct *t;
1658 if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) ||
1659 unlikely(signal_group_exit(sig)))
1660 return 0;
1662 * There is no group stop already in progress.
1663 * We must initiate one now.
1665 sig->group_exit_code = signr;
1667 stop_count = 0;
1668 for (t = next_thread(current); t != current; t = next_thread(t))
1670 * Setting state to TASK_STOPPED for a group
1671 * stop is always done with the siglock held,
1672 * so this check has no races.
1674 if (!(t->flags & PF_EXITING) &&
1675 !task_is_stopped_or_traced(t)) {
1676 stop_count++;
1677 signal_wake_up(t, 0);
1679 sig->group_stop_count = stop_count;
1682 if (stop_count == 0)
1683 sig->flags = SIGNAL_STOP_STOPPED;
1684 current->exit_code = sig->group_exit_code;
1685 __set_current_state(TASK_STOPPED);
1687 spin_unlock_irq(&current->sighand->siglock);
1688 finish_stop(stop_count);
1689 return 1;
1692 static int ptrace_signal(int signr, siginfo_t *info,
1693 struct pt_regs *regs, void *cookie)
1695 if (!(current->ptrace & PT_PTRACED))
1696 return signr;
1698 ptrace_signal_deliver(regs, cookie);
1700 /* Let the debugger run. */
1701 ptrace_stop(signr, 0, info);
1703 /* We're back. Did the debugger cancel the sig? */
1704 signr = current->exit_code;
1705 if (signr == 0)
1706 return signr;
1708 current->exit_code = 0;
1710 /* Update the siginfo structure if the signal has
1711 changed. If the debugger wanted something
1712 specific in the siginfo structure then it should
1713 have updated *info via PTRACE_SETSIGINFO. */
1714 if (signr != info->si_signo) {
1715 info->si_signo = signr;
1716 info->si_errno = 0;
1717 info->si_code = SI_USER;
1718 info->si_pid = task_pid_vnr(current->parent);
1719 info->si_uid = current->parent->uid;
1722 /* If the (new) signal is now blocked, requeue it. */
1723 if (sigismember(&current->blocked, signr)) {
1724 specific_send_sig_info(signr, info, current);
1725 signr = 0;
1728 return signr;
1731 int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
1732 struct pt_regs *regs, void *cookie)
1734 struct sighand_struct *sighand = current->sighand;
1735 struct signal_struct *signal = current->signal;
1736 int signr;
1738 relock:
1740 * We'll jump back here after any time we were stopped in TASK_STOPPED.
1741 * While in TASK_STOPPED, we were considered "frozen enough".
1742 * Now that we woke up, it's crucial if we're supposed to be
1743 * frozen that we freeze now before running anything substantial.
1745 try_to_freeze();
1747 spin_lock_irq(&sighand->siglock);
1749 * Every stopped thread goes here after wakeup. Check to see if
1750 * we should notify the parent, prepare_signal(SIGCONT) encodes
1751 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
1753 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
1754 int why = (signal->flags & SIGNAL_STOP_CONTINUED)
1755 ? CLD_CONTINUED : CLD_STOPPED;
1756 signal->flags &= ~SIGNAL_CLD_MASK;
1757 spin_unlock_irq(&sighand->siglock);
1759 if (unlikely(!tracehook_notify_jctl(1, why)))
1760 goto relock;
1762 read_lock(&tasklist_lock);
1763 do_notify_parent_cldstop(current->group_leader, why);
1764 read_unlock(&tasklist_lock);
1765 goto relock;
1768 for (;;) {
1769 struct k_sigaction *ka;
1771 if (unlikely(signal->group_stop_count > 0) &&
1772 do_signal_stop(0))
1773 goto relock;
1776 * Tracing can induce an artificial signal and choose sigaction.
1777 * The return value in @signr determines the default action,
1778 * but @info->si_signo is the signal number we will report.
1780 signr = tracehook_get_signal(current, regs, info, return_ka);
1781 if (unlikely(signr < 0))
1782 goto relock;
1783 if (unlikely(signr != 0))
1784 ka = return_ka;
1785 else {
1786 signr = dequeue_signal(current, &current->blocked,
1787 info);
1789 if (!signr)
1790 break; /* will return 0 */
1792 if (signr != SIGKILL) {
1793 signr = ptrace_signal(signr, info,
1794 regs, cookie);
1795 if (!signr)
1796 continue;
1799 ka = &sighand->action[signr-1];
1802 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
1803 continue;
1804 if (ka->sa.sa_handler != SIG_DFL) {
1805 /* Run the handler. */
1806 *return_ka = *ka;
1808 if (ka->sa.sa_flags & SA_ONESHOT)
1809 ka->sa.sa_handler = SIG_DFL;
1811 break; /* will return non-zero "signr" value */
1815 * Now we are doing the default action for this signal.
1817 if (sig_kernel_ignore(signr)) /* Default is nothing. */
1818 continue;
1821 * Global init gets no signals it doesn't want.
1823 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
1824 !signal_group_exit(signal))
1825 continue;
1827 if (sig_kernel_stop(signr)) {
1829 * The default action is to stop all threads in
1830 * the thread group. The job control signals
1831 * do nothing in an orphaned pgrp, but SIGSTOP
1832 * always works. Note that siglock needs to be
1833 * dropped during the call to is_orphaned_pgrp()
1834 * because of lock ordering with tasklist_lock.
1835 * This allows an intervening SIGCONT to be posted.
1836 * We need to check for that and bail out if necessary.
1838 if (signr != SIGSTOP) {
1839 spin_unlock_irq(&sighand->siglock);
1841 /* signals can be posted during this window */
1843 if (is_current_pgrp_orphaned())
1844 goto relock;
1846 spin_lock_irq(&sighand->siglock);
1849 if (likely(do_signal_stop(info->si_signo))) {
1850 /* It released the siglock. */
1851 goto relock;
1855 * We didn't actually stop, due to a race
1856 * with SIGCONT or something like that.
1858 continue;
1861 spin_unlock_irq(&sighand->siglock);
1864 * Anything else is fatal, maybe with a core dump.
1866 current->flags |= PF_SIGNALED;
1868 if (sig_kernel_coredump(signr)) {
1869 if (print_fatal_signals)
1870 print_fatal_signal(regs, info->si_signo);
1872 * If it was able to dump core, this kills all
1873 * other threads in the group and synchronizes with
1874 * their demise. If we lost the race with another
1875 * thread getting here, it set group_exit_code
1876 * first and our do_group_exit call below will use
1877 * that value and ignore the one we pass it.
1879 do_coredump(info->si_signo, info->si_signo, regs);
1883 * Death signals, no core dump.
1885 do_group_exit(info->si_signo);
1886 /* NOTREACHED */
1888 spin_unlock_irq(&sighand->siglock);
1889 return signr;
1892 void exit_signals(struct task_struct *tsk)
1894 int group_stop = 0;
1895 struct task_struct *t;
1897 if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
1898 tsk->flags |= PF_EXITING;
1899 return;
1902 spin_lock_irq(&tsk->sighand->siglock);
1904 * From now this task is not visible for group-wide signals,
1905 * see wants_signal(), do_signal_stop().
1907 tsk->flags |= PF_EXITING;
1908 if (!signal_pending(tsk))
1909 goto out;
1911 /* It could be that __group_complete_signal() chose us to
1912 * notify about group-wide signal. Another thread should be
1913 * woken now to take the signal since we will not.
1915 for (t = tsk; (t = next_thread(t)) != tsk; )
1916 if (!signal_pending(t) && !(t->flags & PF_EXITING))
1917 recalc_sigpending_and_wake(t);
1919 if (unlikely(tsk->signal->group_stop_count) &&
1920 !--tsk->signal->group_stop_count) {
1921 tsk->signal->flags = SIGNAL_STOP_STOPPED;
1922 group_stop = 1;
1924 out:
1925 spin_unlock_irq(&tsk->sighand->siglock);
1927 if (unlikely(group_stop) && tracehook_notify_jctl(1, CLD_STOPPED)) {
1928 read_lock(&tasklist_lock);
1929 do_notify_parent_cldstop(tsk, CLD_STOPPED);
1930 read_unlock(&tasklist_lock);
1934 EXPORT_SYMBOL(recalc_sigpending);
1935 EXPORT_SYMBOL_GPL(dequeue_signal);
1936 EXPORT_SYMBOL(flush_signals);
1937 EXPORT_SYMBOL(force_sig);
1938 EXPORT_SYMBOL(send_sig);
1939 EXPORT_SYMBOL(send_sig_info);
1940 EXPORT_SYMBOL(sigprocmask);
1941 EXPORT_SYMBOL(block_all_signals);
1942 EXPORT_SYMBOL(unblock_all_signals);
1946 * System call entry points.
1949 SYSCALL_DEFINE0(restart_syscall)
1951 struct restart_block *restart = &current_thread_info()->restart_block;
1952 return restart->fn(restart);
1955 long do_no_restart_syscall(struct restart_block *param)
1957 return -EINTR;
1961 * We don't need to get the kernel lock - this is all local to this
1962 * particular thread.. (and that's good, because this is _heavily_
1963 * used by various programs)
1967 * This is also useful for kernel threads that want to temporarily
1968 * (or permanently) block certain signals.
1970 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
1971 * interface happily blocks "unblockable" signals like SIGKILL
1972 * and friends.
1974 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
1976 int error;
1978 spin_lock_irq(&current->sighand->siglock);
1979 if (oldset)
1980 *oldset = current->blocked;
1982 error = 0;
1983 switch (how) {
1984 case SIG_BLOCK:
1985 sigorsets(&current->blocked, &current->blocked, set);
1986 break;
1987 case SIG_UNBLOCK:
1988 signandsets(&current->blocked, &current->blocked, set);
1989 break;
1990 case SIG_SETMASK:
1991 current->blocked = *set;
1992 break;
1993 default:
1994 error = -EINVAL;
1996 recalc_sigpending();
1997 spin_unlock_irq(&current->sighand->siglock);
1999 return error;
2002 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, set,
2003 sigset_t __user *, oset, size_t, sigsetsize)
2005 int error = -EINVAL;
2006 sigset_t old_set, new_set;
2008 /* XXX: Don't preclude handling different sized sigset_t's. */
2009 if (sigsetsize != sizeof(sigset_t))
2010 goto out;
2012 if (set) {
2013 error = -EFAULT;
2014 if (copy_from_user(&new_set, set, sizeof(*set)))
2015 goto out;
2016 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2018 error = sigprocmask(how, &new_set, &old_set);
2019 if (error)
2020 goto out;
2021 if (oset)
2022 goto set_old;
2023 } else if (oset) {
2024 spin_lock_irq(&current->sighand->siglock);
2025 old_set = current->blocked;
2026 spin_unlock_irq(&current->sighand->siglock);
2028 set_old:
2029 error = -EFAULT;
2030 if (copy_to_user(oset, &old_set, sizeof(*oset)))
2031 goto out;
2033 error = 0;
2034 out:
2035 return error;
2038 long do_sigpending(void __user *set, unsigned long sigsetsize)
2040 long error = -EINVAL;
2041 sigset_t pending;
2043 if (sigsetsize > sizeof(sigset_t))
2044 goto out;
2046 spin_lock_irq(&current->sighand->siglock);
2047 sigorsets(&pending, &current->pending.signal,
2048 &current->signal->shared_pending.signal);
2049 spin_unlock_irq(&current->sighand->siglock);
2051 /* Outside the lock because only this thread touches it. */
2052 sigandsets(&pending, &current->blocked, &pending);
2054 error = -EFAULT;
2055 if (!copy_to_user(set, &pending, sigsetsize))
2056 error = 0;
2058 out:
2059 return error;
2062 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, set, size_t, sigsetsize)
2064 return do_sigpending(set, sigsetsize);
2067 #ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2069 int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
2071 int err;
2073 if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
2074 return -EFAULT;
2075 if (from->si_code < 0)
2076 return __copy_to_user(to, from, sizeof(siginfo_t))
2077 ? -EFAULT : 0;
2079 * If you change siginfo_t structure, please be sure
2080 * this code is fixed accordingly.
2081 * Please remember to update the signalfd_copyinfo() function
2082 * inside fs/signalfd.c too, in case siginfo_t changes.
2083 * It should never copy any pad contained in the structure
2084 * to avoid security leaks, but must copy the generic
2085 * 3 ints plus the relevant union member.
2087 err = __put_user(from->si_signo, &to->si_signo);
2088 err |= __put_user(from->si_errno, &to->si_errno);
2089 err |= __put_user((short)from->si_code, &to->si_code);
2090 switch (from->si_code & __SI_MASK) {
2091 case __SI_KILL:
2092 err |= __put_user(from->si_pid, &to->si_pid);
2093 err |= __put_user(from->si_uid, &to->si_uid);
2094 break;
2095 case __SI_TIMER:
2096 err |= __put_user(from->si_tid, &to->si_tid);
2097 err |= __put_user(from->si_overrun, &to->si_overrun);
2098 err |= __put_user(from->si_ptr, &to->si_ptr);
2099 break;
2100 case __SI_POLL:
2101 err |= __put_user(from->si_band, &to->si_band);
2102 err |= __put_user(from->si_fd, &to->si_fd);
2103 break;
2104 case __SI_FAULT:
2105 err |= __put_user(from->si_addr, &to->si_addr);
2106 #ifdef __ARCH_SI_TRAPNO
2107 err |= __put_user(from->si_trapno, &to->si_trapno);
2108 #endif
2109 break;
2110 case __SI_CHLD:
2111 err |= __put_user(from->si_pid, &to->si_pid);
2112 err |= __put_user(from->si_uid, &to->si_uid);
2113 err |= __put_user(from->si_status, &to->si_status);
2114 err |= __put_user(from->si_utime, &to->si_utime);
2115 err |= __put_user(from->si_stime, &to->si_stime);
2116 break;
2117 case __SI_RT: /* This is not generated by the kernel as of now. */
2118 case __SI_MESGQ: /* But this is */
2119 err |= __put_user(from->si_pid, &to->si_pid);
2120 err |= __put_user(from->si_uid, &to->si_uid);
2121 err |= __put_user(from->si_ptr, &to->si_ptr);
2122 break;
2123 default: /* this is just in case for now ... */
2124 err |= __put_user(from->si_pid, &to->si_pid);
2125 err |= __put_user(from->si_uid, &to->si_uid);
2126 break;
2128 return err;
2131 #endif
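/*
 * sys_rt_sigtimedwait - synchronously wait for a signal in @uthese, with an
 * optional timeout @uts.  The waited-for signals are temporarily unblocked
 * (the old mask is parked in ->real_blocked) while sleeping so that their
 * arrival wakes us up.  Returns the signal number and fills @uinfo on
 * success, -EAGAIN when the timeout expires, or -EINTR when interrupted by
 * some other signal.
 */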
2133 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
2134 siginfo_t __user *, uinfo, const struct timespec __user *, uts,
2135 size_t, sigsetsize)
2137 int ret, sig;
2138 sigset_t these;
2139 struct timespec ts;
2140 siginfo_t info;
2141 long timeout = 0;
2143 /* XXX: Don't preclude handling different sized sigset_t's. */
2144 if (sigsetsize != sizeof(sigset_t))
2145 return -EINVAL;
2147 if (copy_from_user(&these, uthese, sizeof(these)))
2148 return -EFAULT;
2150 /*
2151 * Invert the set of allowed signals to get those we
2152 * want to block.
2153 */
2154 sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
2155 signotset(&these);
2157 if (uts) {
2158 if (copy_from_user(&ts, uts, sizeof(ts)))
2159 return -EFAULT;
2160 if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
2161 || ts.tv_sec < 0)
2162 return -EINVAL;
2165 spin_lock_irq(&current->sighand->siglock);
2166 sig = dequeue_signal(current, &these, &info);
2167 if (!sig) {
2168 timeout = MAX_SCHEDULE_TIMEOUT;
2169 if (uts)
2170 timeout = (timespec_to_jiffies(&ts)
2171 + (ts.tv_sec || ts.tv_nsec));
2173 if (timeout) {
2174 /* None ready -- temporarily unblock those we're
2175 * interested in while we are sleeping, so that we'll
2176 * be awakened when they arrive. */
2177 current->real_blocked = current->blocked;
2178 sigandsets(&current->blocked, &current->blocked, &these);
2179 recalc_sigpending();
2180 spin_unlock_irq(&current->sighand->siglock);
2182 timeout = schedule_timeout_interruptible(timeout);
2184 spin_lock_irq(&current->sighand->siglock);
2185 sig = dequeue_signal(current, &these, &info);
2186 current->blocked = current->real_blocked;
2187 siginitset(&current->real_blocked, 0);
2188 recalc_sigpending();
2191 spin_unlock_irq(&current->sighand->siglock);
2193 if (sig) {
2194 ret = sig;
2195 if (uinfo) {
2196 if (copy_siginfo_to_user(uinfo, &info))
2197 ret = -EFAULT;
2199 } else {
2200 ret = -EAGAIN;
2201 if (timeout)
2202 ret = -EINTR;
2205 return ret;
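/*
 * sys_kill - send @sig to the process or process group designated by @pid,
 * with SI_USER siginfo carrying the sender's tgid and uid.  The pid
 * encoding (single process, process group, or -1 for "everything we may
 * signal") is handled by kill_something_info().
 */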
2208 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
2210 struct siginfo info;
2212 info.si_signo = sig;
2213 info.si_errno = 0;
2214 info.si_code = SI_USER;
2215 info.si_pid = task_tgid_vnr(current);
2216 info.si_uid = current->uid;
2218 return kill_something_info(sig, &info, pid);
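/*
 * do_tkill - common helper for tkill() and tgkill(): deliver @sig to the
 * single thread identified by @pid.  When @tgid is positive the thread must
 * belong to that thread group, otherwise the lookup fails with -ESRCH.
 */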
2221 static int do_tkill(pid_t tgid, pid_t pid, int sig)
2223 int error;
2224 struct siginfo info;
2225 struct task_struct *p;
2226 unsigned long flags;
2228 error = -ESRCH;
2229 info.si_signo = sig;
2230 info.si_errno = 0;
2231 info.si_code = SI_TKILL;
2232 info.si_pid = task_tgid_vnr(current);
2233 info.si_uid = current->uid;
2235 rcu_read_lock();
2236 p = find_task_by_vpid(pid);
2237 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
2238 error = check_kill_permission(sig, &info, p);
2239 /*
2240 * The null signal is a permissions and process existence
2241 * probe. No signal is actually delivered.
2242 *
2243 * If lock_task_sighand() fails we pretend the task dies
2244 * after receiving the signal. The window is tiny, and the
2245 * signal is private anyway.
2246 */
2247 if (!error && sig && lock_task_sighand(p, &flags)) {
2248 error = specific_send_sig_info(sig, &info, p);
2249 unlock_task_sighand(p, &flags);
2252 rcu_read_unlock();
2254 return error;
2257 /**
2258 * sys_tgkill - send signal to one specific thread
2259 * @tgid: the thread group ID of the thread
2260 * @pid: the PID of the thread
2261 * @sig: signal to be sent
2262 *
2263 * This syscall also checks @tgid and returns -ESRCH even if the PID
2264 * exists but no longer belongs to that thread group. This
2265 * method solves the problem of threads exiting and PIDs getting reused.
2266 */
2267 SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
2269 /* This is only valid for single tasks */
2270 if (pid <= 0 || tgid <= 0)
2271 return -EINVAL;
2273 return do_tkill(tgid, pid, sig);
2276 /*
2277 * Send a signal to only one task, even if it's a CLONE_THREAD task.
2278 */
2279 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
2281 /* This is only valid for single tasks */
2282 if (pid <= 0)
2283 return -EINVAL;
2285 return do_tkill(0, pid, sig);
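/*
 * sys_rt_sigqueueinfo - send @sig to @pid with caller-supplied siginfo.
 * The si_code check below prevents user space from forging kernel-generated
 * (si_code >= 0) or tkill-style siginfo.
 */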
2288 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
2289 siginfo_t __user *, uinfo)
2291 siginfo_t info;
2293 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2294 return -EFAULT;
2296 /* Not even root can pretend to send signals from the kernel.
2297 * Nor can they impersonate a kill()/tgkill(), which adds source info.
2298 */
2299 if (info.si_code >= 0 || info.si_code == SI_TKILL) {
2300 /* We used to allow any < 0 si_code */
2301 WARN_ON_ONCE(info.si_code < 0);
2302 return -EPERM;
2304 info.si_signo = sig;
2306 /* POSIX.1b doesn't mention process groups. */
2307 return kill_proc_info(sig, &info, pid);
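/*
 * do_sigaction - install a new action for @sig and/or fetch the old one.
 * SIGKILL and SIGSTOP cannot have their actions changed, and both are
 * stripped from the new handler's sa_mask.  If the new action makes the
 * signal ignored, any already-queued instances are discarded from the
 * shared queue and from every thread in the group, per the POSIX note
 * below.
 */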
2310 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
2312 struct task_struct *t = current;
2313 struct k_sigaction *k;
2314 sigset_t mask;
2316 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
2317 return -EINVAL;
2319 k = &t->sighand->action[sig-1];
2321 spin_lock_irq(&current->sighand->siglock);
2322 if (oact)
2323 *oact = *k;
2325 if (act) {
2326 sigdelsetmask(&act->sa.sa_mask,
2327 sigmask(SIGKILL) | sigmask(SIGSTOP));
2328 *k = *act;
2329 /*
2330 * POSIX 3.3.1.3:
2331 * "Setting a signal action to SIG_IGN for a signal that is
2332 * pending shall cause the pending signal to be discarded,
2333 * whether or not it is blocked."
2334 *
2335 * "Setting a signal action to SIG_DFL for a signal that is
2336 * pending and whose default action is to ignore the signal
2337 * (for example, SIGCHLD), shall cause the pending signal to
2338 * be discarded, whether or not it is blocked"
2339 */
2340 if (sig_handler_ignored(sig_handler(t, sig), sig)) {
2341 sigemptyset(&mask);
2342 sigaddset(&mask, sig);
2343 rm_from_queue_full(&mask, &t->signal->shared_pending);
2344 do {
2345 rm_from_queue_full(&mask, &t->pending);
2346 t = next_thread(t);
2347 } while (t != current);
2351 spin_unlock_irq(&current->sighand->siglock);
2352 return 0;
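/*
 * do_sigaltstack - set and/or query the alternate signal stack.  A change
 * is refused with -EPERM while we are currently running on the old stack,
 * and a new stack smaller than MINSIGSTKSZ is rejected with -ENOMEM.
 * SS_DISABLE clears the stack; ss_flags == 0 is still accepted as meaning
 * SS_ONSTACK for the sake of old callers (see the note below).
 */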
2355 int
2356 do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
2358 stack_t oss;
2359 int error;
2361 oss.ss_sp = (void __user *) current->sas_ss_sp;
2362 oss.ss_size = current->sas_ss_size;
2363 oss.ss_flags = sas_ss_flags(sp);
2365 if (uss) {
2366 void __user *ss_sp;
2367 size_t ss_size;
2368 int ss_flags;
2370 error = -EFAULT;
2371 if (!access_ok(VERIFY_READ, uss, sizeof(*uss))
2372 || __get_user(ss_sp, &uss->ss_sp)
2373 || __get_user(ss_flags, &uss->ss_flags)
2374 || __get_user(ss_size, &uss->ss_size))
2375 goto out;
2377 error = -EPERM;
2378 if (on_sig_stack(sp))
2379 goto out;
2381 error = -EINVAL;
2383 /*
2384 * Note: this code used to test ss_flags incorrectly;
2385 * old code may have been written using ss_flags==0
2386 * to mean ss_flags==SS_ONSTACK (as this was the only
2387 * way that worked), so this fix preserves that older
2388 * mechanism.
2389 */
2390 if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
2391 goto out;
2393 if (ss_flags == SS_DISABLE) {
2394 ss_size = 0;
2395 ss_sp = NULL;
2396 } else {
2397 error = -ENOMEM;
2398 if (ss_size < MINSIGSTKSZ)
2399 goto out;
2402 current->sas_ss_sp = (unsigned long) ss_sp;
2403 current->sas_ss_size = ss_size;
2406 error = 0;
2407 if (uoss) {
2408 error = -EFAULT;
2409 if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
2410 goto out;
2411 error = __put_user(oss.ss_sp, &uoss->ss_sp) |
2412 __put_user(oss.ss_size, &uoss->ss_size) |
2413 __put_user(oss.ss_flags, &uoss->ss_flags);
2416 out:
2417 return error;
2420 #ifdef __ARCH_WANT_SYS_SIGPENDING
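/*
 * Old-style sigpending(): same as rt_sigpending(), but only the first word
 * of the pending set (an old_sigset_t) is copied back to user space.
 */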
2422 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
2424 return do_sigpending(set, sizeof(*set));
2427 #endif
2429 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
2430 /* Some platforms have their own version with special arguments;
2431 * others support only sys_rt_sigprocmask. */
2433 SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, set,
2434 old_sigset_t __user *, oset)
2436 int error;
2437 old_sigset_t old_set, new_set;
2439 if (set) {
2440 error = -EFAULT;
2441 if (copy_from_user(&new_set, set, sizeof(*set)))
2442 goto out;
2443 new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));
2445 spin_lock_irq(&current->sighand->siglock);
2446 old_set = current->blocked.sig[0];
2448 error = 0;
2449 switch (how) {
2450 default:
2451 error = -EINVAL;
2452 break;
2453 case SIG_BLOCK:
2454 sigaddsetmask(&current->blocked, new_set);
2455 break;
2456 case SIG_UNBLOCK:
2457 sigdelsetmask(&current->blocked, new_set);
2458 break;
2459 case SIG_SETMASK:
2460 current->blocked.sig[0] = new_set;
2461 break;
2464 recalc_sigpending();
2465 spin_unlock_irq(&current->sighand->siglock);
2466 if (error)
2467 goto out;
2468 if (oset)
2469 goto set_old;
2470 } else if (oset) {
2471 old_set = current->blocked.sig[0];
2472 set_old:
2473 error = -EFAULT;
2474 if (copy_to_user(oset, &old_set, sizeof(*oset)))
2475 goto out;
2477 error = 0;
2478 out:
2479 return error;
2481 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
2483 #ifdef __ARCH_WANT_SYS_RT_SIGACTION
2484 SYSCALL_DEFINE4(rt_sigaction, int, sig,
2485 const struct sigaction __user *, act,
2486 struct sigaction __user *, oact,
2487 size_t, sigsetsize)
2489 struct k_sigaction new_sa, old_sa;
2490 int ret = -EINVAL;
2492 /* XXX: Don't preclude handling different sized sigset_t's. */
2493 if (sigsetsize != sizeof(sigset_t))
2494 goto out;
2496 if (act) {
2497 if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
2498 return -EFAULT;
2501 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
2503 if (!ret && oact) {
2504 if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
2505 return -EFAULT;
2507 out:
2508 return ret;
2510 #endif /* __ARCH_WANT_SYS_RT_SIGACTION */
2512 #ifdef __ARCH_WANT_SYS_SGETMASK
2514 /*
2515 * For backwards compatibility. Functionality superseded by sigprocmask.
2516 */
2517 SYSCALL_DEFINE0(sgetmask)
2519 /* SMP safe */
2520 return current->blocked.sig[0];
2523 SYSCALL_DEFINE1(ssetmask, int, newmask)
2525 int old;
2527 spin_lock_irq(&current->sighand->siglock);
2528 old = current->blocked.sig[0];
2530 siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
2531 sigmask(SIGSTOP)));
2532 recalc_sigpending();
2533 spin_unlock_irq(&current->sighand->siglock);
2535 return old;
2537 #endif /* __ARCH_WANT_SYS_SGETMASK */
2539 #ifdef __ARCH_WANT_SYS_SIGNAL
2540 /*
2541 * For backwards compatibility. Functionality superseded by sigaction.
2542 */
2543 SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
2545 struct k_sigaction new_sa, old_sa;
2546 int ret;
2548 new_sa.sa.sa_handler = handler;
2549 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
2550 sigemptyset(&new_sa.sa.sa_mask);
2552 ret = do_sigaction(sig, &new_sa, &old_sa);
2554 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
2556 #endif /* __ARCH_WANT_SYS_SIGNAL */
2558 #ifdef __ARCH_WANT_SYS_PAUSE
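/*
 * sys_pause - sleep until any signal is delivered.  Returning
 * -ERESTARTNOHAND means the call is restarted only when no handler was
 * invoked; once a handler runs, user space sees -EINTR, as pause(2)
 * requires.
 */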
2560 SYSCALL_DEFINE0(pause)
2562 current->state = TASK_INTERRUPTIBLE;
2563 schedule();
2564 return -ERESTARTNOHAND;
2567 #endif
2569 #ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
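/*
 * sys_rt_sigsuspend - atomically replace the blocked mask with @unewset
 * (minus SIGKILL/SIGSTOP) and sleep until a signal is delivered.  The old
 * mask is saved in ->saved_sigmask and restored on the way back to user
 * space via set_restore_sigmask().  libc's sigsuspend() typically lands
 * here; a rough userspace sketch (not from this file):
 *
 *	sigset_t waitmask;
 *	sigemptyset(&waitmask);
 *	sigsuspend(&waitmask);	(returns -1 with errno set to EINTR)
 */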
2570 SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
2572 sigset_t newset;
2574 /* XXX: Don't preclude handling different sized sigset_t's. */
2575 if (sigsetsize != sizeof(sigset_t))
2576 return -EINVAL;
2578 if (copy_from_user(&newset, unewset, sizeof(newset)))
2579 return -EFAULT;
2580 sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));
2582 spin_lock_irq(&current->sighand->siglock);
2583 current->saved_sigmask = current->blocked;
2584 current->blocked = newset;
2585 recalc_sigpending();
2586 spin_unlock_irq(&current->sighand->siglock);
2588 current->state = TASK_INTERRUPTIBLE;
2589 schedule();
2590 set_restore_sigmask();
2591 return -ERESTARTNOHAND;
2593 #endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
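/*
 * Default arch_vma_name(): no special name for any vma.  Architectures that
 * want to label special mappings override this weak symbol.
 */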
2595 __attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
2597 return NULL;
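/*
 * signals_init - boot-time initialization: create the slab cache backing
 * struct sigqueue allocations; SLAB_PANIC makes a failure here fatal.
 */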
2600 void __init signals_init(void)
2602 sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);