[linux-2.6/suspend2-2.6.18.git] / kernel / signal.c
blob c89821b69ae3e10be8447d29d3f8bca893beb233
1 /*
2 * linux/kernel/signal.c
4 * Copyright (C) 1991, 1992 Linus Torvalds
6 * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson
8 * 2003-06-02 Jim Houston - Concurrent Computer Corp.
9 * Changes to use preallocated sigqueue structures
10 * to allow signals to be sent reliably.
13 #include <linux/config.h>
14 #include <linux/slab.h>
15 #include <linux/module.h>
16 #include <linux/smp_lock.h>
17 #include <linux/init.h>
18 #include <linux/sched.h>
19 #include <linux/fs.h>
20 #include <linux/tty.h>
21 #include <linux/binfmts.h>
22 #include <linux/security.h>
23 #include <linux/syscalls.h>
24 #include <linux/ptrace.h>
25 #include <linux/posix-timers.h>
26 #include <linux/signal.h>
27 #include <linux/audit.h>
28 #include <asm/param.h>
29 #include <asm/uaccess.h>
30 #include <asm/unistd.h>
31 #include <asm/siginfo.h>
34 * SLAB caches for signal bits.
37 static kmem_cache_t *sigqueue_cachep;
40 * In POSIX a signal is sent either to a specific thread (Linux task)
41 * or to the process as a whole (Linux thread group). How the signal
42 * is sent determines whether it's to one thread or the whole group,
43 * which determines which signal mask(s) are involved in blocking it
44 * from being delivered until later. When the signal is delivered,
45 * either it's caught or ignored by a user handler or it has a default
46 * effect that applies to the whole thread group (POSIX process).
48 * The possible effects an unblocked signal set to SIG_DFL can have are:
49 * ignore - Nothing Happens
50 * terminate - kill the process, i.e. all threads in the group,
51 * similar to exit_group. The group leader (only) reports
52 * WIFSIGNALED status to its parent.
53 * coredump - write a core dump file describing all threads using
54 * the same mm and then kill all those threads
55 * stop - stop all the threads in the group, i.e. TASK_STOPPED state
57 * SIGKILL and SIGSTOP cannot be caught, blocked, or ignored.
58 * Other signals, when not blocked and set to SIG_DFL, behave as follows.
59 * The job control signals also have other special effects.
61 * +--------------------+------------------+
62 * | POSIX signal | default action |
63 * +--------------------+------------------+
64 * | SIGHUP | terminate |
65 * | SIGINT | terminate |
66 * | SIGQUIT | coredump |
67 * | SIGILL | coredump |
68 * | SIGTRAP | coredump |
69 * | SIGABRT/SIGIOT | coredump |
70 * | SIGBUS | coredump |
71 * | SIGFPE | coredump |
72 * | SIGKILL | terminate(+) |
73 * | SIGUSR1 | terminate |
74 * | SIGSEGV | coredump |
75 * | SIGUSR2 | terminate |
76 * | SIGPIPE | terminate |
77 * | SIGALRM | terminate |
78 * | SIGTERM | terminate |
79 * | SIGCHLD | ignore |
80 * | SIGCONT | ignore(*) |
81 * | SIGSTOP | stop(*)(+) |
82 * | SIGTSTP | stop(*) |
83 * | SIGTTIN | stop(*) |
84 * | SIGTTOU | stop(*) |
85 * | SIGURG | ignore |
86 * | SIGXCPU | coredump |
87 * | SIGXFSZ | coredump |
88 * | SIGVTALRM | terminate |
89 * | SIGPROF | terminate |
90 * | SIGPOLL/SIGIO | terminate |
91 * | SIGSYS/SIGUNUSED | coredump |
92 * | SIGSTKFLT | terminate |
93 * | SIGWINCH | ignore |
94 * | SIGPWR | terminate |
95 * | SIGRTMIN-SIGRTMAX | terminate |
96 * +--------------------+------------------+
97 * | non-POSIX signal | default action |
98 * +--------------------+------------------+
99 * | SIGEMT | coredump |
100 * +--------------------+------------------+
102 * (+) For SIGKILL and SIGSTOP the action is "always", not just "default".
103 * (*) Special job control effects:
104 * When SIGCONT is sent, it resumes the process (all threads in the group)
105 * from TASK_STOPPED state and also clears any pending/queued stop signals
106 * (any of those marked with "stop(*)"). This happens regardless of blocking,
107 * catching, or ignoring SIGCONT. When any stop signal is sent, it clears
108 * any pending/queued SIGCONT signals; this happens regardless of blocking,
109 * catching, or ignoring the stop signal, though (except for SIGSTOP) the
110 * default action of stopping the process may happen later or never.
113 #ifdef SIGEMT
114 #define M_SIGEMT M(SIGEMT)
115 #else
116 #define M_SIGEMT 0
117 #endif
119 #if SIGRTMIN > BITS_PER_LONG
120 #define M(sig) (1ULL << ((sig)-1))
121 #else
122 #define M(sig) (1UL << ((sig)-1))
123 #endif
124 #define T(sig, mask) (M(sig) & (mask))
126 #define SIG_KERNEL_ONLY_MASK (\
127 M(SIGKILL) | M(SIGSTOP) )
129 #define SIG_KERNEL_STOP_MASK (\
130 M(SIGSTOP) | M(SIGTSTP) | M(SIGTTIN) | M(SIGTTOU) )
132 #define SIG_KERNEL_COREDUMP_MASK (\
133 M(SIGQUIT) | M(SIGILL) | M(SIGTRAP) | M(SIGABRT) | \
134 M(SIGFPE) | M(SIGSEGV) | M(SIGBUS) | M(SIGSYS) | \
135 M(SIGXCPU) | M(SIGXFSZ) | M_SIGEMT )
137 #define SIG_KERNEL_IGNORE_MASK (\
138 M(SIGCONT) | M(SIGCHLD) | M(SIGWINCH) | M(SIGURG) )
140 #define sig_kernel_only(sig) \
141 (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_ONLY_MASK))
142 #define sig_kernel_coredump(sig) \
143 (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_COREDUMP_MASK))
144 #define sig_kernel_ignore(sig) \
145 (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_IGNORE_MASK))
146 #define sig_kernel_stop(sig) \
147 (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_STOP_MASK))
149 #define sig_user_defined(t, signr) \
150 (((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_DFL) && \
151 ((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_IGN))
153 #define sig_fatal(t, signr) \
154 (!T(signr, SIG_KERNEL_IGNORE_MASK|SIG_KERNEL_STOP_MASK) && \
155 (t)->sighand->action[(signr)-1].sa.sa_handler == SIG_DFL)
157 static int sig_ignored(struct task_struct *t, int sig)
159 void __user * handler;
162 * Tracers always want to know about signals..
164 if (t->ptrace & PT_PTRACED)
165 return 0;
168 * Blocked signals are never ignored, since the
169 * signal handler may change by the time it is
170 * unblocked.
172 if (sigismember(&t->blocked, sig))
173 return 0;
175 /* Is it explicitly or implicitly ignored? */
176 handler = t->sighand->action[sig-1].sa.sa_handler;
177 return handler == SIG_IGN ||
178 (handler == SIG_DFL && sig_kernel_ignore(sig));
182 * Re-calculate pending state from the set of locally pending
183 * signals, globally pending signals, and blocked signals.
185 static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
187 unsigned long ready;
188 long i;
190 switch (_NSIG_WORDS) {
191 default:
192 for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
193 ready |= signal->sig[i] &~ blocked->sig[i];
194 break;
196 case 4: ready = signal->sig[3] &~ blocked->sig[3];
197 ready |= signal->sig[2] &~ blocked->sig[2];
198 ready |= signal->sig[1] &~ blocked->sig[1];
199 ready |= signal->sig[0] &~ blocked->sig[0];
200 break;
202 case 2: ready = signal->sig[1] &~ blocked->sig[1];
203 ready |= signal->sig[0] &~ blocked->sig[0];
204 break;
206 case 1: ready = signal->sig[0] &~ blocked->sig[0];
208 return ready != 0;
211 #define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
213 fastcall void recalc_sigpending_tsk(struct task_struct *t)
215 if (t->signal->group_stop_count > 0 ||
216 PENDING(&t->pending, &t->blocked) ||
217 PENDING(&t->signal->shared_pending, &t->blocked))
218 set_tsk_thread_flag(t, TIF_SIGPENDING);
219 else
220 clear_tsk_thread_flag(t, TIF_SIGPENDING);
223 void recalc_sigpending(void)
225 recalc_sigpending_tsk(current);
228 /* Given the mask, find the first available signal that should be serviced. */
230 static int
231 next_signal(struct sigpending *pending, sigset_t *mask)
233 unsigned long i, *s, *m, x;
234 int sig = 0;
236 s = pending->signal.sig;
237 m = mask->sig;
238 switch (_NSIG_WORDS) {
239 default:
240 for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
241 if ((x = *s &~ *m) != 0) {
242 sig = ffz(~x) + i*_NSIG_BPW + 1;
243 break;
245 break;
247 case 2: if ((x = s[0] &~ m[0]) != 0)
248 sig = 1;
249 else if ((x = s[1] &~ m[1]) != 0)
250 sig = _NSIG_BPW + 1;
251 else
252 break;
253 sig += ffz(~x);
254 break;
256 case 1: if ((x = *s &~ *m) != 0)
257 sig = ffz(~x) + 1;
258 break;
261 return sig;
264 static struct sigqueue *__sigqueue_alloc(struct task_struct *t, unsigned int __nocast flags,
265 int override_rlimit)
267 struct sigqueue *q = NULL;
269 atomic_inc(&t->user->sigpending);
270 if (override_rlimit ||
271 atomic_read(&t->user->sigpending) <=
272 t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
273 q = kmem_cache_alloc(sigqueue_cachep, flags);
274 if (unlikely(q == NULL)) {
275 atomic_dec(&t->user->sigpending);
276 } else {
277 INIT_LIST_HEAD(&q->list);
278 q->flags = 0;
279 q->lock = NULL;
280 q->user = get_uid(t->user);
282 return(q);
285 static inline void __sigqueue_free(struct sigqueue *q)
287 if (q->flags & SIGQUEUE_PREALLOC)
288 return;
289 atomic_dec(&q->user->sigpending);
290 free_uid(q->user);
291 kmem_cache_free(sigqueue_cachep, q);
294 static void flush_sigqueue(struct sigpending *queue)
296 struct sigqueue *q;
298 sigemptyset(&queue->signal);
299 while (!list_empty(&queue->list)) {
300 q = list_entry(queue->list.next, struct sigqueue , list);
301 list_del_init(&q->list);
302 __sigqueue_free(q);
307 * Flush all pending signals for a task.
310 void
311 flush_signals(struct task_struct *t)
313 unsigned long flags;
315 spin_lock_irqsave(&t->sighand->siglock, flags);
316 clear_tsk_thread_flag(t,TIF_SIGPENDING);
317 flush_sigqueue(&t->pending);
318 flush_sigqueue(&t->signal->shared_pending);
319 spin_unlock_irqrestore(&t->sighand->siglock, flags);
323 * This function expects the tasklist_lock write-locked.
325 void __exit_sighand(struct task_struct *tsk)
327 struct sighand_struct * sighand = tsk->sighand;
329 /* Ok, we're done with the signal handlers */
330 tsk->sighand = NULL;
331 if (atomic_dec_and_test(&sighand->count))
332 kmem_cache_free(sighand_cachep, sighand);
335 void exit_sighand(struct task_struct *tsk)
337 write_lock_irq(&tasklist_lock);
338 __exit_sighand(tsk);
339 write_unlock_irq(&tasklist_lock);
343 * This function expects the tasklist_lock write-locked.
345 void __exit_signal(struct task_struct *tsk)
347 struct signal_struct * sig = tsk->signal;
348 struct sighand_struct * sighand = tsk->sighand;
350 if (!sig)
351 BUG();
352 if (!atomic_read(&sig->count))
353 BUG();
354 spin_lock(&sighand->siglock);
355 posix_cpu_timers_exit(tsk);
356 if (atomic_dec_and_test(&sig->count)) {
357 posix_cpu_timers_exit_group(tsk);
358 if (tsk == sig->curr_target)
359 sig->curr_target = next_thread(tsk);
360 tsk->signal = NULL;
361 spin_unlock(&sighand->siglock);
362 flush_sigqueue(&sig->shared_pending);
363 } else {
365 * If there is any task waiting for the group exit
366 * then notify it:
368 if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count) {
369 wake_up_process(sig->group_exit_task);
370 sig->group_exit_task = NULL;
372 if (tsk == sig->curr_target)
373 sig->curr_target = next_thread(tsk);
374 tsk->signal = NULL;
376 * Accumulate here the counters for all threads but the
377 * group leader as they die, so they can be added into
378 * the process-wide totals when those are taken.
379 * The group leader stays around as a zombie as long
380 * as there are other threads. When it gets reaped,
381 * the exit.c code will add its counts into these totals.
382 * We won't ever get here for the group leader, since it
383 * will have been the last reference on the signal_struct.
385 sig->utime = cputime_add(sig->utime, tsk->utime);
386 sig->stime = cputime_add(sig->stime, tsk->stime);
387 sig->min_flt += tsk->min_flt;
388 sig->maj_flt += tsk->maj_flt;
389 sig->nvcsw += tsk->nvcsw;
390 sig->nivcsw += tsk->nivcsw;
391 sig->sched_time += tsk->sched_time;
392 spin_unlock(&sighand->siglock);
393 sig = NULL; /* Marker for below. */
395 clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
396 flush_sigqueue(&tsk->pending);
397 if (sig) {
399 * We are cleaning up the signal_struct here. We delayed
400 * calling exit_itimers until after flush_sigqueue, just in
401 * case our thread-local pending queue contained a queued
402 * timer signal that would have been cleared in
403 * exit_itimers. When that called sigqueue_free, it would
404 * attempt to re-take the tasklist_lock and deadlock. This
405 * can never happen if we ensure that all queues the
406 * timer's signal might be queued on have been flushed
407 * first. The shared_pending queue, and our own pending
408 * queue are the only queues the timer could be on, since
409 * there are no other threads left in the group and timer
410 * signals are constrained to threads inside the group.
412 exit_itimers(sig);
413 exit_thread_group_keys(sig);
414 kmem_cache_free(signal_cachep, sig);
418 void exit_signal(struct task_struct *tsk)
420 write_lock_irq(&tasklist_lock);
421 __exit_signal(tsk);
422 write_unlock_irq(&tasklist_lock);
426 * Flush all handlers for a task.
429 void
430 flush_signal_handlers(struct task_struct *t, int force_default)
432 int i;
433 struct k_sigaction *ka = &t->sighand->action[0];
434 for (i = _NSIG ; i != 0 ; i--) {
435 if (force_default || ka->sa.sa_handler != SIG_IGN)
436 ka->sa.sa_handler = SIG_DFL;
437 ka->sa.sa_flags = 0;
438 sigemptyset(&ka->sa.sa_mask);
439 ka++;
444 /* Notify the system that a driver wants to block all signals for this
445 * process, and wants to be notified if any signals at all were to be
446 * sent/acted upon. If the notifier routine returns non-zero, then the
447 * signal will be acted upon after all. If the notifier routine returns 0,
448 then the signal will be blocked. Only one block per process is
449 * allowed. priv is a pointer to private data that the notifier routine
450 * can use to determine if the signal should be blocked or not. */
452 void
453 block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
455 unsigned long flags;
457 spin_lock_irqsave(&current->sighand->siglock, flags);
458 current->notifier_mask = mask;
459 current->notifier_data = priv;
460 current->notifier = notifier;
461 spin_unlock_irqrestore(&current->sighand->siglock, flags);
464 /* Notify the system that blocking has ended. */
466 void
467 unblock_all_signals(void)
469 unsigned long flags;
471 spin_lock_irqsave(&current->sighand->siglock, flags);
472 current->notifier = NULL;
473 current->notifier_data = NULL;
474 recalc_sigpending();
475 spin_unlock_irqrestore(&current->sighand->siglock, flags);
478 static inline int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
480 struct sigqueue *q, *first = NULL;
481 int still_pending = 0;
483 if (unlikely(!sigismember(&list->signal, sig)))
484 return 0;
487 * Collect the siginfo appropriate to this signal. Check if
488 * there is another siginfo for the same signal.
490 list_for_each_entry(q, &list->list, list) {
491 if (q->info.si_signo == sig) {
492 if (first) {
493 still_pending = 1;
494 break;
496 first = q;
499 if (first) {
500 list_del_init(&first->list);
501 copy_siginfo(info, &first->info);
502 __sigqueue_free(first);
503 if (!still_pending)
504 sigdelset(&list->signal, sig);
505 } else {
507 /* Ok, it wasn't in the queue. This must be
508 a fast-pathed signal or we must have been
509 out of queue space. So zero out the info.
511 sigdelset(&list->signal, sig);
512 info->si_signo = sig;
513 info->si_errno = 0;
514 info->si_code = 0;
515 info->si_pid = 0;
516 info->si_uid = 0;
518 return 1;
521 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
522 siginfo_t *info)
524 int sig = 0;
526 /* SIGKILL must have priority, otherwise it is quite easy
527 * to create an unkillable process, sending sig < SIGKILL
528 * to self */
529 if (unlikely(sigismember(&pending->signal, SIGKILL))) {
530 if (!sigismember(mask, SIGKILL))
531 sig = SIGKILL;
534 if (likely(!sig))
535 sig = next_signal(pending, mask);
536 if (sig) {
537 if (current->notifier) {
538 if (sigismember(current->notifier_mask, sig)) {
539 if (!(current->notifier)(current->notifier_data)) {
540 clear_thread_flag(TIF_SIGPENDING);
541 return 0;
546 if (!collect_signal(sig, pending, info))
547 sig = 0;
550 recalc_sigpending();
552 return sig;
556 * Dequeue a signal and return the element to the caller, which is
557 * expected to free it.
559 * All callers have to hold the siglock.
561 int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
563 int signr = __dequeue_signal(&tsk->pending, mask, info);
564 if (!signr)
565 signr = __dequeue_signal(&tsk->signal->shared_pending,
566 mask, info);
567 if (signr && unlikely(sig_kernel_stop(signr))) {
569 * Set a marker that we have dequeued a stop signal. Our
570 * caller might release the siglock and then the pending
571 * stop signal it is about to process is no longer in the
572 * pending bitmasks, but must still be cleared by a SIGCONT
573 * (and overruled by a SIGKILL). So those cases clear this
574 * shared flag after we've set it. Note that this flag may
575 * remain set after the signal we return is ignored or
576 * handled. That doesn't matter because its only purpose
577 * is to alert stop-signal processing code when another
578 * processor has come along and cleared the flag.
580 tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
582 if ( signr &&
583 ((info->si_code & __SI_MASK) == __SI_TIMER) &&
584 info->si_sys_private){
586 * Release the siglock to ensure proper locking order
587 * of timer locks outside of siglocks. Note, we leave
588 * irqs disabled here, since the posix-timers code is
589 * about to disable them again anyway.
591 spin_unlock(&tsk->sighand->siglock);
592 do_schedule_next_timer(info);
593 spin_lock(&tsk->sighand->siglock);
595 return signr;
599 * Tell a process that it has a new active signal..
601 * NOTE! we rely on the previous spin_lock to
602 * lock interrupts for us! We can only be called with
603 * "siglock" held, and the local interrupt must
604 * have been disabled when that got acquired!
606 * No need to set need_resched since signal event passing
607 * goes through ->blocked
609 void signal_wake_up(struct task_struct *t, int resume)
611 unsigned int mask;
613 set_tsk_thread_flag(t, TIF_SIGPENDING);
616 * For SIGKILL, we want to wake it up in the stopped/traced case.
617 * We don't check t->state here because there is a race with it
618 * executing on another processor and just now entering stopped state.
619 * By using wake_up_state, we ensure the process will wake up and
620 * handle its death signal.
622 mask = TASK_INTERRUPTIBLE;
623 if (resume)
624 mask |= TASK_STOPPED | TASK_TRACED;
625 if (!wake_up_state(t, mask))
626 kick_process(t);
630 * Remove signals in mask from the pending set and queue.
631 * Returns 1 if any signals were found.
633 * All callers must be holding the siglock.
635 static int rm_from_queue(unsigned long mask, struct sigpending *s)
637 struct sigqueue *q, *n;
639 if (!sigtestsetmask(&s->signal, mask))
640 return 0;
642 sigdelsetmask(&s->signal, mask);
643 list_for_each_entry_safe(q, n, &s->list, list) {
644 if (q->info.si_signo < SIGRTMIN &&
645 (mask & sigmask(q->info.si_signo))) {
646 list_del_init(&q->list);
647 __sigqueue_free(q);
650 return 1;
654 * Bad permissions for sending the signal
656 static int check_kill_permission(int sig, struct siginfo *info,
657 struct task_struct *t)
659 int error = -EINVAL;
660 if (!valid_signal(sig))
661 return error;
662 error = -EPERM;
663 if ((!info || ((unsigned long)info != 1 &&
664 (unsigned long)info != 2 && SI_FROMUSER(info)))
665 && ((sig != SIGCONT) ||
666 (current->signal->session != t->signal->session))
667 && (current->euid ^ t->suid) && (current->euid ^ t->uid)
668 && (current->uid ^ t->suid) && (current->uid ^ t->uid)
669 && !capable(CAP_KILL))
670 return error;
672 error = security_task_kill(t, info, sig);
673 if (!error)
674 audit_signal_info(sig, t); /* Let audit system see the signal */
675 return error;
678 /* forward decl */
679 static void do_notify_parent_cldstop(struct task_struct *tsk,
680 struct task_struct *parent,
681 int why);
684 * Handle magic process-wide effects of stop/continue signals.
685 * Unlike the signal actions, these happen immediately at signal-generation
686 * time regardless of blocking, ignoring, or handling. This does the
687 * actual continuing for SIGCONT, but not the actual stopping for stop
688 * signals. The process stop is done as a signal action for SIG_DFL.
690 static void handle_stop_signal(int sig, struct task_struct *p)
692 struct task_struct *t;
694 if (p->flags & SIGNAL_GROUP_EXIT)
696 * The process is in the middle of dying already.
698 return;
700 if (sig_kernel_stop(sig)) {
702 * This is a stop signal. Remove SIGCONT from all queues.
704 rm_from_queue(sigmask(SIGCONT), &p->signal->shared_pending);
705 t = p;
706 do {
707 rm_from_queue(sigmask(SIGCONT), &t->pending);
708 t = next_thread(t);
709 } while (t != p);
710 } else if (sig == SIGCONT) {
712 * Remove all stop signals from all queues,
713 * and wake all threads.
715 if (unlikely(p->signal->group_stop_count > 0)) {
717 * There was a group stop in progress. We'll
718 * pretend it finished before we got here. We are
719 * obliged to report it to the parent: if the
720 * SIGSTOP happened "after" this SIGCONT, then it
721 * would have cleared this pending SIGCONT. If it
722 * happened "before" this SIGCONT, then the parent
723 * got the SIGCHLD about the stop finishing before
724 * the continue happened. We do the notification
725 * now, and it's as if the stop had finished and
726 * the SIGCHLD was pending on entry to this kill.
728 p->signal->group_stop_count = 0;
729 p->signal->flags = SIGNAL_STOP_CONTINUED;
730 spin_unlock(&p->sighand->siglock);
731 if (p->ptrace & PT_PTRACED)
732 do_notify_parent_cldstop(p, p->parent,
733 CLD_STOPPED);
734 else
735 do_notify_parent_cldstop(
736 p->group_leader,
737 p->group_leader->real_parent,
738 CLD_STOPPED);
739 spin_lock(&p->sighand->siglock);
741 rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
742 t = p;
743 do {
744 unsigned int state;
745 rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
748 * If there is a handler for SIGCONT, we must make
749 * sure that no thread returns to user mode before
750 * we post the signal, in case it was the only
751 * thread eligible to run the signal handler--then
752 * it must not do anything between resuming and
753 * running the handler. With the TIF_SIGPENDING
754 * flag set, the thread will pause and acquire the
755 * siglock that we hold now and until we've queued
756 * the pending signal.
758 * Wake up the stopped thread _after_ setting
759 * TIF_SIGPENDING
761 state = TASK_STOPPED;
762 if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
763 set_tsk_thread_flag(t, TIF_SIGPENDING);
764 state |= TASK_INTERRUPTIBLE;
766 wake_up_state(t, state);
768 t = next_thread(t);
769 } while (t != p);
771 if (p->signal->flags & SIGNAL_STOP_STOPPED) {
773 * We were in fact stopped, and are now continued.
774 * Notify the parent with CLD_CONTINUED.
776 p->signal->flags = SIGNAL_STOP_CONTINUED;
777 p->signal->group_exit_code = 0;
778 spin_unlock(&p->sighand->siglock);
779 if (p->ptrace & PT_PTRACED)
780 do_notify_parent_cldstop(p, p->parent,
781 CLD_CONTINUED);
782 else
783 do_notify_parent_cldstop(
784 p->group_leader,
785 p->group_leader->real_parent,
786 CLD_CONTINUED);
787 spin_lock(&p->sighand->siglock);
788 } else {
790 * We are not stopped, but there could be a stop
791 * signal in the middle of being processed after
792 * being removed from the queue. Clear that too.
794 p->signal->flags = 0;
796 } else if (sig == SIGKILL) {
798 * Make sure that any pending stop signal already dequeued
799 * is undone by the wakeup for SIGKILL.
801 p->signal->flags = 0;
805 static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
806 struct sigpending *signals)
808 struct sigqueue * q = NULL;
809 int ret = 0;
812 * fast-pathed signals for kernel-internal things like SIGSTOP
813 * or SIGKILL.
815 if ((unsigned long)info == 2)
816 goto out_set;
818 /* Real-time signals must be queued if sent by sigqueue, or
819 some other real-time mechanism. It is implementation
820 defined whether kill() does so. We attempt to do so, on
821 the principle of least surprise, but since kill is not
822 allowed to fail with EAGAIN when low on memory we just
823 make sure at least one signal gets delivered and don't
824 pass on the info struct. */
826 q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
827 ((unsigned long) info < 2 ||
828 info->si_code >= 0)));
829 if (q) {
830 list_add_tail(&q->list, &signals->list);
831 switch ((unsigned long) info) {
832 case 0:
833 q->info.si_signo = sig;
834 q->info.si_errno = 0;
835 q->info.si_code = SI_USER;
836 q->info.si_pid = current->pid;
837 q->info.si_uid = current->uid;
838 break;
839 case 1:
840 q->info.si_signo = sig;
841 q->info.si_errno = 0;
842 q->info.si_code = SI_KERNEL;
843 q->info.si_pid = 0;
844 q->info.si_uid = 0;
845 break;
846 default:
847 copy_siginfo(&q->info, info);
848 break;
850 } else {
851 if (sig >= SIGRTMIN && info && (unsigned long)info != 1
852 && info->si_code != SI_USER)
854 * Queue overflow, abort. We may abort if the signal was rt
855 * and sent by user using something other than kill().
857 return -EAGAIN;
858 if (((unsigned long)info > 1) && (info->si_code == SI_TIMER))
860 * Set up a return to indicate that we dropped
861 * the signal.
863 ret = info->si_sys_private;
866 out_set:
867 sigaddset(&signals->signal, sig);
868 return ret;
871 #define LEGACY_QUEUE(sigptr, sig) \
872 (((sig) < SIGRTMIN) && sigismember(&(sigptr)->signal, (sig)))
875 static int
876 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
878 int ret = 0;
880 if (!irqs_disabled())
881 BUG();
882 assert_spin_locked(&t->sighand->siglock);
884 if (((unsigned long)info > 2) && (info->si_code == SI_TIMER))
886 * Set up a return to indicate that we dropped the signal.
888 ret = info->si_sys_private;
890 /* Short-circuit ignored signals. */
891 if (sig_ignored(t, sig))
892 goto out;
894 /* Support queueing exactly one non-rt signal, so that we
895 can get more detailed information about the cause of
896 the signal. */
897 if (LEGACY_QUEUE(&t->pending, sig))
898 goto out;
900 ret = send_signal(sig, info, t, &t->pending);
901 if (!ret && !sigismember(&t->blocked, sig))
902 signal_wake_up(t, sig == SIGKILL);
903 out:
904 return ret;
908 * Force a signal that the process can't ignore: if necessary
909 * we unblock the signal and change any SIG_IGN to SIG_DFL.
913 force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
915 unsigned long int flags;
916 int ret;
918 spin_lock_irqsave(&t->sighand->siglock, flags);
919 if (sigismember(&t->blocked, sig) || t->sighand->action[sig-1].sa.sa_handler == SIG_IGN) {
920 t->sighand->action[sig-1].sa.sa_handler = SIG_DFL;
921 sigdelset(&t->blocked, sig);
922 recalc_sigpending_tsk(t);
924 ret = specific_send_sig_info(sig, info, t);
925 spin_unlock_irqrestore(&t->sighand->siglock, flags);
927 return ret;
930 void
931 force_sig_specific(int sig, struct task_struct *t)
933 unsigned long int flags;
935 spin_lock_irqsave(&t->sighand->siglock, flags);
936 if (t->sighand->action[sig-1].sa.sa_handler == SIG_IGN)
937 t->sighand->action[sig-1].sa.sa_handler = SIG_DFL;
938 sigdelset(&t->blocked, sig);
939 recalc_sigpending_tsk(t);
940 specific_send_sig_info(sig, (void *)2, t);
941 spin_unlock_irqrestore(&t->sighand->siglock, flags);
945 * Test if P wants to take SIG. After we've checked all threads with this,
946 * it's equivalent to finding no threads not blocking SIG. Any threads not
947 * blocking SIG were ruled out because they are not running and already
948 * have pending signals. Such threads will dequeue from the shared queue
949 * as soon as they're available, so putting the signal on the shared queue
950 * will be equivalent to sending it to one such thread.
952 #define wants_signal(sig, p, mask) \
953 (!sigismember(&(p)->blocked, sig) \
954 && !((p)->state & mask) \
955 && !((p)->flags & PF_EXITING) \
956 && (task_curr(p) || !signal_pending(p)))
959 static void
960 __group_complete_signal(int sig, struct task_struct *p)
962 unsigned int mask;
963 struct task_struct *t;
966 * Don't bother traced and stopped tasks (but
967 * SIGKILL will punch through that).
969 mask = TASK_STOPPED | TASK_TRACED;
970 if (sig == SIGKILL)
971 mask = 0;
974 * Now find a thread we can wake up to take the signal off the queue.
976 * If the main thread wants the signal, it gets first crack.
977 * Probably the least surprising to the average bear.
979 if (wants_signal(sig, p, mask))
980 t = p;
981 else if (thread_group_empty(p))
983 * There is just one thread and it does not need to be woken.
984 * It will dequeue unblocked signals before it runs again.
986 return;
987 else {
989 * Otherwise try to find a suitable thread.
991 t = p->signal->curr_target;
992 if (t == NULL)
993 /* restart balancing at this thread */
994 t = p->signal->curr_target = p;
995 BUG_ON(t->tgid != p->tgid);
997 while (!wants_signal(sig, t, mask)) {
998 t = next_thread(t);
999 if (t == p->signal->curr_target)
1001 * No thread needs to be woken.
1002 * Any eligible threads will see
1003 * the signal in the queue soon.
1005 return;
1007 p->signal->curr_target = t;
1011 * Found a killable thread. If the signal will be fatal,
1012 * then start taking the whole group down immediately.
1014 if (sig_fatal(p, sig) && !(p->signal->flags & SIGNAL_GROUP_EXIT) &&
1015 !sigismember(&t->real_blocked, sig) &&
1016 (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
1018 * This signal will be fatal to the whole group.
1020 if (!sig_kernel_coredump(sig)) {
1022 * Start a group exit and wake everybody up.
1023 * This way we don't have other threads
1024 * running and doing things after a slower
1025 * thread has the fatal signal pending.
1027 p->signal->flags = SIGNAL_GROUP_EXIT;
1028 p->signal->group_exit_code = sig;
1029 p->signal->group_stop_count = 0;
1030 t = p;
1031 do {
1032 sigaddset(&t->pending.signal, SIGKILL);
1033 signal_wake_up(t, 1);
1034 t = next_thread(t);
1035 } while (t != p);
1036 return;
1040 * There will be a core dump. We make all threads other
1041 * than the chosen one go into a group stop so that nothing
1042 * happens until it gets scheduled, takes the signal off
1043 * the shared queue, and does the core dump. This is a
1044 * little more complicated than strictly necessary, but it
1045 * keeps the signal state that winds up in the core dump
1046 * unchanged from the death state, e.g. which thread had
1047 * the core-dump signal unblocked.
1049 rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
1050 rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
1051 p->signal->group_stop_count = 0;
1052 p->signal->group_exit_task = t;
1053 t = p;
1054 do {
1055 p->signal->group_stop_count++;
1056 signal_wake_up(t, 0);
1057 t = next_thread(t);
1058 } while (t != p);
1059 wake_up_process(p->signal->group_exit_task);
1060 return;
1064 * The signal is already in the shared-pending queue.
1065 * Tell the chosen thread to wake up and dequeue it.
1067 signal_wake_up(t, sig == SIGKILL);
1068 return;
1072 __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1074 int ret = 0;
1076 assert_spin_locked(&p->sighand->siglock);
1077 handle_stop_signal(sig, p);
1079 if (((unsigned long)info > 2) && (info->si_code == SI_TIMER))
1081 * Set up a return to indicate that we dropped the signal.
1083 ret = info->si_sys_private;
1085 /* Short-circuit ignored signals. */
1086 if (sig_ignored(p, sig))
1087 return ret;
1089 if (LEGACY_QUEUE(&p->signal->shared_pending, sig))
1090 /* This is a non-RT signal and we already have one queued. */
1091 return ret;
1094 * Put this signal on the shared-pending queue, or fail with EAGAIN.
1095 * We always use the shared queue for process-wide signals,
1096 * to avoid several races.
1098 ret = send_signal(sig, info, p, &p->signal->shared_pending);
1099 if (unlikely(ret))
1100 return ret;
1102 __group_complete_signal(sig, p);
1103 return 0;
1107 * Nuke all other threads in the group.
1109 void zap_other_threads(struct task_struct *p)
1111 struct task_struct *t;
1113 p->signal->flags = SIGNAL_GROUP_EXIT;
1114 p->signal->group_stop_count = 0;
1116 if (thread_group_empty(p))
1117 return;
1119 for (t = next_thread(p); t != p; t = next_thread(t)) {
1121 * Don't bother with already dead threads
1123 if (t->exit_state)
1124 continue;
1127 * We don't want to notify the parent, since we are
1128 * killed as part of a thread group due to another
1129 * thread doing an execve() or similar. So set the
1130 * exit signal to -1 to allow immediate reaping of
1131 * the process. But don't detach the thread group
1132 * leader.
1134 if (t != p->group_leader)
1135 t->exit_signal = -1;
1137 sigaddset(&t->pending.signal, SIGKILL);
1138 rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
1139 signal_wake_up(t, 1);
1144 * Must be called with the tasklist_lock held for reading!
1146 int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1148 unsigned long flags;
1149 int ret;
1151 ret = check_kill_permission(sig, info, p);
1152 if (!ret && sig && p->sighand) {
1153 spin_lock_irqsave(&p->sighand->siglock, flags);
1154 ret = __group_send_sig_info(sig, info, p);
1155 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1158 return ret;
1162 * kill_pg_info() sends a signal to a process group: this is what the tty
1163 * control characters do (^C, ^Z etc)
1166 int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
1168 struct task_struct *p = NULL;
1169 int retval, success;
1171 if (pgrp <= 0)
1172 return -EINVAL;
1174 success = 0;
1175 retval = -ESRCH;
1176 do_each_task_pid(pgrp, PIDTYPE_PGID, p) {
1177 int err = group_send_sig_info(sig, info, p);
1178 success |= !err;
1179 retval = err;
1180 } while_each_task_pid(pgrp, PIDTYPE_PGID, p);
1181 return success ? 0 : retval;
1185 kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
1187 int retval;
1189 read_lock(&tasklist_lock);
1190 retval = __kill_pg_info(sig, info, pgrp);
1191 read_unlock(&tasklist_lock);
1193 return retval;
1197 kill_proc_info(int sig, struct siginfo *info, pid_t pid)
1199 int error;
1200 struct task_struct *p;
1202 read_lock(&tasklist_lock);
1203 p = find_task_by_pid(pid);
1204 error = -ESRCH;
1205 if (p)
1206 error = group_send_sig_info(sig, info, p);
1207 read_unlock(&tasklist_lock);
1208 return error;
1213 * kill_something_info() interprets pid in interesting ways just like kill(2).
1215 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1216 * is probably wrong. Should make it like BSD or SYSV.
1219 static int kill_something_info(int sig, struct siginfo *info, int pid)
1221 if (!pid) {
1222 return kill_pg_info(sig, info, process_group(current));
1223 } else if (pid == -1) {
1224 int retval = 0, count = 0;
1225 struct task_struct * p;
1227 read_lock(&tasklist_lock);
1228 for_each_process(p) {
1229 if (p->pid > 1 && p->tgid != current->tgid) {
1230 int err = group_send_sig_info(sig, info, p);
1231 ++count;
1232 if (err != -EPERM)
1233 retval = err;
1236 read_unlock(&tasklist_lock);
1237 return count ? retval : -ESRCH;
1238 } else if (pid < 0) {
1239 return kill_pg_info(sig, info, -pid);
1240 } else {
1241 return kill_proc_info(sig, info, pid);
1246 * These are for backward compatibility with the rest of the kernel source.
1250 * These two are the most common entry points. They send a signal
1251 * just to the specific thread.
1254 send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1256 int ret;
1257 unsigned long flags;
1260 * Make sure legacy kernel users don't send in bad values
1261 * (normal paths check this in check_kill_permission).
1263 if (!valid_signal(sig))
1264 return -EINVAL;
1267 * We need the tasklist lock even for the specific
1268 * thread case (when we don't need to follow the group
1269 * lists) in order to avoid races with "p->sighand"
1270 * going away or changing from under us.
1272 read_lock(&tasklist_lock);
1273 spin_lock_irqsave(&p->sighand->siglock, flags);
1274 ret = specific_send_sig_info(sig, info, p);
1275 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1276 read_unlock(&tasklist_lock);
1277 return ret;
1281 send_sig(int sig, struct task_struct *p, int priv)
1283 return send_sig_info(sig, (void*)(long)(priv != 0), p);
1287 * This is the entry point for "process-wide" signals.
1288 * They will go to an appropriate thread in the thread group.
1291 send_group_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1293 int ret;
1294 read_lock(&tasklist_lock);
1295 ret = group_send_sig_info(sig, info, p);
1296 read_unlock(&tasklist_lock);
1297 return ret;
1300 void
1301 force_sig(int sig, struct task_struct *p)
1303 force_sig_info(sig, (void*)1L, p);
1307 * When things go south during signal handling, we
1308 * will force a SIGSEGV. And if the signal that caused
1309 * the problem was already a SIGSEGV, we'll want to
1310 * make sure we don't even try to deliver the signal..
1313 force_sigsegv(int sig, struct task_struct *p)
1315 if (sig == SIGSEGV) {
1316 unsigned long flags;
1317 spin_lock_irqsave(&p->sighand->siglock, flags);
1318 p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1319 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1321 force_sig(SIGSEGV, p);
1322 return 0;
1326 kill_pg(pid_t pgrp, int sig, int priv)
1328 return kill_pg_info(sig, (void *)(long)(priv != 0), pgrp);
1332 kill_proc(pid_t pid, int sig, int priv)
1334 return kill_proc_info(sig, (void *)(long)(priv != 0), pid);
1338 * These functions support sending signals using preallocated sigqueue
1339 * structures. This is needed "because realtime applications cannot
1340 * afford to lose notifications of asynchronous events, like timer
1341 * expirations or I/O completions". In the case of Posix Timers
1342 * we allocate the sigqueue structure from the timer_create. If this
1343 * allocation fails we are able to report the failure to the application
1344 * with an EAGAIN error.
1347 struct sigqueue *sigqueue_alloc(void)
1349 struct sigqueue *q;
1351 if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0)))
1352 q->flags |= SIGQUEUE_PREALLOC;
1353 return(q);
1356 void sigqueue_free(struct sigqueue *q)
1358 unsigned long flags;
1359 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1361 * If the signal is still pending remove it from the
1362 * pending queue.
1364 if (unlikely(!list_empty(&q->list))) {
1365 read_lock(&tasklist_lock);
1366 spin_lock_irqsave(q->lock, flags);
1367 if (!list_empty(&q->list))
1368 list_del_init(&q->list);
1369 spin_unlock_irqrestore(q->lock, flags);
1370 read_unlock(&tasklist_lock);
1372 q->flags &= ~SIGQUEUE_PREALLOC;
1373 __sigqueue_free(q);
1377 send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
1379 unsigned long flags;
1380 int ret = 0;
1383 * We need the tasklist lock even for the specific
1384 * thread case (when we don't need to follow the group
1385 * lists) in order to avoid races with "p->sighand"
1386 * going away or changing from under us.
1388 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1389 read_lock(&tasklist_lock);
1390 spin_lock_irqsave(&p->sighand->siglock, flags);
1392 if (unlikely(!list_empty(&q->list))) {
1394 * If an SI_TIMER entry is already queued, just increment
1395 * the overrun count.
1397 if (q->info.si_code != SI_TIMER)
1398 BUG();
1399 q->info.si_overrun++;
1400 goto out;
1402 /* Short-circuit ignored signals. */
1403 if (sig_ignored(p, sig)) {
1404 ret = 1;
1405 goto out;
1408 q->lock = &p->sighand->siglock;
1409 list_add_tail(&q->list, &p->pending.list);
1410 sigaddset(&p->pending.signal, sig);
1411 if (!sigismember(&p->blocked, sig))
1412 signal_wake_up(p, sig == SIGKILL);
1414 out:
1415 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1416 read_unlock(&tasklist_lock);
1417 return(ret);
1421 send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
1423 unsigned long flags;
1424 int ret = 0;
1426 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1427 read_lock(&tasklist_lock);
1428 spin_lock_irqsave(&p->sighand->siglock, flags);
1429 handle_stop_signal(sig, p);
1431 /* Short-circuit ignored signals. */
1432 if (sig_ignored(p, sig)) {
1433 ret = 1;
1434 goto out;
1437 if (unlikely(!list_empty(&q->list))) {
1439 * If an SI_TIMER entry is already queued, just increment
1440 * the overrun count. Other uses should not try to
1441 * send the signal multiple times.
1443 if (q->info.si_code != SI_TIMER)
1444 BUG();
1445 q->info.si_overrun++;
1446 goto out;
1450 * Put this signal on the shared-pending queue.
1451 * We always use the shared queue for process-wide signals,
1452 * to avoid several races.
1454 q->lock = &p->sighand->siglock;
1455 list_add_tail(&q->list, &p->signal->shared_pending.list);
1456 sigaddset(&p->signal->shared_pending.signal, sig);
1458 __group_complete_signal(sig, p);
1459 out:
1460 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1461 read_unlock(&tasklist_lock);
1462 return(ret);
1466 * Wake up any threads in the parent blocked in wait* syscalls.
1468 static inline void __wake_up_parent(struct task_struct *p,
1469 struct task_struct *parent)
1471 wake_up_interruptible_sync(&parent->signal->wait_chldexit);
1475 * Let a parent know about the death of a child.
1476 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
1479 void do_notify_parent(struct task_struct *tsk, int sig)
1481 struct siginfo info;
1482 unsigned long flags;
1483 struct sighand_struct *psig;
1485 BUG_ON(sig == -1);
1487 /* do_notify_parent_cldstop should have been called instead. */
1488 BUG_ON(tsk->state & (TASK_STOPPED|TASK_TRACED));
1490 BUG_ON(!tsk->ptrace &&
1491 (tsk->group_leader != tsk || !thread_group_empty(tsk)));
1493 info.si_signo = sig;
1494 info.si_errno = 0;
1495 info.si_pid = tsk->pid;
1496 info.si_uid = tsk->uid;
1498 /* FIXME: find out whether or not this is supposed to be c*time. */
1499 info.si_utime = cputime_to_jiffies(cputime_add(tsk->utime,
1500 tsk->signal->utime));
1501 info.si_stime = cputime_to_jiffies(cputime_add(tsk->stime,
1502 tsk->signal->stime));
1504 info.si_status = tsk->exit_code & 0x7f;
1505 if (tsk->exit_code & 0x80)
1506 info.si_code = CLD_DUMPED;
1507 else if (tsk->exit_code & 0x7f)
1508 info.si_code = CLD_KILLED;
1509 else {
1510 info.si_code = CLD_EXITED;
1511 info.si_status = tsk->exit_code >> 8;
1514 psig = tsk->parent->sighand;
1515 spin_lock_irqsave(&psig->siglock, flags);
1516 if (sig == SIGCHLD &&
1517 (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
1518 (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
1520 * We are exiting and our parent doesn't care. POSIX.1
1521 * defines special semantics for setting SIGCHLD to SIG_IGN
1522 * or setting the SA_NOCLDWAIT flag: we should be reaped
1523 * automatically and not left for our parent's wait4 call.
1524 * Rather than having the parent do it as a magic kind of
1525 * signal handler, we just set this to tell do_exit that we
1526 * can be cleaned up without becoming a zombie. Note that
1527 * we still call __wake_up_parent in this case, because a
1528 * blocked sys_wait4 might now return -ECHILD.
1530 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
1531 * is implementation-defined: we do (if you don't want
1532 * it, just use SIG_IGN instead).
1534 tsk->exit_signal = -1;
1535 if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
1536 sig = 0;
1538 if (valid_signal(sig) && sig > 0)
1539 __group_send_sig_info(sig, &info, tsk->parent);
1540 __wake_up_parent(tsk, tsk->parent);
1541 spin_unlock_irqrestore(&psig->siglock, flags);
1544 static void
1545 do_notify_parent_cldstop(struct task_struct *tsk, struct task_struct *parent,
1546 int why)
1548 struct siginfo info;
1549 unsigned long flags;
1550 struct sighand_struct *sighand;
1552 info.si_signo = SIGCHLD;
1553 info.si_errno = 0;
1554 info.si_pid = tsk->pid;
1555 info.si_uid = tsk->uid;
1557 /* FIXME: find out whether or not this is supposed to be c*time. */
1558 info.si_utime = cputime_to_jiffies(tsk->utime);
1559 info.si_stime = cputime_to_jiffies(tsk->stime);
1561 info.si_code = why;
1562 switch (why) {
1563 case CLD_CONTINUED:
1564 info.si_status = SIGCONT;
1565 break;
1566 case CLD_STOPPED:
1567 info.si_status = tsk->signal->group_exit_code & 0x7f;
1568 break;
1569 case CLD_TRAPPED:
1570 info.si_status = tsk->exit_code & 0x7f;
1571 break;
1572 default:
1573 BUG();
1576 sighand = parent->sighand;
1577 spin_lock_irqsave(&sighand->siglock, flags);
1578 if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
1579 !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
1580 __group_send_sig_info(SIGCHLD, &info, parent);
1582 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
1584 __wake_up_parent(tsk, parent);
1585 spin_unlock_irqrestore(&sighand->siglock, flags);
1589 * This must be called with current->sighand->siglock held.
1591 * This should be the path for all ptrace stops.
1592 * We always set current->last_siginfo while stopped here.
1593 * That makes it a way to test a stopped process for
1594 * being ptrace-stopped vs being job-control-stopped.
1596 * If we actually decide not to stop at all because the tracer is gone,
1597 * we leave nostop_code in current->exit_code.
1599 static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
1602 * If there is a group stop in progress,
1603 * we must participate in the bookkeeping.
1605 if (current->signal->group_stop_count > 0)
1606 --current->signal->group_stop_count;
1608 current->last_siginfo = info;
1609 current->exit_code = exit_code;
1611 /* Let the debugger run. */
1612 set_current_state(TASK_TRACED);
1613 spin_unlock_irq(&current->sighand->siglock);
1614 read_lock(&tasklist_lock);
1615 if (likely(current->ptrace & PT_PTRACED) &&
1616 likely(current->parent != current->real_parent ||
1617 !(current->ptrace & PT_ATTACHED)) &&
1618 (likely(current->parent->signal != current->signal) ||
1619 !unlikely(current->signal->flags & SIGNAL_GROUP_EXIT))) {
1620 do_notify_parent_cldstop(current, current->parent,
1621 CLD_TRAPPED);
1622 read_unlock(&tasklist_lock);
1623 schedule();
1624 } else {
1626 * By the time we got the lock, our tracer went away.
1627 * Don't stop here.
1629 read_unlock(&tasklist_lock);
1630 set_current_state(TASK_RUNNING);
1631 current->exit_code = nostop_code;
1635 * We are back. Now reacquire the siglock before touching
1636 * last_siginfo, so that we are sure to have synchronized with
1637 * any signal-sending on another CPU that wants to examine it.
1639 spin_lock_irq(&current->sighand->siglock);
1640 current->last_siginfo = NULL;
1643 * Queued signals ignored us while we were stopped for tracing.
1644 * So check for any that we should take before resuming user mode.
1646 recalc_sigpending();
1649 void ptrace_notify(int exit_code)
1651 siginfo_t info;
1653 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
1655 memset(&info, 0, sizeof info);
1656 info.si_signo = SIGTRAP;
1657 info.si_code = exit_code;
1658 info.si_pid = current->pid;
1659 info.si_uid = current->uid;
1661 /* Let the debugger run. */
1662 spin_lock_irq(&current->sighand->siglock);
1663 ptrace_stop(exit_code, 0, &info);
1664 spin_unlock_irq(&current->sighand->siglock);
1667 static void
1668 finish_stop(int stop_count)
1671 * If there are no other threads in the group, or if there is
1672 * a group stop in progress and we are the last to stop,
1673 * report to the parent. When ptraced, every thread reports itself.
1675 if (stop_count < 0 || (current->ptrace & PT_PTRACED)) {
1676 read_lock(&tasklist_lock);
1677 do_notify_parent_cldstop(current, current->parent,
1678 CLD_STOPPED);
1679 read_unlock(&tasklist_lock);
1681 else if (stop_count == 0) {
1682 read_lock(&tasklist_lock);
1683 do_notify_parent_cldstop(current->group_leader,
1684 current->group_leader->real_parent,
1685 CLD_STOPPED);
1686 read_unlock(&tasklist_lock);
1689 schedule();
1691 * Now we don't run again until continued.
1693 current->exit_code = 0;
1697 * This performs the stopping for SIGSTOP and other stop signals.
1698 * We have to stop all threads in the thread group.
1699 * Returns nonzero if we've actually stopped and released the siglock.
1700 * Returns zero if we didn't stop and still hold the siglock.
1702 static int
1703 do_signal_stop(int signr)
1705 struct signal_struct *sig = current->signal;
1706 struct sighand_struct *sighand = current->sighand;
1707 int stop_count = -1;
1709 if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED))
1710 return 0;
1712 if (sig->group_stop_count > 0) {
1714 * There is a group stop in progress. We don't need to
1715 * start another one.
1717 signr = sig->group_exit_code;
1718 stop_count = --sig->group_stop_count;
1719 current->exit_code = signr;
1720 set_current_state(TASK_STOPPED);
1721 if (stop_count == 0)
1722 sig->flags = SIGNAL_STOP_STOPPED;
1723 spin_unlock_irq(&sighand->siglock);
1725 else if (thread_group_empty(current)) {
1727 * Lock must be held through transition to stopped state.
1729 current->exit_code = current->signal->group_exit_code = signr;
1730 set_current_state(TASK_STOPPED);
1731 sig->flags = SIGNAL_STOP_STOPPED;
1732 spin_unlock_irq(&sighand->siglock);
1734 else {
1736 * There is no group stop already in progress.
1737 * We must initiate one now, but that requires
1738 * dropping siglock to get both the tasklist lock
1739 * and siglock again in the proper order. Note that
1740 * this allows an intervening SIGCONT to be posted.
1741 * We need to check for that and bail out if necessary.
1743 struct task_struct *t;
1745 spin_unlock_irq(&sighand->siglock);
1747 /* signals can be posted during this window */
1749 read_lock(&tasklist_lock);
1750 spin_lock_irq(&sighand->siglock);
1752 if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED)) {
1754 * Another stop or continue happened while we
1755 * didn't have the lock. We can just swallow this
1756 * signal now. If we raced with a SIGCONT, that
1757 * should have just cleared it now. If we raced
1758 * with another processor delivering a stop signal,
1759 * then the SIGCONT that wakes us up should clear it.
1761 read_unlock(&tasklist_lock);
1762 return 0;
1765 if (sig->group_stop_count == 0) {
1766 sig->group_exit_code = signr;
1767 stop_count = 0;
1768 for (t = next_thread(current); t != current;
1769 t = next_thread(t))
1771 * Setting state to TASK_STOPPED for a group
1772 * stop is always done with the siglock held,
1773 * so this check has no races.
1775 if (t->state < TASK_STOPPED) {
1776 stop_count++;
1777 signal_wake_up(t, 0);
1779 sig->group_stop_count = stop_count;
1781 else {
1782 /* A race with another thread while unlocked. */
1783 signr = sig->group_exit_code;
1784 stop_count = --sig->group_stop_count;
1787 current->exit_code = signr;
1788 set_current_state(TASK_STOPPED);
1789 if (stop_count == 0)
1790 sig->flags = SIGNAL_STOP_STOPPED;
1792 spin_unlock_irq(&sighand->siglock);
1793 read_unlock(&tasklist_lock);
1796 finish_stop(stop_count);
1797 return 1;
1801 * Do appropriate magic when group_stop_count > 0.
1802 * We return nonzero if we stopped, after releasing the siglock.
1803 * We return zero if we still hold the siglock and should look
1804 * for another signal without checking group_stop_count again.
1806 static inline int handle_group_stop(void)
1808 int stop_count;
1810 if (current->signal->group_exit_task == current) {
1812 * Group stop is so we can do a core dump,
1813 * We are the initiating thread, so get on with it.
1815 current->signal->group_exit_task = NULL;
1816 return 0;
1819 if (current->signal->flags & SIGNAL_GROUP_EXIT)
1821 * Group stop is so another thread can do a core dump,
1822 * or else we are racing against a death signal.
1823 * Just punt the stop so we can get the next signal.
1825 return 0;
1828 * There is a group stop in progress. We stop
1829 * without any associated signal being in our queue.
1831 stop_count = --current->signal->group_stop_count;
1832 if (stop_count == 0)
1833 current->signal->flags = SIGNAL_STOP_STOPPED;
1834 current->exit_code = current->signal->group_exit_code;
1835 set_current_state(TASK_STOPPED);
1836 spin_unlock_irq(&current->sighand->siglock);
1837 finish_stop(stop_count);
1838 return 1;
1841 int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
1842 struct pt_regs *regs, void *cookie)
1844 sigset_t *mask = &current->blocked;
1845 int signr = 0;
1847 relock:
1848 spin_lock_irq(&current->sighand->siglock);
1849 for (;;) {
1850 struct k_sigaction *ka;
1852 if (unlikely(current->signal->group_stop_count > 0) &&
1853 handle_group_stop())
1854 goto relock;
1856 signr = dequeue_signal(current, mask, info);
1858 if (!signr)
1859 break; /* will return 0 */
1861 if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
1862 ptrace_signal_deliver(regs, cookie);
1864 /* Let the debugger run. */
1865 ptrace_stop(signr, signr, info);
1867 /* We're back. Did the debugger cancel the sig? */
1868 signr = current->exit_code;
1869 if (signr == 0)
1870 continue;
1872 current->exit_code = 0;
1874 /* Update the siginfo structure if the signal has
1875 changed. If the debugger wanted something
1876 specific in the siginfo structure then it should
1877 have updated *info via PTRACE_SETSIGINFO. */
1878 if (signr != info->si_signo) {
1879 info->si_signo = signr;
1880 info->si_errno = 0;
1881 info->si_code = SI_USER;
1882 info->si_pid = current->parent->pid;
1883 info->si_uid = current->parent->uid;
1886 /* If the (new) signal is now blocked, requeue it. */
1887 if (sigismember(&current->blocked, signr)) {
1888 specific_send_sig_info(signr, info, current);
1889 continue;
1893 ka = &current->sighand->action[signr-1];
1894 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
1895 continue;
1896 if (ka->sa.sa_handler != SIG_DFL) {
1897 /* Run the handler. */
1898 *return_ka = *ka;
1900 if (ka->sa.sa_flags & SA_ONESHOT)
1901 ka->sa.sa_handler = SIG_DFL;
1903 break; /* will return non-zero "signr" value */
1907 * Now we are doing the default action for this signal.
1909 if (sig_kernel_ignore(signr)) /* Default is nothing. */
1910 continue;
1912 /* Init gets no signals it doesn't want. */
1913 if (current->pid == 1)
1914 continue;
1916 if (sig_kernel_stop(signr)) {
1918 * The default action is to stop all threads in
1919 * the thread group. The job control signals
1920 * do nothing in an orphaned pgrp, but SIGSTOP
1921 * always works. Note that siglock needs to be
1922 * dropped during the call to is_orphaned_pgrp()
1923 * because of lock ordering with tasklist_lock.
1924 * This allows an intervening SIGCONT to be posted.
1925 * We need to check for that and bail out if necessary.
1927 if (signr != SIGSTOP) {
1928 spin_unlock_irq(&current->sighand->siglock);
1930 /* signals can be posted during this window */
1932 if (is_orphaned_pgrp(process_group(current)))
1933 goto relock;
1935 spin_lock_irq(&current->sighand->siglock);
1938 if (likely(do_signal_stop(signr))) {
1939 /* It released the siglock. */
1940 goto relock;
1944 * We didn't actually stop, due to a race
1945 * with SIGCONT or something like that.
1947 continue;
1950 spin_unlock_irq(&current->sighand->siglock);
1953 * Anything else is fatal, maybe with a core dump.
1955 current->flags |= PF_SIGNALED;
1956 if (sig_kernel_coredump(signr)) {
1958 * If it was able to dump core, this kills all
1959 * other threads in the group and synchronizes with
1960 * their demise. If we lost the race with another
1961 * thread getting here, it set group_exit_code
1962 * first and our do_group_exit call below will use
1963 * that value and ignore the one we pass it.
1965 do_coredump((long)signr, signr, regs);
1969 * Death signals, no core dump.
1971 do_group_exit(signr);
1972 /* NOTREACHED */
1974 spin_unlock_irq(&current->sighand->siglock);
1975 return signr;
1978 EXPORT_SYMBOL(recalc_sigpending);
1979 EXPORT_SYMBOL_GPL(dequeue_signal);
1980 EXPORT_SYMBOL(flush_signals);
1981 EXPORT_SYMBOL(force_sig);
1982 EXPORT_SYMBOL(kill_pg);
1983 EXPORT_SYMBOL(kill_proc);
1984 EXPORT_SYMBOL(ptrace_notify);
1985 EXPORT_SYMBOL(send_sig);
1986 EXPORT_SYMBOL(send_sig_info);
1987 EXPORT_SYMBOL(sigprocmask);
1988 EXPORT_SYMBOL(block_all_signals);
1989 EXPORT_SYMBOL(unblock_all_signals);
1993 * System call entry points.
1996 asmlinkage long sys_restart_syscall(void)
1998 struct restart_block *restart = &current_thread_info()->restart_block;
1999 return restart->fn(restart);
2002 long do_no_restart_syscall(struct restart_block *param)
2004 return -EINTR;
2008 * We don't need to get the kernel lock - this is all local to this
2009 * particular thread.. (and that's good, because this is _heavily_
2010 * used by various programs)
2014 * This is also useful for kernel threads that want to temporarily
2015 * (or permanently) block certain signals.
2017 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2018 * interface happily blocks "unblockable" signals like SIGKILL
2019 * and friends.
2021 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2023 int error;
2024 sigset_t old_block;
2026 spin_lock_irq(&current->sighand->siglock);
2027 old_block = current->blocked;
2028 error = 0;
2029 switch (how) {
2030 case SIG_BLOCK:
2031 sigorsets(&current->blocked, &current->blocked, set);
2032 break;
2033 case SIG_UNBLOCK:
2034 signandsets(&current->blocked, &current->blocked, set);
2035 break;
2036 case SIG_SETMASK:
2037 current->blocked = *set;
2038 break;
2039 default:
2040 error = -EINVAL;
2041 }
2042 recalc_sigpending();
2043 spin_unlock_irq(&current->sighand->siglock);
2044 if (oldset)
2045 *oldset = old_block;
2046 return error;
2047 }
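/*
 * Illustrative sketch (not from the original file): because the in-kernel
 * sigprocmask() above happily blocks even "unblockable" signals, a kernel
 * thread that wants to ignore everything, SIGKILL included, might do
 * roughly the following.
 *
 *	sigset_t blocked;
 *
 *	sigfillset(&blocked);
 *	sigprocmask(SIG_BLOCK, &blocked, NULL);
 */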
2049 asmlinkage long
2050 sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize)
2051 {
2052 int error = -EINVAL;
2053 sigset_t old_set, new_set;
2055 /* XXX: Don't preclude handling different sized sigset_t's. */
2056 if (sigsetsize != sizeof(sigset_t))
2057 goto out;
2059 if (set) {
2060 error = -EFAULT;
2061 if (copy_from_user(&new_set, set, sizeof(*set)))
2062 goto out;
2063 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2065 error = sigprocmask(how, &new_set, &old_set);
2066 if (error)
2067 goto out;
2068 if (oset)
2069 goto set_old;
2070 } else if (oset) {
2071 spin_lock_irq(&current->sighand->siglock);
2072 old_set = current->blocked;
2073 spin_unlock_irq(&current->sighand->siglock);
2075 set_old:
2076 error = -EFAULT;
2077 if (copy_to_user(oset, &old_set, sizeof(*oset)))
2078 goto out;
2079 }
2080 error = 0;
2081 out:
2082 return error;
2083 }
2085 long do_sigpending(void __user *set, unsigned long sigsetsize)
2086 {
2087 long error = -EINVAL;
2088 sigset_t pending;
2090 if (sigsetsize > sizeof(sigset_t))
2091 goto out;
2093 spin_lock_irq(&current->sighand->siglock);
2094 sigorsets(&pending, &current->pending.signal,
2095 &current->signal->shared_pending.signal);
2096 spin_unlock_irq(&current->sighand->siglock);
2098 /* Outside the lock because only this thread touches it. */
2099 sigandsets(&pending, &current->blocked, &pending);
2101 error = -EFAULT;
2102 if (!copy_to_user(set, &pending, sigsetsize))
2103 error = 0;
2105 out:
2106 return error;
2107 }
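/*
 * Illustrative user-space sketch of the interface implemented above:
 * sigpending(2) reports signals that were raised while blocked and are
 * still awaiting delivery. handle_deferred_term() is a hypothetical
 * helper.
 *
 *	sigset_t pending;
 *
 *	sigemptyset(&pending);
 *	sigpending(&pending);
 *	if (sigismember(&pending, SIGTERM))
 *		handle_deferred_term();
 */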
2109 asmlinkage long
2110 sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize)
2111 {
2112 return do_sigpending(set, sigsetsize);
2113 }
2115 #ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2117 int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
2118 {
2119 int err;
2121 if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
2122 return -EFAULT;
2123 if (from->si_code < 0)
2124 return __copy_to_user(to, from, sizeof(siginfo_t))
2125 ? -EFAULT : 0;
2126 /*
2127 * If you change siginfo_t structure, please be sure
2128 * this code is fixed accordingly.
2129 * It should never copy any pad contained in the structure
2130 * to avoid security leaks, but must copy the generic
2131 * 3 ints plus the relevant union member.
2132 */
2133 err = __put_user(from->si_signo, &to->si_signo);
2134 err |= __put_user(from->si_errno, &to->si_errno);
2135 err |= __put_user((short)from->si_code, &to->si_code);
2136 switch (from->si_code & __SI_MASK) {
2137 case __SI_KILL:
2138 err |= __put_user(from->si_pid, &to->si_pid);
2139 err |= __put_user(from->si_uid, &to->si_uid);
2140 break;
2141 case __SI_TIMER:
2142 err |= __put_user(from->si_tid, &to->si_tid);
2143 err |= __put_user(from->si_overrun, &to->si_overrun);
2144 err |= __put_user(from->si_ptr, &to->si_ptr);
2145 break;
2146 case __SI_POLL:
2147 err |= __put_user(from->si_band, &to->si_band);
2148 err |= __put_user(from->si_fd, &to->si_fd);
2149 break;
2150 case __SI_FAULT:
2151 err |= __put_user(from->si_addr, &to->si_addr);
2152 #ifdef __ARCH_SI_TRAPNO
2153 err |= __put_user(from->si_trapno, &to->si_trapno);
2154 #endif
2155 break;
2156 case __SI_CHLD:
2157 err |= __put_user(from->si_pid, &to->si_pid);
2158 err |= __put_user(from->si_uid, &to->si_uid);
2159 err |= __put_user(from->si_status, &to->si_status);
2160 err |= __put_user(from->si_utime, &to->si_utime);
2161 err |= __put_user(from->si_stime, &to->si_stime);
2162 break;
2163 case __SI_RT: /* This is not generated by the kernel as of now. */
2164 case __SI_MESGQ: /* But this is */
2165 err |= __put_user(from->si_pid, &to->si_pid);
2166 err |= __put_user(from->si_uid, &to->si_uid);
2167 err |= __put_user(from->si_ptr, &to->si_ptr);
2168 break;
2169 default: /* this is just in case for now ... */
2170 err |= __put_user(from->si_pid, &to->si_pid);
2171 err |= __put_user(from->si_uid, &to->si_uid);
2172 break;
2173 }
2174 return err;
2175 }
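/*
 * Illustrative user-space sketch: which union member of the copied
 * siginfo_t is meaningful depends on how the signal originated. An
 * SA_SIGINFO handler for SIGCHLD, for example, reads the __SI_CHLD
 * fields filled in above. chld_handler and reap() are hypothetical.
 *
 *	static void chld_handler(int sig, siginfo_t *si, void *ctx)
 *	{
 *		if (si->si_code == CLD_EXITED)
 *			reap(si->si_pid, si->si_status);
 *	}
 *
 *	struct sigaction sa;
 *
 *	sa.sa_sigaction = chld_handler;
 *	sa.sa_flags = SA_SIGINFO;
 *	sigemptyset(&sa.sa_mask);
 *	sigaction(SIGCHLD, &sa, NULL);
 */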
2177 #endif
2179 asmlinkage long
2180 sys_rt_sigtimedwait(const sigset_t __user *uthese,
2181 siginfo_t __user *uinfo,
2182 const struct timespec __user *uts,
2183 size_t sigsetsize)
2184 {
2185 int ret, sig;
2186 sigset_t these;
2187 struct timespec ts;
2188 siginfo_t info;
2189 long timeout = 0;
2191 /* XXX: Don't preclude handling different sized sigset_t's. */
2192 if (sigsetsize != sizeof(sigset_t))
2193 return -EINVAL;
2195 if (copy_from_user(&these, uthese, sizeof(these)))
2196 return -EFAULT;
2198 /*
2199 * Invert the set of allowed signals to get those we
2200 * want to block.
2201 */
2202 sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
2203 signotset(&these);
2205 if (uts) {
2206 if (copy_from_user(&ts, uts, sizeof(ts)))
2207 return -EFAULT;
2208 if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
2209 || ts.tv_sec < 0)
2210 return -EINVAL;
2211 }
2213 spin_lock_irq(&current->sighand->siglock);
2214 sig = dequeue_signal(current, &these, &info);
2215 if (!sig) {
2216 timeout = MAX_SCHEDULE_TIMEOUT;
2217 if (uts)
2218 timeout = (timespec_to_jiffies(&ts)
2219 + (ts.tv_sec || ts.tv_nsec));
2221 if (timeout) {
2222 /* None ready -- temporarily unblock the signals we're
2223 * interested in while we sleep, so that we'll be
2224 * awakened when they arrive. */
2225 current->real_blocked = current->blocked;
2226 sigandsets(&current->blocked, &current->blocked, &these);
2227 recalc_sigpending();
2228 spin_unlock_irq(&current->sighand->siglock);
2230 current->state = TASK_INTERRUPTIBLE;
2231 timeout = schedule_timeout(timeout);
2233 if (current->flags & PF_FREEZE)
2234 refrigerator(PF_FREEZE);
2235 spin_lock_irq(&current->sighand->siglock);
2236 sig = dequeue_signal(current, &these, &info);
2237 current->blocked = current->real_blocked;
2238 siginitset(&current->real_blocked, 0);
2239 recalc_sigpending();
2240 }
2241 }
2242 spin_unlock_irq(&current->sighand->siglock);
2244 if (sig) {
2245 ret = sig;
2246 if (uinfo) {
2247 if (copy_siginfo_to_user(uinfo, &info))
2248 ret = -EFAULT;
2249 }
2250 } else {
2251 ret = -EAGAIN;
2252 if (timeout)
2253 ret = -EINTR;
2254 }
2256 return ret;
2257 }
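/*
 * Illustrative user-space sketch of the syscall above via the libc
 * wrapper: wait synchronously for SIGUSR1 with a five second timeout.
 * The signal is blocked first so it stays queued for sigtimedwait()
 * instead of being delivered asynchronously.
 *
 *	sigset_t set;
 *	siginfo_t info;
 *	struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	if (sigtimedwait(&set, &info, &ts) == SIGUSR1)
 *		printf("SIGUSR1 from pid %d\n", (int)info.si_pid);
 */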
2259 asmlinkage long
2260 sys_kill(int pid, int sig)
2261 {
2262 struct siginfo info;
2264 info.si_signo = sig;
2265 info.si_errno = 0;
2266 info.si_code = SI_USER;
2267 info.si_pid = current->tgid;
2268 info.si_uid = current->uid;
2270 return kill_something_info(sig, &info, pid);
2271 }
2273 /**
2274 * sys_tgkill - send signal to one specific thread
2275 * @tgid: the thread group ID of the thread
2276 * @pid: the PID of the thread
2277 * @sig: signal to be sent
2279 * This syscall also checks the tgid and returns -ESRCH even if the PID
2280 * exists but no longer belongs to the target process. This method
2281 * solves the problem of threads exiting and PIDs getting reused.
2282 */
2283 asmlinkage long sys_tgkill(int tgid, int pid, int sig)
2284 {
2285 struct siginfo info;
2286 int error;
2287 struct task_struct *p;
2289 /* This is only valid for single tasks */
2290 if (pid <= 0 || tgid <= 0)
2291 return -EINVAL;
2293 info.si_signo = sig;
2294 info.si_errno = 0;
2295 info.si_code = SI_TKILL;
2296 info.si_pid = current->tgid;
2297 info.si_uid = current->uid;
2299 read_lock(&tasklist_lock);
2300 p = find_task_by_pid(pid);
2301 error = -ESRCH;
2302 if (p && (p->tgid == tgid)) {
2303 error = check_kill_permission(sig, &info, p);
2304 /*
2305 * The null signal is a permissions and process existence
2306 * probe. No signal is actually delivered.
2307 */
2308 if (!error && sig && p->sighand) {
2309 spin_lock_irq(&p->sighand->siglock);
2310 handle_stop_signal(sig, p);
2311 error = specific_send_sig_info(sig, &info, p);
2312 spin_unlock_irq(&p->sighand->siglock);
2313 }
2314 }
2315 read_unlock(&tasklist_lock);
2316 return error;
2317 }
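/*
 * Illustrative user-space sketch: at the time this code was written,
 * C libraries generally had no tgkill() wrapper, so callers typically
 * went through syscall(2). tgid and tid below are placeholders for the
 * target thread group and thread IDs.
 *
 *	#include <sys/syscall.h>
 *
 *	if (syscall(SYS_tgkill, tgid, tid, SIGUSR1) < 0)
 *		perror("tgkill");
 */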
2319 /*
2320 * Send a signal to only one task, even if it's a CLONE_THREAD task.
2321 */
2322 asmlinkage long
2323 sys_tkill(int pid, int sig)
2324 {
2325 struct siginfo info;
2326 int error;
2327 struct task_struct *p;
2329 /* This is only valid for single tasks */
2330 if (pid <= 0)
2331 return -EINVAL;
2333 info.si_signo = sig;
2334 info.si_errno = 0;
2335 info.si_code = SI_TKILL;
2336 info.si_pid = current->tgid;
2337 info.si_uid = current->uid;
2339 read_lock(&tasklist_lock);
2340 p = find_task_by_pid(pid);
2341 error = -ESRCH;
2342 if (p) {
2343 error = check_kill_permission(sig, &info, p);
2344 /*
2345 * The null signal is a permissions and process existence
2346 * probe. No signal is actually delivered.
2347 */
2348 if (!error && sig && p->sighand) {
2349 spin_lock_irq(&p->sighand->siglock);
2350 handle_stop_signal(sig, p);
2351 error = specific_send_sig_info(sig, &info, p);
2352 spin_unlock_irq(&p->sighand->siglock);
2353 }
2354 }
2355 read_unlock(&tasklist_lock);
2356 return error;
2357 }
2359 asmlinkage long
2360 sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
2361 {
2362 siginfo_t info;
2364 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2365 return -EFAULT;
2367 /* Not even root can pretend to send signals from the kernel.
2368 Nor can they impersonate a kill(), which adds source info. */
2369 if (info.si_code >= 0)
2370 return -EPERM;
2371 info.si_signo = sig;
2373 /* POSIX.1b doesn't mention process groups. */
2374 return kill_proc_info(sig, &info, pid);
2375 }
2377 int
2378 do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact)
2379 {
2380 struct k_sigaction *k;
2382 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
2383 return -EINVAL;
2385 k = &current->sighand->action[sig-1];
2387 spin_lock_irq(&current->sighand->siglock);
2388 if (signal_pending(current)) {
2389 /*
2390 * If there might be a fatal signal pending on multiple
2391 * threads, make sure we take it before changing the action.
2392 */
2393 spin_unlock_irq(&current->sighand->siglock);
2394 return -ERESTARTNOINTR;
2395 }
2397 if (oact)
2398 *oact = *k;
2400 if (act) {
2401 /*
2402 * POSIX 3.3.1.3:
2403 * "Setting a signal action to SIG_IGN for a signal that is
2404 * pending shall cause the pending signal to be discarded,
2405 * whether or not it is blocked."
2407 * "Setting a signal action to SIG_DFL for a signal that is
2408 * pending and whose default action is to ignore the signal
2409 * (for example, SIGCHLD), shall cause the pending signal to
2410 * be discarded, whether or not it is blocked"
2411 */
2412 if (act->sa.sa_handler == SIG_IGN ||
2413 (act->sa.sa_handler == SIG_DFL &&
2414 sig_kernel_ignore(sig))) {
2415 /*
2416 * This is a fairly rare case, so we only take the
2417 * tasklist_lock once we're sure we'll need it.
2418 * Now we must do this little unlock and relock
2419 * dance to maintain the lock hierarchy.
2420 */
2421 struct task_struct *t = current;
2422 spin_unlock_irq(&t->sighand->siglock);
2423 read_lock(&tasklist_lock);
2424 spin_lock_irq(&t->sighand->siglock);
2425 *k = *act;
2426 sigdelsetmask(&k->sa.sa_mask,
2427 sigmask(SIGKILL) | sigmask(SIGSTOP));
2428 rm_from_queue(sigmask(sig), &t->signal->shared_pending);
2429 do {
2430 rm_from_queue(sigmask(sig), &t->pending);
2431 recalc_sigpending_tsk(t);
2432 t = next_thread(t);
2433 } while (t != current);
2434 spin_unlock_irq(&current->sighand->siglock);
2435 read_unlock(&tasklist_lock);
2436 return 0;
2437 }
2439 *k = *act;
2440 sigdelsetmask(&k->sa.sa_mask,
2441 sigmask(SIGKILL) | sigmask(SIGSTOP));
2442 }
2444 spin_unlock_irq(&current->sighand->siglock);
2445 return 0;
2446 }
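/*
 * Illustrative user-space sketch of the POSIX rule handled above: a
 * signal that is pending (even while blocked) is discarded the moment
 * its action becomes SIG_IGN, so unblocking it afterwards delivers
 * nothing.
 *
 *	sigset_t set;
 *	struct sigaction sa;
 *
 *	sa.sa_handler = SIG_IGN;
 *	sa.sa_flags = 0;
 *	sigemptyset(&sa.sa_mask);
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	raise(SIGUSR1);
 *	sigaction(SIGUSR1, &sa, NULL);
 *	sigprocmask(SIG_UNBLOCK, &set, NULL);
 */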
2448 int
2449 do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
2450 {
2451 stack_t oss;
2452 int error;
2454 if (uoss) {
2455 oss.ss_sp = (void __user *) current->sas_ss_sp;
2456 oss.ss_size = current->sas_ss_size;
2457 oss.ss_flags = sas_ss_flags(sp);
2458 }
2460 if (uss) {
2461 void __user *ss_sp;
2462 size_t ss_size;
2463 int ss_flags;
2465 error = -EFAULT;
2466 if (!access_ok(VERIFY_READ, uss, sizeof(*uss))
2467 || __get_user(ss_sp, &uss->ss_sp)
2468 || __get_user(ss_flags, &uss->ss_flags)
2469 || __get_user(ss_size, &uss->ss_size))
2470 goto out;
2472 error = -EPERM;
2473 if (on_sig_stack(sp))
2474 goto out;
2476 error = -EINVAL;
2478 /*
2479 * Note: this code used to test ss_flags incorrectly. Old code
2480 * may have been written using ss_flags==0 to mean
2481 * ss_flags==SS_ONSTACK (as this was the only way that worked),
2482 * so this fix preserves that older mechanism.
2483 */
2485 if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
2486 goto out;
2488 if (ss_flags == SS_DISABLE) {
2489 ss_size = 0;
2490 ss_sp = NULL;
2491 } else {
2492 error = -ENOMEM;
2493 if (ss_size < MINSIGSTKSZ)
2494 goto out;
2495 }
2497 current->sas_ss_sp = (unsigned long) ss_sp;
2498 current->sas_ss_size = ss_size;
2499 }
2501 if (uoss) {
2502 error = -EFAULT;
2503 if (copy_to_user(uoss, &oss, sizeof(oss)))
2504 goto out;
2505 }
2507 error = 0;
2508 out:
2509 return error;
2510 }
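/*
 * Illustrative user-space sketch of the interface above: install an
 * alternate signal stack so a SIGSEGV handler can still run after the
 * normal stack overflows. segv_handler is a hypothetical handler; the
 * SA_ONSTACK flag is what actually switches that handler onto the
 * alternate stack.
 *
 *	static char stack_mem[SIGSTKSZ];
 *
 *	stack_t ss;
 *	struct sigaction sa;
 *
 *	ss.ss_sp = stack_mem;
 *	ss.ss_size = sizeof(stack_mem);
 *	ss.ss_flags = 0;
 *	sigaltstack(&ss, NULL);
 *
 *	sa.sa_handler = segv_handler;
 *	sa.sa_flags = SA_ONSTACK;
 *	sigemptyset(&sa.sa_mask);
 *	sigaction(SIGSEGV, &sa, NULL);
 */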
2512 #ifdef __ARCH_WANT_SYS_SIGPENDING
2514 asmlinkage long
2515 sys_sigpending(old_sigset_t __user *set)
2516 {
2517 return do_sigpending(set, sizeof(*set));
2518 }
2520 #endif
2522 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
2523 /* Some platforms have their own version with special arguments;
2524 others support only sys_rt_sigprocmask. */
2526 asmlinkage long
2527 sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset)
2528 {
2529 int error;
2530 old_sigset_t old_set, new_set;
2532 if (set) {
2533 error = -EFAULT;
2534 if (copy_from_user(&new_set, set, sizeof(*set)))
2535 goto out;
2536 new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));
2538 spin_lock_irq(&current->sighand->siglock);
2539 old_set = current->blocked.sig[0];
2541 error = 0;
2542 switch (how) {
2543 default:
2544 error = -EINVAL;
2545 break;
2546 case SIG_BLOCK:
2547 sigaddsetmask(&current->blocked, new_set);
2548 break;
2549 case SIG_UNBLOCK:
2550 sigdelsetmask(&current->blocked, new_set);
2551 break;
2552 case SIG_SETMASK:
2553 current->blocked.sig[0] = new_set;
2554 break;
2555 }
2557 recalc_sigpending();
2558 spin_unlock_irq(&current->sighand->siglock);
2559 if (error)
2560 goto out;
2561 if (oset)
2562 goto set_old;
2563 } else if (oset) {
2564 old_set = current->blocked.sig[0];
2565 set_old:
2566 error = -EFAULT;
2567 if (copy_to_user(oset, &old_set, sizeof(*oset)))
2568 goto out;
2569 }
2570 error = 0;
2571 out:
2572 return error;
2573 }
2574 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
2576 #ifdef __ARCH_WANT_SYS_RT_SIGACTION
2577 asmlinkage long
2578 sys_rt_sigaction(int sig,
2579 const struct sigaction __user *act,
2580 struct sigaction __user *oact,
2581 size_t sigsetsize)
2582 {
2583 struct k_sigaction new_sa, old_sa;
2584 int ret = -EINVAL;
2586 /* XXX: Don't preclude handling different sized sigset_t's. */
2587 if (sigsetsize != sizeof(sigset_t))
2588 goto out;
2590 if (act) {
2591 if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
2592 return -EFAULT;
2593 }
2595 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
2597 if (!ret && oact) {
2598 if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
2599 return -EFAULT;
2600 }
2601 out:
2602 return ret;
2603 }
2604 #endif /* __ARCH_WANT_SYS_RT_SIGACTION */
2606 #ifdef __ARCH_WANT_SYS_SGETMASK
2608 /*
2609 * For backwards compatibility. Functionality superseded by sigprocmask.
2610 */
2611 asmlinkage long
2612 sys_sgetmask(void)
2613 {
2614 /* SMP safe */
2615 return current->blocked.sig[0];
2616 }
2618 asmlinkage long
2619 sys_ssetmask(int newmask)
2620 {
2621 int old;
2623 spin_lock_irq(&current->sighand->siglock);
2624 old = current->blocked.sig[0];
2626 siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
2627 sigmask(SIGSTOP)));
2628 recalc_sigpending();
2629 spin_unlock_irq(&current->sighand->siglock);
2631 return old;
2632 }
2633 #endif /* __ARCH_WANT_SYS_SGETMASK */
2635 #ifdef __ARCH_WANT_SYS_SIGNAL
2636 /*
2637 * For backwards compatibility. Functionality superseded by sigaction.
2638 */
2639 asmlinkage unsigned long
2640 sys_signal(int sig, __sighandler_t handler)
2641 {
2642 struct k_sigaction new_sa, old_sa;
2643 int ret;
2645 new_sa.sa.sa_handler = handler;
2646 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
2648 ret = do_sigaction(sig, &new_sa, &old_sa);
2650 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
2651 }
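/*
 * Illustrative user-space sketch: because the action is installed with
 * SA_ONESHOT | SA_NOMASK, a handler registered through this legacy
 * entry point reverts to SIG_DFL as soon as it runs, which is why old
 * System V style code re-arms it from inside the handler. on_int is a
 * hypothetical handler name.
 *
 *	static void on_int(int sig)
 *	{
 *		signal(SIGINT, on_int);
 *	}
 *
 *	signal(SIGINT, on_int);
 */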
2652 #endif /* __ARCH_WANT_SYS_SIGNAL */
2654 #ifdef __ARCH_WANT_SYS_PAUSE
2656 asmlinkage long
2657 sys_pause(void)
2658 {
2659 current->state = TASK_INTERRUPTIBLE;
2660 schedule();
2661 return -ERESTARTNOHAND;
2662 }
2664 #endif
2666 void __init signals_init(void)
2667 {
2668 sigqueue_cachep =
2669 kmem_cache_create("sigqueue",
2670 sizeof(struct sigqueue),
2671 __alignof__(struct sigqueue),
2672 SLAB_PANIC, NULL, NULL);
2673 }