/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */

#include <linux/config.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/posix-timers.h>
#include <linux/signal.h>
#include <linux/audit.h>
#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
/*
 * SLAB caches for signal bits.
 */

static kmem_cache_t *sigqueue_cachep;

/*
 * In POSIX a signal is sent either to a specific thread (Linux task)
 * or to the process as a whole (Linux thread group).  How the signal
 * is sent determines whether it's to one thread or the whole group,
 * which determines which signal mask(s) are involved in blocking it
 * from being delivered until later.  When the signal is delivered,
 * either it's caught or ignored by a user handler or it has a default
 * effect that applies to the whole thread group (POSIX process).
 *
 * The possible effects an unblocked signal set to SIG_DFL can have are:
 *   ignore	- Nothing happens
 *   terminate	- kill the process, i.e. all threads in the group,
 *		  similar to exit_group.  The group leader (only) reports
 *		  WIFSIGNALED status to its parent.
 *   coredump	- write a core dump file describing all threads using
 *		  the same mm and then kill all those threads
 *   stop 	- stop all the threads in the group, i.e. TASK_STOPPED state
 *
 * SIGKILL and SIGSTOP cannot be caught, blocked, or ignored.
 * Other signals when not blocked and set to SIG_DFL behave as follows.
 * The job control signals also have other special effects.
 *
 *	+--------------------+------------------+
 *	|  POSIX signal      |  default action  |
 *	+--------------------+------------------+
 *	|  SIGHUP            |  terminate       |
 *	|  SIGINT            |  terminate       |
 *	|  SIGQUIT           |  coredump        |
 *	|  SIGILL            |  coredump        |
 *	|  SIGTRAP           |  coredump        |
 *	|  SIGABRT/SIGIOT    |  coredump        |
 *	|  SIGBUS            |  coredump        |
 *	|  SIGFPE            |  coredump        |
 *	|  SIGKILL           |  terminate(+)    |
 *	|  SIGUSR1           |  terminate       |
 *	|  SIGSEGV           |  coredump        |
 *	|  SIGUSR2           |  terminate       |
 *	|  SIGPIPE           |  terminate       |
 *	|  SIGALRM           |  terminate       |
 *	|  SIGTERM           |  terminate       |
 *	|  SIGCHLD           |  ignore          |
 *	|  SIGCONT           |  ignore(*)       |
 *	|  SIGSTOP           |  stop(*)(+)      |
 *	|  SIGTSTP           |  stop(*)         |
 *	|  SIGTTIN           |  stop(*)         |
 *	|  SIGTTOU           |  stop(*)         |
 *	|  SIGURG            |  ignore          |
 *	|  SIGXCPU           |  coredump        |
 *	|  SIGXFSZ           |  coredump        |
 *	|  SIGVTALRM         |  terminate       |
 *	|  SIGPROF           |  terminate       |
 *	|  SIGPOLL/SIGIO     |  terminate       |
 *	|  SIGSYS/SIGUNUSED  |  coredump        |
 *	|  SIGSTKFLT         |  terminate       |
 *	|  SIGWINCH          |  ignore          |
 *	|  SIGPWR            |  terminate       |
 *	|  SIGRTMIN-SIGRTMAX |  terminate       |
 *	+--------------------+------------------+
 *	|  non-POSIX signal  |  default action  |
 *	+--------------------+------------------+
 *	|  SIGEMT            |  coredump        |
 *	+--------------------+------------------+
 *
 * (+) For SIGKILL and SIGSTOP the action is "always", not just "default".
 * (*) Special job control effects:
 * When SIGCONT is sent, it resumes the process (all threads in the group)
 * from TASK_STOPPED state and also clears any pending/queued stop signals
 * (any of those marked with "stop(*)").  This happens regardless of blocking,
 * catching, or ignoring SIGCONT.  When any stop signal is sent, it clears
 * any pending/queued SIGCONT signals; this happens regardless of blocking,
 * catching, or ignoring the stop signal, though (except for SIGSTOP) the
 * default action of stopping the process may happen later or never.
 */
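
/*
 * Worked example (editor's note): given the rules above, the sequence
 *
 *	kill(pid, SIGTSTP);	// queued; default action would stop the group
 *	kill(pid, SIGCONT);	// clears the pending SIGTSTP immediately
 *
 * leaves no stop signal pending, even if the target has SIGCONT blocked
 * or ignored -- the clearing happens in handle_stop_signal() below at
 * signal-generation time, before any mask or handler is consulted.
 */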
#ifdef SIGEMT
#define M_SIGEMT	M(SIGEMT)
#else
#define M_SIGEMT	0
#endif

#if SIGRTMIN > BITS_PER_LONG
#define M(sig) (1ULL << ((sig)-1))
#else
#define M(sig) (1UL << ((sig)-1))
#endif
#define T(sig, mask) (M(sig) & (mask))

#define SIG_KERNEL_ONLY_MASK (\
	M(SIGKILL) | M(SIGSTOP) )

#define SIG_KERNEL_STOP_MASK (\
	M(SIGSTOP) | M(SIGTSTP) | M(SIGTTIN) | M(SIGTTOU) )

#define SIG_KERNEL_COREDUMP_MASK (\
	M(SIGQUIT) | M(SIGILL)  | M(SIGTRAP) | M(SIGABRT) | \
	M(SIGFPE)  | M(SIGSEGV) | M(SIGBUS)  | M(SIGSYS)  | \
	M(SIGXCPU) | M(SIGXFSZ) | M_SIGEMT )

#define SIG_KERNEL_IGNORE_MASK (\
	M(SIGCONT) | M(SIGCHLD) | M(SIGWINCH) | M(SIGURG) )

#define sig_kernel_only(sig) \
		(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_ONLY_MASK))
#define sig_kernel_coredump(sig) \
		(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_COREDUMP_MASK))
#define sig_kernel_ignore(sig) \
		(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_IGNORE_MASK))
#define sig_kernel_stop(sig) \
		(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_STOP_MASK))
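
/*
 * Worked example (editor's note): on an architecture where SIGSTOP == 19
 * and SIGRTMIN > 19, sig_kernel_stop(SIGSTOP) reduces to
 *
 *	(19 < SIGRTMIN) && (((1UL << 18) & SIG_KERNEL_STOP_MASK) != 0)
 *
 * i.e. one range check plus one AND against a compile-time mask.  The
 * "sig < SIGRTMIN" test also guarantees that real-time signals never
 * classify as stop/ignore/coredump, matching the table above, where
 * SIGRTMIN-SIGRTMAX default to "terminate".
 */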
#define sig_user_defined(t, signr) \
	(((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_DFL) &&	\
	 ((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_IGN))

#define sig_fatal(t, signr) \
	(!T(signr, SIG_KERNEL_IGNORE_MASK|SIG_KERNEL_STOP_MASK) && \
	 (t)->sighand->action[(signr)-1].sa.sa_handler == SIG_DFL)

static int sig_ignored(struct task_struct *t, int sig)
{
	void __user * handler;

	/*
	 * Tracers always want to know about signals..
	 */
	if (t->ptrace & PT_PTRACED)
		return 0;

	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig))
		return 0;

	/* Is it explicitly or implicitly ignored? */
	handler = t->sighand->action[sig-1].sa.sa_handler;
	return	handler == SIG_IGN ||
		(handler == SIG_DFL && sig_kernel_ignore(sig));
}

/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

fastcall void recalc_sigpending_tsk(struct task_struct *t)
{
	if (t->signal->group_stop_count > 0 ||
	    (freezing(t)) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked))
		set_tsk_thread_flag(t, TIF_SIGPENDING);
	else
		clear_tsk_thread_flag(t, TIF_SIGPENDING);
}

void recalc_sigpending(void)
{
	recalc_sigpending_tsk(current);
}

/* Given the mask, find the first available signal that should be serviced. */

static int
next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;
	switch (_NSIG_WORDS) {
	default:
		for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
			if ((x = *s &~ *m) != 0) {
				sig = ffz(~x) + i*_NSIG_BPW + 1;
				break;
			}
		break;

	case 2: if ((x = s[0] &~ m[0]) != 0)
			sig = 1;
		else if ((x = s[1] &~ m[1]) != 0)
			sig = _NSIG_BPW + 1;
		else
			break;
		sig += ffz(~x);
		break;

	case 1: if ((x = *s &~ *m) != 0)
			sig = ffz(~x) + 1;
		break;
	}

	return sig;
}
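
/*
 * Worked example (editor's note, hypothetical values): with a single
 * _NSIG_WORDS word, pending bits 0x18 (SIGILL, bit 3, and SIGTRAP, bit 4)
 * and mask 0x08 (SIGILL blocked):
 *
 *	x = 0x18 & ~0x08 = 0x10;	// only SIGTRAP is deliverable
 *	ffz(~x) = 4;			// index of lowest set bit of x
 *	sig = 4 + 1 = 5;		// SIGTRAP
 *
 * ffz(~x) finds the first zero of the complement, i.e. the lowest set
 * bit of x, and the +1 converts the 0-based bit index into a 1-based
 * signal number.
 */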
static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
					 int override_rlimit)
{
	struct sigqueue *q = NULL;

	atomic_inc(&t->user->sigpending);
	if (override_rlimit ||
	    atomic_read(&t->user->sigpending) <=
			t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	if (unlikely(q == NULL)) {
		atomic_dec(&t->user->sigpending);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->lock = NULL;
		q->user = get_uid(t->user);
	}
	return(q);
}

static inline void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}

static void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue , list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for a task.
 */

void
flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t,TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}

/*
 * This function expects the tasklist_lock write-locked.
 */
void __exit_sighand(struct task_struct *tsk)
{
	struct sighand_struct * sighand = tsk->sighand;

	/* Ok, we're done with the signal handlers */
	tsk->sighand = NULL;
	if (atomic_dec_and_test(&sighand->count))
		kmem_cache_free(sighand_cachep, sighand);
}

void exit_sighand(struct task_struct *tsk)
{
	write_lock_irq(&tasklist_lock);
	__exit_sighand(tsk);
	write_unlock_irq(&tasklist_lock);
}

/*
 * This function expects the tasklist_lock write-locked.
 */
void __exit_signal(struct task_struct *tsk)
{
	struct signal_struct * sig = tsk->signal;
	struct sighand_struct * sighand = tsk->sighand;

	if (!sig)
		BUG();
	if (!atomic_read(&sig->count))
		BUG();
	spin_lock(&sighand->siglock);
	posix_cpu_timers_exit(tsk);
	if (atomic_dec_and_test(&sig->count)) {
		posix_cpu_timers_exit_group(tsk);
		if (tsk == sig->curr_target)
			sig->curr_target = next_thread(tsk);
		tsk->signal = NULL;
		spin_unlock(&sighand->siglock);
		flush_sigqueue(&sig->shared_pending);
	} else {
		/*
		 * If there is any task waiting for the group exit
		 * then notify it:
		 */
		if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count) {
			wake_up_process(sig->group_exit_task);
			sig->group_exit_task = NULL;
		}
		if (tsk == sig->curr_target)
			sig->curr_target = next_thread(tsk);
		tsk->signal = NULL;
		/*
		 * Accumulate here the counters for all threads but the
		 * group leader as they die, so they can be added into
		 * the process-wide totals when those are taken.
		 * The group leader stays around as a zombie as long
		 * as there are other threads.  When it gets reaped,
		 * the exit.c code will add its counts into these totals.
		 * We won't ever get here for the group leader, since it
		 * will have been the last reference on the signal_struct.
		 */
		sig->utime = cputime_add(sig->utime, tsk->utime);
		sig->stime = cputime_add(sig->stime, tsk->stime);
		sig->min_flt += tsk->min_flt;
		sig->maj_flt += tsk->maj_flt;
		sig->nvcsw += tsk->nvcsw;
		sig->nivcsw += tsk->nivcsw;
		sig->sched_time += tsk->sched_time;
		spin_unlock(&sighand->siglock);
		sig = NULL;	/* Marker for below. */
	}
	clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
	flush_sigqueue(&tsk->pending);
	if (sig) {
		/*
		 * We are cleaning up the signal_struct here.  We delayed
		 * calling exit_itimers until after flush_sigqueue, just in
		 * case our thread-local pending queue contained a queued
		 * timer signal that would have been cleared in
		 * exit_itimers.  When that called sigqueue_free, it would
		 * attempt to re-take the tasklist_lock and deadlock.  This
		 * can never happen if we ensure that all queues the
		 * timer's signal might be queued on have been flushed
		 * first.  The shared_pending queue, and our own pending
		 * queue are the only queues the timer could be on, since
		 * there are no other threads left in the group and timer
		 * signals are constrained to threads inside the group.
		 */
		exit_itimers(sig);
		exit_thread_group_keys(sig);
		kmem_cache_free(signal_cachep, sig);
	}
}

void exit_signal(struct task_struct *tsk)
{
	write_lock_irq(&tasklist_lock);
	__exit_signal(tsk);
	write_unlock_irq(&tasklist_lock);
}

/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}
/* Notify the system that a driver wants to block all signals for this
 * process, and wants to be notified if any signals at all were to be
 * sent/acted upon.  If the notifier routine returns non-zero, then the
 * signal will be acted upon after all.  If the notifier routine returns 0,
 * then the signal will be blocked.  Only one block per process is
 * allowed.  priv is a pointer to private data that the notifier routine
 * can use to determine if the signal should be blocked or not.  */

void
block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier_mask = mask;
	current->notifier_data = priv;
	current->notifier = notifier;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}

/* Notify the system that blocking has ended. */

void
unblock_all_signals(void)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier = NULL;
	current->notifier_data = NULL;
	recalc_sigpending();
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
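
/*
 * Usage sketch (editor's note, hypothetical driver code): a driver that
 * must not have signals acted upon while it holds a hardware lock can do
 *
 *	static int my_notifier(void *priv)
 *	{
 *		struct my_dev *dev = priv;
 *		return dev->hw_lock_held ? 0 : 1;	// 0 == keep blocking
 *	}
 *
 *	block_all_signals(my_notifier, dev, &dev->sigmask);
 *	... critical hardware section ...
 *	unblock_all_signals();
 *
 * Only signals in dev->sigmask are routed through the notifier (see
 * __dequeue_signal below); my_dev and hw_lock_held are illustrative
 * names, not an existing in-tree API.
 */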
static inline int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
	struct sigqueue *q, *first = NULL;
	int still_pending = 0;

	if (unlikely(!sigismember(&list->signal, sig)))
		return 0;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first) {
				still_pending = 1;
				break;
			}
			first = q;
		}
	}
	if (first) {
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);
		__sigqueue_free(first);
		if (!still_pending)
			sigdelset(&list->signal, sig);
	} else {

		/* Ok, it wasn't in the queue.  This must be
		   a fast-pathed signal or we must have been
		   out of queue space.  So zero out the info.
		 */
		sigdelset(&list->signal, sig);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = 0;
		info->si_pid = 0;
		info->si_uid = 0;
	}
	return 1;
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			siginfo_t *info)
{
	int sig = 0;

	/* SIGKILL must have priority, otherwise it is quite easy
	 * to create an unkillable process, sending sig < SIGKILL
	 * to self */
	if (unlikely(sigismember(&pending->signal, SIGKILL))) {
		if (!sigismember(mask, SIGKILL))
			sig = SIGKILL;
	}

	if (likely(!sig))
		sig = next_signal(pending, mask);
	if (sig) {
		if (current->notifier) {
			if (sigismember(current->notifier_mask, sig)) {
				if (!(current->notifier)(current->notifier_data)) {
					clear_thread_flag(TIF_SIGPENDING);
					return 0;
				}
			}
		}

		if (!collect_signal(sig, pending, info))
			sig = 0;
	}
	recalc_sigpending();

	return sig;
}

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	int signr = __dequeue_signal(&tsk->pending, mask, info);
	if (!signr)
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info);
	if (signr && unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT))
			tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
	}
	if ( signr &&
	     ((info->si_code & __SI_MASK) == __SI_TIMER) &&
	     info->si_sys_private){
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		do_schedule_next_timer(info);
		spin_lock(&tsk->sighand->siglock);
	}
	return signr;
}
/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up(struct task_struct *t, int resume)
{
	unsigned int mask;

	set_tsk_thread_flag(t, TIF_SIGPENDING);

	/*
	 * For SIGKILL, we want to wake it up in the stopped/traced case.
	 * We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped
	 * state.  By using wake_up_state, we ensure the process will wake
	 * up and handle its death signal.
	 */
	mask = TASK_INTERRUPTIBLE;
	if (resume)
		mask |= TASK_STOPPED | TASK_TRACED;
	if (!wake_up_state(t, mask))
		kick_process(t);
}
/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int rm_from_queue(unsigned long mask, struct sigpending *s)
{
	struct sigqueue *q, *n;

	if (!sigtestsetmask(&s->signal, mask))
		return 0;

	sigdelsetmask(&s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (q->info.si_signo < SIGRTMIN &&
		    (mask & sigmask(q->info.si_signo))) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}

/*
 * Bad permissions for sending the signal
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	int error = -EINVAL;
	if (!valid_signal(sig))
		return error;
	error = -EPERM;
	if ((!info || ((unsigned long)info != 1 &&
			(unsigned long)info != 2 && SI_FROMUSER(info)))
	    && ((sig != SIGCONT) ||
		(current->signal->session != t->signal->session))
	    && (current->euid ^ t->suid) && (current->euid ^ t->uid)
	    && (current->uid ^ t->suid) && (current->uid ^ t->uid)
	    && !capable(CAP_KILL))
		return error;

	error = security_task_kill(t, info, sig);
	if (!error)
		audit_signal_info(sig, t); /* Let audit system see the signal */
	return error;
}

/* forward decl */
static void do_notify_parent_cldstop(struct task_struct *tsk,
				     int to_self,
				     int why);

/*
 * Handle magic process-wide effects of stop/continue signals.
 * Unlike the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals.  The process stop is done as a signal action for SIG_DFL.
 */
static void handle_stop_signal(int sig, struct task_struct *p)
{
	struct task_struct *t;

	if (p->signal->flags & SIGNAL_GROUP_EXIT)
		/*
		 * The process is in the middle of dying already.
		 */
		return;

	if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		rm_from_queue(sigmask(SIGCONT), &p->signal->shared_pending);
		t = p;
		do {
			rm_from_queue(sigmask(SIGCONT), &t->pending);
			t = next_thread(t);
		} while (t != p);
	} else if (sig == SIGCONT) {
		/*
		 * Remove all stop signals from all queues,
		 * and wake all threads.
		 */
		if (unlikely(p->signal->group_stop_count > 0)) {
			/*
			 * There was a group stop in progress.  We'll
			 * pretend it finished before we got here.  We are
			 * obliged to report it to the parent: if the
			 * SIGSTOP happened "after" this SIGCONT, then it
			 * would have cleared this pending SIGCONT.  If it
			 * happened "before" this SIGCONT, then the parent
			 * got the SIGCHLD about the stop finishing before
			 * the continue happened.  We do the notification
			 * now, and it's as if the stop had finished and
			 * the SIGCHLD was pending on entry to this kill.
			 */
			p->signal->group_stop_count = 0;
			p->signal->flags = SIGNAL_STOP_CONTINUED;
			spin_unlock(&p->sighand->siglock);
			do_notify_parent_cldstop(p, (p->ptrace & PT_PTRACED), CLD_STOPPED);
			spin_lock(&p->sighand->siglock);
		}
		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
		t = p;
		do {
			unsigned int state;
			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);

			/*
			 * If there is a handler for SIGCONT, we must make
			 * sure that no thread returns to user mode before
			 * we post the signal, in case it was the only
			 * thread eligible to run the signal handler--then
			 * it must not do anything between resuming and
			 * running the handler.  With the TIF_SIGPENDING
			 * flag set, the thread will pause and acquire the
			 * siglock that we hold now and until we've queued
			 * the pending signal.
			 *
			 * Wake up the stopped thread _after_ setting
			 * TIF_SIGPENDING
			 */
			state = TASK_STOPPED;
			if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
				set_tsk_thread_flag(t, TIF_SIGPENDING);
				state |= TASK_INTERRUPTIBLE;
			}
			wake_up_state(t, state);

			t = next_thread(t);
		} while (t != p);

		if (p->signal->flags & SIGNAL_STOP_STOPPED) {
			/*
			 * We were in fact stopped, and are now continued.
			 * Notify the parent with CLD_CONTINUED.
			 */
			p->signal->flags = SIGNAL_STOP_CONTINUED;
			p->signal->group_exit_code = 0;
			spin_unlock(&p->sighand->siglock);
			do_notify_parent_cldstop(p, (p->ptrace & PT_PTRACED), CLD_CONTINUED);
			spin_lock(&p->sighand->siglock);
		} else {
			/*
			 * We are not stopped, but there could be a stop
			 * signal in the middle of being processed after
			 * being removed from the queue.  Clear that too.
			 */
			p->signal->flags = 0;
		}
	} else if (sig == SIGKILL) {
		/*
		 * Make sure that any pending stop signal already dequeued
		 * is undone by the wakeup for SIGKILL.
		 */
		p->signal->flags = 0;
	}
}
static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			struct sigpending *signals)
{
	struct sigqueue * q = NULL;
	int ret = 0;

	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if ((unsigned long)info == 2)
		goto out_set;

	/* Real-time signals must be queued if sent by sigqueue, or
	   some other real-time mechanism.  It is implementation
	   defined whether kill() does so.  We attempt to do so, on
	   the principle of least surprise, but since kill is not
	   allowed to fail with EAGAIN when low on memory we just
	   make sure at least one signal gets delivered and don't
	   pass on the info struct.  */

	q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
					     ((unsigned long) info < 2 ||
					      info->si_code >= 0)));
	if (q) {
		list_add_tail(&q->list, &signals->list);
		switch ((unsigned long) info) {
		case 0:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = current->pid;
			q->info.si_uid = current->uid;
			break;
		case 1:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			break;
		}
	} else {
		if (sig >= SIGRTMIN && info && (unsigned long)info != 1
		   && info->si_code != SI_USER)
		/*
		 * Queue overflow, abort.  We may abort if the signal was rt
		 * and sent by user using something other than kill().
		 */
			return -EAGAIN;
		if (((unsigned long)info > 1) && (info->si_code == SI_TIMER))
			/*
			 * Set up a return to indicate that we dropped
			 * the signal.
			 */
			ret = info->si_sys_private;
	}

out_set:
	sigaddset(&signals->signal, sig);
	return ret;
}

#define LEGACY_QUEUE(sigptr, sig) \
	(((sig) < SIGRTMIN) && sigismember(&(sigptr)->signal, (sig)))


static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	int ret = 0;

	if (!irqs_disabled())
		BUG();
	assert_spin_locked(&t->sighand->siglock);

	if (((unsigned long)info > 2) && (info->si_code == SI_TIMER))
		/*
		 * Set up a return to indicate that we dropped the signal.
		 */
		ret = info->si_sys_private;

	/* Short-circuit ignored signals.  */
	if (sig_ignored(t, sig))
		goto out;

	/* Support queueing exactly one non-rt signal, so that we
	   can get more detailed information about the cause of
	   the signal. */
	if (LEGACY_QUEUE(&t->pending, sig))
		goto out;

	ret = send_signal(sig, info, t, &t->pending);
	if (!ret && !sigismember(&t->blocked, sig))
		signal_wake_up(t, sig == SIGKILL);
out:
	return ret;
}
/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 */

int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	if (sigismember(&t->blocked, sig) || t->sighand->action[sig-1].sa.sa_handler == SIG_IGN) {
		t->sighand->action[sig-1].sa.sa_handler = SIG_DFL;
		sigdelset(&t->blocked, sig);
		recalc_sigpending_tsk(t);
	}
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}

void
force_sig_specific(int sig, struct task_struct *t)
{
	unsigned long int flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	if (t->sighand->action[sig-1].sa.sa_handler == SIG_IGN)
		t->sighand->action[sig-1].sa.sa_handler = SIG_DFL;
	sigdelset(&t->blocked, sig);
	recalc_sigpending_tsk(t);
	specific_send_sig_info(sig, (void *)2, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}

/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline int wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return 0;
	if (p->flags & PF_EXITING)
		return 0;
	if (sig == SIGKILL)
		return 1;
	if (p->state & (TASK_STOPPED | TASK_TRACED))
		return 0;
	return task_curr(p) || !signal_pending(p);
}

static void
__group_complete_signal(int sig, struct task_struct *p)
{
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if (thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = p->signal->curr_target;
		if (t == NULL)
			/* restart balancing at this thread */
			t = p->signal->curr_target = p;
		BUG_ON(t->tgid != p->tgid);

		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == p->signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		p->signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) && !(p->signal->flags & SIGNAL_GROUP_EXIT) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			p->signal->flags = SIGNAL_GROUP_EXIT;
			p->signal->group_exit_code = sig;
			p->signal->group_stop_count = 0;
			t = p;
			do {
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
				t = next_thread(t);
			} while (t != p);
			return;
		}

		/*
		 * There will be a core dump.  We make all threads other
		 * than the chosen one go into a group stop so that nothing
		 * happens until it gets scheduled, takes the signal off
		 * the shared queue, and does the core dump.  This is a
		 * little more complicated than strictly necessary, but it
		 * keeps the signal state that winds up in the core dump
		 * unchanged from the death state, e.g. which thread had
		 * the core-dump signal unblocked.
		 */
		rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
		p->signal->group_stop_count = 0;
		p->signal->group_exit_task = t;
		t = p;
		do {
			p->signal->group_stop_count++;
			signal_wake_up(t, 0);
			t = next_thread(t);
		} while (t != p);
		wake_up_process(p->signal->group_exit_task);
		return;
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}
int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret = 0;

	assert_spin_locked(&p->sighand->siglock);
	handle_stop_signal(sig, p);

	if (((unsigned long)info > 2) && (info->si_code == SI_TIMER))
		/*
		 * Set up a return to indicate that we dropped the signal.
		 */
		ret = info->si_sys_private;

	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig))
		return ret;

	if (LEGACY_QUEUE(&p->signal->shared_pending, sig))
		/* This is a non-RT signal and we already have one queued.  */
		return ret;

	/*
	 * Put this signal on the shared-pending queue, or fail with EAGAIN.
	 * We always use the shared queue for process-wide signals,
	 * to avoid several races.
	 */
	ret = send_signal(sig, info, p, &p->signal->shared_pending);
	if (unlikely(ret))
		return ret;

	__group_complete_signal(sig, p);
	return 0;
}

/*
 * Nuke all other threads in the group.
 */
void zap_other_threads(struct task_struct *p)
{
	struct task_struct *t;

	p->signal->flags = SIGNAL_GROUP_EXIT;
	p->signal->group_stop_count = 0;

	if (thread_group_empty(p))
		return;

	for (t = next_thread(p); t != p; t = next_thread(t)) {
		/*
		 * Don't bother with already dead threads
		 */
		if (t->exit_state)
			continue;

		/*
		 * We don't want to notify the parent, since we are
		 * killed as part of a thread group due to another
		 * thread doing an execve() or similar. So set the
		 * exit signal to -1 to allow immediate reaping of
		 * the process.  But don't detach the thread group
		 * leader.
		 */
		if (t != p->group_leader)
			t->exit_signal = -1;

		sigaddset(&t->pending.signal, SIGKILL);
		rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
		signal_wake_up(t, 1);
	}
}

/*
 * Must be called with the tasklist_lock held for reading!
 */
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	unsigned long flags;
	int ret;

	ret = check_kill_permission(sig, info, p);
	if (!ret && sig && p->sighand) {
		spin_lock_irqsave(&p->sighand->siglock, flags);
		ret = __group_send_sig_info(sig, info, p);
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}

	return ret;
}

/*
 * kill_pg_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 */

int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	if (pgrp <= 0)
		return -EINVAL;

	success = 0;
	retval = -ESRCH;
	do_each_task_pid(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p);
		success |= !err;
		retval = err;
	} while_each_task_pid(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}

int
kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
{
	int retval;

	read_lock(&tasklist_lock);
	retval = __kill_pg_info(sig, info, pgrp);
	read_unlock(&tasklist_lock);

	return retval;
}

int
kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;
	struct task_struct *p;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);
	error = -ESRCH;
	if (p)
		error = group_send_sig_info(sig, info, p);
	read_unlock(&tasklist_lock);
	return error;
}

/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct siginfo *info, int pid)
{
	if (!pid) {
		return kill_pg_info(sig, info, process_group(current));
	} else if (pid == -1) {
		int retval = 0, count = 0;
		struct task_struct * p;

		read_lock(&tasklist_lock);
		for_each_process(p) {
			if (p->pid > 1 && p->tgid != current->tgid) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		read_unlock(&tasklist_lock);
		return count ? retval : -ESRCH;
	} else if (pid < 0) {
		return kill_pg_info(sig, info, -pid);
	} else {
		return kill_proc_info(sig, info, pid);
	}
}
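
/*
 * Summary (editor's note) of the pid cases above, matching kill(2):
 *
 *	pid > 0		signal the process with that pid
 *	pid == 0	signal every process in the caller's process group
 *	pid == -1	signal every process except init (pid 1) and the
 *			caller's own thread group
 *	pid < -1	signal every process in the process group -pid
 */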
/*
 * These are for backward compatibility with the rest of the kernel source.
 */

/*
 * These two are the most common entry points.  They send a signal
 * just to the specific thread.
 */
int
send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;
	unsigned long flags;

	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	/*
	 * We need the tasklist lock even for the specific
	 * thread case (when we don't need to follow the group
	 * lists) in order to avoid races with "p->sighand"
	 * going away or changing from under us.
	 */
	read_lock(&tasklist_lock);
	spin_lock_irqsave(&p->sighand->siglock, flags);
	ret = specific_send_sig_info(sig, info, p);
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	read_unlock(&tasklist_lock);
	return ret;
}

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, (void*)(long)(priv != 0), p);
}

/*
 * This is the entry point for "process-wide" signals.
 * They will go to an appropriate thread in the thread group.
 */
int
send_group_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;
	read_lock(&tasklist_lock);
	ret = group_send_sig_info(sig, info, p);
	read_unlock(&tasklist_lock);
	return ret;
}

void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, (void*)1L, p);
}

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
int
force_sigsegv(int sig, struct task_struct *p)
{
	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
	return 0;
}

int
kill_pg(pid_t pgrp, int sig, int priv)
{
	return kill_pg_info(sig, (void *)(long)(priv != 0), pgrp);
}

int
kill_proc(pid_t pid, int sig, int priv)
{
	return kill_proc_info(sig, (void *)(long)(priv != 0), pid);
}
/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX timers
 * we allocate the sigqueue structure from timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */

struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q;

	if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0)))
		q->flags |= SIGQUEUE_PREALLOC;
	return(q);
}

void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * If the signal is still pending remove it from the
	 * pending queue.
	 */
	if (unlikely(!list_empty(&q->list))) {
		read_lock(&tasklist_lock);
		spin_lock_irqsave(q->lock, flags);
		if (!list_empty(&q->list))
			list_del_init(&q->list);
		spin_unlock_irqrestore(q->lock, flags);
		read_unlock(&tasklist_lock);
	}
	q->flags &= ~SIGQUEUE_PREALLOC;
	__sigqueue_free(q);
}

int
send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
{
	unsigned long flags;
	int ret = 0;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	read_lock(&tasklist_lock);

	if (unlikely(p->flags & PF_EXITING)) {
		ret = -1;
		goto out_err;
	}

	spin_lock_irqsave(&p->sighand->siglock, flags);

	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.
		 */
		if (q->info.si_code != SI_TIMER)
			BUG();
		q->info.si_overrun++;
		goto out;
	}
	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig)) {
		ret = 1;
		goto out;
	}

	q->lock = &p->sighand->siglock;
	list_add_tail(&q->list, &p->pending.list);
	sigaddset(&p->pending.signal, sig);
	if (!sigismember(&p->blocked, sig))
		signal_wake_up(p, sig == SIGKILL);

out:
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
out_err:
	read_unlock(&tasklist_lock);

	return ret;
}

int
send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
{
	unsigned long flags;
	int ret = 0;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	read_lock(&tasklist_lock);
	spin_lock_irqsave(&p->sighand->siglock, flags);
	handle_stop_signal(sig, p);

	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig)) {
		ret = 1;
		goto out;
	}

	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.  Other uses should not try to
		 * send the signal multiple times.
		 */
		if (q->info.si_code != SI_TIMER)
			BUG();
		q->info.si_overrun++;
		goto out;
	}

	/*
	 * Put this signal on the shared-pending queue.
	 * We always use the shared queue for process-wide signals,
	 * to avoid several races.
	 */
	q->lock = &p->sighand->siglock;
	list_add_tail(&q->list, &p->signal->shared_pending.list);
	sigaddset(&p->signal->shared_pending.signal, sig);

	__group_complete_signal(sig, p);
out:
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	read_unlock(&tasklist_lock);
	return(ret);
}
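
/*
 * Usage sketch (editor's note): this is the pattern the POSIX timer code
 * follows, per the comment above -- allocate at timer_create time so that
 * a timer expiry can never fail for lack of memory:
 *
 *	struct sigqueue *q = sigqueue_alloc();	// at timer_create
 *	if (!q)
 *		return -EAGAIN;			// reportable failure
 *	...
 *	q->info.si_code = SI_TIMER;		// at timer expiry
 *	send_group_sigqueue(signo, q, leader);
 *	...
 *	sigqueue_free(q);			// at timer_delete
 *
 * Variable names here are illustrative, not the exact posix-timers code.
 */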
/*
 * Wake up any threads in the parent blocked in wait* syscalls.
 */
static inline void __wake_up_parent(struct task_struct *p,
				    struct task_struct *parent)
{
	wake_up_interruptible_sync(&parent->signal->wait_chldexit);
}

/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 */

void do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead.  */
	BUG_ON(tsk->state & (TASK_STOPPED|TASK_TRACED));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_pid = tsk->pid;
	info.si_uid = tsk->uid;

	/* FIXME: find out whether or not this is supposed to be c*time. */
	info.si_utime = cputime_to_jiffies(cputime_add(tsk->utime,
						       tsk->signal->utime));
	info.si_stime = cputime_to_jiffies(cputime_add(tsk->stime,
						       tsk->signal->stime));

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		tsk->exit_signal = -1;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	if (valid_signal(sig) && sig > 0)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);
}

static void do_notify_parent_cldstop(struct task_struct *tsk, int to_self, int why)
{
	struct siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;

	if (to_self)
		parent = tsk->parent;
	else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	info.si_pid = tsk->pid;
	info.si_uid = tsk->uid;

	/* FIXME: find out whether or not this is supposed to be c*time. */
	info.si_utime = cputime_to_jiffies(tsk->utime);
	info.si_stime = cputime_to_jiffies(tsk->stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}
/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer is gone,
 * we leave nostop_code in current->exit_code.
 */
static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
{
	/*
	 * If there is a group stop in progress,
	 * we must participate in the bookkeeping.
	 */
	if (current->signal->group_stop_count > 0)
		--current->signal->group_stop_count;

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/* Let the debugger run.  */
	set_current_state(TASK_TRACED);
	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);
	if (likely(current->ptrace & PT_PTRACED) &&
	    likely(current->parent != current->real_parent ||
		   !(current->ptrace & PT_ATTACHED)) &&
	    (likely(current->parent->signal != current->signal) ||
	     !unlikely(current->signal->flags & SIGNAL_GROUP_EXIT))) {
		do_notify_parent_cldstop(current, 1, CLD_TRAPPED);
		read_unlock(&tasklist_lock);
		schedule();
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't stop here.
		 */
		read_unlock(&tasklist_lock);
		set_current_state(TASK_RUNNING);
		current->exit_code = nostop_code;
	}

	/*
	 * We are back.  Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 */
	recalc_sigpending();
}

void ptrace_notify(int exit_code)
{
	siginfo_t info;

	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);

	memset(&info, 0, sizeof info);
	info.si_signo = SIGTRAP;
	info.si_code = exit_code;
	info.si_pid = current->pid;
	info.si_uid = current->uid;

	/* Let the debugger run.  */
	spin_lock_irq(&current->sighand->siglock);
	ptrace_stop(exit_code, 0, &info);
	spin_unlock_irq(&current->sighand->siglock);
}
static void
finish_stop(int stop_count)
{
	int to_self;

	/*
	 * If there are no other threads in the group, or if there is
	 * a group stop in progress and we are the last to stop,
	 * report to the parent.  When ptraced, every thread reports itself.
	 */
	if (stop_count < 0 || (current->ptrace & PT_PTRACED))
		to_self = 1;
	else if (stop_count == 0)
		to_self = 0;
	else
		goto out;

	read_lock(&tasklist_lock);
	do_notify_parent_cldstop(current, to_self, CLD_STOPPED);
	read_unlock(&tasklist_lock);

out:
	schedule();
	/*
	 * Now we don't run again until continued.
	 */
	current->exit_code = 0;
}

/*
 * This performs the stopping for SIGSTOP and other stop signals.
 * We have to stop all threads in the thread group.
 * Returns nonzero if we've actually stopped and released the siglock.
 * Returns zero if we didn't stop and still hold the siglock.
 */
static int
do_signal_stop(int signr)
{
	struct signal_struct *sig = current->signal;
	struct sighand_struct *sighand = current->sighand;
	int stop_count = -1;

	if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED))
		return 0;

	if (sig->group_stop_count > 0) {
		/*
		 * There is a group stop in progress.  We don't need to
		 * start another one.
		 */
		signr = sig->group_exit_code;
		stop_count = --sig->group_stop_count;
		current->exit_code = signr;
		set_current_state(TASK_STOPPED);
		if (stop_count == 0)
			sig->flags = SIGNAL_STOP_STOPPED;
		spin_unlock_irq(&sighand->siglock);
	}
	else if (thread_group_empty(current)) {
		/*
		 * Lock must be held through transition to stopped state.
		 */
		current->exit_code = current->signal->group_exit_code = signr;
		set_current_state(TASK_STOPPED);
		sig->flags = SIGNAL_STOP_STOPPED;
		spin_unlock_irq(&sighand->siglock);
	}
	else {
		/*
		 * There is no group stop already in progress.
		 * We must initiate one now, but that requires
		 * dropping siglock to get both the tasklist lock
		 * and siglock again in the proper order.  Note that
		 * this allows an intervening SIGCONT to be posted.
		 * We need to check for that and bail out if necessary.
		 */
		struct task_struct *t;

		spin_unlock_irq(&sighand->siglock);

		/* signals can be posted during this window */

		read_lock(&tasklist_lock);
		spin_lock_irq(&sighand->siglock);

		if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED)) {
			/*
			 * Another stop or continue happened while we
			 * didn't have the lock.  We can just swallow this
			 * signal now.  If we raced with a SIGCONT, that
			 * should have just cleared it now.  If we raced
			 * with another processor delivering a stop signal,
			 * then the SIGCONT that wakes us up should clear it.
			 */
			read_unlock(&tasklist_lock);
			return 0;
		}

		if (sig->group_stop_count == 0) {
			sig->group_exit_code = signr;
			stop_count = 0;
			for (t = next_thread(current); t != current;
			     t = next_thread(t))
				/*
				 * Setting state to TASK_STOPPED for a group
				 * stop is always done with the siglock held,
				 * so this check has no races.
				 */
				if (!t->exit_state &&
				    !(t->state & (TASK_STOPPED|TASK_TRACED))) {
					stop_count++;
					signal_wake_up(t, 0);
				}
			sig->group_stop_count = stop_count;
		}
		else {
			/* A race with another thread while unlocked.  */
			signr = sig->group_exit_code;
			stop_count = --sig->group_stop_count;
		}

		current->exit_code = signr;
		set_current_state(TASK_STOPPED);
		if (stop_count == 0)
			sig->flags = SIGNAL_STOP_STOPPED;

		spin_unlock_irq(&sighand->siglock);
		read_unlock(&tasklist_lock);
	}

	finish_stop(stop_count);
	return 1;
}
/*
 * Do appropriate magic when group_stop_count > 0.
 * We return nonzero if we stopped, after releasing the siglock.
 * We return zero if we still hold the siglock and should look
 * for another signal without checking group_stop_count again.
 */
static inline int handle_group_stop(void)
{
	int stop_count;

	if (current->signal->group_exit_task == current) {
		/*
		 * Group stop is so we can do a core dump,
		 * We are the initiating thread, so get on with it.
		 */
		current->signal->group_exit_task = NULL;
		return 0;
	}

	if (current->signal->flags & SIGNAL_GROUP_EXIT)
		/*
		 * Group stop is so another thread can do a core dump,
		 * or else we are racing against a death signal.
		 * Just punt the stop so we can get the next signal.
		 */
		return 0;

	/*
	 * There is a group stop in progress.  We stop
	 * without any associated signal being in our queue.
	 */
	stop_count = --current->signal->group_stop_count;
	if (stop_count == 0)
		current->signal->flags = SIGNAL_STOP_STOPPED;
	current->exit_code = current->signal->group_exit_code;
	set_current_state(TASK_STOPPED);
	spin_unlock_irq(&current->sighand->siglock);
	finish_stop(stop_count);
	return 1;
}
int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
			  struct pt_regs *regs, void *cookie)
{
	sigset_t *mask = &current->blocked;
	int signr = 0;

relock:
	spin_lock_irq(&current->sighand->siglock);
	for (;;) {
		struct k_sigaction *ka;

		if (unlikely(current->signal->group_stop_count > 0) &&
		    handle_group_stop())
			goto relock;

		signr = dequeue_signal(current, mask, info);

		if (!signr)
			break; /* will return 0 */

		if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
			ptrace_signal_deliver(regs, cookie);

			/* Let the debugger run.  */
			ptrace_stop(signr, signr, info);

			/* We're back.  Did the debugger cancel the sig?  */
			signr = current->exit_code;
			if (signr == 0)
				continue;

			current->exit_code = 0;

			/* Update the siginfo structure if the signal has
			   changed.  If the debugger wanted something
			   specific in the siginfo structure then it should
			   have updated *info via PTRACE_SETSIGINFO.  */
			if (signr != info->si_signo) {
				info->si_signo = signr;
				info->si_errno = 0;
				info->si_code = SI_USER;
				info->si_pid = current->parent->pid;
				info->si_uid = current->parent->uid;
			}

			/* If the (new) signal is now blocked, requeue it.  */
			if (sigismember(&current->blocked, signr)) {
				specific_send_sig_info(signr, info, current);
				continue;
			}
		}

		ka = &current->sighand->action[signr-1];
		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
			continue;
		if (ka->sa.sa_handler != SIG_DFL) {
			/* Run the handler.  */
			*return_ka = *ka;

			if (ka->sa.sa_flags & SA_ONESHOT)
				ka->sa.sa_handler = SIG_DFL;

			break; /* will return non-zero "signr" value */
		}

		/*
		 * Now we are doing the default action for this signal.
		 */
		if (sig_kernel_ignore(signr)) /* Default is nothing. */
			continue;

		/* Init gets no signals it doesn't want.  */
		if (current->pid == 1)
			continue;

		if (sig_kernel_stop(signr)) {
			/*
			 * The default action is to stop all threads in
			 * the thread group.  The job control signals
			 * do nothing in an orphaned pgrp, but SIGSTOP
			 * always works.  Note that siglock needs to be
			 * dropped during the call to is_orphaned_pgrp()
			 * because of lock ordering with tasklist_lock.
			 * This allows an intervening SIGCONT to be posted.
			 * We need to check for that and bail out if necessary.
			 */
			if (signr != SIGSTOP) {
				spin_unlock_irq(&current->sighand->siglock);

				/* signals can be posted during this window */

				if (is_orphaned_pgrp(process_group(current)))
					goto relock;

				spin_lock_irq(&current->sighand->siglock);
			}

			if (likely(do_signal_stop(signr))) {
				/* It released the siglock.  */
				goto relock;
			}

			/*
			 * We didn't actually stop, due to a race
			 * with SIGCONT or something like that.
			 */
			continue;
		}

		spin_unlock_irq(&current->sighand->siglock);

		/*
		 * Anything else is fatal, maybe with a core dump.
		 */
		current->flags |= PF_SIGNALED;
		if (sig_kernel_coredump(signr)) {
			/*
			 * If it was able to dump core, this kills all
			 * other threads in the group and synchronizes with
			 * their demise.  If we lost the race with another
			 * thread getting here, it set group_exit_code
			 * first and our do_group_exit call below will use
			 * that value and ignore the one we pass it.
			 */
			do_coredump((long)signr, signr, regs);
		}

		/*
		 * Death signals, no core dump.
		 */
		do_group_exit(signr);
		/* NOTREACHED */
	}
	spin_unlock_irq(&current->sighand->siglock);
	return signr;
}
EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL_GPL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(kill_pg);
EXPORT_SYMBOL(kill_proc);
EXPORT_SYMBOL(ptrace_notify);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
EXPORT_SYMBOL(sigprocmask);
EXPORT_SYMBOL(block_all_signals);
EXPORT_SYMBOL(unblock_all_signals);

/*
 * System call entry points.
 */

asmlinkage long sys_restart_syscall(void)
{
	struct restart_block *restart = &current_thread_info()->restart_block;
	return restart->fn(restart);
}

long do_no_restart_syscall(struct restart_block *param)
{
	return -EINTR;
}
/*
 * We don't need to get the kernel lock - this is all local to this
 * particular thread.. (and that's good, because this is _heavily_
 * used by various programs)
 */

/*
 * This is also useful for kernel threads that want to temporarily
 * (or permanently) block certain signals.
 *
 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
 * interface happily blocks "unblockable" signals like SIGKILL
 * and friends.
 */
int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
{
	int error;
	sigset_t old_block;

	spin_lock_irq(&current->sighand->siglock);
	old_block = current->blocked;
	error = 0;
	switch (how) {
	case SIG_BLOCK:
		sigorsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_UNBLOCK:
		signandsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_SETMASK:
		current->blocked = *set;
		break;
	default:
		error = -EINVAL;
	}
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	if (oldset)
		*oldset = old_block;
	return error;
}
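
/*
 * Usage sketch (editor's note, illustrative kernel-thread code): a daemon
 * thread that wants to see only SIGKILL can do
 *
 *	sigset_t all_but_kill, old;
 *
 *	siginitsetinv(&all_but_kill, sigmask(SIGKILL));
 *	sigprocmask(SIG_BLOCK, &all_but_kill, &old);
 *
 * Note that, per the comment above, this kernel-side sigprocmask() would
 * happily block SIGKILL too if asked; the mask here deliberately leaves
 * it open.
 */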
2042 asmlinkage long
2043 sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize)
2044 {
2045 int error = -EINVAL;
2046 sigset_t old_set, new_set;
2048 /* XXX: Don't preclude handling different sized sigset_t's. */
2049 if (sigsetsize != sizeof(sigset_t))
2050 goto out;
2052 if (set) {
2053 error = -EFAULT;
2054 if (copy_from_user(&new_set, set, sizeof(*set)))
2055 goto out;
2056 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2058 error = sigprocmask(how, &new_set, &old_set);
2059 if (error)
2060 goto out;
2061 if (oset)
2062 goto set_old;
2063 } else if (oset) {
2064 spin_lock_irq(&current->sighand->siglock);
2065 old_set = current->blocked;
2066 spin_unlock_irq(&current->sighand->siglock);
2068 set_old:
2069 error = -EFAULT;
2070 if (copy_to_user(oset, &old_set, sizeof(*oset)))
2071 			goto out;
2072 	}
2073 	error = 0;
2074 out:
2075 	return error;
2076 }
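/*
 * Userspace sketch (hedged; glibc's sigprocmask() is typically
 * implemented on top of rt_sigprocmask on 2.6 kernels): block SIGINT
 * around a critical region, then restore the previous mask.
 *
 *	#include <signal.h>
 *
 *	sigset_t newmask, oldmask;
 *
 *	sigemptyset(&newmask);
 *	sigaddset(&newmask, SIGINT);
 *	sigprocmask(SIG_BLOCK, &newmask, &oldmask);
 *	... critical region ...
 *	sigprocmask(SIG_SETMASK, &oldmask, NULL);
 */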
2078 long do_sigpending(void __user *set, unsigned long sigsetsize)
2079 {
2080 long error = -EINVAL;
2081 sigset_t pending;
2083 if (sigsetsize > sizeof(sigset_t))
2084 goto out;
2086 spin_lock_irq(&current->sighand->siglock);
2087 sigorsets(&pending, &current->pending.signal,
2088 &current->signal->shared_pending.signal);
2089 spin_unlock_irq(&current->sighand->siglock);
2091 /* Outside the lock because only this thread touches it. */
2092 sigandsets(&pending, &current->blocked, &pending);
2094 error = -EFAULT;
2095 if (!copy_to_user(set, &pending, sigsetsize))
2096 error = 0;
2098 out:
2099 	return error;
2100 }
2102 asmlinkage long
2103 sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize)
2104 {
2105 	return do_sigpending(set, sigsetsize);
2106 }
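/*
 * Userspace sketch (illustrative, via the glibc sigpending() wrapper):
 * a blocked signal raised against ourselves shows up in the pending
 * set, matching the blocked & pending computation above.
 *
 *	#include <signal.h>
 *
 *	sigset_t mask, pending;
 *
 *	sigemptyset(&mask);
 *	sigaddset(&mask, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &mask, NULL);
 *	raise(SIGUSR1);
 *	sigpending(&pending);
 *	if (sigismember(&pending, SIGUSR1))
 *		... SIGUSR1 is pending but blocked ...
 */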
2108 #ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2110 int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
2111 {
2112 int err;
2114 if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
2115 return -EFAULT;
2116 if (from->si_code < 0)
2117 return __copy_to_user(to, from, sizeof(siginfo_t))
2118 ? -EFAULT : 0;
2119 	/*
2120 	 * If you change siginfo_t structure, please be sure
2121 	 * this code is fixed accordingly.
2122 	 * It should never copy any pad contained in the structure
2123 	 * to avoid security leaks, but must copy the generic
2124 	 * 3 ints plus the relevant union member.
2125 	 */
2126 err = __put_user(from->si_signo, &to->si_signo);
2127 err |= __put_user(from->si_errno, &to->si_errno);
2128 err |= __put_user((short)from->si_code, &to->si_code);
2129 switch (from->si_code & __SI_MASK) {
2130 case __SI_KILL:
2131 err |= __put_user(from->si_pid, &to->si_pid);
2132 err |= __put_user(from->si_uid, &to->si_uid);
2133 break;
2134 case __SI_TIMER:
2135 err |= __put_user(from->si_tid, &to->si_tid);
2136 err |= __put_user(from->si_overrun, &to->si_overrun);
2137 err |= __put_user(from->si_ptr, &to->si_ptr);
2138 break;
2139 case __SI_POLL:
2140 err |= __put_user(from->si_band, &to->si_band);
2141 err |= __put_user(from->si_fd, &to->si_fd);
2142 break;
2143 case __SI_FAULT:
2144 err |= __put_user(from->si_addr, &to->si_addr);
2145 #ifdef __ARCH_SI_TRAPNO
2146 err |= __put_user(from->si_trapno, &to->si_trapno);
2147 #endif
2148 break;
2149 case __SI_CHLD:
2150 err |= __put_user(from->si_pid, &to->si_pid);
2151 err |= __put_user(from->si_uid, &to->si_uid);
2152 err |= __put_user(from->si_status, &to->si_status);
2153 err |= __put_user(from->si_utime, &to->si_utime);
2154 err |= __put_user(from->si_stime, &to->si_stime);
2155 break;
2156 case __SI_RT: /* This is not generated by the kernel as of now. */
2157 case __SI_MESGQ: /* But this is */
2158 err |= __put_user(from->si_pid, &to->si_pid);
2159 err |= __put_user(from->si_uid, &to->si_uid);
2160 err |= __put_user(from->si_ptr, &to->si_ptr);
2161 break;
2162 default: /* this is just in case for now ... */
2163 err |= __put_user(from->si_pid, &to->si_pid);
2164 err |= __put_user(from->si_uid, &to->si_uid);
2165 		break;
2166 	}
2167 	return err;
2168 }
2170 #endif
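/*
 * What the field-by-field copy above gives user space (a hedged
 * sketch, not from this file): an SA_SIGINFO handler can inspect the
 * members that are valid for the delivered si_code, e.g. the sender's
 * identity for a plain kill(). printf is not async-signal-safe; it is
 * used here for demonstration only.
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *
 *	static void handler(int sig, siginfo_t *si, void *uctx)
 *	{
 *		if (si->si_code == SI_USER)
 *			printf("sig %d from pid %d uid %d\n", sig,
 *			       (int)si->si_pid, (int)si->si_uid);
 *	}
 *
 *	struct sigaction sa = { .sa_sigaction = handler,
 *				.sa_flags = SA_SIGINFO };
 *	sigaction(SIGUSR1, &sa, NULL);
 */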
2172 asmlinkage long
2173 sys_rt_sigtimedwait(const sigset_t __user *uthese,
2174 siginfo_t __user *uinfo,
2175 const struct timespec __user *uts,
2176 		size_t sigsetsize)
2177 {
2178 int ret, sig;
2179 sigset_t these;
2180 struct timespec ts;
2181 siginfo_t info;
2182 long timeout = 0;
2184 /* XXX: Don't preclude handling different sized sigset_t's. */
2185 if (sigsetsize != sizeof(sigset_t))
2186 return -EINVAL;
2188 if (copy_from_user(&these, uthese, sizeof(these)))
2189 return -EFAULT;
2191 	/*
2192 	 * Invert the set of allowed signals to get those we
2193 	 * want to block.
2194 	 */
2195 sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
2196 signotset(&these);
2198 if (uts) {
2199 if (copy_from_user(&ts, uts, sizeof(ts)))
2200 return -EFAULT;
2201 if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
2202 || ts.tv_sec < 0)
2203 			return -EINVAL;
2204 	}
2206 spin_lock_irq(&current->sighand->siglock);
2207 sig = dequeue_signal(current, &these, &info);
2208 if (!sig) {
2209 timeout = MAX_SCHEDULE_TIMEOUT;
2210 if (uts)
2211 timeout = (timespec_to_jiffies(&ts)
2212 + (ts.tv_sec || ts.tv_nsec));
2214 if (timeout) {
2215 		/* None ready -- temporarily unblock those we're
2216 		 * interested in while we sleep, so that we'll
2217 		 * be awakened when they arrive. */
2218 current->real_blocked = current->blocked;
2219 sigandsets(&current->blocked, &current->blocked, &these);
2220 recalc_sigpending();
2221 spin_unlock_irq(&current->sighand->siglock);
2223 timeout = schedule_timeout_interruptible(timeout);
2225 try_to_freeze();
2226 spin_lock_irq(&current->sighand->siglock);
2227 sig = dequeue_signal(current, &these, &info);
2228 current->blocked = current->real_blocked;
2229 siginitset(&current->real_blocked, 0);
2230 			recalc_sigpending();
2231 		}
2232 	}
2233 spin_unlock_irq(&current->sighand->siglock);
2235 if (sig) {
2236 ret = sig;
2237 if (uinfo) {
2238 if (copy_siginfo_to_user(uinfo, &info))
2239 				ret = -EFAULT;
2240 		}
2241 } else {
2242 ret = -EAGAIN;
2243 if (timeout)
2244 			ret = -EINTR;
2245 	}
2247 	return ret;
2248 }
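/*
 * Userspace sketch matching the return logic above: on timeout the
 * -EAGAIN surfaces as errno EAGAIN, while a dequeued signal is
 * returned by number.
 *
 *	#include <signal.h>
 *	#include <errno.h>
 *
 *	sigset_t set;
 *	siginfo_t si;
 *	struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };
 *	int sig;
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	sig = sigtimedwait(&set, &si, &ts);
 *	if (sig < 0 && errno == EAGAIN)
 *		... timed out ...
 */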
2250 asmlinkage long
2251 sys_kill(int pid, int sig)
2252 {
2253 struct siginfo info;
2255 info.si_signo = sig;
2256 info.si_errno = 0;
2257 info.si_code = SI_USER;
2258 info.si_pid = current->tgid;
2259 info.si_uid = current->uid;
2261 	return kill_something_info(sig, &info, pid);
2262 }
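/*
 * Userspace sketch: kill() with signal 0 performs only the permission
 * and existence checks, a cheap way to probe whether a pid is alive.
 *
 *	#include <signal.h>
 *	#include <errno.h>
 *
 *	if (kill(pid, 0) == 0 || errno == EPERM)
 *		... process exists ...
 *	else if (errno == ESRCH)
 *		... no such process ...
 */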
2264 /**
2265  *  sys_tgkill - send signal to one specific thread
2266  *  @tgid: the thread group ID of the thread
2267  *  @pid: the PID of the thread
2268  *  @sig: signal to be sent
2269  *
2270  *  This syscall also checks the tgid and returns -ESRCH even if the PID
2271  *  exists but no longer belongs to the target process. This method
2272  *  solves the problem of threads exiting and PIDs getting reused.
2273  */
2274 asmlinkage long sys_tgkill(int tgid, int pid, int sig)
2275 {
2276 struct siginfo info;
2277 int error;
2278 struct task_struct *p;
2280 /* This is only valid for single tasks */
2281 if (pid <= 0 || tgid <= 0)
2282 return -EINVAL;
2284 info.si_signo = sig;
2285 info.si_errno = 0;
2286 info.si_code = SI_TKILL;
2287 info.si_pid = current->tgid;
2288 info.si_uid = current->uid;
2290 read_lock(&tasklist_lock);
2291 p = find_task_by_pid(pid);
2292 error = -ESRCH;
2293 if (p && (p->tgid == tgid)) {
2294 error = check_kill_permission(sig, &info, p);
2295 		/*
2296 		 * The null signal is a permissions and process existence
2297 		 * probe. No signal is actually delivered.
2298 		 */
2299 if (!error && sig && p->sighand) {
2300 spin_lock_irq(&p->sighand->siglock);
2301 handle_stop_signal(sig, p);
2302 error = specific_send_sig_info(sig, &info, p);
2303 			spin_unlock_irq(&p->sighand->siglock);
2304 		}
2305 	}
2306 read_unlock(&tasklist_lock);
2307 	return error;
2308 }
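/*
 * Userspace sketch (hedged: early glibc shipped no wrapper, so the raw
 * syscall is used; assumes __NR_tgkill is defined for the target
 * architecture): signal one specific thread of a known thread group,
 * immune to PID reuse as described above.
 *
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *	#include <signal.h>
 *
 *	syscall(__NR_tgkill, tgid, tid, SIGUSR1);
 */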
2310 /*
2311  *  Send a signal to only one task, even if it's a CLONE_THREAD task.
2312  */
2313 asmlinkage long
2314 sys_tkill(int pid, int sig)
2315 {
2316 struct siginfo info;
2317 int error;
2318 struct task_struct *p;
2320 /* This is only valid for single tasks */
2321 if (pid <= 0)
2322 return -EINVAL;
2324 info.si_signo = sig;
2325 info.si_errno = 0;
2326 info.si_code = SI_TKILL;
2327 info.si_pid = current->tgid;
2328 info.si_uid = current->uid;
2330 read_lock(&tasklist_lock);
2331 p = find_task_by_pid(pid);
2332 error = -ESRCH;
2333 if (p) {
2334 error = check_kill_permission(sig, &info, p);
2335 		/*
2336 		 * The null signal is a permissions and process existence
2337 		 * probe. No signal is actually delivered.
2338 		 */
2339 if (!error && sig && p->sighand) {
2340 spin_lock_irq(&p->sighand->siglock);
2341 handle_stop_signal(sig, p);
2342 error = specific_send_sig_info(sig, &info, p);
2343 			spin_unlock_irq(&p->sighand->siglock);
2344 		}
2345 	}
2346 read_unlock(&tasklist_lock);
2347 	return error;
2348 }
2350 asmlinkage long
2351 sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
2352 {
2353 siginfo_t info;
2355 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2356 return -EFAULT;
2358 /* Not even root can pretend to send signals from the kernel.
2359 Nor can they impersonate a kill(), which adds source info. */
2360 if (info.si_code >= 0)
2361 return -EPERM;
2362 info.si_signo = sig;
2364 /* POSIX.1b doesn't mention process groups. */
2365 	return kill_proc_info(sig, &info, pid);
2366 }
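/*
 * Userspace sketch: glibc's sigqueue() is typically built on this
 * entry point with si_code set to SI_QUEUE (negative, so it passes the
 * check above) and one word of payload in the sigval union.
 *
 *	#include <signal.h>
 *
 *	union sigval val;
 *
 *	val.sival_int = 42;
 *	sigqueue(pid, SIGUSR1, val);
 */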
2368 int
2369 do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact)
2370 {
2371 struct k_sigaction *k;
2373 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
2374 return -EINVAL;
2376 k = &current->sighand->action[sig-1];
2378 spin_lock_irq(&current->sighand->siglock);
2379 if (signal_pending(current)) {
2380 		/*
2381 		 * If there might be a fatal signal pending on multiple
2382 		 * threads, make sure we take it before changing the action.
2383 		 */
2384 spin_unlock_irq(&current->sighand->siglock);
2385 		return -ERESTARTNOINTR;
2386 	}
2388 if (oact)
2389 *oact = *k;
2391 if (act) {
2392 		/*
2393 		 * POSIX 3.3.1.3:
2394 		 *  "Setting a signal action to SIG_IGN for a signal that is
2395 		 *   pending shall cause the pending signal to be discarded,
2396 		 *   whether or not it is blocked."
2397 		 *
2398 		 *  "Setting a signal action to SIG_DFL for a signal that is
2399 		 *   pending and whose default action is to ignore the signal
2400 		 *   (for example, SIGCHLD), shall cause the pending signal to
2401 		 *   be discarded, whether or not it is blocked"
2402 		 */
2403 if (act->sa.sa_handler == SIG_IGN ||
2404 (act->sa.sa_handler == SIG_DFL &&
2405 sig_kernel_ignore(sig))) {
2406 			/*
2407 			 * This is a fairly rare case, so we only take the
2408 			 * tasklist_lock once we're sure we'll need it.
2409 			 * Now we must do this little unlock and relock
2410 			 * dance to maintain the lock hierarchy.
2411 			 */
2412 struct task_struct *t = current;
2413 spin_unlock_irq(&t->sighand->siglock);
2414 read_lock(&tasklist_lock);
2415 spin_lock_irq(&t->sighand->siglock);
2416 *k = *act;
2417 sigdelsetmask(&k->sa.sa_mask,
2418 sigmask(SIGKILL) | sigmask(SIGSTOP));
2419 rm_from_queue(sigmask(sig), &t->signal->shared_pending);
2420 do {
2421 rm_from_queue(sigmask(sig), &t->pending);
2422 recalc_sigpending_tsk(t);
2423 t = next_thread(t);
2424 } while (t != current);
2425 spin_unlock_irq(&current->sighand->siglock);
2426 read_unlock(&tasklist_lock);
2427 			return 0;
2428 		}
2430 *k = *act;
2431 sigdelsetmask(&k->sa.sa_mask,
2432 				sigmask(SIGKILL) | sigmask(SIGSTOP));
2433 	}
2435 spin_unlock_irq(&current->sighand->siglock);
2436 	return 0;
2437 }
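/*
 * Userspace sketch of the POSIX discard rule quoted above: a blocked,
 * pending signal vanishes the moment its action becomes SIG_IGN.
 *
 *	#include <signal.h>
 *
 *	sigset_t mask, pending;
 *	struct sigaction sa = { .sa_handler = SIG_IGN };
 *
 *	sigemptyset(&mask);
 *	sigaddset(&mask, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &mask, NULL);
 *	raise(SIGUSR1);
 *	sigaction(SIGUSR1, &sa, NULL);
 *	sigpending(&pending);
 *	... sigismember(&pending, SIGUSR1) is now 0 ...
 */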
2439 int
2440 do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
2441 {
2442 stack_t oss;
2443 int error;
2445 if (uoss) {
2446 oss.ss_sp = (void __user *) current->sas_ss_sp;
2447 oss.ss_size = current->sas_ss_size;
2448 		oss.ss_flags = sas_ss_flags(sp);
2449 	}
2451 if (uss) {
2452 void __user *ss_sp;
2453 size_t ss_size;
2454 int ss_flags;
2456 error = -EFAULT;
2457 if (!access_ok(VERIFY_READ, uss, sizeof(*uss))
2458 || __get_user(ss_sp, &uss->ss_sp)
2459 || __get_user(ss_flags, &uss->ss_flags)
2460 || __get_user(ss_size, &uss->ss_size))
2461 goto out;
2463 error = -EPERM;
2464 if (on_sig_stack(sp))
2465 goto out;
2467 error = -EINVAL;
2468 		/*
2469 		 *
2470 		 * Note - this code used to test ss_flags incorrectly:
2471 		 * old code may have been written using ss_flags == 0
2472 		 * to mean ss_flags == SS_ONSTACK (as this was the only
2473 		 * way that worked), so this fix preserves that older
2474 		 * mechanism.
2475 		 */
2476 if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
2477 goto out;
2479 if (ss_flags == SS_DISABLE) {
2480 ss_size = 0;
2481 ss_sp = NULL;
2482 } else {
2483 error = -ENOMEM;
2484 if (ss_size < MINSIGSTKSZ)
2485 				goto out;
2486 		}
2488 current->sas_ss_sp = (unsigned long) ss_sp;
2489 		current->sas_ss_size = ss_size;
2490 	}
2492 if (uoss) {
2493 error = -EFAULT;
2494 if (copy_to_user(uoss, &oss, sizeof(oss)))
2495 			goto out;
2496 	}
2498 error = 0;
2499 out:
2500 	return error;
2501 }
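/*
 * Userspace sketch: install an alternate stack so a SIGSEGV from a
 * stack overflow can still run its handler; pairs with SA_ONSTACK.
 * The segv_handler name is hypothetical.
 *
 *	#include <signal.h>
 *	#include <stdlib.h>
 *
 *	stack_t ss;
 *	struct sigaction sa;
 *
 *	ss.ss_sp = malloc(SIGSTKSZ);
 *	ss.ss_size = SIGSTKSZ;
 *	ss.ss_flags = 0;
 *	sigaltstack(&ss, NULL);
 *
 *	sa.sa_handler = segv_handler;
 *	sa.sa_flags = SA_ONSTACK;
 *	sigemptyset(&sa.sa_mask);
 *	sigaction(SIGSEGV, &sa, NULL);
 */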
2503 #ifdef __ARCH_WANT_SYS_SIGPENDING
2505 asmlinkage long
2506 sys_sigpending(old_sigset_t __user *set)
2507 {
2508 	return do_sigpending(set, sizeof(*set));
2509 }
2511 #endif
2513 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
2514 /* Some platforms have their own version with special arguments;
2515    others support only sys_rt_sigprocmask. */
2517 asmlinkage long
2518 sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset)
2519 {
2520 int error;
2521 old_sigset_t old_set, new_set;
2523 if (set) {
2524 error = -EFAULT;
2525 if (copy_from_user(&new_set, set, sizeof(*set)))
2526 goto out;
2527 new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));
2529 spin_lock_irq(&current->sighand->siglock);
2530 old_set = current->blocked.sig[0];
2532 error = 0;
2533 switch (how) {
2534 default:
2535 error = -EINVAL;
2536 break;
2537 case SIG_BLOCK:
2538 sigaddsetmask(&current->blocked, new_set);
2539 break;
2540 case SIG_UNBLOCK:
2541 sigdelsetmask(&current->blocked, new_set);
2542 break;
2543 case SIG_SETMASK:
2544 current->blocked.sig[0] = new_set;
2545 			break;
2546 		}
2548 recalc_sigpending();
2549 spin_unlock_irq(&current->sighand->siglock);
2550 if (error)
2551 goto out;
2552 if (oset)
2553 goto set_old;
2554 } else if (oset) {
2555 old_set = current->blocked.sig[0];
2556 set_old:
2557 error = -EFAULT;
2558 if (copy_to_user(oset, &old_set, sizeof(*oset)))
2559 			goto out;
2560 	}
2561 error = 0;
2562 out:
2563 	return error;
2564 }
2565 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
2567 #ifdef __ARCH_WANT_SYS_RT_SIGACTION
2568 asmlinkage long
2569 sys_rt_sigaction(int sig,
2570 const struct sigaction __user *act,
2571 struct sigaction __user *oact,
2572 		size_t sigsetsize)
2573 {
2574 struct k_sigaction new_sa, old_sa;
2575 int ret = -EINVAL;
2577 /* XXX: Don't preclude handling different sized sigset_t's. */
2578 if (sigsetsize != sizeof(sigset_t))
2579 goto out;
2581 if (act) {
2582 if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
2583 			return -EFAULT;
2584 	}
2586 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
2588 if (!ret && oact) {
2589 if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
2590 			return -EFAULT;
2591 	}
2592 out:
2593 	return ret;
2594 }
2595 #endif /* __ARCH_WANT_SYS_RT_SIGACTION */
2597 #ifdef __ARCH_WANT_SYS_SGETMASK
2599 /*
2600  * For backwards compatibility. Functionality superseded by sigprocmask.
2601  */
2602 asmlinkage long
2603 sys_sgetmask(void)
2604 {
2605 	/* SMP safe */
2606 	return current->blocked.sig[0];
2607 }
2609 asmlinkage long
2610 sys_ssetmask(int newmask)
2611 {
2612 int old;
2614 spin_lock_irq(&current->sighand->siglock);
2615 old = current->blocked.sig[0];
2617 siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
2618 sigmask(SIGSTOP)));
2619 recalc_sigpending();
2620 spin_unlock_irq(&current->sighand->siglock);
2622 	return old;
2623 }
2624 #endif /* __ARCH_WANT_SYS_SGETMASK */
2626 #ifdef __ARCH_WANT_SYS_SIGNAL
2627 /*
2628  * For backwards compatibility. Functionality superseded by sigaction.
2629  */
2630 asmlinkage unsigned long
2631 sys_signal(int sig, __sighandler_t handler)
2632 {
2633 struct k_sigaction new_sa, old_sa;
2634 int ret;
2636 new_sa.sa.sa_handler = handler;
2637 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
2639 ret = do_sigaction(sig, &new_sa, &old_sa);
2641 	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
2642 }
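/*
 * Userspace sketch of the SA_ONESHOT | SA_NOMASK semantics above:
 * under these historical SysV rules the disposition is reset to
 * SIG_DFL on delivery, so the handler must re-install itself.
 *
 *	#include <signal.h>
 *
 *	static void on_int(int sig)
 *	{
 *		signal(SIGINT, on_int);
 *	}
 *
 *	signal(SIGINT, on_int);
 */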
2643 #endif /* __ARCH_WANT_SYS_SIGNAL */
2645 #ifdef __ARCH_WANT_SYS_PAUSE
2647 asmlinkage long
2648 sys_pause(void)
2649 {
2650 current->state = TASK_INTERRUPTIBLE;
2651 schedule();
2652 	return -ERESTARTNOHAND;
2653 }
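/*
 * Userspace sketch: pause() returns only after a handled signal,
 * always -1 with errno set to EINTR; the -ERESTARTNOHAND above keeps
 * the syscall from being silently restarted past the handler.
 *
 *	#include <unistd.h>
 *	#include <errno.h>
 *
 *	if (pause() < 0 && errno == EINTR)
 *		... a handler ran ...
 */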
2655 #endif
2657 void __init signals_init(void)
2658 {
2659 sigqueue_cachep =
2660 kmem_cache_create("sigqueue",
2661 sizeof(struct sigqueue),
2662 __alignof__(struct sigqueue),
2663 				  SLAB_PANIC, NULL, NULL);
2664 }