/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 */
#define __KERNEL_SYSCALLS__

#include <linux/config.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/unistd.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/siginfo.h>
/*
 * SLAB caches for signal bits.
 */

static kmem_cache_t *sigqueue_cachep;

atomic_t nr_queued_signals;
int max_queued_signals = 1024;
/*********************************************************

   POSIX thread group signal behavior:

----------------------------------------------------------
|                    |  userspace       |  kernel        |
----------------------------------------------------------
|  SIGHUP            |  load-balance    |  kill-all      |
|  SIGINT            |  load-balance    |  kill-all      |
|  SIGQUIT           |  load-balance    |  kill-all+core |
|  SIGILL            |  specific        |  kill-all+core |
|  SIGTRAP           |  specific        |  kill-all+core |
|  SIGABRT/SIGIOT    |  specific        |  kill-all+core |
|  SIGBUS            |  specific        |  kill-all+core |
|  SIGFPE            |  specific        |  kill-all+core |
|  SIGKILL           |  n/a             |  kill-all      |
|  SIGUSR1           |  load-balance    |  kill-all      |
|  SIGSEGV           |  specific        |  kill-all+core |
|  SIGUSR2           |  load-balance    |  kill-all      |
|  SIGPIPE           |  specific        |  kill-all      |
|  SIGALRM           |  load-balance    |  kill-all      |
|  SIGTERM           |  load-balance    |  kill-all      |
|  SIGCHLD           |  load-balance    |  ignore        |
|  SIGCONT           |  specific        |  continue-all  |
|  SIGSTOP           |  n/a             |  stop-all      |
|  SIGTSTP           |  load-balance    |  stop-all      |
|  SIGTTIN           |  load-balance    |  stop-all      |
|  SIGTTOU           |  load-balance    |  stop-all      |
|  SIGURG            |  load-balance    |  ignore        |
|  SIGXCPU           |  specific        |  kill-all+core |
|  SIGXFSZ           |  specific        |  kill-all+core |
|  SIGVTALRM         |  load-balance    |  kill-all      |
|  SIGPROF           |  specific        |  kill-all      |
|  SIGPOLL/SIGIO     |  load-balance    |  kill-all      |
|  SIGSYS/SIGUNUSED  |  specific        |  kill-all+core |
|  SIGSTKFLT         |  specific        |  kill-all      |
|  SIGWINCH          |  load-balance    |  ignore        |
|  SIGPWR            |  load-balance    |  kill-all      |
|  SIGRTMIN-SIGRTMAX |  load-balance    |  kill-all      |
----------------------------------------------------------

   non-POSIX signal thread group behavior:

----------------------------------------------------------
|                    |  userspace       |  kernel        |
----------------------------------------------------------
|  SIGEMT            |  specific        |  kill-all+core |
----------------------------------------------------------

*********************************************************/
/* Some systems do not have a SIGSTKFLT and the kernel never
 * generates such signals anyway.
 */
#ifdef SIGSTKFLT
#define M_SIGSTKFLT M(SIGSTKFLT)
#else
#define M_SIGSTKFLT 0
#endif

#ifdef SIGEMT
#define M_SIGEMT M(SIGEMT)
#else
#define M_SIGEMT 0
#endif
#define M(sig) (1UL << (sig))

#define SIG_USER_SPECIFIC_MASK (\
        M(SIGILL)    |  M(SIGTRAP)   |  M(SIGABRT)  |  M(SIGBUS)    | \
        M(SIGFPE)    |  M(SIGSEGV)   |  M(SIGPIPE)  |  M(SIGXFSZ)   | \
        M(SIGPROF)   |  M(SIGSYS)    |  M_SIGSTKFLT |  M(SIGCONT)   | \
        M_SIGEMT )

#define SIG_USER_LOAD_BALANCE_MASK (\
        M(SIGHUP)    |  M(SIGINT)    |  M(SIGQUIT)  |  M(SIGUSR1)   | \
        M(SIGUSR2)   |  M(SIGALRM)   |  M(SIGTERM)  |  M(SIGCHLD)   | \
        M(SIGURG)    |  M(SIGVTALRM) |  M(SIGPOLL)  |  M(SIGWINCH)  | \
        M(SIGPWR)    |  M(SIGTSTP)   |  M(SIGTTIN)  |  M(SIGTTOU)   )

#define SIG_KERNEL_SPECIFIC_MASK (\
        M(SIGCHLD)   |  M(SIGURG)    |  M(SIGWINCH) )

#define SIG_KERNEL_BROADCAST_MASK (\
        M(SIGHUP)    |  M(SIGINT)    |  M(SIGQUIT)  |  M(SIGILL)    | \
        M(SIGTRAP)   |  M(SIGABRT)   |  M(SIGBUS)   |  M(SIGFPE)    | \
        M(SIGKILL)   |  M(SIGUSR1)   |  M(SIGSEGV)  |  M(SIGUSR2)   | \
        M(SIGPIPE)   |  M(SIGALRM)   |  M(SIGTERM)  |  M(SIGXCPU)   | \
        M(SIGXFSZ)   |  M(SIGVTALRM) |  M(SIGPROF)  |  M(SIGPOLL)   | \
        M(SIGSYS)    |  M_SIGSTKFLT  |  M(SIGPWR)   |  M(SIGCONT)   | \
        M(SIGSTOP)   |  M(SIGTSTP)   |  M(SIGTTIN)  |  M(SIGTTOU)   | \
        M_SIGEMT )

#define SIG_KERNEL_ONLY_MASK (\
        M(SIGKILL)   |  M(SIGSTOP) )

#define SIG_KERNEL_COREDUMP_MASK (\
        M(SIGQUIT)   |  M(SIGILL)    |  M(SIGTRAP)  |  M(SIGABRT)   | \
        M(SIGFPE)    |  M(SIGSEGV)   |  M(SIGBUS)   |  M(SIGSYS)    | \
        M(SIGXCPU)   |  M(SIGXFSZ)   |  M_SIGEMT )
#define T(sig, mask) \
        ((1UL << (sig)) & mask)

#define sig_user_specific(sig) \
        (((sig) < SIGRTMIN) && T(sig, SIG_USER_SPECIFIC_MASK))
#define sig_user_load_balance(sig) \
        (((sig) >= SIGRTMIN) || T(sig, SIG_USER_LOAD_BALANCE_MASK))
#define sig_kernel_specific(sig) \
        (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_SPECIFIC_MASK))
#define sig_kernel_broadcast(sig) \
        (((sig) >= SIGRTMIN) || T(sig, SIG_KERNEL_BROADCAST_MASK))
#define sig_kernel_only(sig) \
        (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_ONLY_MASK))
#define sig_kernel_coredump(sig) \
        (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_COREDUMP_MASK))

#define sig_user_defined(t, sig) \
        (((t)->sig->action[(sig)-1].sa.sa_handler != SIG_DFL) && \
         ((t)->sig->action[(sig)-1].sa.sa_handler != SIG_IGN))

#define sig_ignored(t, sig) \
        (((sig) != SIGCHLD) && \
         ((t)->sig->action[(sig)-1].sa.sa_handler == SIG_IGN))
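/*
 * For illustration, the predicates above encode the behavior table at the
 * top of this file.  Given the masks as defined:
 *
 *      sig_kernel_coredump(SIGSEGV)     is non-zero (kill-all+core)
 *      sig_kernel_coredump(SIGCHLD)     is 0 (default is to ignore)
 *      sig_kernel_only(SIGKILL)         is non-zero (unblockable)
 *      sig_user_load_balance(SIGRTMIN)  is non-zero, since every RT signal
 *                                       load-balances in userspace
 */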
static int
__send_sig_info(int sig, struct siginfo *info, struct task_struct *p);

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

void recalc_sigpending_tsk(struct task_struct *t)
{
        if (PENDING(&t->pending, &t->blocked) ||
            PENDING(&t->sig->shared_pending, &t->blocked))
                set_tsk_thread_flag(t, TIF_SIGPENDING);
        else
                clear_tsk_thread_flag(t, TIF_SIGPENDING);
}

void recalc_sigpending(void)
{
        if (PENDING(&current->pending, &current->blocked) ||
            PENDING(&current->sig->shared_pending, &current->blocked))
                set_thread_flag(TIF_SIGPENDING);
        else
                clear_thread_flag(TIF_SIGPENDING);
}
/* Given the mask, find the first available signal that should be serviced. */

static int
next_signal(struct sigpending *pending, sigset_t *mask)
{
        unsigned long i, *s, *m, x;
        int sig = 0;

        s = pending->signal.sig;
        m = mask->sig;
        switch (_NSIG_WORDS) {
        default:
                for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
                        if ((x = *s &~ *m) != 0) {
                                sig = ffz(~x) + i*_NSIG_BPW + 1;
                                break;
                        }
                break;

        case 2: if ((x = s[0] &~ m[0]) != 0)
                        sig = 1;
                else if ((x = s[1] &~ m[1]) != 0)
                        sig = _NSIG_BPW + 1;
                else
                        break;
                sig += ffz(~x);
                break;

        case 1: if ((x = *s &~ *m) != 0)
                        sig = ffz(~x) + 1;
                break;
        }

        return sig;
}
static void flush_sigqueue(struct sigpending *queue)
{
        struct sigqueue *q, *n;

        sigemptyset(&queue->signal);
        q = queue->head;
        queue->head = NULL;
        queue->tail = &queue->head;

        while (q) {
                n = q->next;
                kmem_cache_free(sigqueue_cachep, q);
                atomic_dec(&nr_queued_signals);
                q = n;
        }
}
/*
 * Flush all pending signals for a task.
 */

void
flush_signals(struct task_struct *t)
{
        clear_tsk_thread_flag(t,TIF_SIGPENDING);
        flush_sigqueue(&t->pending);
}
/*
 * This function expects the tasklist_lock write-locked.
 */
void __exit_sighand(struct task_struct *tsk)
{
        struct signal_struct * sig = tsk->sig;

        if (!sig)
                BUG();
        if (!atomic_read(&sig->count))
                BUG();
        spin_lock(&sig->siglock);
        if (atomic_dec_and_test(&sig->count)) {
                if (tsk == sig->curr_target)
                        sig->curr_target = next_thread(tsk);
                tsk->sig = NULL;
                spin_unlock(&sig->siglock);
                flush_sigqueue(&sig->shared_pending);
                kmem_cache_free(sigact_cachep, sig);
        } else {
                /*
                 * If there is any task waiting for the group exit
                 * then notify it:
                 */
                if (sig->group_exit_task && atomic_read(&sig->count) <= 2) {
                        wake_up_process(sig->group_exit_task);
                        sig->group_exit_task = NULL;
                }
                if (tsk == sig->curr_target)
                        sig->curr_target = next_thread(tsk);
                tsk->sig = NULL;
                spin_unlock(&sig->siglock);
        }
        clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
        flush_sigqueue(&tsk->pending);
}
void exit_sighand(struct task_struct *tsk)
{
        write_lock_irq(&tasklist_lock);
        __exit_sighand(tsk);
        write_unlock_irq(&tasklist_lock);
}
/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t)
{
        int i;
        struct k_sigaction *ka = &t->sig->action[0];
        for (i = _NSIG ; i != 0 ; i--) {
                if (ka->sa.sa_handler != SIG_IGN)
                        ka->sa.sa_handler = SIG_DFL;
                ka->sa.sa_flags = 0;
                sigemptyset(&ka->sa.sa_mask);
                ka++;
        }
}
/*
 * sig_exit - cause the current task to exit due to a signal.
 */

void
sig_exit(int sig, int exit_code, struct siginfo *info)
{
        sigaddset(&current->pending.signal, sig);
        recalc_sigpending();
        current->flags |= PF_SIGNALED;

        if (current->sig->group_exit)
                exit_code = current->sig->group_exit_code;

        do_exit(exit_code);
        /* NOTREACHED */
}
/* Notify the system that a driver wants to block all signals for this
 * process, and wants to be notified if any signals at all were to be
 * sent/acted upon.  If the notifier routine returns non-zero, then the
 * signal will be acted upon after all.  If the notifier routine returns 0,
 * then the signal will be blocked.  Only one block per process is
 * allowed.  priv is a pointer to private data that the notifier routine
 * can use to determine if the signal should be blocked or not. */

void
block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
        unsigned long flags;

        spin_lock_irqsave(&current->sig->siglock, flags);
        current->notifier_mask = mask;
        current->notifier_data = priv;
        current->notifier = notifier;
        spin_unlock_irqrestore(&current->sig->siglock, flags);
}
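/*
 * Usage sketch (illustrative only; my_notifier, my_dev and blockable_set
 * are hypothetical names, not kernel API): a driver that must not be
 * interrupted while it holds a hardware lock could do:
 *
 *      static int my_notifier(void *priv)
 *      {
 *              struct my_dev *dev = priv;
 *              return dev->hw_lock_held ? 0 : 1;  (0 means block the signal)
 *      }
 *
 *      block_all_signals(my_notifier, dev, &blockable_set);
 *      ... critical section ...
 *      unblock_all_signals();
 */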
/* Notify the system that blocking has ended. */

void
unblock_all_signals(void)
{
        unsigned long flags;

        spin_lock_irqsave(&current->sig->siglock, flags);
        current->notifier = NULL;
        current->notifier_data = NULL;
        recalc_sigpending();
        spin_unlock_irqrestore(&current->sig->siglock, flags);
}
static inline int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
        if (sigismember(&list->signal, sig)) {
                /* Collect the siginfo appropriate to this signal. */
                struct sigqueue *q, **pp;
                pp = &list->head;
                while ((q = *pp) != NULL) {
                        if (q->info.si_signo == sig)
                                goto found_it;
                        pp = &q->next;
                }

                /* Ok, it wasn't in the queue.  This must be
                   a fast-pathed signal or we must have been
                   out of queue space.  So zero out the info. */
                sigdelset(&list->signal, sig);
                info->si_signo = sig;
                info->si_errno = 0;
                info->si_code = 0;
                info->si_pid = 0;
                info->si_uid = 0;
                return 1;

found_it:
                if ((*pp = q->next) == NULL)
                        list->tail = pp;

                /* Copy the sigqueue information and free the queue entry */
                copy_siginfo(info, &q->info);
                kmem_cache_free(sigqueue_cachep,q);
                atomic_dec(&nr_queued_signals);

                /* Non-RT signals can exist multiple times.. */
                if (sig >= SIGRTMIN) {
                        while ((q = *pp) != NULL) {
                                if (q->info.si_signo == sig)
                                        goto found_another;
                                pp = &q->next;
                        }
                }

                sigdelset(&list->signal, sig);
found_another:
                return 1;
        }
        return 0;
}
static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
                            siginfo_t *info)
{
        int sig = 0;

        sig = next_signal(pending, mask);
        if (sig) {
                if (current->notifier) {
                        if (sigismember(current->notifier_mask, sig)) {
                                if (!(current->notifier)(current->notifier_data)) {
                                        clear_thread_flag(TIF_SIGPENDING);
                                        return 0;
                                }
                        }
                }

                if (!collect_signal(sig, pending, info))
                        sig = 0;

                /* XXX: Once POSIX.1b timers are in, if si_code == SI_TIMER,
                   we need to xchg out the timer overrun values.  */
        }
        recalc_sigpending();

        return sig;
}
/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(sigset_t *mask, siginfo_t *info)
{
        /*
         * Here we handle shared pending signals. To implement the full
         * semantics we need to unqueue and resend them. It will likely
         * get into our own pending queue.
         */
        if (current->sig->shared_pending.head) {
                int signr = __dequeue_signal(&current->sig->shared_pending, mask, info);
                if (signr)
                        __send_sig_info(signr, info, current);
        }
        return __dequeue_signal(&current->pending, mask, info);
}
static int rm_from_queue(int sig, struct sigpending *s)
{
        struct sigqueue *q, **pp;

        if (!sigismember(&s->signal, sig))
                return 0;

        sigdelset(&s->signal, sig);

        pp = &s->head;

        while ((q = *pp) != NULL) {
                if (q->info.si_signo == sig) {
                        if ((*pp = q->next) == NULL)
                                s->tail = pp;
                        kmem_cache_free(sigqueue_cachep,q);
                        atomic_dec(&nr_queued_signals);
                        continue;
                }
                pp = &q->next;
        }
        return 1;
}
/*
 * Remove signal sig from t->pending.
 * Returns 1 if sig was found.
 *
 * All callers must be holding the siglock.
 */
static int rm_sig_from_queue(int sig, struct task_struct *t)
{
        return rm_from_queue(sig, &t->pending);
}
/*
 * Bad permissions for sending the signal
 */
static inline int bad_signal(int sig, struct siginfo *info, struct task_struct *t)
{
        return (!info || ((unsigned long)info != 1 &&
                        (unsigned long)info != 2 && SI_FROMUSER(info)))
            && ((sig != SIGCONT) || (current->session != t->session))
            && (current->euid ^ t->suid) && (current->euid ^ t->uid)
            && (current->uid ^ t->suid) && (current->uid ^ t->uid)
            && !capable(CAP_KILL);
}
/*
 * Signal type:
 *    < 0 : global action (kill - spread to all non-blocked threads)
 *    = 0 : ignored
 *    > 0 : wake up.
 */
static int signal_type(int sig, struct signal_struct *signals)
{
        unsigned long handler;

        if (!signals)
                return 0;

        handler = (unsigned long) signals->action[sig-1].sa.sa_handler;
        if (handler > 1)
                return 1;

        /* "Ignore" handler.. Illogical, but that has an implicit handler for SIGCHLD */
        if (handler == 1)
                return sig == SIGCHLD;

        /* Default handler. Normally lethal, but.. */
        switch (sig) {

        /* Ignored */
        case SIGCONT: case SIGWINCH:
        case SIGCHLD: case SIGURG:
                return 0;

        /* Implicit behaviour */
        case SIGTSTP: case SIGTTIN: case SIGTTOU:
                return 1;

        /* Implicit actions (kill or do special stuff) */
        default:
                return -1;
        }
}
/*
 * Determine whether a signal should be posted or not.
 *
 * Signals with SIG_IGN can be ignored, except for the
 * special case of a SIGCHLD.
 *
 * Some signals with SIG_DFL default to a non-action.
 */
static int ignored_signal(int sig, struct task_struct *t)
{
        /* Don't ignore traced or blocked signals */
        if ((t->ptrace & PT_PTRACED) || sigismember(&t->blocked, sig))
                return 0;

        return signal_type(sig, t->sig) == 0;
}
/*
 * Handle TASK_STOPPED cases etc implicit behaviour
 * of certain magical signals.
 *
 * SIGKILL gets spread out to every thread.
 */
static void handle_stop_signal(int sig, struct task_struct *t)
{
        switch (sig) {
        case SIGKILL: case SIGCONT:
                /* Wake up the process if stopped.  */
                if (t->state == TASK_STOPPED)
                        wake_up_process(t);
                t->exit_code = 0;
                rm_sig_from_queue(SIGSTOP, t);
                rm_sig_from_queue(SIGTSTP, t);
                rm_sig_from_queue(SIGTTOU, t);
                rm_sig_from_queue(SIGTTIN, t);
                break;

        case SIGSTOP: case SIGTSTP:
        case SIGTTIN: case SIGTTOU:
                /* If we're stopping again, cancel SIGCONT */
                rm_sig_from_queue(SIGCONT, t);
                break;
        }
}
static int send_signal(int sig, struct siginfo *info, struct sigpending *signals)
{
        struct sigqueue * q = NULL;

        /*
         * fast-pathed signals for kernel-internal things like SIGSTOP
         * or SIGKILL.
         */
        if ((unsigned long)info == 2)
                goto out_set;

        /* Real-time signals must be queued if sent by sigqueue, or
           some other real-time mechanism.  It is implementation
           defined whether kill() does so.  We attempt to do so, on
           the principle of least surprise, but since kill is not
           allowed to fail with EAGAIN when low on memory we just
           make sure at least one signal gets delivered and don't
           pass on the info struct.  */

        if (atomic_read(&nr_queued_signals) < max_queued_signals)
                q = kmem_cache_alloc(sigqueue_cachep, GFP_ATOMIC);

        if (q) {
                atomic_inc(&nr_queued_signals);
                q->next = NULL;
                *signals->tail = q;
                signals->tail = &q->next;
                switch ((unsigned long) info) {
                case 0:
                        q->info.si_signo = sig;
                        q->info.si_errno = 0;
                        q->info.si_code = SI_USER;
                        q->info.si_pid = current->pid;
                        q->info.si_uid = current->uid;
                        break;
                case 1:
                        q->info.si_signo = sig;
                        q->info.si_errno = 0;
                        q->info.si_code = SI_KERNEL;
                        q->info.si_pid = 0;
                        q->info.si_uid = 0;
                        break;
                default:
                        copy_siginfo(&q->info, info);
                        break;
                }
        } else if (sig >= SIGRTMIN && info && (unsigned long)info != 1
                   && info->si_code != SI_USER) {
                /*
                 * Queue overflow, abort.  We may abort if the signal was rt
                 * and sent by user using something other than kill().
                 */
                return -EAGAIN;
        }

out_set:
        sigaddset(&signals->signal, sig);
        return 0;
}
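/*
 * For reference: the magic (unsigned long)info values 0, 1 and 2 above are
 * the conventions used by the wrappers later in this file -- 0 means "sent
 * from user space" (SI_USER), 1 means "sent by the kernel" (SI_KERNEL), and
 * 2 requests the fast path that skips queue allocation entirely, as used
 * for kernel-internal signals such as SIGSTOP and SIGKILL.
 */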
/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
inline void signal_wake_up(struct task_struct *t)
{
        set_tsk_thread_flag(t,TIF_SIGPENDING);

        /*
         * If the task is running on a different CPU
         * force a reschedule on the other CPU to make
         * it notice the new signal quickly.
         *
         * The code below is a tad loose and might occasionally
         * kick the wrong CPU if we catch the process in the
         * process of changing - but no harm is done by that
         * other than doing an extra (lightweight) IPI interrupt.
         */
        if (t->state == TASK_RUNNING)
                kick_if_running(t);
        if (t->state & TASK_INTERRUPTIBLE) {
                wake_up_process(t);
                return;
        }
}
static int deliver_signal(int sig, struct siginfo *info, struct task_struct *t)
{
        int retval = send_signal(sig, info, &t->pending);

        if (!retval && !sigismember(&t->blocked, sig))
                signal_wake_up(t);

        return retval;
}
static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t, int shared)
{
        int ret;

        if (!irqs_disabled())
                BUG();
#if CONFIG_SMP
        if (!spin_is_locked(&t->sig->siglock))
                BUG();
#endif
        ret = -EINVAL;
        if (sig < 0 || sig > _NSIG)
                goto out;
        /* The somewhat baroque permissions check... */
        ret = -EPERM;
        if (bad_signal(sig, info, t))
                goto out;
        ret = security_ops->task_kill(t, info, sig);
        if (ret)
                goto out;

        /* The null signal is a permissions and process existence probe.
           No signal is actually delivered.  Same goes for zombies. */
        ret = 0;
        if (!sig || !t->sig)
                goto out;

        handle_stop_signal(sig, t);

        /* Optimize away the signal, if it's a signal that can be
           handled immediately (ie non-blocked and untraced) and
           that is ignored (either explicitly or by default).  */

        if (ignored_signal(sig, t))
                goto out;

#define LEGACY_QUEUE(sigptr, sig) \
        (((sig) < SIGRTMIN) && sigismember(&(sigptr)->signal, (sig)))

        if (!shared) {
                /* Support queueing exactly one non-rt signal, so that we
                   can get more detailed information about the cause of
                   the signal. */
                if (LEGACY_QUEUE(&t->pending, sig))
                        goto out;

                ret = deliver_signal(sig, info, t);
        } else {
                if (LEGACY_QUEUE(&t->sig->shared_pending, sig))
                        goto out;
                ret = send_signal(sig, info, &t->sig->shared_pending);
        }
out:
        return ret;
}
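/*
 * Worked example of LEGACY_QUEUE (illustrative): if a SIGCHLD is already
 * pending, a second SIGCHLD is silently coalesced here -- non-RT signals
 * carry at most one queued siginfo.  Two SIGRTMIN+1 signals, by contrast,
 * both pass the check and are queued individually by send_signal().
 */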
/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 */

int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
        unsigned long int flags;
        int ret;

        spin_lock_irqsave(&t->sig->siglock, flags);
        if (t->sig->action[sig-1].sa.sa_handler == SIG_IGN)
                t->sig->action[sig-1].sa.sa_handler = SIG_DFL;
        sigdelset(&t->blocked, sig);
        recalc_sigpending_tsk(t);
        ret = __send_sig_info(sig, info, t);
        spin_unlock_irqrestore(&t->sig->siglock, flags);

        return ret;
}
static int
specific_force_sig_info(int sig, struct task_struct *t)
{
        if (!t->sig)
                return -ESRCH;

        if (t->sig->action[sig-1].sa.sa_handler == SIG_IGN)
                t->sig->action[sig-1].sa.sa_handler = SIG_DFL;
        sigdelset(&t->blocked, sig);
        recalc_sigpending_tsk(t);

        return specific_send_sig_info(sig, (void *)2, t, 0);
}
#define can_take_signal(p, sig) \
        (((unsigned long) p->sig->action[sig-1].sa.sa_handler > 1) && \
        !sigismember(&p->blocked, sig) && (task_curr(p) || !signal_pending(p)))

static inline
int load_balance_thread_group(struct task_struct *p, int sig,
                              struct siginfo *info)
{
        struct task_struct *tmp;
        int ret;

        /*
         * if the specified thread is not blocking this signal
         * then deliver it.
         */
        if (can_take_signal(p, sig))
                return specific_send_sig_info(sig, info, p, 0);

        /*
         * Otherwise try to find a suitable thread.
         * If no such thread is found then deliver to
         * the original thread.
         */

        tmp = p->sig->curr_target;

        if (!tmp || tmp->tgid != p->tgid)
                /* restart balancing at this thread */
                p->sig->curr_target = p;

        else for (;;) {
                if (thread_group_empty(p))
                        BUG();
                if (!tmp || tmp->tgid != p->tgid)
                        BUG();

                /*
                 * Do not send signals that are ignored or blocked,
                 * or to not-running threads that are overworked:
                 */
                if (!can_take_signal(tmp, sig)) {
                        tmp = next_thread(tmp);
                        p->sig->curr_target = tmp;
                        if (tmp == p)
                                break;
                        continue;
                }
                ret = specific_send_sig_info(sig, info, tmp, 0);
                return ret;
        }
        /*
         * No suitable thread was found - put the signal
         * into the shared-pending queue.
         */
        return specific_send_sig_info(sig, info, p, 1);
}
int __broadcast_thread_group(struct task_struct *p, int sig)
{
        struct task_struct *tmp;
        struct list_head *l;
        struct pid *pid;
        int err = 0;

        for_each_task_pid(p->tgid, PIDTYPE_TGID, tmp, l, pid)
                err = specific_force_sig_info(sig, tmp);

        return err;
}

struct task_struct * find_unblocked_thread(struct task_struct *p, int signr)
{
        struct task_struct *tmp;
        struct list_head *l;
        struct pid *pid;

        for_each_task_pid(p->tgid, PIDTYPE_TGID, tmp, l, pid)
                if (!sigismember(&tmp->blocked, signr))
                        return tmp;
        return NULL;
}
static int
__send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
        struct task_struct *t;
        int ret = 0;

#if CONFIG_SMP
        if (!spin_is_locked(&p->sig->siglock))
                BUG();
#endif
        /* not a thread group - normal signal behavior */
        if (thread_group_empty(p) || !sig)
                goto out_send;

        if (sig_user_defined(p, sig)) {
                if (sig_user_specific(sig))
                        goto out_send;
                if (sig_user_load_balance(sig)) {
                        ret = load_balance_thread_group(p, sig, info);
                        goto out_unlock;
                }
                /* must not happen */
                BUG();
        }
        /* optimize away ignored signals: */
        if (sig_ignored(p, sig))
                goto out_unlock;

        if (sig_kernel_specific(sig))
                goto out_send;

        /* Does any of the threads unblock the signal? */
        t = find_unblocked_thread(p, sig);
        if (!t) {
                ret = specific_send_sig_info(sig, info, p, 1);
                goto out_unlock;
        }
        if (sigismember(&t->real_blocked,sig)) {
                ret = specific_send_sig_info(sig, info, t, 0);
                goto out_unlock;
        }
        if (sig_kernel_broadcast(sig) || sig_kernel_coredump(sig)) {
                ret = __broadcast_thread_group(p, sig);
                goto out_unlock;
        }

        /* must not happen */
        BUG();
out_send:
        ret = specific_send_sig_info(sig, info, p, 0);
out_unlock:
        return ret;
}
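/*
 * Summary of the routing above (illustrative restatement of the behavior
 * table): a signal with a user-defined handler is either delivered to the
 * named thread (sig_user_specific) or load-balanced across the group; a
 * default-action signal is dropped if ignored, delivered specifically
 * (e.g. SIGCHLD), queued on the shared-pending list when every thread
 * blocks it, handed directly to a thread sleeping in sigtimedwait()
 * (real_blocked), or broadcast for kill-all/coredump semantics.
 */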
int
send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&p->sig->siglock, flags);
        ret = __send_sig_info(sig, info, p);
        spin_unlock_irqrestore(&p->sig->siglock, flags);

        return ret;
}
/*
 * kill_pg_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 */

int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
{
        struct task_struct *p;
        struct list_head *l;
        struct pid *pid;
        int err, retval = -ESRCH;

        if (pgrp <= 0)
                return -EINVAL;

        for_each_task_pid(pgrp, PIDTYPE_PGID, p, l, pid) {
                err = send_sig_info(sig, info, p);
                if (retval)
                        retval = err;
        }
        return retval;
}

int
kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
{
        int retval;

        read_lock(&tasklist_lock);
        retval = __kill_pg_info(sig, info, pgrp);
        read_unlock(&tasklist_lock);

        return retval;
}
/*
 * kill_sl_info() sends a signal to the session leader: this is used
 * to send SIGHUP to the controlling process of a terminal when
 * the connection is lost.
 */

int
kill_sl_info(int sig, struct siginfo *info, pid_t sid)
{
        int err, retval = -EINVAL;
        struct pid *pid;
        struct list_head *l;
        struct task_struct *p;

        if (sid <= 0)
                goto out;

        retval = -ESRCH;
        read_lock(&tasklist_lock);
        for_each_task_pid(sid, PIDTYPE_SID, p, l, pid) {
                if (!p->leader)
                        continue;
                err = send_sig_info(sig, info, p);
                if (retval)
                        retval = err;
        }
        read_unlock(&tasklist_lock);
out:
        return retval;
}
int
kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
        int error;
        struct task_struct *p;

        read_lock(&tasklist_lock);
        p = find_task_by_pid(pid);
        error = -ESRCH;
        if (p)
                error = send_sig_info(sig, info, p);
        read_unlock(&tasklist_lock);
        return error;
}
/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct siginfo *info, int pid)
{
        if (!pid) {
                return kill_pg_info(sig, info, current->pgrp);
        } else if (pid == -1) {
                int retval = 0, count = 0;
                struct task_struct * p;

                read_lock(&tasklist_lock);
                for_each_process(p) {
                        if (p->pid > 1 && p != current) {
                                int err = send_sig_info(sig, info, p);
                                ++count;
                                if (err != -EPERM)
                                        retval = err;
                        }
                }
                read_unlock(&tasklist_lock);
                return count ? retval : -ESRCH;
        } else if (pid < 0) {
                return kill_pg_info(sig, info, -pid);
        } else {
                return kill_proc_info(sig, info, pid);
        }
}
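/*
 * For illustration, the pid argument follows kill(2) conventions:
 *
 *      kill(0, sig)    - signal every process in the caller's process group
 *      kill(-1, sig)   - signal everything except init and the caller
 *      kill(-pg, sig)  - signal process group pg
 *      kill(pid, sig)  - signal the single process pid
 */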
/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int
send_sig(int sig, struct task_struct *p, int priv)
{
        return send_sig_info(sig, (void*)(long)(priv != 0), p);
}

void
force_sig(int sig, struct task_struct *p)
{
        force_sig_info(sig, (void*)1L, p);
}

int
kill_pg(pid_t pgrp, int sig, int priv)
{
        return kill_pg_info(sig, (void *)(long)(priv != 0), pgrp);
}

int
kill_sl(pid_t sess, int sig, int priv)
{
        return kill_sl_info(sig, (void *)(long)(priv != 0), sess);
}

int
kill_proc(pid_t pid, int sig, int priv)
{
        return kill_proc_info(sig, (void *)(long)(priv != 0), pid);
}
/*
 * Joy. Or not. Pthread wants us to wake up every thread
 * in our parent group.
 */
static inline void __wake_up_parent(struct task_struct *p)
{
        struct task_struct *parent = p->parent, *tsk = parent;

        /*
         * Fortunately this is not necessary for thread groups:
         */
        if (p->tgid == tsk->tgid) {
                wake_up_interruptible(&tsk->wait_chldexit);
                return;
        }

        do {
                wake_up_interruptible(&tsk->wait_chldexit);
                tsk = next_thread(tsk);
                if (tsk->sig != parent->sig)
                        BUG();
        } while (tsk != parent);
}
/*
 * Let a parent know about a status change of a child.
 */

void do_notify_parent(struct task_struct *tsk, int sig)
{
        struct siginfo info;
        unsigned long flags;
        int why, status;

        if (sig == -1)
                BUG();

        info.si_signo = sig;
        info.si_errno = 0;
        info.si_pid = tsk->pid;
        info.si_uid = tsk->uid;

        /* FIXME: find out whether or not this is supposed to be c*time. */
        info.si_utime = tsk->utime;
        info.si_stime = tsk->stime;

        status = tsk->exit_code & 0x7f;
        why = SI_KERNEL;        /* shouldn't happen */
        switch (tsk->state) {
        case TASK_STOPPED:
                /* FIXME -- can we deduce CLD_TRAPPED or CLD_CONTINUED? */
                if (tsk->ptrace & PT_PTRACED)
                        why = CLD_TRAPPED;
                else
                        why = CLD_STOPPED;
                break;

        default:
                if (tsk->exit_code & 0x80)
                        why = CLD_DUMPED;
                else if (tsk->exit_code & 0x7f)
                        why = CLD_KILLED;
                else {
                        why = CLD_EXITED;
                        status = tsk->exit_code >> 8;
                }
                break;
        }
        info.si_code = why;
        info.si_status = status;

        spin_lock_irqsave(&tsk->parent->sig->siglock, flags);
        __send_sig_info(sig, &info, tsk->parent);
        __wake_up_parent(tsk);
        spin_unlock_irqrestore(&tsk->parent->sig->siglock, flags);
}
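/*
 * Exit-code decoding (illustrative): a child that exited normally has
 * exit_code == (status << 8), so "status = tsk->exit_code >> 8" recovers
 * the value passed to exit().  A child killed by signal n has n in the
 * low 7 bits, with bit 0x80 set when a core dump was written -- hence the
 * CLD_KILLED/CLD_DUMPED distinction above.
 */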
/*
 * We need the tasklist lock because it's the only
 * thing that protects our "parent" pointer.
 *
 * exit.c calls "do_notify_parent()" directly, because
 * it already has the tasklist lock.
 */
void
notify_parent(struct task_struct *tsk, int sig)
{
        if (sig != -1) {
                read_lock(&tasklist_lock);
                do_notify_parent(tsk, sig);
                read_unlock(&tasklist_lock);
        }
}
#ifndef HAVE_ARCH_GET_SIGNAL_TO_DELIVER

int get_signal_to_deliver(siginfo_t *info, struct pt_regs *regs)
{
        sigset_t *mask = &current->blocked;

        for (;;) {
                unsigned long signr = 0;
                struct k_sigaction *ka;

                spin_lock_irq(&current->sig->siglock);
                signr = dequeue_signal(mask, info);
                spin_unlock_irq(&current->sig->siglock);

                if (!signr)
                        break;

                if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
                        /* Let the debugger run.  */
                        current->exit_code = signr;
                        set_current_state(TASK_STOPPED);
                        notify_parent(current, SIGCHLD);
                        schedule();

                        /* We're back.  Did the debugger cancel the sig?  */
                        signr = current->exit_code;
                        if (signr == 0)
                                continue;
                        current->exit_code = 0;

                        /* The debugger continued.  Ignore SIGSTOP.  */
                        if (signr == SIGSTOP)
                                continue;

                        /* Update the siginfo structure.  Is this good?  */
                        if (signr != info->si_signo) {
                                info->si_signo = signr;
                                info->si_errno = 0;
                                info->si_code = SI_USER;
                                info->si_pid = current->parent->pid;
                                info->si_uid = current->parent->uid;
                        }

                        /* If the (new) signal is now blocked, requeue it.  */
                        if (sigismember(&current->blocked, signr)) {
                                send_sig_info(signr, info, current);
                                continue;
                        }
                }

                ka = &current->sig->action[signr-1];
                if (ka->sa.sa_handler == SIG_IGN) {
                        if (signr != SIGCHLD)
                                continue;
                        /* Check for SIGCHLD: it's special.  */
                        while (sys_wait4(-1, NULL, WNOHANG, NULL) > 0)
                                /* nothing */;
                        continue;
                }

                if (ka->sa.sa_handler == SIG_DFL) {
                        int exit_code = signr;

                        /* Init gets no signals it doesn't want.  */
                        if (current->pid == 1)
                                continue;

                        switch (signr) {
                        case SIGCONT: case SIGCHLD: case SIGWINCH: case SIGURG:
                                continue;

                        case SIGTSTP: case SIGTTIN: case SIGTTOU:
                                if (is_orphaned_pgrp(current->pgrp))
                                        continue;
                                /* FALLTHRU */

                        case SIGSTOP: {
                                struct signal_struct *sig;
                                set_current_state(TASK_STOPPED);
                                current->exit_code = signr;
                                sig = current->parent->sig;
                                if (sig && !(sig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
                                        notify_parent(current, SIGCHLD);
                                schedule();
                                continue;
                        }

                        case SIGQUIT: case SIGILL: case SIGTRAP:
                        case SIGABRT: case SIGFPE: case SIGSEGV:
                        case SIGBUS: case SIGSYS: case SIGXCPU: case SIGXFSZ:
                                if (do_coredump(signr, regs))
                                        exit_code |= 0x80;
                                /* FALLTHRU */

                        default:
                                sig_exit(signr, exit_code, info);
                                /* NOTREACHED */
                        }
                }
                return signr;
        }
        return 0;
}

#endif
EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL_GPL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(force_sig_info);
EXPORT_SYMBOL(kill_pg);
EXPORT_SYMBOL(kill_pg_info);
EXPORT_SYMBOL(kill_proc);
EXPORT_SYMBOL(kill_proc_info);
EXPORT_SYMBOL(kill_sl);
EXPORT_SYMBOL(kill_sl_info);
EXPORT_SYMBOL(notify_parent);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
EXPORT_SYMBOL(block_all_signals);
EXPORT_SYMBOL(unblock_all_signals);
/*
 * System call entry points.
 */

/*
 * We don't need to get the kernel lock - this is all local to this
 * particular thread.. (and that's good, because this is _heavily_
 * used by various programs)
 */
asmlinkage long
sys_rt_sigprocmask(int how, sigset_t *set, sigset_t *oset, size_t sigsetsize)
{
        int error = -EINVAL;
        sigset_t old_set, new_set;

        /* XXX: Don't preclude handling different sized sigset_t's.  */
        if (sigsetsize != sizeof(sigset_t))
                goto out;

        if (set) {
                error = -EFAULT;
                if (copy_from_user(&new_set, set, sizeof(*set)))
                        goto out;
                sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

                spin_lock_irq(&current->sig->siglock);
                old_set = current->blocked;

                error = 0;
                switch (how) {
                default:
                        error = -EINVAL;
                        break;
                case SIG_BLOCK:
                        sigorsets(&new_set, &old_set, &new_set);
                        break;
                case SIG_UNBLOCK:
                        signandsets(&new_set, &old_set, &new_set);
                        break;
                case SIG_SETMASK:
                        break;
                }

                current->blocked = new_set;
                recalc_sigpending();
                spin_unlock_irq(&current->sig->siglock);
                if (error)
                        goto out;
                if (oset)
                        goto set_old;
        } else if (oset) {
                spin_lock_irq(&current->sig->siglock);
                old_set = current->blocked;
                spin_unlock_irq(&current->sig->siglock);

        set_old:
                error = -EFAULT;
                if (copy_to_user(oset, &old_set, sizeof(*oset)))
                        goto out;
        }
        error = 0;
out:
        return error;
}
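/*
 * The how argument implements the usual set algebra (illustrative):
 *
 *      SIG_BLOCK:      blocked = blocked | set
 *      SIG_UNBLOCK:    blocked = blocked & ~set
 *      SIG_SETMASK:    blocked = set
 *
 * with SIGKILL and SIGSTOP always removed from the request first.
 */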
long do_sigpending(void *set, unsigned long sigsetsize)
{
        long error = -EINVAL;
        sigset_t pending;

        if (sigsetsize > sizeof(sigset_t))
                goto out;

        spin_lock_irq(&current->sig->siglock);
        sigandsets(&pending, &current->blocked, &current->pending.signal);
        spin_unlock_irq(&current->sig->siglock);

        error = -EFAULT;
        if (!copy_to_user(set, &pending, sigsetsize))
                error = 0;
out:
        return error;
}

asmlinkage long
sys_rt_sigpending(sigset_t *set, size_t sigsetsize)
{
        return do_sigpending(set, sigsetsize);
}
#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER

int copy_siginfo_to_user(siginfo_t *to, siginfo_t *from)
{
        int err;

        if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
                return -EFAULT;
        if (from->si_code < 0)
                return __copy_to_user(to, from, sizeof(siginfo_t))
                        ? -EFAULT : 0;
        /*
         * If you change siginfo_t structure, please be sure
         * this code is fixed accordingly.
         * It should never copy any pad contained in the structure
         * to avoid security leaks, but must copy the generic
         * 3 ints plus the relevant union member.
         */
        err = __put_user(from->si_signo, &to->si_signo);
        err |= __put_user(from->si_errno, &to->si_errno);
        err |= __put_user((short)from->si_code, &to->si_code);
        switch (from->si_code & __SI_MASK) {
        case __SI_KILL:
                err |= __put_user(from->si_pid, &to->si_pid);
                err |= __put_user(from->si_uid, &to->si_uid);
                break;
        case __SI_TIMER:
                err |= __put_user(from->si_timer1, &to->si_timer1);
                err |= __put_user(from->si_timer2, &to->si_timer2);
                break;
        case __SI_POLL:
                err |= __put_user(from->si_band, &to->si_band);
                err |= __put_user(from->si_fd, &to->si_fd);
                break;
        case __SI_FAULT:
                err |= __put_user(from->si_addr, &to->si_addr);
                break;
        case __SI_CHLD:
                err |= __put_user(from->si_pid, &to->si_pid);
                err |= __put_user(from->si_uid, &to->si_uid);
                err |= __put_user(from->si_status, &to->si_status);
                err |= __put_user(from->si_utime, &to->si_utime);
                err |= __put_user(from->si_stime, &to->si_stime);
                break;
        case __SI_RT: /* This is not generated by the kernel as of now.  */
                err |= __put_user(from->si_pid, &to->si_pid);
                err |= __put_user(from->si_uid, &to->si_uid);
                err |= __put_user(from->si_int, &to->si_int);
                err |= __put_user(from->si_ptr, &to->si_ptr);
                break;
        default: /* this is just in case for now ... */
                err |= __put_user(from->si_pid, &to->si_pid);
                err |= __put_user(from->si_uid, &to->si_uid);
                break;
        }
        return err;
}

#endif
asmlinkage long
sys_rt_sigtimedwait(const sigset_t *uthese, siginfo_t *uinfo,
                    const struct timespec *uts, size_t sigsetsize)
{
        int ret, sig;
        sigset_t these;
        struct timespec ts;
        siginfo_t info;
        long timeout = 0;

        /* XXX: Don't preclude handling different sized sigset_t's.  */
        if (sigsetsize != sizeof(sigset_t))
                return -EINVAL;

        if (copy_from_user(&these, uthese, sizeof(these)))
                return -EFAULT;

        /*
         * Invert the set of allowed signals to get those we
         * want to block.
         */
        sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
        signotset(&these);

        if (uts) {
                if (copy_from_user(&ts, uts, sizeof(ts)))
                        return -EFAULT;
                if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
                    || ts.tv_sec < 0)
                        return -EINVAL;
        }

        spin_lock_irq(&current->sig->siglock);
        sig = dequeue_signal(&these, &info);
        if (!sig) {
                timeout = MAX_SCHEDULE_TIMEOUT;
                if (uts)
                        timeout = (timespec_to_jiffies(&ts)
                                   + (ts.tv_sec || ts.tv_nsec));

                if (timeout) {
                        /* None ready -- temporarily unblock those we're
                         * interested in while we are sleeping so that we'll
                         * be awakened when they arrive.  */
                        current->real_blocked = current->blocked;
                        sigandsets(&current->blocked, &current->blocked, &these);
                        recalc_sigpending();
                        spin_unlock_irq(&current->sig->siglock);

                        current->state = TASK_INTERRUPTIBLE;
                        timeout = schedule_timeout(timeout);

                        spin_lock_irq(&current->sig->siglock);
                        sig = dequeue_signal(&these, &info);
                        current->blocked = current->real_blocked;
                        siginitset(&current->real_blocked, 0);
                        recalc_sigpending();
                }
        }
        spin_unlock_irq(&current->sig->siglock);

        if (sig) {
                ret = sig;
                if (uinfo) {
                        if (copy_siginfo_to_user(uinfo, &info))
                                ret = -EFAULT;
                }
        } else {
                ret = -EAGAIN;
                if (timeout)
                        ret = -EINTR;
        }

        return ret;
}
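/*
 * Userspace usage sketch (illustrative):
 *
 *      sigset_t set;
 *      siginfo_t info;
 *      struct timespec ts = { 5, 0 };          (wait at most 5 seconds)
 *      int sig;
 *
 *      sigemptyset(&set);
 *      sigaddset(&set, SIGRTMIN);
 *      sigprocmask(SIG_BLOCK, &set, NULL);     (the set should be blocked)
 *      sig = sigtimedwait(&set, &info, &ts);   (-1 with EAGAIN on timeout)
 */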
asmlinkage long
sys_kill(int pid, int sig)
{
        struct siginfo info;

        info.si_signo = sig;
        info.si_errno = 0;
        info.si_code = SI_USER;
        info.si_pid = current->pid;
        info.si_uid = current->uid;

        return kill_something_info(sig, &info, pid);
}
/*
 *  Send a signal to only one task, even if it's a CLONE_THREAD task.
 */
asmlinkage long
sys_tkill(int pid, int sig)
{
        struct siginfo info;
        int error;
        struct task_struct *p;

        /* This is only valid for single tasks */
        if (pid <= 0)
                return -EINVAL;

        info.si_signo = sig;
        info.si_errno = 0;
        info.si_code = SI_TKILL;
        info.si_pid = current->pid;
        info.si_uid = current->uid;

        read_lock(&tasklist_lock);
        p = find_task_by_pid(pid);
        error = -ESRCH;
        if (p) {
                spin_lock_irq(&p->sig->siglock);
                error = specific_send_sig_info(sig, &info, p, 0);
                spin_unlock_irq(&p->sig->siglock);
        }
        read_unlock(&tasklist_lock);
        return error;
}
asmlinkage long
sys_rt_sigqueueinfo(int pid, int sig, siginfo_t *uinfo)
{
        siginfo_t info;

        if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
                return -EFAULT;

        /* Not even root can pretend to send signals from the kernel.
           Nor can they impersonate a kill(), which adds source info.  */
        if (info.si_code >= 0)
                return -EPERM;
        info.si_signo = sig;

        /* POSIX.1b doesn't mention process groups.  */
        return kill_proc_info(sig, &info, pid);
}
int
do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact)
{
        struct k_sigaction *k;

        if (sig < 1 || sig > _NSIG || (act && sig_kernel_only(sig)))
                return -EINVAL;

        k = &current->sig->action[sig-1];

        spin_lock_irq(&current->sig->siglock);

        if (oact)
                *oact = *k;

        if (act) {
                *k = *act;
                sigdelsetmask(&k->sa.sa_mask, sigmask(SIGKILL) | sigmask(SIGSTOP));

                /*
                 * POSIX 3.3.1.3:
                 *  "Setting a signal action to SIG_IGN for a signal that is
                 *   pending shall cause the pending signal to be discarded,
                 *   whether or not it is blocked."
                 *
                 *  "Setting a signal action to SIG_DFL for a signal that is
                 *   pending and whose default action is to ignore the signal
                 *   (for example, SIGCHLD), shall cause the pending signal to
                 *   be discarded, whether or not it is blocked"
                 *
                 * Note the silly behaviour of SIGCHLD: SIG_IGN means that the
                 * signal isn't actually ignored, but does automatic child
                 * reaping, while SIG_DFL is explicitly said by POSIX to force
                 * the signal to be ignored.
                 */
                if (k->sa.sa_handler == SIG_IGN
                    || (k->sa.sa_handler == SIG_DFL
                        && (sig == SIGCONT ||
                            sig == SIGCHLD ||
                            sig == SIGWINCH ||
                            sig == SIGURG))) {
                        if (rm_sig_from_queue(sig, current))
                                recalc_sigpending();
                }
        }

        spin_unlock_irq(&current->sig->siglock);
        return 0;
}
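/*
 * For illustration of the POSIX discard rule above: if a SIGWINCH is
 * pending (blocked or not) and the caller installs SIG_DFL for it, the
 * pending instance is removed from the queue here and TIF_SIGPENDING is
 * recomputed, so the thread never sees the signal.
 */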
int
do_sigaltstack (const stack_t *uss, stack_t *uoss, unsigned long sp)
{
        stack_t oss;
        int error;

        if (uoss) {
                oss.ss_sp = (void *) current->sas_ss_sp;
                oss.ss_size = current->sas_ss_size;
                oss.ss_flags = sas_ss_flags(sp);
        }

        if (uss) {
                void *ss_sp;
                size_t ss_size;
                int ss_flags;

                error = -EFAULT;
                if (verify_area(VERIFY_READ, uss, sizeof(*uss))
                    || __get_user(ss_sp, &uss->ss_sp)
                    || __get_user(ss_flags, &uss->ss_flags)
                    || __get_user(ss_size, &uss->ss_size))
                        goto out;

                error = -EPERM;
                if (on_sig_stack (sp))
                        goto out;

                error = -EINVAL;
                /*
                 * Note - this code used to test ss_flags incorrectly;
                 * old code may have been written using ss_flags==0
                 * to mean ss_flags==SS_ONSTACK (as this was the only
                 * way that worked) - this fix preserves that older
                 * mechanism.
                 */
                if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
                        goto out;

                if (ss_flags == SS_DISABLE) {
                        ss_size = 0;
                        ss_sp = NULL;
                } else {
                        error = -ENOMEM;
                        if (ss_size < MINSIGSTKSZ)
                                goto out;
                }

                current->sas_ss_sp = (unsigned long) ss_sp;
                current->sas_ss_size = ss_size;
        }

        if (uoss) {
                error = -EFAULT;
                if (copy_to_user(uoss, &oss, sizeof(oss)))
                        goto out;
        }

        error = 0;
out:
        return error;
}
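/*
 * Userspace usage sketch (illustrative):
 *
 *      stack_t ss;
 *
 *      ss.ss_sp = malloc(SIGSTKSZ);
 *      ss.ss_size = SIGSTKSZ;
 *      ss.ss_flags = 0;
 *      sigaltstack(&ss, NULL);
 *
 * Combined with SA_ONSTACK in sa_flags, this lets a SIGSEGV handler run
 * even after the normal stack has overflowed.  Note the compatibility
 * quirk above: ss_flags == 0 is accepted and treated like SS_ONSTACK.
 */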
asmlinkage long
sys_sigpending(old_sigset_t *set)
{
        return do_sigpending(set, sizeof(*set));
}
#if !defined(__alpha__)
/* Alpha has its own versions with special arguments.  */

asmlinkage long
sys_sigprocmask(int how, old_sigset_t *set, old_sigset_t *oset)
{
        int error;
        old_sigset_t old_set, new_set;

        if (set) {
                error = -EFAULT;
                if (copy_from_user(&new_set, set, sizeof(*set)))
                        goto out;
                new_set &= ~(sigmask(SIGKILL)|sigmask(SIGSTOP));

                spin_lock_irq(&current->sig->siglock);
                old_set = current->blocked.sig[0];

                error = 0;
                switch (how) {
                default:
                        error = -EINVAL;
                        break;
                case SIG_BLOCK:
                        sigaddsetmask(&current->blocked, new_set);
                        break;
                case SIG_UNBLOCK:
                        sigdelsetmask(&current->blocked, new_set);
                        break;
                case SIG_SETMASK:
                        current->blocked.sig[0] = new_set;
                        break;
                }

                recalc_sigpending();
                spin_unlock_irq(&current->sig->siglock);
                if (error)
                        goto out;
                if (oset)
                        goto set_old;
        } else if (oset) {
                old_set = current->blocked.sig[0];
        set_old:
                error = -EFAULT;
                if (copy_to_user(oset, &old_set, sizeof(*oset)))
                        goto out;
        }
        error = 0;
out:
        return error;
}
#ifndef __sparc__
asmlinkage long
sys_rt_sigaction(int sig, const struct sigaction *act, struct sigaction *oact,
                 size_t sigsetsize)
{
        struct k_sigaction new_sa, old_sa;
        int ret = -EINVAL;

        /* XXX: Don't preclude handling different sized sigset_t's.  */
        if (sigsetsize != sizeof(sigset_t))
                goto out;

        if (act) {
                if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
                        return -EFAULT;
        }

        ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);

        if (!ret && oact) {
                if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
                        return -EFAULT;
        }
out:
        return ret;
}
#endif /* __sparc__ */
#endif
#if !defined(__alpha__) && !defined(__ia64__) && !defined(__arm__)
/*
 * For backwards compatibility.  Functionality superseded by sigprocmask.
 */
asmlinkage long
sys_sgetmask(void)
{
        /* SMP safe */
        return current->blocked.sig[0];
}

asmlinkage long
sys_ssetmask(int newmask)
{
        int old;

        spin_lock_irq(&current->sig->siglock);
        old = current->blocked.sig[0];

        siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
                                                  sigmask(SIGSTOP)));
        recalc_sigpending();
        spin_unlock_irq(&current->sig->siglock);

        return old;
}
#endif /* !defined(__alpha__) && !defined(__ia64__) && !defined(__arm__) */
#if !defined(__alpha__) && !defined(__ia64__) && !defined(__mips__) && \
    !defined(__arm__)
/*
 * For backwards compatibility.  Functionality superseded by sigaction.
 */
asmlinkage unsigned long
sys_signal(int sig, __sighandler_t handler)
{
        struct k_sigaction new_sa, old_sa;
        int ret;

        new_sa.sa.sa_handler = handler;
        new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;

        ret = do_sigaction(sig, &new_sa, &old_sa);

        return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
#endif /* !alpha && !__ia64__ && !defined(__mips__) && !defined(__arm__) */
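/*
 * For illustration: SA_ONESHOT | SA_NOMASK gives sys_signal() classic
 * SysV semantics -- the handler is reset to SIG_DFL on delivery and the
 * signal is not blocked while its handler runs, which is why portable
 * programs prefer sigaction() over signal().
 */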
#ifndef HAVE_ARCH_SYS_PAUSE

asmlinkage int
sys_pause(void)
{
        current->state = TASK_INTERRUPTIBLE;
        schedule();
        return -ERESTARTNOHAND;
}

#endif /* HAVE_ARCH_SYS_PAUSE */
void __init signals_init(void)
{
        sigqueue_cachep =
                kmem_cache_create("sigqueue",
                                  sizeof(struct sigqueue),
                                  __alignof__(struct sigqueue),
                                  0, NULL, NULL);
        if (!sigqueue_cachep)
                panic("signals_init(): cannot create sigqueue SLAB cache");
}