Import 2.1.122pre3
[davej-history.git] / kernel / signal.c
blob130e6371a285f005ac9256f6a6c085ab5902ee15
/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/init.h>

#include <asm/uaccess.h>
26 * SLAB caches for signal bits.
29 #define DEBUG_SIG 0
31 #if DEBUG_SIG
32 #define SIG_SLAB_DEBUG (SLAB_DEBUG_FREE | SLAB_RED_ZONE /* | SLAB_POISON */)
33 #else
34 #define SIG_SLAB_DEBUG 0
35 #endif
37 static kmem_cache_t *signal_queue_cachep;
39 static int nr_queued_signals;
40 static int max_queued_signals = 1024;
42 void __init signals_init(void)
44 signal_queue_cachep =
45 kmem_cache_create("signal_queue",
46 sizeof(struct signal_queue),
47 __alignof__(struct signal_queue),
48 SIG_SLAB_DEBUG, NULL, NULL);
53 * Flush all pending signals for a task.
56 void
57 flush_signals(struct task_struct *t)
59 struct signal_queue *q, *n;
61 t->sigpending = 0;
62 sigemptyset(&t->signal);
63 q = t->sigqueue;
64 t->sigqueue = NULL;
65 t->sigqueue_tail = &t->sigqueue;
67 while (q) {
68 n = q->next;
69 kmem_cache_free(signal_queue_cachep, q);
70 nr_queued_signals--;
71 q = n;
76 * Flush all handlers for a task.
79 void
80 flush_signal_handlers(struct task_struct *t)
82 int i;
83 struct k_sigaction *ka = &t->sig->action[0];
84 for (i = _NSIG ; i != 0 ; i--) {
85 if (ka->sa.sa_handler != SIG_IGN)
86 ka->sa.sa_handler = SIG_DFL;
87 ka->sa.sa_flags = 0;
88 sigemptyset(&ka->sa.sa_mask);
89 ka++;
94 * Dequeue a signal and return the element to the caller, which is
95 * expected to free it.
97 * All callers of must be holding current->sigmask_lock.
101 dequeue_signal(sigset_t *mask, siginfo_t *info)
103 unsigned long i, *s, *m, x;
104 int sig = 0;
106 #if DEBUG_SIG
107 printk("SIG dequeue (%s:%d): %d ", current->comm, current->pid,
108 signal_pending(current));
109 #endif
111 /* Find the first desired signal that is pending. */
112 s = current->signal.sig;
113 m = mask->sig;
114 switch (_NSIG_WORDS) {
115 default:
116 for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
117 if ((x = *s &~ *m) != 0) {
118 sig = ffz(~x) + i*_NSIG_BPW + 1;
119 break;
121 break;
123 case 2: if ((x = s[0] &~ m[0]) != 0)
124 sig = 1;
125 else if ((x = s[1] &~ m[1]) != 0)
126 sig = _NSIG_BPW + 1;
127 else
128 break;
129 sig += ffz(~x);
130 break;
132 case 1: if ((x = *s &~ *m) != 0)
133 sig = ffz(~x) + 1;
134 break;
137 if (sig) {
138 int reset = 1;
140 /* Collect the siginfo appropriate to this signal. */
141 if (sig < SIGRTMIN) {
142 /* XXX: As an extension, support queueing exactly
143 one non-rt signal if SA_SIGINFO is set, so that
144 we can get more detailed information about the
145 cause of the signal. */
146 /* Deciding not to init these couple of fields is
147 more expensive that just initializing them. */
148 info->si_signo = sig;
149 info->si_errno = 0;
150 info->si_code = 0;
151 info->si_pid = 0;
152 info->si_uid = 0;
153 } else {
154 struct signal_queue *q, **pp;
155 pp = &current->sigqueue;
156 q = current->sigqueue;
158 /* Find the one we're interested in ... */
159 for ( ; q ; pp = &q->next, q = q->next)
160 if (q->info.si_signo == sig)
161 break;
162 if (q) {
163 if ((*pp = q->next) == NULL)
164 current->sigqueue_tail = pp;
165 *info = q->info;
166 kmem_cache_free(signal_queue_cachep,q);
167 nr_queued_signals--;
169 /* then see if this signal is still pending. */
170 q = *pp;
171 while (q) {
172 if (q->info.si_signo == sig) {
173 reset = 0;
174 break;
176 q = q->next;
178 } else {
179 /* Ok, it wasn't in the queue. It must have
180 been sent either by a non-rt mechanism and
181 we ran out of queue space. So zero out the
182 info. */
183 info->si_signo = sig;
184 info->si_errno = 0;
185 info->si_code = 0;
186 info->si_pid = 0;
187 info->si_uid = 0;
191 if (reset)
192 sigdelset(&current->signal, sig);
193 recalc_sigpending(current);
195 /* XXX: Once POSIX.1b timers are in, if si_code == SI_TIMER,
196 we need to xchg out the timer overrun values. */
197 } else {
198 /* XXX: Once CLONE_PID is in to join those "threads" that are
199 part of the same "process", look for signals sent to the
200 "process" as well. */
202 /* Sanity check... */
203 if (mask == &current->blocked && signal_pending(current)) {
204 printk(KERN_CRIT "SIG: sigpending lied\n");
205 current->sigpending = 0;
209 #if DEBUG_SIG
210 printk(" %d -> %d\n", signal_pending(current), sig);
211 #endif
213 return sig;
217 * Determine whether a signal should be posted or not.
219 * Signals with SIG_IGN can be ignored, except for the
220 * special case of a SIGCHLD.
222 * Some signals with SIG_DFL default to a non-action.
224 static int ignored_signal(int sig, struct task_struct *t)
226 struct signal_struct *signals;
227 struct k_sigaction *ka;
229 /* Don't ignore traced or blocked signals */
230 if ((t->flags & PF_PTRACED) || sigismember(&t->blocked, sig))
231 return 0;
233 signals = t->sig;
234 if (!signals)
235 return 1;
237 ka = &signals->action[sig-1];
238 switch ((unsigned long) ka->sa.sa_handler) {
239 case (unsigned long) SIG_DFL:
240 if (sig == SIGCONT ||
241 sig == SIGWINCH ||
242 sig == SIGCHLD ||
243 sig == SIGURG)
244 break;
245 return 0;
247 case (unsigned long) SIG_IGN:
248 if (sig != SIGCHLD)
249 break;
250 /* fallthrough */
251 default:
252 return 0;
254 return 1;
258 send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
260 unsigned long flags;
261 int ret;
263 #if DEBUG_SIG
264 printk("SIG queue (%s:%d): %d ", t->comm, t->pid, sig);
265 #endif
267 ret = -EINVAL;
268 if (sig < 0 || sig > _NSIG)
269 goto out_nolock;
270 /* The somewhat baroque permissions check... */
271 ret = -EPERM;
272 if ((!info || ((unsigned long)info != 1 && SI_FROMUSER(info)))
273 && ((sig != SIGCONT) || (current->session != t->session))
274 && (current->euid ^ t->suid) && (current->euid ^ t->uid)
275 && (current->uid ^ t->suid) && (current->uid ^ t->uid)
276 && !capable(CAP_SYS_ADMIN))
277 goto out_nolock;
279 /* The null signal is a permissions and process existance probe.
280 No signal is actually delivered. */
281 ret = 0;
282 if (!sig)
283 goto out_nolock;
285 spin_lock_irqsave(&t->sigmask_lock, flags);
286 switch (sig) {
287 case SIGKILL: case SIGCONT:
288 /* Wake up the process if stopped. */
289 if (t->state == TASK_STOPPED)
290 wake_up_process(t);
291 t->exit_code = 0;
292 sigdelsetmask(&t->signal, (sigmask(SIGSTOP)|sigmask(SIGTSTP)|
293 sigmask(SIGTTOU)|sigmask(SIGTTIN)));
294 /* Inflict this corner case with recalculations, not mainline */
295 recalc_sigpending(t);
296 break;
298 case SIGSTOP: case SIGTSTP:
299 case SIGTTIN: case SIGTTOU:
300 /* If we're stopping again, cancel SIGCONT */
301 sigdelset(&t->signal, SIGCONT);
302 /* Inflict this corner case with recalculations, not mainline */
303 recalc_sigpending(t);
304 break;
307 /* Optimize away the signal, if it's a signal that can be
308 handled immediately (ie non-blocked and untraced) and
309 that is ignored (either explicitly or by default). */
311 if (ignored_signal(sig, t))
312 goto out;
314 if (sig < SIGRTMIN) {
315 /* Non-real-time signals are not queued. */
316 /* XXX: As an extension, support queueing exactly one
317 non-rt signal if SA_SIGINFO is set, so that we can
318 get more detailed information about the cause of
319 the signal. */
320 if (sigismember(&t->signal, sig))
321 goto out;
322 } else {
323 /* Real-time signals must be queued if sent by sigqueue, or
324 some other real-time mechanism. It is implementation
325 defined whether kill() does so. We attempt to do so, on
326 the principle of least surprise, but since kill is not
327 allowed to fail with EAGAIN when low on memory we just
328 make sure at least one signal gets delivered and don't
329 pass on the info struct. */
331 struct signal_queue *q = 0;
333 if (nr_queued_signals < max_queued_signals) {
334 q = (struct signal_queue *)
335 kmem_cache_alloc(signal_queue_cachep, GFP_KERNEL);
336 nr_queued_signals++;
339 if (q) {
340 q->next = NULL;
341 *t->sigqueue_tail = q;
342 t->sigqueue_tail = &q->next;
343 switch ((unsigned long) info) {
344 case 0:
345 q->info.si_signo = sig;
346 q->info.si_errno = 0;
347 q->info.si_code = SI_USER;
348 q->info.si_pid = current->pid;
349 q->info.si_uid = current->uid;
350 break;
351 case 1:
352 q->info.si_signo = sig;
353 q->info.si_errno = 0;
354 q->info.si_code = SI_KERNEL;
355 q->info.si_pid = 0;
356 q->info.si_uid = 0;
357 break;
358 default:
359 q->info = *info;
360 break;
362 } else {
363 /* If this was sent by a rt mechanism, try again. */
364 if (info->si_code < 0) {
365 ret = -EAGAIN;
366 goto out;
368 /* Otherwise, mention that the signal is pending,
369 but don't queue the info. */
373 sigaddset(&t->signal, sig);
374 if (!sigismember(&t->blocked, sig))
375 t->sigpending = 1;
377 out:
378 spin_unlock_irqrestore(&t->sigmask_lock, flags);
379 if (t->state == TASK_INTERRUPTIBLE && signal_pending(t))
380 wake_up_process(t);
382 out_nolock:
383 #if DEBUG_SIG
384 printk(" %d -> %d\n", signal_pending(t), ret);
385 #endif
387 return ret;
391 * Force a signal that the process can't ignore: if necessary
392 * we unblock the signal and change any SIG_IGN to SIG_DFL.
396 force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
398 unsigned long int flags;
400 spin_lock_irqsave(&t->sigmask_lock, flags);
401 if (t->sig == NULL) {
402 spin_unlock_irqrestore(&t->sigmask_lock, flags);
403 return -ESRCH;
406 if (t->sig->action[sig-1].sa.sa_handler == SIG_IGN)
407 t->sig->action[sig-1].sa.sa_handler = SIG_DFL;
408 sigdelset(&t->blocked, sig);
409 spin_unlock_irqrestore(&t->sigmask_lock, flags);
411 return send_sig_info(sig, info, t);
415 * kill_pg() sends a signal to a process group: this is what the tty
416 * control characters do (^C, ^Z etc)
420 kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
422 int retval = -EINVAL;
423 if (pgrp > 0) {
424 struct task_struct *p;
425 int found = 0;
427 retval = -ESRCH;
428 read_lock(&tasklist_lock);
429 for_each_task(p) {
430 if (p->pgrp == pgrp) {
431 int err = send_sig_info(sig, info, p);
432 if (err != 0)
433 retval = err;
434 else
435 found++;
438 read_unlock(&tasklist_lock);
439 if (found)
440 retval = 0;
442 return retval;
446 * kill_sl() sends a signal to the session leader: this is used
447 * to send SIGHUP to the controlling process of a terminal when
448 * the connection is lost.
452 kill_sl_info(int sig, struct siginfo *info, pid_t sess)
454 int retval = -EINVAL;
455 if (sess > 0) {
456 struct task_struct *p;
457 int found = 0;
459 retval = -ESRCH;
460 read_lock(&tasklist_lock);
461 for_each_task(p) {
462 if (p->leader && p->session == sess) {
463 int err = send_sig_info(sig, info, p);
464 if (err)
465 retval = err;
466 else
467 found++;
470 read_unlock(&tasklist_lock);
471 if (found)
472 retval = 0;
474 return retval;
477 inline int
478 kill_proc_info(int sig, struct siginfo *info, pid_t pid)
480 int error;
481 struct task_struct *p;
483 read_lock(&tasklist_lock);
484 p = find_task_by_pid(pid);
485 error = -ESRCH;
486 if (p)
487 error = send_sig_info(sig, info, p);
488 read_unlock(&tasklist_lock);
489 return error;
493 * kill_something() interprets pid in interesting ways just like kill(2).
495 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
496 * is probably wrong. Should make it like BSD or SYSV.
500 kill_something_info(int sig, struct siginfo *info, int pid)
502 if (!pid) {
503 return kill_pg_info(sig, info, current->pgrp);
504 } else if (pid == -1) {
505 int retval = 0, count = 0;
506 struct task_struct * p;
508 read_lock(&tasklist_lock);
509 for_each_task(p) {
510 if (p->pid > 1 && p != current) {
511 int err = send_sig_info(sig, info, p);
512 ++count;
513 if (err != -EPERM)
514 retval = err;
517 read_unlock(&tasklist_lock);
518 return count ? retval : -ESRCH;
519 } else if (pid < 0) {
520 return kill_pg_info(sig, info, -pid);
521 } else {
522 return kill_proc_info(sig, info, pid);
/*
 * These are for backward compatibility with the rest of the kernel source.
 * The (void*)(long)(priv != 0) trick encodes "privileged sender" as the
 * magic info values 0/1 understood by send_sig_info().
 */

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, (void*)(long)(priv != 0), p);
}
/* Compatibility wrapper: force a kernel-originated signal on p. */
void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, (void*)1L, p);
}
543 kill_pg(pid_t pgrp, int sig, int priv)
545 return kill_pg_info(sig, (void *)(long)(priv != 0), pgrp);
549 kill_sl(pid_t sess, int sig, int priv)
551 return kill_sl_info(sig, (void *)(long)(priv != 0), sess);
555 kill_proc(pid_t pid, int sig, int priv)
557 return kill_proc_info(sig, (void *)(long)(priv != 0), pid);
561 * Let a parent know about a status change of a child.
564 void
565 notify_parent(struct task_struct *tsk, int sig)
567 struct siginfo info;
568 int why;
570 info.si_signo = sig;
571 info.si_errno = 0;
572 info.si_pid = tsk->pid;
574 /* FIXME: find out whether or not this is supposed to be c*time. */
575 info.si_utime = tsk->times.tms_utime;
576 info.si_stime = tsk->times.tms_stime;
578 why = SI_KERNEL; /* shouldn't happen */
579 switch (tsk->state) {
580 case TASK_ZOMBIE:
581 if (tsk->exit_code & 0x80)
582 why = CLD_DUMPED;
583 else if (tsk->exit_code & 0x7f)
584 why = CLD_KILLED;
585 else
586 why = CLD_EXITED;
587 break;
588 case TASK_STOPPED:
589 /* FIXME -- can we deduce CLD_TRAPPED or CLD_CONTINUED? */
590 why = CLD_STOPPED;
591 break;
593 default:
594 printk(KERN_DEBUG "eh? notify_parent with state %ld?\n",
595 tsk->state);
596 break;
598 info.si_code = why;
600 send_sig_info(sig, &info, tsk->p_pptr);
601 wake_up_interruptible(&tsk->p_pptr->wait_chldexit);
EXPORT_SYMBOL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(force_sig_info);
EXPORT_SYMBOL(kill_pg);
EXPORT_SYMBOL(kill_pg_info);
EXPORT_SYMBOL(kill_proc);
EXPORT_SYMBOL(kill_proc_info);
EXPORT_SYMBOL(kill_sl);
EXPORT_SYMBOL(kill_sl_info);
EXPORT_SYMBOL(notify_parent);
EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
621 * System call entry points.
625 * We don't need to get the kernel lock - this is all local to this
626 * particular thread.. (and that's good, because this is _heavily_
627 * used by various programs)
630 asmlinkage int
631 sys_rt_sigprocmask(int how, sigset_t *set, sigset_t *oset, size_t sigsetsize)
633 int error = -EINVAL;
634 sigset_t old_set, new_set;
636 /* XXX: Don't preclude handling different sized sigset_t's. */
637 if (sigsetsize != sizeof(sigset_t))
638 goto out;
640 if (set) {
641 error = -EFAULT;
642 if (copy_from_user(&new_set, set, sizeof(*set)))
643 goto out;
644 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
646 spin_lock_irq(&current->sigmask_lock);
647 old_set = current->blocked;
649 error = 0;
650 switch (how) {
651 default:
652 error = -EINVAL;
653 break;
654 case SIG_BLOCK:
655 sigorsets(&new_set, &old_set, &new_set);
656 break;
657 case SIG_UNBLOCK:
658 signandsets(&new_set, &old_set, &new_set);
659 break;
660 case SIG_SETMASK:
661 break;
664 current->blocked = new_set;
665 recalc_sigpending(current);
666 spin_unlock_irq(&current->sigmask_lock);
667 if (error)
668 goto out;
669 if (oset)
670 goto set_old;
671 } else if (oset) {
672 spin_lock_irq(&current->sigmask_lock);
673 old_set = current->blocked;
674 spin_unlock_irq(&current->sigmask_lock);
676 set_old:
677 error = -EFAULT;
678 if (copy_to_user(oset, &old_set, sizeof(*oset)))
679 goto out;
681 error = 0;
682 out:
683 return error;
686 asmlinkage int
687 sys_rt_sigpending(sigset_t *set, size_t sigsetsize)
689 int error = -EINVAL;
690 sigset_t pending;
692 /* XXX: Don't preclude handling different sized sigset_t's. */
693 if (sigsetsize != sizeof(sigset_t))
694 goto out;
696 spin_lock_irq(&current->sigmask_lock);
697 sigandsets(&pending, &current->blocked, &current->signal);
698 spin_unlock_irq(&current->sigmask_lock);
700 error = -EFAULT;
701 if (!copy_to_user(set, &pending, sizeof(*set)))
702 error = 0;
703 out:
704 return error;
707 asmlinkage int
708 sys_rt_sigtimedwait(const sigset_t *uthese, siginfo_t *uinfo,
709 const struct timespec *uts, size_t sigsetsize)
711 int ret, sig;
712 sigset_t these;
713 struct timespec ts;
714 siginfo_t info;
716 /* XXX: Don't preclude handling different sized sigset_t's. */
717 if (sigsetsize != sizeof(sigset_t))
718 return -EINVAL;
720 if (copy_from_user(&these, uthese, sizeof(these)))
721 return -EFAULT;
722 else {
723 /* Invert the set of allowed signals to get those we
724 want to block. */
725 signotset(&these);
728 if (uts) {
729 if (copy_from_user(&ts, uts, sizeof(ts)))
730 return -EFAULT;
731 if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
732 || ts.tv_sec < 0)
733 return -EINVAL;
736 spin_lock_irq(&current->sigmask_lock);
737 sig = dequeue_signal(&these, &info);
738 if (!sig) {
739 /* None ready -- temporarily unblock those we're interested
740 in so that we'll be awakened when they arrive. */
741 unsigned long expire;
742 sigset_t oldblocked = current->blocked;
743 sigandsets(&current->blocked, &current->blocked, &these);
744 recalc_sigpending(current);
745 spin_unlock_irq(&current->sigmask_lock);
747 expire = ~0UL;
748 if (uts) {
749 expire = (timespec_to_jiffies(&ts)
750 + (ts.tv_sec || ts.tv_nsec));
751 expire += jiffies;
753 current->timeout = expire;
755 current->state = TASK_INTERRUPTIBLE;
756 schedule();
758 spin_lock_irq(&current->sigmask_lock);
759 sig = dequeue_signal(&these, &info);
760 current->blocked = oldblocked;
761 recalc_sigpending(current);
763 spin_unlock_irq(&current->sigmask_lock);
765 if (sig) {
766 ret = sig;
767 if (uinfo) {
768 if (copy_to_user(uinfo, &info, sizeof(siginfo_t)))
769 ret = -EFAULT;
771 } else {
772 ret = -EAGAIN;
773 if (current->timeout != 0) {
774 current->timeout = 0;
775 ret = -EINTR;
779 return ret;
782 asmlinkage int
783 sys_kill(int pid, int sig)
785 struct siginfo info;
787 info.si_signo = sig;
788 info.si_errno = 0;
789 info.si_code = SI_USER;
790 info.si_pid = current->pid;
791 info.si_uid = current->uid;
793 return kill_something_info(sig, &info, pid);
796 asmlinkage int
797 sys_rt_sigqueueinfo(int pid, int sig, siginfo_t *uinfo)
799 siginfo_t info;
801 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
802 return -EFAULT;
804 /* Not even root can pretend to send signals from the kernel.
805 Nor can they impersonate a kill(), which adds source info. */
806 if (info.si_code >= 0)
807 return -EPERM;
808 info.si_signo = sig;
810 /* POSIX.1b doesn't mention process groups. */
811 return kill_proc_info(sig, &info, pid);
815 do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact)
817 struct k_sigaction *k;
819 if (sig < 1 || sig > _NSIG ||
820 (act && (sig == SIGKILL || sig == SIGSTOP)))
821 return -EINVAL;
823 spin_lock_irq(&current->sigmask_lock);
824 k = &current->sig->action[sig-1];
826 if (oact) *oact = *k;
828 if (act) {
829 *k = *act;
830 sigdelsetmask(&k->sa.sa_mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
833 * POSIX 3.3.1.3:
834 * "Setting a signal action to SIG_IGN for a signal that is
835 * pending shall cause the pending signal to be discarded,
836 * whether or not it is blocked."
838 * "Setting a signal action to SIG_DFL for a signal that is
839 * pending and whose default action is to ignore the signal
840 * (for example, SIGCHLD), shall cause the pending signal to
841 * be discarded, whether or not it is blocked"
843 * Note the silly behaviour of SIGCHLD: SIG_IGN means that the
844 * signal isn't actually ignored, but does automatic child
845 * reaping, while SIG_DFL is explicitly said by POSIX to force
846 * the signal to be ignored.
849 if (k->sa.sa_handler == SIG_IGN
850 || (k->sa.sa_handler == SIG_DFL
851 && (sig == SIGCONT ||
852 sig == SIGCHLD ||
853 sig == SIGWINCH))) {
854 /* So dequeue any that might be pending.
855 XXX: process-wide signals? */
856 if (sig >= SIGRTMIN &&
857 sigismember(&current->signal, sig)) {
858 struct signal_queue *q, **pp;
859 pp = &current->sigqueue;
860 q = current->sigqueue;
861 while (q) {
862 if (q->info.si_signo != sig)
863 pp = &q->next;
864 else {
865 *pp = q->next;
866 kmem_cache_free(signal_queue_cachep, q);
867 nr_queued_signals--;
869 q = *pp;
873 sigdelset(&current->signal, sig);
874 recalc_sigpending(current);
878 spin_unlock_irq(&current->sigmask_lock);
880 return 0;
883 int
884 do_sigaltstack (const stack_t *uss, stack_t *uoss, unsigned long sp)
886 stack_t oss;
887 int error;
889 if (uoss) {
890 oss.ss_sp = (void *) current->sas_ss_sp;
891 oss.ss_size = current->sas_ss_size;
892 oss.ss_flags = sas_ss_flags(sp);
895 if (uss) {
896 void *ss_sp;
897 size_t ss_size;
898 int ss_flags;
900 error = -EFAULT;
901 if (verify_area(VERIFY_READ, uss, sizeof(*uss))
902 || __get_user(ss_sp, &uss->ss_sp)
903 || __get_user(ss_flags, &uss->ss_flags)
904 || __get_user(ss_size, &uss->ss_size))
905 goto out;
907 error = -EPERM;
908 if (on_sig_stack (sp))
909 goto out;
911 error = -EINVAL;
912 if (ss_flags & ~SS_DISABLE)
913 goto out;
915 if (ss_flags & SS_DISABLE) {
916 ss_size = 0;
917 ss_sp = NULL;
918 } else {
919 error = -ENOMEM;
920 if (ss_size < MINSIGSTKSZ)
921 goto out;
924 current->sas_ss_sp = (unsigned long) ss_sp;
925 current->sas_ss_size = ss_size;
928 if (uoss) {
929 error = -EFAULT;
930 if (copy_to_user(uoss, &oss, sizeof(oss)))
931 goto out;
934 error = 0;
935 out:
936 return error;
939 #if !defined(__alpha__)
940 /* Alpha has its own versions with special arguments. */
942 asmlinkage int
943 sys_sigprocmask(int how, old_sigset_t *set, old_sigset_t *oset)
945 int error;
946 old_sigset_t old_set, new_set;
948 if (set) {
949 error = -EFAULT;
950 if (copy_from_user(&new_set, set, sizeof(*set)))
951 goto out;
952 new_set &= ~(sigmask(SIGKILL)|sigmask(SIGSTOP));
954 spin_lock_irq(&current->sigmask_lock);
955 old_set = current->blocked.sig[0];
957 error = 0;
958 switch (how) {
959 default:
960 error = -EINVAL;
961 break;
962 case SIG_BLOCK:
963 sigaddsetmask(&current->blocked, new_set);
964 break;
965 case SIG_UNBLOCK:
966 sigdelsetmask(&current->blocked, new_set);
967 break;
968 case SIG_SETMASK:
969 current->blocked.sig[0] = new_set;
970 break;
973 recalc_sigpending(current);
974 spin_unlock_irq(&current->sigmask_lock);
975 if (error)
976 goto out;
977 if (oset)
978 goto set_old;
979 } else if (oset) {
980 old_set = current->blocked.sig[0];
981 set_old:
982 error = -EFAULT;
983 if (copy_to_user(oset, &old_set, sizeof(*oset)))
984 goto out;
986 error = 0;
987 out:
988 return error;
991 asmlinkage int
992 sys_sigpending(old_sigset_t *set)
994 int error;
995 old_sigset_t pending;
997 spin_lock_irq(&current->sigmask_lock);
998 pending = current->blocked.sig[0] & current->signal.sig[0];
999 spin_unlock_irq(&current->sigmask_lock);
1001 error = -EFAULT;
1002 if (!copy_to_user(set, &pending, sizeof(*set)))
1003 error = 0;
1004 return error;
1007 #ifndef __sparc__
1008 asmlinkage int
1009 sys_rt_sigaction(int sig, const struct sigaction *act, struct sigaction *oact,
1010 size_t sigsetsize)
1012 struct k_sigaction new_sa, old_sa;
1013 int ret = -EINVAL;
1015 /* XXX: Don't preclude handling different sized sigset_t's. */
1016 if (sigsetsize != sizeof(sigset_t))
1017 goto out;
1019 if (act) {
1020 if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
1021 return -EFAULT;
1024 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
1026 if (!ret && oact) {
1027 if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
1028 return -EFAULT;
1030 out:
1031 return ret;
1033 #endif /* __sparc__ */
1034 #endif
1036 #if !defined(__alpha__)
1038 * For backwards compatibility. Functionality superseded by sigprocmask.
1040 asmlinkage int
1041 sys_sgetmask(void)
1043 /* SMP safe */
1044 return current->blocked.sig[0];
1047 asmlinkage int
1048 sys_ssetmask(int newmask)
1050 int old;
1052 spin_lock_irq(&current->sigmask_lock);
1053 old = current->blocked.sig[0];
1055 siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
1056 sigmask(SIGSTOP)));
1057 recalc_sigpending(current);
1058 spin_unlock_irq(&current->sigmask_lock);
1060 return old;
1064 * For backwards compatibility. Functionality superseded by sigaction.
1066 asmlinkage unsigned long
1067 sys_signal(int sig, __sighandler_t handler)
1069 struct k_sigaction new_sa, old_sa;
1070 int ret;
1072 new_sa.sa.sa_handler = handler;
1073 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
1075 ret = do_sigaction(sig, &new_sa, &old_sa);
1077 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
1079 #endif /* !alpha */