/* kernel/signal.c — historical source from the davej-history.git tree
   (blob e519bcb22c21a5e6a6b718ea80ca803bb30aa204). */
/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 */
9 #include <linux/module.h>
10 #include <linux/sched.h>
11 #include <linux/kernel.h>
12 #include <linux/signal.h>
13 #include <linux/errno.h>
14 #include <linux/wait.h>
15 #include <linux/ptrace.h>
16 #include <linux/unistd.h>
17 #include <linux/mm.h>
18 #include <linux/smp.h>
19 #include <linux/smp_lock.h>
20 #include <linux/slab.h>
21 #include <linux/init.h>
23 #include <asm/uaccess.h>
26 * SLAB caches for signal bits.
29 #define DEBUG_SIG 0
31 #if DEBUG_SIG
32 #define SIG_SLAB_DEBUG (SLAB_DEBUG_FREE | SLAB_RED_ZONE /* | SLAB_POISON */)
33 #else
34 #define SIG_SLAB_DEBUG 0
35 #endif
37 static kmem_cache_t *signal_queue_cachep;
39 static int nr_queued_signals;
40 static int max_queued_signals = 1024;
42 void __init signals_init(void)
44 signal_queue_cachep =
45 kmem_cache_create("signal_queue",
46 sizeof(struct signal_queue),
47 __alignof__(struct signal_queue),
48 SIG_SLAB_DEBUG, NULL, NULL);
53 * Flush all pending signals for a task.
56 void
57 flush_signals(struct task_struct *t)
59 struct signal_queue *q, *n;
61 t->sigpending = 0;
62 sigemptyset(&t->signal);
63 q = t->sigqueue;
64 t->sigqueue = NULL;
65 t->sigqueue_tail = &t->sigqueue;
67 while (q) {
68 n = q->next;
69 kmem_cache_free(signal_queue_cachep, q);
70 nr_queued_signals--;
71 q = n;
76 * Flush all handlers for a task.
79 void
80 flush_signal_handlers(struct task_struct *t)
82 int i;
83 struct k_sigaction *ka = &t->sig->action[0];
84 for (i = _NSIG ; i != 0 ; i--) {
85 if (ka->sa.sa_handler != SIG_IGN)
86 ka->sa.sa_handler = SIG_DFL;
87 ka->sa.sa_flags = 0;
88 sigemptyset(&ka->sa.sa_mask);
89 ka++;
94 * Dequeue a signal and return the element to the caller, which is
95 * expected to free it.
97 * All callers of must be holding current->sigmask_lock.
101 dequeue_signal(sigset_t *mask, siginfo_t *info)
103 unsigned long i, *s, *m, x;
104 int sig = 0;
106 #if DEBUG_SIG
107 printk("SIG dequeue (%s:%d): %d ", current->comm, current->pid,
108 signal_pending(current));
109 #endif
111 /* Find the first desired signal that is pending. */
112 s = current->signal.sig;
113 m = mask->sig;
114 switch (_NSIG_WORDS) {
115 default:
116 for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
117 if ((x = *s &~ *m) != 0) {
118 sig = ffz(~x) + i*_NSIG_BPW + 1;
119 break;
121 break;
123 case 2: if ((x = s[0] &~ m[0]) != 0)
124 sig = 1;
125 else if ((x = s[1] &~ m[1]) != 0)
126 sig = _NSIG_BPW + 1;
127 else
128 break;
129 sig += ffz(~x);
130 break;
132 case 1: if ((x = *s &~ *m) != 0)
133 sig = ffz(~x) + 1;
134 break;
137 if (sig) {
138 int reset = 1;
140 /* Collect the siginfo appropriate to this signal. */
141 if (sig < SIGRTMIN) {
142 /* XXX: As an extension, support queueing exactly
143 one non-rt signal if SA_SIGINFO is set, so that
144 we can get more detailed information about the
145 cause of the signal. */
146 /* Deciding not to init these couple of fields is
147 more expensive that just initializing them. */
148 info->si_signo = sig;
149 info->si_errno = 0;
150 info->si_code = 0;
151 info->si_pid = 0;
152 info->si_uid = 0;
153 } else {
154 struct signal_queue *q, **pp;
155 pp = &current->sigqueue;
156 q = current->sigqueue;
158 /* Find the one we're interested in ... */
159 for ( ; q ; pp = &q->next, q = q->next)
160 if (q->info.si_signo == sig)
161 break;
162 if (q) {
163 if ((*pp = q->next) == NULL)
164 current->sigqueue_tail = pp;
165 *info = q->info;
166 kmem_cache_free(signal_queue_cachep,q);
167 nr_queued_signals--;
169 /* then see if this signal is still pending. */
170 q = *pp;
171 while (q) {
172 if (q->info.si_signo == sig) {
173 reset = 0;
174 break;
176 q = q->next;
178 } else {
179 /* Ok, it wasn't in the queue. It must have
180 been sent either by a non-rt mechanism and
181 we ran out of queue space. So zero out the
182 info. */
183 info->si_signo = sig;
184 info->si_errno = 0;
185 info->si_code = 0;
186 info->si_pid = 0;
187 info->si_uid = 0;
191 if (reset)
192 sigdelset(&current->signal, sig);
193 recalc_sigpending(current);
195 /* XXX: Once POSIX.1b timers are in, if si_code == SI_TIMER,
196 we need to xchg out the timer overrun values. */
197 } else {
198 /* XXX: Once CLONE_PID is in to join those "threads" that are
199 part of the same "process", look for signals sent to the
200 "process" as well. */
202 /* Sanity check... */
203 if (mask == &current->blocked && signal_pending(current)) {
204 printk(KERN_CRIT "SIG: sigpending lied\n");
205 current->sigpending = 0;
209 #if DEBUG_SIG
210 printk(" %d -> %d\n", signal_pending(current), sig);
211 #endif
213 return sig;
217 * Determine whether a signal should be posted or not.
219 * Signals with SIG_IGN can be ignored, except for the
220 * special case of a SIGCHLD.
222 * Some signals with SIG_DFL default to a non-action.
224 static int ignored_signal(int sig, struct task_struct *t)
226 struct signal_struct *signals;
227 struct k_sigaction *ka;
229 /* Don't ignore traced or blocked signals */
230 if ((t->flags & PF_PTRACED) || sigismember(&t->blocked, sig))
231 return 0;
233 signals = t->sig;
234 if (!signals)
235 return 1;
237 ka = &signals->action[sig-1];
238 switch ((unsigned long) ka->sa.sa_handler) {
239 case (unsigned long) SIG_DFL:
240 if (sig == SIGCONT ||
241 sig == SIGWINCH ||
242 sig == SIGCHLD ||
243 sig == SIGURG)
244 break;
245 return 0;
247 case (unsigned long) SIG_IGN:
248 if (sig != SIGCHLD)
249 break;
250 /* fallthrough */
251 default:
252 return 0;
254 return 1;
258 send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
260 unsigned long flags;
261 int ret;
263 #if DEBUG_SIG
264 printk("SIG queue (%s:%d): %d ", t->comm, t->pid, sig);
265 #endif
267 ret = -EINVAL;
268 if (sig < 0 || sig > _NSIG)
269 goto out_nolock;
270 /* The somewhat baroque permissions check... */
271 ret = -EPERM;
272 if ((!info || ((unsigned long)info != 1 && SI_FROMUSER(info)))
273 && ((sig != SIGCONT) || (current->session != t->session))
274 && (current->euid ^ t->suid) && (current->euid ^ t->uid)
275 && (current->uid ^ t->suid) && (current->uid ^ t->uid)
276 && !capable(CAP_SYS_ADMIN))
277 goto out_nolock;
279 /* The null signal is a permissions and process existance probe.
280 No signal is actually delivered. */
281 ret = 0;
282 if (!sig)
283 goto out_nolock;
285 spin_lock_irqsave(&t->sigmask_lock, flags);
286 switch (sig) {
287 case SIGKILL: case SIGCONT:
288 /* Wake up the process if stopped. */
289 if (t->state == TASK_STOPPED)
290 wake_up_process(t);
291 t->exit_code = 0;
292 sigdelsetmask(&t->signal, (sigmask(SIGSTOP)|sigmask(SIGTSTP)|
293 sigmask(SIGTTOU)|sigmask(SIGTTIN)));
294 /* Inflict this corner case with recalculations, not mainline */
295 recalc_sigpending(t);
296 break;
298 case SIGSTOP: case SIGTSTP:
299 case SIGTTIN: case SIGTTOU:
300 /* If we're stopping again, cancel SIGCONT */
301 sigdelset(&t->signal, SIGCONT);
302 /* Inflict this corner case with recalculations, not mainline */
303 recalc_sigpending(t);
304 break;
307 /* Optimize away the signal, if it's a signal that can be
308 handled immediately (ie non-blocked and untraced) and
309 that is ignored (either explicitly or by default). */
311 if (ignored_signal(sig, t))
312 goto out;
314 if (sig < SIGRTMIN) {
315 /* Non-real-time signals are not queued. */
316 /* XXX: As an extension, support queueing exactly one
317 non-rt signal if SA_SIGINFO is set, so that we can
318 get more detailed information about the cause of
319 the signal. */
320 if (sigismember(&t->signal, sig))
321 goto out;
322 } else {
323 /* Real-time signals must be queued if sent by sigqueue, or
324 some other real-time mechanism. It is implementation
325 defined whether kill() does so. We attempt to do so, on
326 the principle of least surprise, but since kill is not
327 allowed to fail with EAGAIN when low on memory we just
328 make sure at least one signal gets delivered and don't
329 pass on the info struct. */
331 struct signal_queue *q = 0;
333 if (nr_queued_signals < max_queued_signals) {
334 q = (struct signal_queue *)
335 kmem_cache_alloc(signal_queue_cachep, GFP_KERNEL);
336 nr_queued_signals++;
339 if (q) {
340 q->next = NULL;
341 *t->sigqueue_tail = q;
342 t->sigqueue_tail = &q->next;
343 switch ((unsigned long) info) {
344 case 0:
345 q->info.si_signo = sig;
346 q->info.si_errno = 0;
347 q->info.si_code = SI_USER;
348 q->info.si_pid = current->pid;
349 q->info.si_uid = current->uid;
350 break;
351 case 1:
352 q->info.si_signo = sig;
353 q->info.si_errno = 0;
354 q->info.si_code = SI_KERNEL;
355 q->info.si_pid = 0;
356 q->info.si_uid = 0;
357 break;
358 default:
359 q->info = *info;
360 break;
362 } else {
363 /* If this was sent by a rt mechanism, try again. */
364 if (info->si_code < 0) {
365 ret = -EAGAIN;
366 goto out;
368 /* Otherwise, mention that the signal is pending,
369 but don't queue the info. */
373 sigaddset(&t->signal, sig);
374 if (!sigismember(&t->blocked, sig))
375 t->sigpending = 1;
377 out:
378 spin_unlock_irqrestore(&t->sigmask_lock, flags);
379 if (t->state == TASK_INTERRUPTIBLE && signal_pending(t))
380 wake_up_process(t);
382 out_nolock:
383 #if DEBUG_SIG
384 printk(" %d -> %d\n", signal_pending(t), ret);
385 #endif
387 return ret;
391 * Force a signal that the process can't ignore: if necessary
392 * we unblock the signal and change any SIG_IGN to SIG_DFL.
396 force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
398 unsigned long int flags;
400 spin_lock_irqsave(&t->sigmask_lock, flags);
401 if (t->sig == NULL) {
402 spin_unlock_irqrestore(&t->sigmask_lock, flags);
403 return -ESRCH;
406 if (t->sig->action[sig-1].sa.sa_handler == SIG_IGN)
407 t->sig->action[sig-1].sa.sa_handler = SIG_DFL;
408 sigdelset(&t->blocked, sig);
409 spin_unlock_irqrestore(&t->sigmask_lock, flags);
411 return send_sig_info(sig, info, t);
415 * kill_pg() sends a signal to a process group: this is what the tty
416 * control characters do (^C, ^Z etc)
420 kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
422 int retval = -EINVAL;
423 if (pgrp > 0) {
424 struct task_struct *p;
425 int found = 0;
427 retval = -ESRCH;
428 read_lock(&tasklist_lock);
429 for_each_task(p) {
430 if (p->pgrp == pgrp) {
431 int err = send_sig_info(sig, info, p);
432 if (err != 0)
433 retval = err;
434 else
435 found++;
438 read_unlock(&tasklist_lock);
439 if (found)
440 retval = 0;
442 return retval;
446 * kill_sl() sends a signal to the session leader: this is used
447 * to send SIGHUP to the controlling process of a terminal when
448 * the connection is lost.
452 kill_sl_info(int sig, struct siginfo *info, pid_t sess)
454 int retval = -EINVAL;
455 if (sess > 0) {
456 struct task_struct *p;
457 int found = 0;
459 retval = -ESRCH;
460 read_lock(&tasklist_lock);
461 for_each_task(p) {
462 if (p->leader && p->session == sess) {
463 int err = send_sig_info(sig, info, p);
464 if (err)
465 retval = err;
466 else
467 found++;
470 read_unlock(&tasklist_lock);
471 if (found)
472 retval = 0;
474 return retval;
477 inline int
478 kill_proc_info(int sig, struct siginfo *info, pid_t pid)
480 int error;
481 struct task_struct *p;
483 read_lock(&tasklist_lock);
484 p = find_task_by_pid(pid);
485 error = -ESRCH;
486 if (p)
487 error = send_sig_info(sig, info, p);
488 read_unlock(&tasklist_lock);
489 return error;
493 * kill_something() interprets pid in interesting ways just like kill(2).
495 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
496 * is probably wrong. Should make it like BSD or SYSV.
500 kill_something_info(int sig, struct siginfo *info, int pid)
502 if (!pid) {
503 return kill_pg_info(sig, info, current->pgrp);
504 } else if (pid == -1) {
505 int retval = 0, count = 0;
506 struct task_struct * p;
508 read_lock(&tasklist_lock);
509 for_each_task(p) {
510 if (p->pid > 1 && p != current) {
511 int err = send_sig_info(sig, info, p);
512 ++count;
513 if (err != -EPERM)
514 retval = err;
517 read_unlock(&tasklist_lock);
518 return count ? retval : -ESRCH;
519 } else if (pid < 0) {
520 return kill_pg_info(sig, info, -pid);
521 } else {
522 return kill_proc_info(sig, info, pid);
/*
 * These are for backward compatibility with the rest of the kernel source.
 * priv != 0 means "sent by the kernel" (magic info value 1).
 */
int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, (void*)(long)(priv != 0), p);
}
/* Compatibility wrapper: force a kernel-originated signal (info = 1). */
void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, (void*)1L, p);
}
543 kill_pg(pid_t pgrp, int sig, int priv)
545 return kill_pg_info(sig, (void *)(long)(priv != 0), pgrp);
549 kill_sl(pid_t sess, int sig, int priv)
551 return kill_sl_info(sig, (void *)(long)(priv != 0), sess);
555 kill_proc(pid_t pid, int sig, int priv)
557 return kill_proc_info(sig, (void *)(long)(priv != 0), pid);
561 * Let a parent know about a status change of a child.
564 void
565 notify_parent(struct task_struct *tsk, int sig)
567 struct siginfo info;
568 int why;
570 info.si_signo = sig;
571 info.si_errno = 0;
572 info.si_pid = tsk->pid;
574 /* FIXME: find out whether or not this is supposed to be c*time. */
575 info.si_utime = tsk->times.tms_utime;
576 info.si_stime = tsk->times.tms_stime;
578 why = SI_KERNEL; /* shouldn't happen */
579 switch (tsk->state) {
580 case TASK_ZOMBIE:
581 if (tsk->exit_code & 0x80)
582 why = CLD_DUMPED;
583 else if (tsk->exit_code & 0x7f)
584 why = CLD_KILLED;
585 else
586 why = CLD_EXITED;
587 break;
588 case TASK_STOPPED:
589 /* FIXME -- can we deduce CLD_TRAPPED or CLD_CONTINUED? */
590 why = CLD_STOPPED;
591 break;
593 default:
594 printk(KERN_DEBUG "eh? notify_parent with state %ld?\n",
595 tsk->state);
596 break;
598 info.si_code = why;
600 send_sig_info(sig, &info, tsk->p_pptr);
601 wake_up_interruptible(&tsk->p_pptr->wait_chldexit);
EXPORT_SYMBOL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(force_sig_info);
EXPORT_SYMBOL(kill_pg);
EXPORT_SYMBOL(kill_pg_info);
EXPORT_SYMBOL(kill_proc);
EXPORT_SYMBOL(kill_proc_info);
EXPORT_SYMBOL(kill_sl);
EXPORT_SYMBOL(kill_sl_info);
EXPORT_SYMBOL(notify_parent);
EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
621 * System call entry points.
625 * We don't need to get the kernel lock - this is all local to this
626 * particular thread.. (and that's good, because this is _heavily_
627 * used by various programs)
630 asmlinkage int
631 sys_rt_sigprocmask(int how, sigset_t *set, sigset_t *oset, size_t sigsetsize)
633 int error = -EINVAL;
634 sigset_t old_set, new_set;
636 /* XXX: Don't preclude handling different sized sigset_t's. */
637 if (sigsetsize != sizeof(sigset_t))
638 goto out;
640 if (set) {
641 error = -EFAULT;
642 if (copy_from_user(&new_set, set, sizeof(*set)))
643 goto out;
644 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
646 spin_lock_irq(&current->sigmask_lock);
647 old_set = current->blocked;
649 error = 0;
650 switch (how) {
651 default:
652 error = -EINVAL;
653 break;
654 case SIG_BLOCK:
655 sigorsets(&new_set, &old_set, &new_set);
656 break;
657 case SIG_UNBLOCK:
658 signandsets(&new_set, &old_set, &new_set);
659 break;
660 case SIG_SETMASK:
661 break;
664 current->blocked = new_set;
665 recalc_sigpending(current);
666 spin_unlock_irq(&current->sigmask_lock);
667 if (error)
668 goto out;
669 if (oset)
670 goto set_old;
671 } else if (oset) {
672 spin_lock_irq(&current->sigmask_lock);
673 old_set = current->blocked;
674 spin_unlock_irq(&current->sigmask_lock);
676 set_old:
677 error = -EFAULT;
678 if (copy_to_user(oset, &old_set, sizeof(*oset)))
679 goto out;
681 error = 0;
682 out:
683 return error;
686 asmlinkage int
687 sys_rt_sigpending(sigset_t *set, size_t sigsetsize)
689 int error = -EINVAL;
690 sigset_t pending;
692 /* XXX: Don't preclude handling different sized sigset_t's. */
693 if (sigsetsize != sizeof(sigset_t))
694 goto out;
696 spin_lock_irq(&current->sigmask_lock);
697 sigandsets(&pending, &current->blocked, &current->signal);
698 spin_unlock_irq(&current->sigmask_lock);
700 error = -EFAULT;
701 if (!copy_to_user(set, &pending, sizeof(*set)))
702 error = 0;
703 out:
704 return error;
707 asmlinkage int
708 sys_rt_sigtimedwait(const sigset_t *uthese, siginfo_t *uinfo,
709 const struct timespec *uts, size_t sigsetsize)
711 int ret, sig;
712 sigset_t these;
713 struct timespec ts;
714 siginfo_t info;
715 long timeout = 0;
717 /* XXX: Don't preclude handling different sized sigset_t's. */
718 if (sigsetsize != sizeof(sigset_t))
719 return -EINVAL;
721 if (copy_from_user(&these, uthese, sizeof(these)))
722 return -EFAULT;
723 else {
724 /* Invert the set of allowed signals to get those we
725 want to block. */
726 signotset(&these);
729 if (uts) {
730 if (copy_from_user(&ts, uts, sizeof(ts)))
731 return -EFAULT;
732 if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
733 || ts.tv_sec < 0)
734 return -EINVAL;
737 spin_lock_irq(&current->sigmask_lock);
738 sig = dequeue_signal(&these, &info);
739 if (!sig) {
740 /* None ready -- temporarily unblock those we're interested
741 in so that we'll be awakened when they arrive. */
742 sigset_t oldblocked = current->blocked;
743 sigandsets(&current->blocked, &current->blocked, &these);
744 recalc_sigpending(current);
745 spin_unlock_irq(&current->sigmask_lock);
747 timeout = MAX_SCHEDULE_TIMEOUT;
748 if (uts)
749 timeout = (timespec_to_jiffies(&ts)
750 + (ts.tv_sec || ts.tv_nsec));
752 current->state = TASK_INTERRUPTIBLE;
753 timeout = schedule_timeout(timeout);
755 spin_lock_irq(&current->sigmask_lock);
756 sig = dequeue_signal(&these, &info);
757 current->blocked = oldblocked;
758 recalc_sigpending(current);
760 spin_unlock_irq(&current->sigmask_lock);
762 if (sig) {
763 ret = sig;
764 if (uinfo) {
765 if (copy_to_user(uinfo, &info, sizeof(siginfo_t)))
766 ret = -EFAULT;
768 } else {
769 ret = -EAGAIN;
770 if (timeout)
771 ret = -EINTR;
774 return ret;
777 asmlinkage int
778 sys_kill(int pid, int sig)
780 struct siginfo info;
782 info.si_signo = sig;
783 info.si_errno = 0;
784 info.si_code = SI_USER;
785 info.si_pid = current->pid;
786 info.si_uid = current->uid;
788 return kill_something_info(sig, &info, pid);
791 asmlinkage int
792 sys_rt_sigqueueinfo(int pid, int sig, siginfo_t *uinfo)
794 siginfo_t info;
796 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
797 return -EFAULT;
799 /* Not even root can pretend to send signals from the kernel.
800 Nor can they impersonate a kill(), which adds source info. */
801 if (info.si_code >= 0)
802 return -EPERM;
803 info.si_signo = sig;
805 /* POSIX.1b doesn't mention process groups. */
806 return kill_proc_info(sig, &info, pid);
810 do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact)
812 struct k_sigaction *k;
814 if (sig < 1 || sig > _NSIG ||
815 (act && (sig == SIGKILL || sig == SIGSTOP)))
816 return -EINVAL;
818 spin_lock_irq(&current->sigmask_lock);
819 k = &current->sig->action[sig-1];
821 if (oact) *oact = *k;
823 if (act) {
824 *k = *act;
825 sigdelsetmask(&k->sa.sa_mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
828 * POSIX 3.3.1.3:
829 * "Setting a signal action to SIG_IGN for a signal that is
830 * pending shall cause the pending signal to be discarded,
831 * whether or not it is blocked."
833 * "Setting a signal action to SIG_DFL for a signal that is
834 * pending and whose default action is to ignore the signal
835 * (for example, SIGCHLD), shall cause the pending signal to
836 * be discarded, whether or not it is blocked"
838 * Note the silly behaviour of SIGCHLD: SIG_IGN means that the
839 * signal isn't actually ignored, but does automatic child
840 * reaping, while SIG_DFL is explicitly said by POSIX to force
841 * the signal to be ignored.
844 if (k->sa.sa_handler == SIG_IGN
845 || (k->sa.sa_handler == SIG_DFL
846 && (sig == SIGCONT ||
847 sig == SIGCHLD ||
848 sig == SIGWINCH))) {
849 /* So dequeue any that might be pending.
850 XXX: process-wide signals? */
851 if (sig >= SIGRTMIN &&
852 sigismember(&current->signal, sig)) {
853 struct signal_queue *q, **pp;
854 pp = &current->sigqueue;
855 q = current->sigqueue;
856 while (q) {
857 if (q->info.si_signo != sig)
858 pp = &q->next;
859 else {
860 *pp = q->next;
861 kmem_cache_free(signal_queue_cachep, q);
862 nr_queued_signals--;
864 q = *pp;
868 sigdelset(&current->signal, sig);
869 recalc_sigpending(current);
873 spin_unlock_irq(&current->sigmask_lock);
875 return 0;
878 int
879 do_sigaltstack (const stack_t *uss, stack_t *uoss, unsigned long sp)
881 stack_t oss;
882 int error;
884 if (uoss) {
885 oss.ss_sp = (void *) current->sas_ss_sp;
886 oss.ss_size = current->sas_ss_size;
887 oss.ss_flags = sas_ss_flags(sp);
890 if (uss) {
891 void *ss_sp;
892 size_t ss_size;
893 int ss_flags;
895 error = -EFAULT;
896 if (verify_area(VERIFY_READ, uss, sizeof(*uss))
897 || __get_user(ss_sp, &uss->ss_sp)
898 || __get_user(ss_flags, &uss->ss_flags)
899 || __get_user(ss_size, &uss->ss_size))
900 goto out;
902 error = -EPERM;
903 if (on_sig_stack (sp))
904 goto out;
906 error = -EINVAL;
907 if (ss_flags & ~SS_DISABLE)
908 goto out;
910 if (ss_flags & SS_DISABLE) {
911 ss_size = 0;
912 ss_sp = NULL;
913 } else {
914 error = -ENOMEM;
915 if (ss_size < MINSIGSTKSZ)
916 goto out;
919 current->sas_ss_sp = (unsigned long) ss_sp;
920 current->sas_ss_size = ss_size;
923 if (uoss) {
924 error = -EFAULT;
925 if (copy_to_user(uoss, &oss, sizeof(oss)))
926 goto out;
929 error = 0;
930 out:
931 return error;
934 #if !defined(__alpha__)
935 /* Alpha has its own versions with special arguments. */
937 asmlinkage int
938 sys_sigprocmask(int how, old_sigset_t *set, old_sigset_t *oset)
940 int error;
941 old_sigset_t old_set, new_set;
943 if (set) {
944 error = -EFAULT;
945 if (copy_from_user(&new_set, set, sizeof(*set)))
946 goto out;
947 new_set &= ~(sigmask(SIGKILL)|sigmask(SIGSTOP));
949 spin_lock_irq(&current->sigmask_lock);
950 old_set = current->blocked.sig[0];
952 error = 0;
953 switch (how) {
954 default:
955 error = -EINVAL;
956 break;
957 case SIG_BLOCK:
958 sigaddsetmask(&current->blocked, new_set);
959 break;
960 case SIG_UNBLOCK:
961 sigdelsetmask(&current->blocked, new_set);
962 break;
963 case SIG_SETMASK:
964 current->blocked.sig[0] = new_set;
965 break;
968 recalc_sigpending(current);
969 spin_unlock_irq(&current->sigmask_lock);
970 if (error)
971 goto out;
972 if (oset)
973 goto set_old;
974 } else if (oset) {
975 old_set = current->blocked.sig[0];
976 set_old:
977 error = -EFAULT;
978 if (copy_to_user(oset, &old_set, sizeof(*oset)))
979 goto out;
981 error = 0;
982 out:
983 return error;
986 asmlinkage int
987 sys_sigpending(old_sigset_t *set)
989 int error;
990 old_sigset_t pending;
992 spin_lock_irq(&current->sigmask_lock);
993 pending = current->blocked.sig[0] & current->signal.sig[0];
994 spin_unlock_irq(&current->sigmask_lock);
996 error = -EFAULT;
997 if (!copy_to_user(set, &pending, sizeof(*set)))
998 error = 0;
999 return error;
1002 #ifndef __sparc__
1003 asmlinkage int
1004 sys_rt_sigaction(int sig, const struct sigaction *act, struct sigaction *oact,
1005 size_t sigsetsize)
1007 struct k_sigaction new_sa, old_sa;
1008 int ret = -EINVAL;
1010 /* XXX: Don't preclude handling different sized sigset_t's. */
1011 if (sigsetsize != sizeof(sigset_t))
1012 goto out;
1014 if (act) {
1015 if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
1016 return -EFAULT;
1019 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
1021 if (!ret && oact) {
1022 if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
1023 return -EFAULT;
1025 out:
1026 return ret;
1028 #endif /* __sparc__ */
1029 #endif
1031 #if !defined(__alpha__)
1033 * For backwards compatibility. Functionality superseded by sigprocmask.
1035 asmlinkage int
1036 sys_sgetmask(void)
1038 /* SMP safe */
1039 return current->blocked.sig[0];
1042 asmlinkage int
1043 sys_ssetmask(int newmask)
1045 int old;
1047 spin_lock_irq(&current->sigmask_lock);
1048 old = current->blocked.sig[0];
1050 siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
1051 sigmask(SIGSTOP)));
1052 recalc_sigpending(current);
1053 spin_unlock_irq(&current->sigmask_lock);
1055 return old;
1059 * For backwards compatibility. Functionality superseded by sigaction.
1061 asmlinkage unsigned long
1062 sys_signal(int sig, __sighandler_t handler)
1064 struct k_sigaction new_sa, old_sa;
1065 int ret;
1067 new_sa.sa.sa_handler = handler;
1068 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
1070 ret = do_sigaction(sig, &new_sa, &old_sa);
1072 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
1074 #endif /* !alpha */