Import 2.3.1pre2
[davej-history.git] / kernel / signal.c
blobc30da74022c9bdbc5b2e3ce2b0b5bf4a165b2d3b
1 /*
2 * linux/kernel/signal.c
4 * Copyright (C) 1991, 1992 Linus Torvalds
6 * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson
7 */
9 #include <linux/slab.h>
10 #include <linux/module.h>
11 #include <linux/unistd.h>
12 #include <linux/smp_lock.h>
13 #include <linux/init.h>
15 #include <asm/uaccess.h>
18 * SLAB caches for signal bits.
21 #define DEBUG_SIG 0
23 #if DEBUG_SIG
24 #define SIG_SLAB_DEBUG (SLAB_DEBUG_FREE | SLAB_RED_ZONE /* | SLAB_POISON */)
25 #else
26 #define SIG_SLAB_DEBUG 0
27 #endif
29 static kmem_cache_t *signal_queue_cachep;
31 int nr_queued_signals;
32 int max_queued_signals = 1024;
34 void __init signals_init(void)
36 signal_queue_cachep =
37 kmem_cache_create("signal_queue",
38 sizeof(struct signal_queue),
39 __alignof__(struct signal_queue),
40 SIG_SLAB_DEBUG, NULL, NULL);
45 * Flush all pending signals for a task.
48 void
49 flush_signals(struct task_struct *t)
51 struct signal_queue *q, *n;
53 t->sigpending = 0;
54 sigemptyset(&t->signal);
55 q = t->sigqueue;
56 t->sigqueue = NULL;
57 t->sigqueue_tail = &t->sigqueue;
59 while (q) {
60 n = q->next;
61 kmem_cache_free(signal_queue_cachep, q);
62 nr_queued_signals--;
63 q = n;
68 * Flush all handlers for a task.
71 void
72 flush_signal_handlers(struct task_struct *t)
74 int i;
75 struct k_sigaction *ka = &t->sig->action[0];
76 for (i = _NSIG ; i != 0 ; i--) {
77 if (ka->sa.sa_handler != SIG_IGN)
78 ka->sa.sa_handler = SIG_DFL;
79 ka->sa.sa_flags = 0;
80 sigemptyset(&ka->sa.sa_mask);
81 ka++;
86 * Dequeue a signal and return the element to the caller, which is
87 * expected to free it.
89 * All callers of must be holding current->sigmask_lock.
92 int
93 dequeue_signal(sigset_t *mask, siginfo_t *info)
95 unsigned long i, *s, *m, x;
96 int sig = 0;
98 #if DEBUG_SIG
99 printk("SIG dequeue (%s:%d): %d ", current->comm, current->pid,
100 signal_pending(current));
101 #endif
103 /* Find the first desired signal that is pending. */
104 s = current->signal.sig;
105 m = mask->sig;
106 switch (_NSIG_WORDS) {
107 default:
108 for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
109 if ((x = *s &~ *m) != 0) {
110 sig = ffz(~x) + i*_NSIG_BPW + 1;
111 break;
113 break;
115 case 2: if ((x = s[0] &~ m[0]) != 0)
116 sig = 1;
117 else if ((x = s[1] &~ m[1]) != 0)
118 sig = _NSIG_BPW + 1;
119 else
120 break;
121 sig += ffz(~x);
122 break;
124 case 1: if ((x = *s &~ *m) != 0)
125 sig = ffz(~x) + 1;
126 break;
129 if (sig) {
130 int reset = 1;
132 /* Collect the siginfo appropriate to this signal. */
133 if (sig < SIGRTMIN) {
134 /* XXX: As an extension, support queueing exactly
135 one non-rt signal if SA_SIGINFO is set, so that
136 we can get more detailed information about the
137 cause of the signal. */
138 /* Deciding not to init these couple of fields is
139 more expensive that just initializing them. */
140 info->si_signo = sig;
141 info->si_errno = 0;
142 info->si_code = 0;
143 info->si_pid = 0;
144 info->si_uid = 0;
145 } else {
146 struct signal_queue *q, **pp;
147 pp = &current->sigqueue;
148 q = current->sigqueue;
150 /* Find the one we're interested in ... */
151 for ( ; q ; pp = &q->next, q = q->next)
152 if (q->info.si_signo == sig)
153 break;
154 if (q) {
155 if ((*pp = q->next) == NULL)
156 current->sigqueue_tail = pp;
157 *info = q->info;
158 kmem_cache_free(signal_queue_cachep,q);
159 nr_queued_signals--;
161 /* then see if this signal is still pending. */
162 q = *pp;
163 while (q) {
164 if (q->info.si_signo == sig) {
165 reset = 0;
166 break;
168 q = q->next;
170 } else {
171 /* Ok, it wasn't in the queue. It must have
172 been sent either by a non-rt mechanism and
173 we ran out of queue space. So zero out the
174 info. */
175 info->si_signo = sig;
176 info->si_errno = 0;
177 info->si_code = 0;
178 info->si_pid = 0;
179 info->si_uid = 0;
183 if (reset)
184 sigdelset(&current->signal, sig);
185 recalc_sigpending(current);
187 /* XXX: Once POSIX.1b timers are in, if si_code == SI_TIMER,
188 we need to xchg out the timer overrun values. */
189 } else {
190 /* XXX: Once CLONE_PID is in to join those "threads" that are
191 part of the same "process", look for signals sent to the
192 "process" as well. */
194 /* Sanity check... */
195 if (mask == &current->blocked && signal_pending(current)) {
196 printk(KERN_CRIT "SIG: sigpending lied\n");
197 current->sigpending = 0;
201 #if DEBUG_SIG
202 printk(" %d -> %d\n", signal_pending(current), sig);
203 #endif
205 return sig;
209 * Determine whether a signal should be posted or not.
211 * Signals with SIG_IGN can be ignored, except for the
212 * special case of a SIGCHLD.
214 * Some signals with SIG_DFL default to a non-action.
216 static int ignored_signal(int sig, struct task_struct *t)
218 struct signal_struct *signals;
219 struct k_sigaction *ka;
221 /* Don't ignore traced or blocked signals */
222 if ((t->flags & PF_PTRACED) || sigismember(&t->blocked, sig))
223 return 0;
225 signals = t->sig;
226 if (!signals)
227 return 1;
229 ka = &signals->action[sig-1];
230 switch ((unsigned long) ka->sa.sa_handler) {
231 case (unsigned long) SIG_DFL:
232 if (sig == SIGCONT ||
233 sig == SIGWINCH ||
234 sig == SIGCHLD ||
235 sig == SIGURG)
236 break;
237 return 0;
239 case (unsigned long) SIG_IGN:
240 if (sig != SIGCHLD)
241 break;
242 /* fallthrough */
243 default:
244 return 0;
246 return 1;
250 send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
252 unsigned long flags;
253 int ret;
255 #if DEBUG_SIG
256 printk("SIG queue (%s:%d): %d ", t->comm, t->pid, sig);
257 #endif
259 ret = -EINVAL;
260 if (sig < 0 || sig > _NSIG)
261 goto out_nolock;
262 /* The somewhat baroque permissions check... */
263 ret = -EPERM;
264 if ((!info || ((unsigned long)info != 1 && SI_FROMUSER(info)))
265 && ((sig != SIGCONT) || (current->session != t->session))
266 && (current->euid ^ t->suid) && (current->euid ^ t->uid)
267 && (current->uid ^ t->suid) && (current->uid ^ t->uid)
268 && !capable(CAP_KILL))
269 goto out_nolock;
271 /* The null signal is a permissions and process existance probe.
272 No signal is actually delivered. Same goes for zombies. */
273 ret = 0;
274 if (!sig || !t->sig)
275 goto out_nolock;
277 spin_lock_irqsave(&t->sigmask_lock, flags);
278 switch (sig) {
279 case SIGKILL: case SIGCONT:
280 /* Wake up the process if stopped. */
281 if (t->state == TASK_STOPPED)
282 wake_up_process(t);
283 t->exit_code = 0;
284 sigdelsetmask(&t->signal, (sigmask(SIGSTOP)|sigmask(SIGTSTP)|
285 sigmask(SIGTTOU)|sigmask(SIGTTIN)));
286 /* Inflict this corner case with recalculations, not mainline */
287 recalc_sigpending(t);
288 break;
290 case SIGSTOP: case SIGTSTP:
291 case SIGTTIN: case SIGTTOU:
292 /* If we're stopping again, cancel SIGCONT */
293 sigdelset(&t->signal, SIGCONT);
294 /* Inflict this corner case with recalculations, not mainline */
295 recalc_sigpending(t);
296 break;
299 /* Optimize away the signal, if it's a signal that can be
300 handled immediately (ie non-blocked and untraced) and
301 that is ignored (either explicitly or by default). */
303 if (ignored_signal(sig, t))
304 goto out;
306 if (sig < SIGRTMIN) {
307 /* Non-real-time signals are not queued. */
308 /* XXX: As an extension, support queueing exactly one
309 non-rt signal if SA_SIGINFO is set, so that we can
310 get more detailed information about the cause of
311 the signal. */
312 if (sigismember(&t->signal, sig))
313 goto out;
314 } else {
315 /* Real-time signals must be queued if sent by sigqueue, or
316 some other real-time mechanism. It is implementation
317 defined whether kill() does so. We attempt to do so, on
318 the principle of least surprise, but since kill is not
319 allowed to fail with EAGAIN when low on memory we just
320 make sure at least one signal gets delivered and don't
321 pass on the info struct. */
323 struct signal_queue *q = 0;
325 if (nr_queued_signals < max_queued_signals) {
326 q = (struct signal_queue *)
327 kmem_cache_alloc(signal_queue_cachep, GFP_KERNEL);
330 if (q) {
331 nr_queued_signals++;
332 q->next = NULL;
333 *t->sigqueue_tail = q;
334 t->sigqueue_tail = &q->next;
335 switch ((unsigned long) info) {
336 case 0:
337 q->info.si_signo = sig;
338 q->info.si_errno = 0;
339 q->info.si_code = SI_USER;
340 q->info.si_pid = current->pid;
341 q->info.si_uid = current->uid;
342 break;
343 case 1:
344 q->info.si_signo = sig;
345 q->info.si_errno = 0;
346 q->info.si_code = SI_KERNEL;
347 q->info.si_pid = 0;
348 q->info.si_uid = 0;
349 break;
350 default:
351 q->info = *info;
352 break;
354 } else {
355 /* If this was sent by a rt mechanism, try again. */
356 if (info->si_code < 0) {
357 ret = -EAGAIN;
358 goto out;
360 /* Otherwise, mention that the signal is pending,
361 but don't queue the info. */
365 sigaddset(&t->signal, sig);
366 if (!sigismember(&t->blocked, sig)) {
367 t->sigpending = 1;
368 #ifdef __SMP__
370 * If the task is running on a different CPU
371 * force a reschedule on the other CPU - note that
372 * the code below is a tad loose and might occasionally
373 * kick the wrong CPU if we catch the process in the
374 * process of changing - but no harm is done by that
375 * other than doing an extra (lightweight) IPI interrupt.
377 * note that we rely on the previous spin_lock to
378 * lock interrupts for us! No need to set need_resched
379 * since signal event passing goes through ->blocked.
381 spin_lock(&runqueue_lock);
382 if (t->has_cpu && t->processor != smp_processor_id())
383 smp_send_reschedule(t->processor);
384 spin_unlock(&runqueue_lock);
385 #endif /* __SMP__ */
388 out:
389 spin_unlock_irqrestore(&t->sigmask_lock, flags);
390 if ((t->state & TASK_INTERRUPTIBLE) && signal_pending(t))
391 wake_up_process(t);
393 out_nolock:
394 #if DEBUG_SIG
395 printk(" %d -> %d\n", signal_pending(t), ret);
396 #endif
398 return ret;
402 * Force a signal that the process can't ignore: if necessary
403 * we unblock the signal and change any SIG_IGN to SIG_DFL.
407 force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
409 unsigned long int flags;
411 spin_lock_irqsave(&t->sigmask_lock, flags);
412 if (t->sig == NULL) {
413 spin_unlock_irqrestore(&t->sigmask_lock, flags);
414 return -ESRCH;
417 if (t->sig->action[sig-1].sa.sa_handler == SIG_IGN)
418 t->sig->action[sig-1].sa.sa_handler = SIG_DFL;
419 sigdelset(&t->blocked, sig);
420 spin_unlock_irqrestore(&t->sigmask_lock, flags);
422 return send_sig_info(sig, info, t);
426 * kill_pg() sends a signal to a process group: this is what the tty
427 * control characters do (^C, ^Z etc)
431 kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
433 int retval = -EINVAL;
434 if (pgrp > 0) {
435 struct task_struct *p;
436 int found = 0;
438 retval = -ESRCH;
439 read_lock(&tasklist_lock);
440 for_each_task(p) {
441 if (p->pgrp == pgrp) {
442 int err = send_sig_info(sig, info, p);
443 if (err != 0)
444 retval = err;
445 else
446 found++;
449 read_unlock(&tasklist_lock);
450 if (found)
451 retval = 0;
453 return retval;
457 * kill_sl() sends a signal to the session leader: this is used
458 * to send SIGHUP to the controlling process of a terminal when
459 * the connection is lost.
463 kill_sl_info(int sig, struct siginfo *info, pid_t sess)
465 int retval = -EINVAL;
466 if (sess > 0) {
467 struct task_struct *p;
468 int found = 0;
470 retval = -ESRCH;
471 read_lock(&tasklist_lock);
472 for_each_task(p) {
473 if (p->leader && p->session == sess) {
474 int err = send_sig_info(sig, info, p);
475 if (err)
476 retval = err;
477 else
478 found++;
481 read_unlock(&tasklist_lock);
482 if (found)
483 retval = 0;
485 return retval;
488 inline int
489 kill_proc_info(int sig, struct siginfo *info, pid_t pid)
491 int error;
492 struct task_struct *p;
494 read_lock(&tasklist_lock);
495 p = find_task_by_pid(pid);
496 error = -ESRCH;
497 if (p)
498 error = send_sig_info(sig, info, p);
499 read_unlock(&tasklist_lock);
500 return error;
504 * kill_something() interprets pid in interesting ways just like kill(2).
506 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
507 * is probably wrong. Should make it like BSD or SYSV.
511 kill_something_info(int sig, struct siginfo *info, int pid)
513 if (!pid) {
514 return kill_pg_info(sig, info, current->pgrp);
515 } else if (pid == -1) {
516 int retval = 0, count = 0;
517 struct task_struct * p;
519 read_lock(&tasklist_lock);
520 for_each_task(p) {
521 if (p->pid > 1 && p != current) {
522 int err = send_sig_info(sig, info, p);
523 ++count;
524 if (err != -EPERM)
525 retval = err;
528 read_unlock(&tasklist_lock);
529 return count ? retval : -ESRCH;
530 } else if (pid < 0) {
531 return kill_pg_info(sig, info, -pid);
532 } else {
533 return kill_proc_info(sig, info, pid);
/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int
send_sig(int sig, struct task_struct *p, int priv)
{
	/* info of 1 marks a privileged/kernel sender, 0 an ordinary one. */
	return send_sig_info(sig, (void*)(long)(priv != 0), p);
}
void
force_sig(int sig, struct task_struct *p)
{
	/* Kernel-internal sender marker (1). */
	force_sig_info(sig, (void*)1L, p);
}
554 kill_pg(pid_t pgrp, int sig, int priv)
556 return kill_pg_info(sig, (void *)(long)(priv != 0), pgrp);
560 kill_sl(pid_t sess, int sig, int priv)
562 return kill_sl_info(sig, (void *)(long)(priv != 0), sess);
566 kill_proc(pid_t pid, int sig, int priv)
568 return kill_proc_info(sig, (void *)(long)(priv != 0), pid);
572 * Let a parent know about a status change of a child.
575 void
576 notify_parent(struct task_struct *tsk, int sig)
578 struct siginfo info;
579 int why;
581 info.si_signo = sig;
582 info.si_errno = 0;
583 info.si_pid = tsk->pid;
585 /* FIXME: find out whether or not this is supposed to be c*time. */
586 info.si_utime = tsk->times.tms_utime;
587 info.si_stime = tsk->times.tms_stime;
589 why = SI_KERNEL; /* shouldn't happen */
590 switch (tsk->state) {
591 case TASK_ZOMBIE:
592 if (tsk->exit_code & 0x80)
593 why = CLD_DUMPED;
594 else if (tsk->exit_code & 0x7f)
595 why = CLD_KILLED;
596 else
597 why = CLD_EXITED;
598 break;
599 case TASK_STOPPED:
600 /* FIXME -- can we deduce CLD_TRAPPED or CLD_CONTINUED? */
601 why = CLD_STOPPED;
602 break;
604 default:
605 printk(KERN_DEBUG "eh? notify_parent with state %ld?\n",
606 tsk->state);
607 break;
609 info.si_code = why;
611 send_sig_info(sig, &info, tsk->p_pptr);
612 wake_up_interruptible(&tsk->p_pptr->wait_chldexit);
/* Signal-delivery entry points exported for use by modules. */
EXPORT_SYMBOL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(force_sig_info);
EXPORT_SYMBOL(kill_pg);
EXPORT_SYMBOL(kill_pg_info);
EXPORT_SYMBOL(kill_proc);
EXPORT_SYMBOL(kill_proc_info);
EXPORT_SYMBOL(kill_sl);
EXPORT_SYMBOL(kill_sl_info);
EXPORT_SYMBOL(notify_parent);
EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
632 * System call entry points.
636 * We don't need to get the kernel lock - this is all local to this
637 * particular thread.. (and that's good, because this is _heavily_
638 * used by various programs)
641 asmlinkage int
642 sys_rt_sigprocmask(int how, sigset_t *set, sigset_t *oset, size_t sigsetsize)
644 int error = -EINVAL;
645 sigset_t old_set, new_set;
647 /* XXX: Don't preclude handling different sized sigset_t's. */
648 if (sigsetsize != sizeof(sigset_t))
649 goto out;
651 if (set) {
652 error = -EFAULT;
653 if (copy_from_user(&new_set, set, sizeof(*set)))
654 goto out;
655 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
657 spin_lock_irq(&current->sigmask_lock);
658 old_set = current->blocked;
660 error = 0;
661 switch (how) {
662 default:
663 error = -EINVAL;
664 break;
665 case SIG_BLOCK:
666 sigorsets(&new_set, &old_set, &new_set);
667 break;
668 case SIG_UNBLOCK:
669 signandsets(&new_set, &old_set, &new_set);
670 break;
671 case SIG_SETMASK:
672 break;
675 current->blocked = new_set;
676 recalc_sigpending(current);
677 spin_unlock_irq(&current->sigmask_lock);
678 if (error)
679 goto out;
680 if (oset)
681 goto set_old;
682 } else if (oset) {
683 spin_lock_irq(&current->sigmask_lock);
684 old_set = current->blocked;
685 spin_unlock_irq(&current->sigmask_lock);
687 set_old:
688 error = -EFAULT;
689 if (copy_to_user(oset, &old_set, sizeof(*oset)))
690 goto out;
692 error = 0;
693 out:
694 return error;
697 asmlinkage int
698 sys_rt_sigpending(sigset_t *set, size_t sigsetsize)
700 int error = -EINVAL;
701 sigset_t pending;
703 /* XXX: Don't preclude handling different sized sigset_t's. */
704 if (sigsetsize != sizeof(sigset_t))
705 goto out;
707 spin_lock_irq(&current->sigmask_lock);
708 sigandsets(&pending, &current->blocked, &current->signal);
709 spin_unlock_irq(&current->sigmask_lock);
711 error = -EFAULT;
712 if (!copy_to_user(set, &pending, sizeof(*set)))
713 error = 0;
714 out:
715 return error;
718 asmlinkage int
719 sys_rt_sigtimedwait(const sigset_t *uthese, siginfo_t *uinfo,
720 const struct timespec *uts, size_t sigsetsize)
722 int ret, sig;
723 sigset_t these;
724 struct timespec ts;
725 siginfo_t info;
726 long timeout = 0;
728 /* XXX: Don't preclude handling different sized sigset_t's. */
729 if (sigsetsize != sizeof(sigset_t))
730 return -EINVAL;
732 if (copy_from_user(&these, uthese, sizeof(these)))
733 return -EFAULT;
734 else {
735 /* Invert the set of allowed signals to get those we
736 want to block. */
737 signotset(&these);
740 if (uts) {
741 if (copy_from_user(&ts, uts, sizeof(ts)))
742 return -EFAULT;
743 if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
744 || ts.tv_sec < 0)
745 return -EINVAL;
748 spin_lock_irq(&current->sigmask_lock);
749 sig = dequeue_signal(&these, &info);
750 if (!sig) {
751 /* None ready -- temporarily unblock those we're interested
752 in so that we'll be awakened when they arrive. */
753 sigset_t oldblocked = current->blocked;
754 sigandsets(&current->blocked, &current->blocked, &these);
755 recalc_sigpending(current);
756 spin_unlock_irq(&current->sigmask_lock);
758 timeout = MAX_SCHEDULE_TIMEOUT;
759 if (uts)
760 timeout = (timespec_to_jiffies(&ts)
761 + (ts.tv_sec || ts.tv_nsec));
763 current->state = TASK_INTERRUPTIBLE;
764 timeout = schedule_timeout(timeout);
766 spin_lock_irq(&current->sigmask_lock);
767 sig = dequeue_signal(&these, &info);
768 current->blocked = oldblocked;
769 recalc_sigpending(current);
771 spin_unlock_irq(&current->sigmask_lock);
773 if (sig) {
774 ret = sig;
775 if (uinfo) {
776 if (copy_to_user(uinfo, &info, sizeof(siginfo_t)))
777 ret = -EFAULT;
779 } else {
780 ret = -EAGAIN;
781 if (timeout)
782 ret = -EINTR;
785 return ret;
788 asmlinkage int
789 sys_kill(int pid, int sig)
791 struct siginfo info;
793 info.si_signo = sig;
794 info.si_errno = 0;
795 info.si_code = SI_USER;
796 info.si_pid = current->pid;
797 info.si_uid = current->uid;
799 return kill_something_info(sig, &info, pid);
802 asmlinkage int
803 sys_rt_sigqueueinfo(int pid, int sig, siginfo_t *uinfo)
805 siginfo_t info;
807 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
808 return -EFAULT;
810 /* Not even root can pretend to send signals from the kernel.
811 Nor can they impersonate a kill(), which adds source info. */
812 if (info.si_code >= 0)
813 return -EPERM;
814 info.si_signo = sig;
816 /* POSIX.1b doesn't mention process groups. */
817 return kill_proc_info(sig, &info, pid);
821 do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact)
823 struct k_sigaction *k;
825 if (sig < 1 || sig > _NSIG ||
826 (act && (sig == SIGKILL || sig == SIGSTOP)))
827 return -EINVAL;
829 spin_lock_irq(&current->sigmask_lock);
830 k = &current->sig->action[sig-1];
832 if (oact) *oact = *k;
834 if (act) {
835 *k = *act;
836 sigdelsetmask(&k->sa.sa_mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
839 * POSIX 3.3.1.3:
840 * "Setting a signal action to SIG_IGN for a signal that is
841 * pending shall cause the pending signal to be discarded,
842 * whether or not it is blocked."
844 * "Setting a signal action to SIG_DFL for a signal that is
845 * pending and whose default action is to ignore the signal
846 * (for example, SIGCHLD), shall cause the pending signal to
847 * be discarded, whether or not it is blocked"
849 * Note the silly behaviour of SIGCHLD: SIG_IGN means that the
850 * signal isn't actually ignored, but does automatic child
851 * reaping, while SIG_DFL is explicitly said by POSIX to force
852 * the signal to be ignored.
855 if (k->sa.sa_handler == SIG_IGN
856 || (k->sa.sa_handler == SIG_DFL
857 && (sig == SIGCONT ||
858 sig == SIGCHLD ||
859 sig == SIGWINCH))) {
860 /* So dequeue any that might be pending.
861 XXX: process-wide signals? */
862 if (sig >= SIGRTMIN &&
863 sigismember(&current->signal, sig)) {
864 struct signal_queue *q, **pp;
865 pp = &current->sigqueue;
866 q = current->sigqueue;
867 while (q) {
868 if (q->info.si_signo != sig)
869 pp = &q->next;
870 else {
871 *pp = q->next;
872 kmem_cache_free(signal_queue_cachep, q);
873 nr_queued_signals--;
875 q = *pp;
879 sigdelset(&current->signal, sig);
880 recalc_sigpending(current);
884 spin_unlock_irq(&current->sigmask_lock);
886 return 0;
889 int
890 do_sigaltstack (const stack_t *uss, stack_t *uoss, unsigned long sp)
892 stack_t oss;
893 int error;
895 if (uoss) {
896 oss.ss_sp = (void *) current->sas_ss_sp;
897 oss.ss_size = current->sas_ss_size;
898 oss.ss_flags = sas_ss_flags(sp);
901 if (uss) {
902 void *ss_sp;
903 size_t ss_size;
904 int ss_flags;
906 error = -EFAULT;
907 if (verify_area(VERIFY_READ, uss, sizeof(*uss))
908 || __get_user(ss_sp, &uss->ss_sp)
909 || __get_user(ss_flags, &uss->ss_flags)
910 || __get_user(ss_size, &uss->ss_size))
911 goto out;
913 error = -EPERM;
914 if (on_sig_stack (sp))
915 goto out;
917 error = -EINVAL;
918 if (ss_flags & ~SS_DISABLE)
919 goto out;
921 if (ss_flags & SS_DISABLE) {
922 ss_size = 0;
923 ss_sp = NULL;
924 } else {
925 error = -ENOMEM;
926 if (ss_size < MINSIGSTKSZ)
927 goto out;
930 current->sas_ss_sp = (unsigned long) ss_sp;
931 current->sas_ss_size = ss_size;
934 if (uoss) {
935 error = -EFAULT;
936 if (copy_to_user(uoss, &oss, sizeof(oss)))
937 goto out;
940 error = 0;
941 out:
942 return error;
945 #if !defined(__alpha__)
946 /* Alpha has its own versions with special arguments. */
948 asmlinkage int
949 sys_sigprocmask(int how, old_sigset_t *set, old_sigset_t *oset)
951 int error;
952 old_sigset_t old_set, new_set;
954 if (set) {
955 error = -EFAULT;
956 if (copy_from_user(&new_set, set, sizeof(*set)))
957 goto out;
958 new_set &= ~(sigmask(SIGKILL)|sigmask(SIGSTOP));
960 spin_lock_irq(&current->sigmask_lock);
961 old_set = current->blocked.sig[0];
963 error = 0;
964 switch (how) {
965 default:
966 error = -EINVAL;
967 break;
968 case SIG_BLOCK:
969 sigaddsetmask(&current->blocked, new_set);
970 break;
971 case SIG_UNBLOCK:
972 sigdelsetmask(&current->blocked, new_set);
973 break;
974 case SIG_SETMASK:
975 current->blocked.sig[0] = new_set;
976 break;
979 recalc_sigpending(current);
980 spin_unlock_irq(&current->sigmask_lock);
981 if (error)
982 goto out;
983 if (oset)
984 goto set_old;
985 } else if (oset) {
986 old_set = current->blocked.sig[0];
987 set_old:
988 error = -EFAULT;
989 if (copy_to_user(oset, &old_set, sizeof(*oset)))
990 goto out;
992 error = 0;
993 out:
994 return error;
997 asmlinkage int
998 sys_sigpending(old_sigset_t *set)
1000 int error;
1001 old_sigset_t pending;
1003 spin_lock_irq(&current->sigmask_lock);
1004 pending = current->blocked.sig[0] & current->signal.sig[0];
1005 spin_unlock_irq(&current->sigmask_lock);
1007 error = -EFAULT;
1008 if (!copy_to_user(set, &pending, sizeof(*set)))
1009 error = 0;
1010 return error;
1013 #ifndef __sparc__
1014 asmlinkage int
1015 sys_rt_sigaction(int sig, const struct sigaction *act, struct sigaction *oact,
1016 size_t sigsetsize)
1018 struct k_sigaction new_sa, old_sa;
1019 int ret = -EINVAL;
1021 /* XXX: Don't preclude handling different sized sigset_t's. */
1022 if (sigsetsize != sizeof(sigset_t))
1023 goto out;
1025 if (act) {
1026 if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
1027 return -EFAULT;
1030 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
1032 if (!ret && oact) {
1033 if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
1034 return -EFAULT;
1036 out:
1037 return ret;
1039 #endif /* __sparc__ */
1040 #endif
1042 #if !defined(__alpha__)
1044 * For backwards compatibility. Functionality superseded by sigprocmask.
1046 asmlinkage int
1047 sys_sgetmask(void)
1049 /* SMP safe */
1050 return current->blocked.sig[0];
1053 asmlinkage int
1054 sys_ssetmask(int newmask)
1056 int old;
1058 spin_lock_irq(&current->sigmask_lock);
1059 old = current->blocked.sig[0];
1061 siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
1062 sigmask(SIGSTOP)));
1063 recalc_sigpending(current);
1064 spin_unlock_irq(&current->sigmask_lock);
1066 return old;
1070 * For backwards compatibility. Functionality superseded by sigaction.
1072 asmlinkage unsigned long
1073 sys_signal(int sig, __sighandler_t handler)
1075 struct k_sigaction new_sa, old_sa;
1076 int ret;
1078 new_sa.sa.sa_handler = handler;
1079 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
1081 ret = do_sigaction(sig, &new_sa, &old_sa);
1083 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
1085 #endif /* !alpha */