/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/unistd.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/sched.h>

#include <asm/uaccess.h>
/*
 * SLAB caches for signal bits.
 */

#define DEBUG_SIG 0

#if DEBUG_SIG
#define SIG_SLAB_DEBUG	(SLAB_DEBUG_FREE | SLAB_RED_ZONE /* | SLAB_POISON */)
#else
#define SIG_SLAB_DEBUG	0
#endif

static kmem_cache_t *signal_queue_cachep;

atomic_t nr_queued_signals;
int max_queued_signals = 1024;
void __init signals_init(void)
{
	signal_queue_cachep =
		kmem_cache_create("signal_queue",
				  sizeof(struct signal_queue),
				  __alignof__(struct signal_queue),
				  SIG_SLAB_DEBUG, NULL, NULL);
}
/*
 * Flush all pending signals for a task.
 */

void
flush_signals(struct task_struct *t)
{
	struct signal_queue *q, *n;

	t->sigpending = 0;
	sigemptyset(&t->signal);
	q = t->sigqueue;
	t->sigqueue = NULL;
	t->sigqueue_tail = &t->sigqueue;

	while (q) {
		n = q->next;
		kmem_cache_free(signal_queue_cachep, q);
		atomic_dec(&nr_queued_signals);
		q = n;
	}
}
/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t)
{
	int i;
	struct k_sigaction *ka = &t->sig->action[0];

	for (i = _NSIG ; i != 0 ; i--) {
		if (ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}
/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers must be holding current->sigmask_lock.
 */

int
dequeue_signal(sigset_t *mask, siginfo_t *info)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

#if DEBUG_SIG
printk("SIG dequeue (%s:%d): %d ", current->comm, current->pid,
	signal_pending(current));
#endif

	/* Find the first desired signal that is pending. */
	s = current->signal.sig;
	m = mask->sig;
	switch (_NSIG_WORDS) {
	default:
		for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
			if ((x = *s &~ *m) != 0) {
				sig = ffz(~x) + i*_NSIG_BPW + 1;
				break;
			}
		break;

	case 2: if ((x = s[0] &~ m[0]) != 0)
			sig = 1;
		else if ((x = s[1] &~ m[1]) != 0)
			sig = _NSIG_BPW + 1;
		else
			break;
		sig += ffz(~x);
		break;

	case 1: if ((x = *s &~ *m) != 0)
			sig = ffz(~x) + 1;
		break;
	}

	if (sig) {
		int reset = 1;

		/* Collect the siginfo appropriate to this signal. */
		if (sig < SIGRTMIN) {
			*info = current->nrt_info[sig];
		} else {
			struct signal_queue *q, **pp;
			pp = &current->sigqueue;
			q = current->sigqueue;

			/* Find the one we're interested in ... */
			for ( ; q ; pp = &q->next, q = q->next)
				if (q->info.si_signo == sig)
					break;
			if (q) {
				if ((*pp = q->next) == NULL)
					current->sigqueue_tail = pp;
				*info = q->info;
				kmem_cache_free(signal_queue_cachep,q);
				atomic_dec(&nr_queued_signals);

				/* then see if this signal is still pending. */
				q = *pp;
				while (q) {
					if (q->info.si_signo == sig) {
						reset = 0;
						break;
					}
					q = q->next;
				}
			} else {
				/* Ok, it wasn't in the queue.  It must have
				   been sent by a non-rt mechanism and we
				   ran out of queue space.  So zero out the
				   info. */
				info->si_signo = sig;
				info->si_errno = 0;
				info->si_code = 0;
				info->si_pid = 0;
				info->si_uid = 0;
			}
		}

		if (reset)
			sigdelset(&current->signal, sig);
		recalc_sigpending(current);
	} else {
		/* XXX: Once CLONE_PID is in to join those "threads" that are
		   part of the same "process", look for signals sent to the
		   "process" as well.  */

		/* Sanity check... */
		if (mask == &current->blocked && signal_pending(current)) {
			printk(KERN_CRIT "SIG: sigpending lied\n");
			current->sigpending = 0;
		}
	}

#if DEBUG_SIG
printk(" %d -> %d\n", signal_pending(current), sig);
#endif

	return sig;
}
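/*
 * A worked example of the scan above: if the low word of the pending set
 * has only bit 8 set (x = 0x100) and that bit is not masked, then
 * ffz(~x) -- the first zero bit of ~x, i.e. the lowest set bit of x --
 * is 8, giving sig = 8 + 1 = 9 (SIGKILL).  RT signals may have several
 * queue entries with the same si_signo, which is why the queue is
 * rescanned before the pending bit is cleared.
 */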
/*
 * Determine whether a signal should be posted or not.
 *
 * Signals with SIG_IGN can be ignored, except for the
 * special case of a SIGCHLD.
 *
 * Some signals with SIG_DFL default to a non-action.
 */
static int ignored_signal(int sig, struct task_struct *t)
{
	struct signal_struct *signals;
	struct k_sigaction *ka;

	/* Don't ignore traced or blocked signals */
	if ((t->flags & PF_PTRACED) || sigismember(&t->blocked, sig))
		return 0;

	signals = t->sig;
	if (!signals)
		return 1;

	ka = &signals->action[sig-1];
	switch ((unsigned long) ka->sa.sa_handler) {
	case (unsigned long) SIG_DFL:
		if (sig == SIGCONT ||
		    sig == SIGWINCH ||
		    sig == SIGCHLD ||
		    sig == SIGURG)
			break;
		return 0;

	case (unsigned long) SIG_IGN:
		if (sig != SIGCHLD)
			break;
		/* fallthrough */
	default:
		return 0;
	}
	return 1;
}
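/*
 * Example of the early-discard path: delivering SIGWINCH to a task that
 * leaves it at SIG_DFL, unblocked and untraced, makes this function
 * return 1, so send_sig_info() below drops the signal without touching
 * the pending set or the queue.
 */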
static void set_siginfo(siginfo_t *dst, const siginfo_t *src, int sig)
{
	switch ((unsigned long)src) {
	case 0:
		dst->si_signo = sig;
		dst->si_errno = 0;
		dst->si_code = SI_USER;
		dst->si_pid = current->pid;
		dst->si_uid = current->uid;
		break;
	case 1:
		dst->si_signo = sig;
		dst->si_errno = 0;
		dst->si_code = SI_KERNEL;
		dst->si_pid = 0;
		dst->si_uid = 0;
		break;
	default:
		*dst = *src;
		break;
	}
}
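/*
 * Note the magic-pointer convention handled by set_siginfo(): callers
 * may pass (void *)0 or (void *)1 in place of a real siginfo pointer,
 * meaning "sent from user space" (SI_USER, stamped with current's
 * pid/uid) and "sent by the kernel" (SI_KERNEL) respectively.  The
 * compatibility wrappers send_sig() and force_sig() below rely on this.
 */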
int
send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long flags;
	int ret;

#if DEBUG_SIG
printk("SIG queue (%s:%d): %d ", t->comm, t->pid, sig);
#endif

	ret = -EINVAL;
	if (sig < 0 || sig > _NSIG)
		goto out_nolock;

	/* The somewhat baroque permissions check... */
	ret = -EPERM;
	if ((!info || ((unsigned long)info != 1 && SI_FROMUSER(info)))
	    && ((sig != SIGCONT) || (current->session != t->session))
	    && (current->euid ^ t->suid) && (current->euid ^ t->uid)
	    && (current->uid ^ t->suid) && (current->uid ^ t->uid)
	    && !capable(CAP_KILL))
		goto out_nolock;

	/* The null signal is a permissions and process existence probe.
	   No signal is actually delivered.  Same goes for zombies. */
	ret = 0;
	if (!sig || !t->sig)
		goto out_nolock;

	spin_lock_irqsave(&t->sigmask_lock, flags);
	switch (sig) {
	case SIGKILL: case SIGCONT:
		/* Wake up the process if stopped.  */
		if (t->state == TASK_STOPPED)
			wake_up_process(t);
		t->exit_code = 0;
		sigdelsetmask(&t->signal, (sigmask(SIGSTOP)|sigmask(SIGTSTP)|
					   sigmask(SIGTTOU)|sigmask(SIGTTIN)));
		/* Inflict this corner case with recalculations, not mainline */
		recalc_sigpending(t);
		break;

	case SIGSTOP: case SIGTSTP:
	case SIGTTIN: case SIGTTOU:
		/* If we're stopping again, cancel SIGCONT */
		sigdelset(&t->signal, SIGCONT);
		/* Inflict this corner case with recalculations, not mainline */
		recalc_sigpending(t);
		break;
	}

	/* Optimize away the signal, if it's a signal that can be
	   handled immediately (ie non-blocked and untraced) and
	   that is ignored (either explicitly or by default).  */

	if (ignored_signal(sig, t))
		goto out;

	if (sig < SIGRTMIN) {
		/* Non-real-time signals are not queued.  */
		if (sigismember(&t->signal, sig))
			goto out;
		set_siginfo(&t->nrt_info[sig], info, sig);
	} else {
		/* Real-time signals must be queued if sent by sigqueue, or
		   some other real-time mechanism.  It is implementation
		   defined whether kill() does so.  We attempt to do so, on
		   the principle of least surprise, but since kill is not
		   allowed to fail with EAGAIN when low on memory we just
		   make sure at least one signal gets delivered and don't
		   pass on the info struct.  */

		struct signal_queue *q = 0;

		/* In case of a POSIX timer generated signal you must check
		   if a signal from this timer is already in the queue */
		if (info && (info->si_code == SI_TIMER)) {
			for (q = t->sigqueue; q; q = q->next) {
				if ((q->info.si_code == SI_TIMER) &&
				    (q->info.si_timer1 == info->si_timer1)) {
					/* this special value (1) is recognized
					   only by posix_timer_fn() in
					   itimer.c */
					ret = 1;
					goto out;
				}
			}
		}

		if (atomic_read(&nr_queued_signals) < max_queued_signals) {
			q = (struct signal_queue *)
			    kmem_cache_alloc(signal_queue_cachep, GFP_ATOMIC);
		}

		if (q) {
			atomic_inc(&nr_queued_signals);
			q->next = NULL;
			*t->sigqueue_tail = q;
			t->sigqueue_tail = &q->next;
			set_siginfo(&q->info, info, sig);
		} else {
			/* If this was sent by a rt mechanism, try again.  */
			if (info->si_code < 0) {
				ret = -EAGAIN;
				goto out;
			}
			/* Otherwise, mention that the signal is pending,
			   but don't queue the info.  */
		}
	}

	sigaddset(&t->signal, sig);
	if (!sigismember(&t->blocked, sig)) {
		t->sigpending = 1;
#ifdef __SMP__
		/*
		 * If the task is running on a different CPU
		 * force a reschedule on the other CPU - note that
		 * the code below is a tad loose and might occasionally
		 * kick the wrong CPU if we catch the process in the
		 * process of changing - but no harm is done by that
		 * other than doing an extra (lightweight) IPI interrupt.
		 *
		 * note that we rely on the previous spin_lock to
		 * lock interrupts for us! No need to set need_resched
		 * since signal event passing goes through ->blocked.
		 */
		spin_lock(&runqueue_lock);
		if (t->has_cpu && t->processor != smp_processor_id())
			smp_send_reschedule(t->processor);
		spin_unlock(&runqueue_lock);
#endif /* __SMP__ */
	}

out:
	spin_unlock_irqrestore(&t->sigmask_lock, flags);
	if ((t->state & TASK_INTERRUPTIBLE) && signal_pending(t))
		wake_up_process(t);

out_nolock:
#if DEBUG_SIG
printk(" %d -> %d\n", signal_pending(t), ret);
#endif

	return ret;
}
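/*
 * A sketch of a typical caller (hypothetical driver code, not from this
 * tree): delivering SIGIO with explicit origin information instead of
 * going through the send_sig() wrapper:
 *
 *	struct siginfo info;
 *
 *	info.si_signo = SIGIO;
 *	info.si_errno = 0;
 *	info.si_code  = SI_KERNEL;
 *	info.si_pid   = 0;
 *	info.si_uid   = 0;
 *	send_sig_info(SIGIO, &info, task);
 *
 * Passing (void *)0 or (void *)1 as `info' instead lets set_siginfo()
 * fabricate the SI_USER/SI_KERNEL details, as send_sig() and force_sig()
 * do below.
 */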
/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 */

int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;

	spin_lock_irqsave(&t->sigmask_lock, flags);
	if (t->sig == NULL) {
		spin_unlock_irqrestore(&t->sigmask_lock, flags);
		return -ESRCH;
	}

	if (t->sig->action[sig-1].sa.sa_handler == SIG_IGN)
		t->sig->action[sig-1].sa.sa_handler = SIG_DFL;
	sigdelset(&t->blocked, sig);
	recalc_sigpending(t);
	spin_unlock_irqrestore(&t->sigmask_lock, flags);

	return send_sig_info(sig, info, t);
}
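/*
 * Typical (illustrative) use is from architecture trap handlers, which
 * must not let the current task block or ignore a synchronous fault
 * signal:
 *
 *	force_sig(SIGSEGV, current);
 *
 * which ends up here with the (void *)1 "sent by the kernel" info value.
 */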
/*
 * kill_pg() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 */

int
kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
{
	int retval = -EINVAL;
	if (pgrp > 0) {
		struct task_struct *p;
		int found = 0;

		retval = -ESRCH;
		read_lock(&tasklist_lock);
		for_each_task(p) {
			if (p->pgrp == pgrp) {
				int err = send_sig_info(sig, info, p);
				if (err != 0)
					retval = err;
				else
					found++;
			}
		}
		read_unlock(&tasklist_lock);
		if (found)
			retval = 0;
	}
	return retval;
}
/*
 * kill_sl() sends a signal to the session leader: this is used
 * to send SIGHUP to the controlling process of a terminal when
 * the connection is lost.
 */

int
kill_sl_info(int sig, struct siginfo *info, pid_t sess)
{
	int retval = -EINVAL;
	if (sess > 0) {
		struct task_struct *p;
		int found = 0;

		retval = -ESRCH;
		read_lock(&tasklist_lock);
		for_each_task(p) {
			if (p->leader && p->session == sess) {
				int err = send_sig_info(sig, info, p);
				if (err)
					retval = err;
				else
					found++;
			}
		}
		read_unlock(&tasklist_lock);
		if (found)
			retval = 0;
	}
	return retval;
}
inline int
kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;
	struct task_struct *p;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);
	error = -ESRCH;
	if (p)
		error = send_sig_info(sig, info, p);
	read_unlock(&tasklist_lock);
	return error;
}
/*
 * kill_something() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

int
kill_something_info(int sig, struct siginfo *info, int pid)
{
	if (!pid) {
		return kill_pg_info(sig, info, current->pgrp);
	} else if (pid == -1) {
		int retval = 0, count = 0;
		struct task_struct * p;

		read_lock(&tasklist_lock);
		for_each_task(p) {
			if (p->pid > 1 && p != current) {
				int err = send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		read_unlock(&tasklist_lock);
		return count ? retval : -ESRCH;
	} else if (pid < 0) {
		return kill_pg_info(sig, info, -pid);
	} else {
		return kill_proc_info(sig, info, pid);
	}
}
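/*
 * Summary of the pid encoding handled above, mirroring kill(2):
 *
 *	pid > 0		signal the process with that pid
 *	pid == 0	signal the caller's own process group
 *	pid == -1	signal every process except init and the caller
 *	pid < -1	signal the process group with pgrp == -pid
 */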
/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, (void*)(long)(priv != 0), p);
}

void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, (void*)1L, p);
}

int
kill_pg(pid_t pgrp, int sig, int priv)
{
	return kill_pg_info(sig, (void *)(long)(priv != 0), pgrp);
}

int
kill_sl(pid_t sess, int sig, int priv)
{
	return kill_sl_info(sig, (void *)(long)(priv != 0), sess);
}

int
kill_proc(pid_t pid, int sig, int priv)
{
	return kill_proc_info(sig, (void *)(long)(priv != 0), pid);
}
/*
 * Let a parent know about a status change of a child.
 */

void
notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	int why;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_pid = tsk->pid;

	/* FIXME: find out whether or not this is supposed to be c*time. */
	info.si_utime = tsk->times.tms_utime;
	info.si_stime = tsk->times.tms_stime;

	why = SI_KERNEL;	/* shouldn't happen */
	switch (tsk->state) {
	case TASK_ZOMBIE:
		if (tsk->exit_code & 0x80)
			why = CLD_DUMPED;
		else if (tsk->exit_code & 0x7f)
			why = CLD_KILLED;
		else
			why = CLD_EXITED;
		break;
	case TASK_STOPPED:
		/* FIXME -- can we deduce CLD_TRAPPED or CLD_CONTINUED? */
		why = CLD_STOPPED;
		break;

	default:
		printk(KERN_DEBUG "eh? notify_parent with state %ld?\n",
		       tsk->state);
		break;
	}
	info.si_code = why;

	send_sig_info(sig, &info, tsk->p_pptr);
	wake_up_interruptible(&tsk->p_pptr->wait_chldexit);
}
EXPORT_SYMBOL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(force_sig_info);
EXPORT_SYMBOL(kill_pg);
EXPORT_SYMBOL(kill_pg_info);
EXPORT_SYMBOL(kill_proc);
EXPORT_SYMBOL(kill_proc_info);
EXPORT_SYMBOL(kill_sl);
EXPORT_SYMBOL(kill_sl_info);
EXPORT_SYMBOL(notify_parent);
EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
/*
 * System call entry points.
 */

/*
 * We don't need to get the kernel lock - this is all local to this
 * particular thread.. (and that's good, because this is _heavily_
 * used by various programs)
 */

asmlinkage int
sys_rt_sigprocmask(int how, sigset_t *set, sigset_t *oset, size_t sigsetsize)
{
	int error = -EINVAL;
	sigset_t old_set, new_set;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		spin_lock_irq(&current->sigmask_lock);
		old_set = current->blocked;

		error = 0;
		switch (how) {
		default:
			error = -EINVAL;
			break;
		case SIG_BLOCK:
			sigorsets(&new_set, &old_set, &new_set);
			break;
		case SIG_UNBLOCK:
			signandsets(&new_set, &old_set, &new_set);
			break;
		case SIG_SETMASK:
			break;
		}

		current->blocked = new_set;
		recalc_sigpending(current);
		spin_unlock_irq(&current->sigmask_lock);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		spin_lock_irq(&current->sigmask_lock);
		old_set = current->blocked;
		spin_unlock_irq(&current->sigmask_lock);

	set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}
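/*
 * The mask algebra implemented by the switch above, in set notation:
 *
 *	SIG_BLOCK:	blocked' = blocked | set
 *	SIG_UNBLOCK:	blocked' = blocked & ~set
 *	SIG_SETMASK:	blocked' = set
 *
 * with SIGKILL and SIGSTOP always stripped from the user-supplied set
 * first, so that they can never be blocked.
 */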
asmlinkage int
sys_rt_sigpending(sigset_t *set, size_t sigsetsize)
{
	int error = -EINVAL;
	sigset_t pending;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	spin_lock_irq(&current->sigmask_lock);
	sigandsets(&pending, &current->blocked, &current->signal);
	spin_unlock_irq(&current->sigmask_lock);

	error = -EFAULT;
	if (!copy_to_user(set, &pending, sizeof(*set)))
		error = 0;
out:
	return error;
}
asmlinkage int
sys_rt_sigtimedwait(const sigset_t *uthese, siginfo_t *uinfo,
		    const struct timespec *uts, size_t sigsetsize)
{
	int ret, sig;
	sigset_t these;
	struct timespec ts;
	siginfo_t info;
	long timeout = 0;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&these, uthese, sizeof(these)))
		return -EFAULT;
	else {
		/* Invert the set of allowed signals to get those we
		   want to block.  */
		signotset(&these);
	}

	if (uts) {
		if (copy_from_user(&ts, uts, sizeof(ts)))
			return -EFAULT;
		if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
		    || ts.tv_sec < 0)
			return -EINVAL;
	}

	spin_lock_irq(&current->sigmask_lock);
	sig = dequeue_signal(&these, &info);
	if (!sig) {
		/* None ready -- temporarily unblock those we're interested
		   in so that we'll be awakened when they arrive.  */
		sigset_t oldblocked = current->blocked;
		sigandsets(&current->blocked, &current->blocked, &these);
		recalc_sigpending(current);
		spin_unlock_irq(&current->sigmask_lock);

		timeout = MAX_SCHEDULE_TIMEOUT;
		if (uts)
			timeout = (timespec_to_jiffies(&ts)
				   + (ts.tv_sec || ts.tv_nsec));

		current->state = TASK_INTERRUPTIBLE;
		timeout = schedule_timeout(timeout);

		spin_lock_irq(&current->sigmask_lock);
		sig = dequeue_signal(&these, &info);
		current->blocked = oldblocked;
		recalc_sigpending(current);
	}
	spin_unlock_irq(&current->sigmask_lock);

	if (sig) {
		ret = sig;
		if (uinfo) {
			if (copy_to_user(uinfo, &info, sizeof(siginfo_t)))
				ret = -EFAULT;
		}
	} else {
		ret = -EAGAIN;
		if (timeout)
			ret = -EINTR;
	}

	return ret;
}
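/*
 * A note on the timeout computation above: the extra term
 * (ts.tv_sec || ts.tv_nsec) adds one jiffy for any non-zero timeout.
 * Presumably this is so the wait is never cut short: the current jiffy
 * is already partially elapsed, and a requested delay shorter than one
 * clock tick must still sleep rather than expire immediately.
 */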
asmlinkage int
sys_kill(int pid, int sig)
{
	struct siginfo info;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_USER;
	info.si_pid = current->pid;
	info.si_uid = current->uid;

	return kill_something_info(sig, &info, pid);
}
asmlinkage int
sys_rt_sigqueueinfo(int pid, int sig, siginfo_t *uinfo)
{
	siginfo_t info;

	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;

	/* Not even root can pretend to send signals from the kernel.
	   Nor can they impersonate a kill(), which adds source info.  */
	if (info.si_code >= 0)
		return -EPERM;
	info.si_signo = sig;

	/* POSIX.1b doesn't mention process groups.  */
	return kill_proc_info(sig, &info, pid);
}
int
do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact)
{
	struct k_sigaction *k;

	if (sig < 1 || sig > _NSIG ||
	    (act && (sig == SIGKILL || sig == SIGSTOP)))
		return -EINVAL;

	spin_lock_irq(&current->sigmask_lock);
	k = &current->sig->action[sig-1];

	if (oact) *oact = *k;

	if (act) {
		*k = *act;
		sigdelsetmask(&k->sa.sa_mask, sigmask(SIGKILL) | sigmask(SIGSTOP));

		/*
		 * POSIX 3.3.1.3:
		 *  "Setting a signal action to SIG_IGN for a signal that is
		 *   pending shall cause the pending signal to be discarded,
		 *   whether or not it is blocked."
		 *
		 *  "Setting a signal action to SIG_DFL for a signal that is
		 *   pending and whose default action is to ignore the signal
		 *   (for example, SIGCHLD), shall cause the pending signal to
		 *   be discarded, whether or not it is blocked"
		 *
		 * Note the silly behaviour of SIGCHLD: SIG_IGN means that the
		 * signal isn't actually ignored, but does automatic child
		 * reaping, while SIG_DFL is explicitly said by POSIX to force
		 * the signal to be ignored.
		 */

		if (k->sa.sa_handler == SIG_IGN
		    || (k->sa.sa_handler == SIG_DFL
			&& (sig == SIGCONT ||
			    sig == SIGCHLD ||
			    sig == SIGWINCH))) {
			/* So dequeue any that might be pending.
			   XXX: process-wide signals? */
			if (sig >= SIGRTMIN &&
			    sigismember(&current->signal, sig)) {
				struct signal_queue *q, **pp;
				pp = &current->sigqueue;
				q = current->sigqueue;
				while (q) {
					if (q->info.si_signo != sig)
						pp = &q->next;
					else {
						*pp = q->next;
						kmem_cache_free(signal_queue_cachep, q);
						atomic_dec(&nr_queued_signals);
					}
					q = *pp;
				}
				/* Keep the tail pointer valid in case we
				   just freed the last queue entry.  */
				current->sigqueue_tail = pp;
			}

			sigdelset(&current->signal, sig);
			recalc_sigpending(current);
		}
	}

	spin_unlock_irq(&current->sigmask_lock);

	return 0;
}
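/*
 * Example of the POSIX discard rule in action: a task that blocks
 * SIGRTMIN, receives it (so it sits queued and pending), and then
 * installs SIG_IGN for it will never see the signal -- the loop above
 * frees the queue entries and clears the pending bit even though the
 * signal is blocked.
 */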
int
do_sigaltstack (const stack_t *uss, stack_t *uoss, unsigned long sp)
{
	stack_t oss;
	int error;

	if (uoss) {
		oss.ss_sp = (void *) current->sas_ss_sp;
		oss.ss_size = current->sas_ss_size;
		oss.ss_flags = sas_ss_flags(sp);
	}

	if (uss) {
		void *ss_sp;
		size_t ss_size;
		int ss_flags;

		error = -EFAULT;
		if (verify_area(VERIFY_READ, uss, sizeof(*uss))
		    || __get_user(ss_sp, &uss->ss_sp)
		    || __get_user(ss_flags, &uss->ss_flags)
		    || __get_user(ss_size, &uss->ss_size))
			goto out;

		error = -EPERM;
		if (on_sig_stack (sp))
			goto out;

		error = -EINVAL;
		if (ss_flags & ~SS_DISABLE)
			goto out;

		if (ss_flags & SS_DISABLE) {
			ss_size = 0;
			ss_sp = NULL;
		} else {
			error = -ENOMEM;
			if (ss_size < MINSIGSTKSZ)
				goto out;
		}

		current->sas_ss_sp = (unsigned long) ss_sp;
		current->sas_ss_size = ss_size;
	}

	if (uoss) {
		error = -EFAULT;
		if (copy_to_user(uoss, &oss, sizeof(oss)))
			goto out;
	}

	error = 0;
out:
	return error;
}
#if !defined(__alpha__)
/* Alpha has its own versions with special arguments.  */
asmlinkage int
sys_sigprocmask(int how, old_sigset_t *set, old_sigset_t *oset)
{
	int error;
	old_sigset_t old_set, new_set;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		new_set &= ~(sigmask(SIGKILL)|sigmask(SIGSTOP));

		spin_lock_irq(&current->sigmask_lock);
		old_set = current->blocked.sig[0];

		error = 0;
		switch (how) {
		default:
			error = -EINVAL;
			break;
		case SIG_BLOCK:
			sigaddsetmask(&current->blocked, new_set);
			break;
		case SIG_UNBLOCK:
			sigdelsetmask(&current->blocked, new_set);
			break;
		case SIG_SETMASK:
			current->blocked.sig[0] = new_set;
			break;
		}

		recalc_sigpending(current);
		spin_unlock_irq(&current->sigmask_lock);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		old_set = current->blocked.sig[0];
	set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}
asmlinkage int
sys_sigpending(old_sigset_t *set)
{
	int error;
	old_sigset_t pending;

	spin_lock_irq(&current->sigmask_lock);
	pending = current->blocked.sig[0] & current->signal.sig[0];
	spin_unlock_irq(&current->sigmask_lock);

	error = -EFAULT;
	if (!copy_to_user(set, &pending, sizeof(*set)))
		error = 0;
	return error;
}
#ifndef __sparc__
asmlinkage int
sys_rt_sigaction(int sig, const struct sigaction *act, struct sigaction *oact,
		 size_t sigsetsize)
{
	struct k_sigaction new_sa, old_sa;
	int ret = -EINVAL;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (act) {
		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);

	if (!ret && oact) {
		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
			return -EFAULT;
	}
out:
	return ret;
}
#endif /* !defined(__sparc__) */
#endif /* !defined(__alpha__) */
#if !defined(__alpha__) && !defined(__ia64__)

/*
 * For backwards compatibility.  Functionality superseded by sigprocmask.
 */
asmlinkage int
sys_sgetmask(void)
{
	/* SMP safe */
	return current->blocked.sig[0];
}

asmlinkage int
sys_ssetmask(int newmask)
{
	int old;

	spin_lock_irq(&current->sigmask_lock);
	old = current->blocked.sig[0];

	siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
						  sigmask(SIGSTOP)));
	recalc_sigpending(current);
	spin_unlock_irq(&current->sigmask_lock);

	return old;
}
#endif /* !defined(__alpha__) && !defined(__ia64__) */
#if !defined(__alpha__) && !defined(__mips__)

/*
 * For backwards compatibility.  Functionality superseded by sigaction.
 */
asmlinkage unsigned long
sys_signal(int sig, __sighandler_t handler)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	new_sa.sa.sa_handler = handler;
	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;

	ret = do_sigaction(sig, &new_sa, &old_sa);

	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
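/*
 * SA_ONESHOT | SA_NOMASK gives signal() its historical System V
 * semantics: the handler is reset to SIG_DFL once the signal is
 * delivered, and the signal is not blocked while its own handler runs.
 */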
#endif /* !defined(__alpha__) && !defined(__mips__) */