/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/unistd.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/sched.h>

#include <asm/uaccess.h>
/*
 * SLAB caches for signal bits.
 */

#define DEBUG_SIG 0

#if DEBUG_SIG
#define SIG_SLAB_DEBUG (SLAB_DEBUG_FREE | SLAB_RED_ZONE /* | SLAB_POISON */)
#else
#define SIG_SLAB_DEBUG 0
#endif

static kmem_cache_t *signal_queue_cachep;

atomic_t nr_queued_signals;
int max_queued_signals = 1024;
void __init signals_init(void)
{
        signal_queue_cachep =
                kmem_cache_create("signal_queue",
                                  sizeof(struct signal_queue),
                                  __alignof__(struct signal_queue),
                                  SIG_SLAB_DEBUG, NULL, NULL);
}
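/*
 * Illustration only (not part of the build): each queued rt signal is one
 * signal_queue element taken from this cache and returned to it on
 * delivery or flush, with nr_queued_signals bounding the global total.
 * A minimal sketch of that lifecycle, mirroring the real send/dequeue
 * paths further down in this file:
 */
#if 0
static struct signal_queue *sketch_queue_one(siginfo_t *info)
{
        struct signal_queue *q = NULL;

        if (atomic_read(&nr_queued_signals) < max_queued_signals)
                q = kmem_cache_alloc(signal_queue_cachep, GFP_ATOMIC);
        if (q) {
                atomic_inc(&nr_queued_signals);
                q->next = NULL;
                q->info = *info;  /* freed later with kmem_cache_free() */
        }
        return q;
}
#endif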
/*
 * Flush all pending signals for a task.
 */

void
flush_signals(struct task_struct *t)
{
        struct signal_queue *q, *n;

        t->sigpending = 0;
        sigemptyset(&t->signal);
        q = t->sigqueue;
        t->sigqueue = NULL;
        t->sigqueue_tail = &t->sigqueue;

        while (q) {
                n = q->next;
                kmem_cache_free(signal_queue_cachep, q);
                atomic_dec(&nr_queued_signals);
                q = n;
        }
}
/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t)
{
        int i;
        struct k_sigaction *ka = &t->sig->action[0];

        for (i = _NSIG ; i != 0 ; i--) {
                if (ka->sa.sa_handler != SIG_IGN)
                        ka->sa.sa_handler = SIG_DFL;
                ka->sa.sa_flags = 0;
                sigemptyset(&ka->sa.sa_mask);
                ka++;
        }
}
/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers must be holding current->sigmask_lock.
 */

int
dequeue_signal(sigset_t *mask, siginfo_t *info)
{
        unsigned long i, *s, *m, x;
        int sig = 0;

#if DEBUG_SIG
        printk("SIG dequeue (%s:%d): %d ", current->comm, current->pid,
               signal_pending(current));
#endif

        /* Find the first desired signal that is pending. */
        s = current->signal.sig;
        m = mask->sig;
        switch (_NSIG_WORDS) {
        default:
                for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
                        if ((x = *s &~ *m) != 0) {
                                sig = ffz(~x) + i*_NSIG_BPW + 1;
                                break;
                        }
                break;

        case 2: if ((x = s[0] &~ m[0]) != 0)
                        sig = 1;
                else if ((x = s[1] &~ m[1]) != 0)
                        sig = _NSIG_BPW + 1;
                else
                        break;
                sig += ffz(~x);
                break;

        case 1: if ((x = *s &~ *m) != 0)
                        sig = ffz(~x) + 1;
                break;
        }

        if (sig) {
                int reset = 1;

                /* Collect the siginfo appropriate to this signal. */
                if (sig < SIGRTMIN) {
                        /* XXX: As an extension, support queueing exactly
                           one non-rt signal if SA_SIGINFO is set, so that
                           we can get more detailed information about the
                           cause of the signal. */
                        /* Deciding not to init these couple of fields is
                           more expensive than just initializing them. */
                        info->si_signo = sig;
                        info->si_errno = 0;
                        info->si_code = 0;
                        info->si_pid = 0;
                        info->si_uid = 0;
                } else {
                        struct signal_queue *q, **pp;
                        pp = &current->sigqueue;
                        q = current->sigqueue;

                        /* Find the one we're interested in ... */
                        for ( ; q ; pp = &q->next, q = q->next)
                                if (q->info.si_signo == sig)
                                        break;
                        if (q) {
                                if ((*pp = q->next) == NULL)
                                        current->sigqueue_tail = pp;
                                *info = q->info;
                                kmem_cache_free(signal_queue_cachep, q);
                                atomic_dec(&nr_queued_signals);

                                /* Then see if this signal is still pending. */
                                q = *pp;
                                while (q) {
                                        if (q->info.si_signo == sig) {
                                                reset = 0;
                                                break;
                                        }
                                        q = q->next;
                                }
                        } else {
                                /* Ok, it wasn't in the queue.  It must have
                                   been sent by a non-rt mechanism while we
                                   were out of queue space, so zero out the
                                   info. */
                                info->si_signo = sig;
                                info->si_errno = 0;
                                info->si_code = 0;
                                info->si_pid = 0;
                                info->si_uid = 0;
                        }
                }

                if (reset)
                        sigdelset(&current->signal, sig);
                recalc_sigpending(current);

                /* XXX: Once POSIX.1b timers are in, if si_code == SI_TIMER,
                   we need to xchg out the timer overrun values. */
        } else {
                /* XXX: Once CLONE_PID is in to join those "threads" that are
                   part of the same "process", look for signals sent to the
                   "process" as well. */

                /* Sanity check... */
                if (mask == &current->blocked && signal_pending(current)) {
                        printk(KERN_CRIT "SIG: sigpending lied\n");
                        current->sigpending = 0;
                }
        }

#if DEBUG_SIG
        printk(" %d -> %d\n", signal_pending(current), sig);
#endif

        return sig;
}
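/*
 * Worked example of the scan above (illustration only): with
 * _NSIG_BPW == 32, a pending SIGTERM (15) is bit 14 of word 0.  If it is
 * the lowest pending, unmasked bit, then x has bit 14 set, ffz(~x) == 14,
 * and sig == 14 + 0*32 + 1 == 15.  A pending rt signal 33 is found as
 * bit 0 of word 1: sig starts at _NSIG_BPW + 1 == 33 and ffz(~x) adds 0.
 */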
210 * Determine whether a signal should be posted or not.
212 * Signals with SIG_IGN can be ignored, except for the
213 * special case of a SIGCHLD.
215 * Some signals with SIG_DFL default to a non-action.
217 static int ignored_signal(int sig, struct task_struct *t)
219 struct signal_struct *signals;
220 struct k_sigaction *ka;
222 /* Don't ignore traced or blocked signals */
223 if ((t->flags & PF_PTRACED) || sigismember(&t->blocked, sig))
224 return 0;
226 signals = t->sig;
227 if (!signals)
228 return 1;
230 ka = &signals->action[sig-1];
231 switch ((unsigned long) ka->sa.sa_handler) {
232 case (unsigned long) SIG_DFL:
233 if (sig == SIGCONT ||
234 sig == SIGWINCH ||
235 sig == SIGCHLD ||
236 sig == SIGURG)
237 break;
238 return 0;
240 case (unsigned long) SIG_IGN:
241 if (sig != SIGCHLD)
242 break;
243 /* fallthrough */
244 default:
245 return 0;
247 return 1;
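/*
 * Example (illustration only): an untraced task with all-default
 * dispositions never even has SIGWINCH posted -- ignored_signal()
 * returns 1 and send_sig_info() below drops the signal before touching
 * the pending set.  The same SIGWINCH with a handler installed, or one
 * that the target has blocked or is being ptraced for, is posted
 * normally.
 */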
int
send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
        unsigned long flags;
        int ret;

#if DEBUG_SIG
        printk("SIG queue (%s:%d): %d ", t->comm, t->pid, sig);
#endif

        ret = -EINVAL;
        if (sig < 0 || sig > _NSIG)
                goto out_nolock;
        /* The somewhat baroque permissions check... */
        ret = -EPERM;
        if ((!info || ((unsigned long)info != 1 && SI_FROMUSER(info)))
            && ((sig != SIGCONT) || (current->session != t->session))
            && (current->euid ^ t->suid) && (current->euid ^ t->uid)
            && (current->uid ^ t->suid) && (current->uid ^ t->uid)
            && !capable(CAP_KILL))
                goto out_nolock;

        /* The null signal is a permissions and process existence probe.
           No signal is actually delivered.  Same goes for zombies. */
        ret = 0;
        if (!sig || !t->sig)
                goto out_nolock;

        spin_lock_irqsave(&t->sigmask_lock, flags);
        switch (sig) {
        case SIGKILL: case SIGCONT:
                /* Wake up the process if stopped. */
                if (t->state == TASK_STOPPED)
                        wake_up_process(t);
                t->exit_code = 0;
                sigdelsetmask(&t->signal, (sigmask(SIGSTOP)|sigmask(SIGTSTP)|
                                           sigmask(SIGTTOU)|sigmask(SIGTTIN)));
                /* Inflict this corner case with recalculations, not mainline */
                recalc_sigpending(t);
                break;

        case SIGSTOP: case SIGTSTP:
        case SIGTTIN: case SIGTTOU:
                /* If we're stopping again, cancel SIGCONT */
                sigdelset(&t->signal, SIGCONT);
                /* Inflict this corner case with recalculations, not mainline */
                recalc_sigpending(t);
                break;
        }

        /* Optimize away the signal, if it's a signal that can be
           handled immediately (ie non-blocked and untraced) and
           that is ignored (either explicitly or by default). */

        if (ignored_signal(sig, t))
                goto out;

        if (sig < SIGRTMIN) {
                /* Non-real-time signals are not queued. */
                /* XXX: As an extension, support queueing exactly one
                   non-rt signal if SA_SIGINFO is set, so that we can
                   get more detailed information about the cause of
                   the signal. */
                if (sigismember(&t->signal, sig))
                        goto out;
        } else {
                /* Real-time signals must be queued if sent by sigqueue, or
                   some other real-time mechanism.  It is implementation
                   defined whether kill() does so.  We attempt to do so, on
                   the principle of least surprise, but since kill is not
                   allowed to fail with EAGAIN when low on memory we just
                   make sure at least one signal gets delivered and don't
                   pass on the info struct. */

                struct signal_queue *q = NULL;

                if (atomic_read(&nr_queued_signals) < max_queued_signals) {
                        q = (struct signal_queue *)
                            kmem_cache_alloc(signal_queue_cachep, GFP_ATOMIC);
                }

                if (q) {
                        atomic_inc(&nr_queued_signals);
                        q->next = NULL;
                        *t->sigqueue_tail = q;
                        t->sigqueue_tail = &q->next;
                        switch ((unsigned long) info) {
                        case 0:
                                q->info.si_signo = sig;
                                q->info.si_errno = 0;
                                q->info.si_code = SI_USER;
                                q->info.si_pid = current->pid;
                                q->info.si_uid = current->uid;
                                break;
                        case 1:
                                q->info.si_signo = sig;
                                q->info.si_errno = 0;
                                q->info.si_code = SI_KERNEL;
                                q->info.si_pid = 0;
                                q->info.si_uid = 0;
                                break;
                        default:
                                q->info = *info;
                                break;
                        }
                } else {
                        /* Queue overflow, we have to abort. */
                        ret = -EAGAIN;
                        goto out;
                }
        }

        sigaddset(&t->signal, sig);
        if (!sigismember(&t->blocked, sig)) {
                t->sigpending = 1;
#ifdef __SMP__
                /*
                 * If the task is running on a different CPU
                 * force a reschedule on the other CPU - note that
                 * the code below is a tad loose and might occasionally
                 * kick the wrong CPU if we catch the process in the
                 * process of changing - but no harm is done by that
                 * other than doing an extra (lightweight) IPI interrupt.
                 *
                 * note that we rely on the previous spin_lock to
                 * lock interrupts for us! No need to set need_resched
                 * since signal event passing goes through ->blocked.
                 */
                spin_lock(&runqueue_lock);
                if (t->has_cpu && t->processor != smp_processor_id())
                        smp_send_reschedule(t->processor);
                spin_unlock(&runqueue_lock);
#endif /* __SMP__ */
        }

out:
        spin_unlock_irqrestore(&t->sigmask_lock, flags);
        if ((t->state & TASK_INTERRUPTIBLE) && signal_pending(t))
                wake_up_process(t);

out_nolock:
#if DEBUG_SIG
        printk(" %d -> %d\n", signal_pending(t), ret);
#endif

        return ret;
}
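/*
 * Illustrative usage (not compiled): the info argument is overloaded.
 * Kernel callers may pass 0 to mean "treat as sent from user space"
 * (si_code SI_USER) or 1 to mean "sent by the kernel" (si_code
 * SI_KERNEL, which also bypasses the permission check); anything else
 * is taken as a fully filled-in siginfo_t.
 */
#if 0
static void sketch_send(struct task_struct *p)
{
        struct siginfo info;

        info.si_signo = SIGUSR1;
        info.si_errno = 0;
        info.si_code = SI_KERNEL;
        info.si_pid = 0;
        info.si_uid = 0;

        send_sig_info(SIGUSR1, &info, p);       /* explicit siginfo */
        send_sig_info(SIGHUP, (void *)1L, p);   /* shorthand: from kernel */
}
#endif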
/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 */

int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
        unsigned long int flags;

        spin_lock_irqsave(&t->sigmask_lock, flags);
        if (t->sig == NULL) {
                spin_unlock_irqrestore(&t->sigmask_lock, flags);
                return -ESRCH;
        }

        if (t->sig->action[sig-1].sa.sa_handler == SIG_IGN)
                t->sig->action[sig-1].sa.sa_handler = SIG_DFL;
        sigdelset(&t->blocked, sig);
        recalc_sigpending(t);
        spin_unlock_irqrestore(&t->sigmask_lock, flags);

        return send_sig_info(sig, info, t);
}
/*
 * kill_pg() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc).
 */

int
kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
{
        int retval = -EINVAL;

        if (pgrp > 0) {
                struct task_struct *p;
                int found = 0;

                retval = -ESRCH;
                read_lock(&tasklist_lock);
                for_each_task(p) {
                        if (p->pgrp == pgrp) {
                                int err = send_sig_info(sig, info, p);
                                if (err != 0)
                                        retval = err;
                                else
                                        found++;
                        }
                }
                read_unlock(&tasklist_lock);
                if (found)
                        retval = 0;
        }
        return retval;
}
/*
 * kill_sl() sends a signal to the session leader: this is used
 * to send SIGHUP to the controlling process of a terminal when
 * the connection is lost.
 */

int
kill_sl_info(int sig, struct siginfo *info, pid_t sess)
{
        int retval = -EINVAL;

        if (sess > 0) {
                struct task_struct *p;
                int found = 0;

                retval = -ESRCH;
                read_lock(&tasklist_lock);
                for_each_task(p) {
                        if (p->leader && p->session == sess) {
                                int err = send_sig_info(sig, info, p);
                                if (err)
                                        retval = err;
                                else
                                        found++;
                        }
                }
                read_unlock(&tasklist_lock);
                if (found)
                        retval = 0;
        }
        return retval;
}
inline int
kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
        int error;
        struct task_struct *p;

        read_lock(&tasklist_lock);
        p = find_task_by_pid(pid);
        error = -ESRCH;
        if (p)
                error = send_sig_info(sig, info, p);
        read_unlock(&tasklist_lock);
        return error;
}
/*
 * kill_something() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

int
kill_something_info(int sig, struct siginfo *info, int pid)
{
        if (!pid) {
                return kill_pg_info(sig, info, current->pgrp);
        } else if (pid == -1) {
                int retval = 0, count = 0;
                struct task_struct * p;

                read_lock(&tasklist_lock);
                for_each_task(p) {
                        if (p->pid > 1 && p != current) {
                                int err = send_sig_info(sig, info, p);
                                ++count;
                                if (err != -EPERM)
                                        retval = err;
                        }
                }
                read_unlock(&tasklist_lock);
                return count ? retval : -ESRCH;
        } else if (pid < 0) {
                return kill_pg_info(sig, info, -pid);
        } else {
                return kill_proc_info(sig, info, pid);
        }
}
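/*
 * Example mapping (illustration only), matching kill(2):
 *
 *      kill_something_info(sig, info, 0)       caller's process group
 *      kill_something_info(sig, info, -1)      every process except init
 *                                              and the caller
 *      kill_something_info(sig, info, -pg)     process group pg
 *      kill_something_info(sig, info, pid)     the single process pid
 */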
/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int
send_sig(int sig, struct task_struct *p, int priv)
{
        return send_sig_info(sig, (void*)(long)(priv != 0), p);
}

void
force_sig(int sig, struct task_struct *p)
{
        force_sig_info(sig, (void*)1L, p);
}

int
kill_pg(pid_t pgrp, int sig, int priv)
{
        return kill_pg_info(sig, (void *)(long)(priv != 0), pgrp);
}

int
kill_sl(pid_t sess, int sig, int priv)
{
        return kill_sl_info(sig, (void *)(long)(priv != 0), sess);
}

int
kill_proc(pid_t pid, int sig, int priv)
{
        return kill_proc_info(sig, (void *)(long)(priv != 0), pid);
}
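/*
 * Note on the priv argument above (illustration only): priv != 0 is
 * encoded as the magic info pointer 1 ("from the kernel"), which skips
 * the uid/session permission check in send_sig_info(); priv == 0 is
 * encoded as info pointer 0 and is checked as if sent from user space.
 * So e.g. kill_proc(pid, SIGHUP, 1) delivers even to a process the
 * current credentials could not normally signal.
 */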
/*
 * Let a parent know about a status change of a child.
 */

void
notify_parent(struct task_struct *tsk, int sig)
{
        struct siginfo info;
        int why;

        info.si_signo = sig;
        info.si_errno = 0;
        info.si_pid = tsk->pid;

        /* FIXME: find out whether or not this is supposed to be c*time. */
        info.si_utime = tsk->times.tms_utime;
        info.si_stime = tsk->times.tms_stime;

        why = SI_KERNEL;        /* shouldn't happen */
        switch (tsk->state) {
        case TASK_ZOMBIE:
                if (tsk->exit_code & 0x80)
                        why = CLD_DUMPED;
                else if (tsk->exit_code & 0x7f)
                        why = CLD_KILLED;
                else
                        why = CLD_EXITED;
                break;
        case TASK_STOPPED:
                /* FIXME -- can we deduce CLD_TRAPPED or CLD_CONTINUED? */
                why = CLD_STOPPED;
                break;

        default:
                printk(KERN_DEBUG "eh? notify_parent with state %ld?\n",
                       tsk->state);
                break;
        }
        info.si_code = why;

        send_sig_info(sig, &info, tsk->p_pptr);
        wake_up_interruptible(&tsk->p_pptr->wait_chldexit);
}
EXPORT_SYMBOL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(force_sig_info);
EXPORT_SYMBOL(kill_pg);
EXPORT_SYMBOL(kill_pg_info);
EXPORT_SYMBOL(kill_proc);
EXPORT_SYMBOL(kill_proc_info);
EXPORT_SYMBOL(kill_sl);
EXPORT_SYMBOL(kill_sl_info);
EXPORT_SYMBOL(notify_parent);
EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
/*
 * System call entry points.
 */

/*
 * We don't need to get the kernel lock - this is all local to this
 * particular thread.. (and that's good, because this is _heavily_
 * used by various programs)
 */

asmlinkage long
sys_rt_sigprocmask(int how, sigset_t *set, sigset_t *oset, size_t sigsetsize)
{
        int error = -EINVAL;
        sigset_t old_set, new_set;

        /* XXX: Don't preclude handling different sized sigset_t's. */
        if (sigsetsize != sizeof(sigset_t))
                goto out;

        if (set) {
                error = -EFAULT;
                if (copy_from_user(&new_set, set, sizeof(*set)))
                        goto out;
                sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

                spin_lock_irq(&current->sigmask_lock);
                old_set = current->blocked;

                error = 0;
                switch (how) {
                default:
                        error = -EINVAL;
                        break;
                case SIG_BLOCK:
                        sigorsets(&new_set, &old_set, &new_set);
                        break;
                case SIG_UNBLOCK:
                        signandsets(&new_set, &old_set, &new_set);
                        break;
                case SIG_SETMASK:
                        break;
                }

                /* Don't touch the blocked mask on an invalid `how'. */
                if (!error) {
                        current->blocked = new_set;
                        recalc_sigpending(current);
                }
                spin_unlock_irq(&current->sigmask_lock);
                if (error)
                        goto out;
                if (oset)
                        goto set_old;
        } else if (oset) {
                spin_lock_irq(&current->sigmask_lock);
                old_set = current->blocked;
                spin_unlock_irq(&current->sigmask_lock);

        set_old:
                error = -EFAULT;
                if (copy_to_user(oset, &old_set, sizeof(*oset)))
                        goto out;
        }
        error = 0;
out:
        return error;
}
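/*
 * Worked example (illustration only): with old blocked mask B and the
 * (SIGKILL/SIGSTOP-stripped) user set S, the switch above computes
 *
 *      SIG_BLOCK:      new = B | S     (sigorsets)
 *      SIG_UNBLOCK:    new = B & ~S    (signandsets)
 *      SIG_SETMASK:    new = S
 *
 * so SIGKILL and SIGSTOP can never end up blocked.
 */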
asmlinkage long
sys_rt_sigpending(sigset_t *set, size_t sigsetsize)
{
        int error = -EINVAL;
        sigset_t pending;

        /* XXX: Don't preclude handling different sized sigset_t's. */
        if (sigsetsize != sizeof(sigset_t))
                goto out;

        spin_lock_irq(&current->sigmask_lock);
        sigandsets(&pending, &current->blocked, &current->signal);
        spin_unlock_irq(&current->sigmask_lock);

        error = -EFAULT;
        if (!copy_to_user(set, &pending, sizeof(*set)))
                error = 0;
out:
        return error;
}
asmlinkage long
sys_rt_sigtimedwait(const sigset_t *uthese, siginfo_t *uinfo,
                    const struct timespec *uts, size_t sigsetsize)
{
        int ret, sig;
        sigset_t these;
        struct timespec ts;
        siginfo_t info;
        long timeout = 0;

        /* XXX: Don't preclude handling different sized sigset_t's. */
        if (sigsetsize != sizeof(sigset_t))
                return -EINVAL;

        if (copy_from_user(&these, uthese, sizeof(these)))
                return -EFAULT;
        else {
                /* Invert the set of allowed signals to get those we
                   want to block. */
                signotset(&these);
        }

        if (uts) {
                if (copy_from_user(&ts, uts, sizeof(ts)))
                        return -EFAULT;
                if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
                    || ts.tv_sec < 0)
                        return -EINVAL;
        }

        spin_lock_irq(&current->sigmask_lock);
        sig = dequeue_signal(&these, &info);
        if (!sig) {
                /* None ready -- temporarily unblock those we're interested
                   in so that we'll be awakened when they arrive. */
                sigset_t oldblocked = current->blocked;
                sigandsets(&current->blocked, &current->blocked, &these);
                recalc_sigpending(current);
                spin_unlock_irq(&current->sigmask_lock);

                timeout = MAX_SCHEDULE_TIMEOUT;
                if (uts)
                        timeout = (timespec_to_jiffies(&ts)
                                   + (ts.tv_sec || ts.tv_nsec));

                current->state = TASK_INTERRUPTIBLE;
                timeout = schedule_timeout(timeout);

                spin_lock_irq(&current->sigmask_lock);
                sig = dequeue_signal(&these, &info);
                current->blocked = oldblocked;
                recalc_sigpending(current);
        }
        spin_unlock_irq(&current->sigmask_lock);

        if (sig) {
                ret = sig;
                if (uinfo) {
                        if (copy_to_user(uinfo, &info, sizeof(siginfo_t)))
                                ret = -EFAULT;
                }
        } else {
                ret = -EAGAIN;
                if (timeout)
                        ret = -EINTR;
        }

        return ret;
}
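/*
 * Timeout detail (illustration only): the "+ (ts.tv_sec || ts.tv_nsec)"
 * term adds one jiffy to any non-zero timeout, guaranteeing the task
 * sleeps at least as long as requested even though the current tick is
 * already partly elapsed; a zero timespec polls exactly once and
 * returns -EAGAIN if nothing is pending.
 */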
asmlinkage long
sys_kill(int pid, int sig)
{
        struct siginfo info;

        memset(&info, 0, sizeof(info));

        info.si_signo = sig;
        info.si_errno = 0;
        info.si_code = SI_USER;
        info.si_pid = current->pid;
        info.si_uid = current->uid;

        return kill_something_info(sig, &info, pid);
}
asmlinkage long
sys_rt_sigqueueinfo(int pid, int sig, siginfo_t *uinfo)
{
        siginfo_t info;

        if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
                return -EFAULT;

        /* Not even root can pretend to send signals from the kernel.
           Nor can they impersonate a kill(), which adds source info. */
        if (info.si_code >= 0)
                return -EPERM;
        info.si_signo = sig;

        /* POSIX.1b doesn't mention process groups. */
        return kill_proc_info(sig, &info, pid);
}
int
do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact)
{
        struct k_sigaction *k;

        if (sig < 1 || sig > _NSIG ||
            (act && (sig == SIGKILL || sig == SIGSTOP)))
                return -EINVAL;

        spin_lock_irq(&current->sigmask_lock);
        k = &current->sig->action[sig-1];

        if (oact) *oact = *k;

        if (act) {
                *k = *act;
                sigdelsetmask(&k->sa.sa_mask, sigmask(SIGKILL) | sigmask(SIGSTOP));

                /*
                 * POSIX 3.3.1.3:
                 *  "Setting a signal action to SIG_IGN for a signal that is
                 *   pending shall cause the pending signal to be discarded,
                 *   whether or not it is blocked."
                 *
                 *  "Setting a signal action to SIG_DFL for a signal that is
                 *   pending and whose default action is to ignore the signal
                 *   (for example, SIGCHLD), shall cause the pending signal to
                 *   be discarded, whether or not it is blocked"
                 *
                 * Note the silly behaviour of SIGCHLD: SIG_IGN means that the
                 * signal isn't actually ignored, but does automatic child
                 * reaping, while SIG_DFL is explicitly said by POSIX to force
                 * the signal to be ignored.
                 */

                if (k->sa.sa_handler == SIG_IGN
                    || (k->sa.sa_handler == SIG_DFL
                        && (sig == SIGCONT ||
                            sig == SIGCHLD ||
                            sig == SIGWINCH))) {
                        /* So dequeue any that might be pending.
                           XXX: process-wide signals? */
                        if (sig >= SIGRTMIN &&
                            sigismember(&current->signal, sig)) {
                                struct signal_queue *q, **pp;
                                pp = &current->sigqueue;
                                q = current->sigqueue;
                                while (q) {
                                        if (q->info.si_signo != sig)
                                                pp = &q->next;
                                        else {
                                                *pp = q->next;
                                                kmem_cache_free(signal_queue_cachep, q);
                                                atomic_dec(&nr_queued_signals);
                                        }
                                        q = *pp;
                                }
                        }

                        sigdelset(&current->signal, sig);
                        recalc_sigpending(current);
                }
        }

        spin_unlock_irq(&current->sigmask_lock);
        return 0;
}
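/*
 * Example of the POSIX rule above (illustration only): if SIGCHLD is
 * pending -- even blocked -- and the caller installs SIG_IGN, or SIG_DFL
 * (whose default for SIGCHLD is to ignore), the pending instance is
 * discarded here; for an ignored rt signal the queued siginfo entries
 * are freed as well.
 */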
int
do_sigaltstack (const stack_t *uss, stack_t *uoss, unsigned long sp)
{
        stack_t oss;
        int error;

        if (uoss) {
                oss.ss_sp = (void *) current->sas_ss_sp;
                oss.ss_size = current->sas_ss_size;
                oss.ss_flags = sas_ss_flags(sp);
        }

        if (uss) {
                void *ss_sp;
                size_t ss_size;
                int ss_flags;

                error = -EFAULT;
                if (verify_area(VERIFY_READ, uss, sizeof(*uss))
                    || __get_user(ss_sp, &uss->ss_sp)
                    || __get_user(ss_flags, &uss->ss_flags)
                    || __get_user(ss_size, &uss->ss_size))
                        goto out;

                error = -EPERM;
                if (on_sig_stack (sp))
                        goto out;

                error = -EINVAL;
                if (ss_flags & ~SS_DISABLE)
                        goto out;

                if (ss_flags & SS_DISABLE) {
                        ss_size = 0;
                        ss_sp = NULL;
                } else {
                        error = -ENOMEM;
                        if (ss_size < MINSIGSTKSZ)
                                goto out;
                }

                current->sas_ss_sp = (unsigned long) ss_sp;
                current->sas_ss_size = ss_size;
        }

        if (uoss) {
                error = -EFAULT;
                if (copy_to_user(uoss, &oss, sizeof(oss)))
                        goto out;
        }

        error = 0;
out:
        return error;
}
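/*
 * Example (illustration only): a sigaltstack() call with ss_flags ==
 * SS_DISABLE tears the alternate stack down (pointer and size zeroed);
 * installing a new stack requires ss_size >= MINSIGSTKSZ (-ENOMEM
 * otherwise), and any change fails with -EPERM while the task is
 * currently running on the old alternate stack.
 */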
#if !defined(__alpha__)
/* Alpha has its own versions with special arguments. */

asmlinkage long
sys_sigprocmask(int how, old_sigset_t *set, old_sigset_t *oset)
{
        int error;
        old_sigset_t old_set, new_set;

        if (set) {
                error = -EFAULT;
                if (copy_from_user(&new_set, set, sizeof(*set)))
                        goto out;
                new_set &= ~(sigmask(SIGKILL)|sigmask(SIGSTOP));

                spin_lock_irq(&current->sigmask_lock);
                old_set = current->blocked.sig[0];

                error = 0;
                switch (how) {
                default:
                        error = -EINVAL;
                        break;
                case SIG_BLOCK:
                        sigaddsetmask(&current->blocked, new_set);
                        break;
                case SIG_UNBLOCK:
                        sigdelsetmask(&current->blocked, new_set);
                        break;
                case SIG_SETMASK:
                        current->blocked.sig[0] = new_set;
                        break;
                }

                recalc_sigpending(current);
                spin_unlock_irq(&current->sigmask_lock);
                if (error)
                        goto out;
                if (oset)
                        goto set_old;
        } else if (oset) {
                old_set = current->blocked.sig[0];
        set_old:
                error = -EFAULT;
                if (copy_to_user(oset, &old_set, sizeof(*oset)))
                        goto out;
        }
        error = 0;
out:
        return error;
}
asmlinkage long
sys_sigpending(old_sigset_t *set)
{
        int error;
        old_sigset_t pending;

        spin_lock_irq(&current->sigmask_lock);
        pending = current->blocked.sig[0] & current->signal.sig[0];
        spin_unlock_irq(&current->sigmask_lock);

        error = -EFAULT;
        if (!copy_to_user(set, &pending, sizeof(*set)))
                error = 0;
        return error;
}
#ifndef __sparc__
asmlinkage long
sys_rt_sigaction(int sig, const struct sigaction *act, struct sigaction *oact,
                 size_t sigsetsize)
{
        struct k_sigaction new_sa, old_sa;
        int ret = -EINVAL;

        /* XXX: Don't preclude handling different sized sigset_t's. */
        if (sigsetsize != sizeof(sigset_t))
                goto out;

        if (act) {
                if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
                        return -EFAULT;
        }

        ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);

        if (!ret && oact) {
                if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
                        return -EFAULT;
        }
out:
        return ret;
}
#endif /* !__sparc__ */
#endif /* !defined(__alpha__) */
#if !defined(__alpha__) && !defined(__ia64__)

/*
 * For backwards compatibility.  Functionality superseded by sigprocmask.
 */

asmlinkage long
sys_sgetmask(void)
{
        /* SMP safe */
        return current->blocked.sig[0];
}

asmlinkage long
sys_ssetmask(int newmask)
{
        int old;

        spin_lock_irq(&current->sigmask_lock);
        old = current->blocked.sig[0];

        siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
                                                  sigmask(SIGSTOP)));
        recalc_sigpending(current);
        spin_unlock_irq(&current->sigmask_lock);

        return old;
}
#endif /* !defined(__alpha__) && !defined(__ia64__) */
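/*
 * Note on the emulation below (illustration only): classic SysV
 * signal() semantics come entirely from the two flags it sets --
 * SA_ONESHOT resets the handler to SIG_DFL on delivery, and SA_NOMASK
 * leaves the signal unblocked while the handler runs.  The return value
 * follows signal(2): the previous handler on success, or a small
 * negative errno cast to unsigned long.
 */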
#if !defined(__alpha__) && !defined(__mips__)

/*
 * For backwards compatibility.  Functionality superseded by sigaction.
 */

asmlinkage unsigned long
sys_signal(int sig, __sighandler_t handler)
{
        struct k_sigaction new_sa, old_sa;
        int ret;

        new_sa.sa.sa_handler = handler;
        new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
        /* Don't install a garbage mask from the uninitialized stack slot. */
        sigemptyset(&new_sa.sa.sa_mask);

        ret = do_sigaction(sig, &new_sa, &old_sa);

        return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
#endif /* !defined(__alpha__) && !defined(__mips__) */