Import 2.1.118
[davej-history.git] / kernel / signal.c
blob7469c0d0824306b663f6ca47cd9b9d6d69db4522
1 /*
2 * linux/kernel/signal.c
4 * Copyright (C) 1991, 1992 Linus Torvalds
6 * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson
7 */
9 #include <linux/module.h>
10 #include <linux/sched.h>
11 #include <linux/kernel.h>
12 #include <linux/signal.h>
13 #include <linux/errno.h>
14 #include <linux/wait.h>
15 #include <linux/ptrace.h>
16 #include <linux/unistd.h>
17 #include <linux/mm.h>
18 #include <linux/smp.h>
19 #include <linux/smp_lock.h>
20 #include <linux/slab.h>
21 #include <linux/init.h>
23 #include <asm/uaccess.h>
26 * SLAB caches for signal bits.
/* NOTE(review): this is an extracted view of linux/kernel/signal.c; blank and
   brace-only lines were lost in extraction and each remaining line keeps its
   original blob line number. Comments below are additive only. */
/* DEBUG_SIG != 0 enables printk tracing below and slab red-zone/free checks
   on the signal_queue cache. */
29 #define DEBUG_SIG 0
31 #if DEBUG_SIG
32 #define SIG_SLAB_DEBUG (SLAB_DEBUG_FREE | SLAB_RED_ZONE /* | SLAB_POISON */)
33 #else
34 #define SIG_SLAB_DEBUG 0
35 #endif
/* Slab cache backing struct signal_queue entries (created in signals_init). */
37 static kmem_cache_t *signal_queue_cachep;
/* Global count of queued rt-signal entries and its hard cap; incremented and
   decremented from several functions below.
   NOTE(review): updates are plain ++/-- with no dedicated lock — presumably
   serialized by the per-task sigmask_lock and/or big kernel lock; confirm. */
39 static int nr_queued_signals;
40 static int max_queued_signals = 1024;
/* Boot-time initialization: create the slab cache used to queue siginfo for
   real-time signals.
   NOTE(review): the return value of kmem_cache_create() is not checked; a
   failure would leave signal_queue_cachep NULL for later allocations. */
42 void __init signals_init(void)
44 signal_queue_cachep =
45 kmem_cache_create("signal_queue",
46 sizeof(struct signal_queue),
47 __alignof__(struct signal_queue),
48 SIG_SLAB_DEBUG, NULL, NULL);
/* Drop every pending signal for task t: clear the pending flag and bitmask,
   detach the rt-signal queue, then free each queued entry back to the slab
   cache (decrementing the global counter).
   NOTE(review): no locking visible here — presumably the caller holds
   t->sigmask_lock or t is not yet/no longer runnable; confirm call sites. */
53 * Flush all pending signals for a task.
56 void
57 flush_signals(struct task_struct *t)
59 struct signal_queue *q, *n;
61 t->sigpending = 0;
62 sigemptyset(&t->signal);
/* Detach the whole queue first, then walk-and-free the detached list. */
63 q = t->sigqueue;
64 t->sigqueue = NULL;
65 t->sigqueue_tail = &t->sigqueue;
67 while (q) {
68 n = q->next;
69 kmem_cache_free(signal_queue_cachep, q);
70 nr_queued_signals--;
71 q = n;
/* Reset all _NSIG handlers of task t: any handler that is not SIG_IGN becomes
   SIG_DFL; sa_flags and sa_mask are cleared unconditionally for every entry. */
76 * Flush all handlers for a task.
79 void
80 flush_signal_handlers(struct task_struct *t)
82 int i;
83 struct k_sigaction *ka = &t->sig->action[0];
84 for (i = _NSIG ; i != 0 ; i--) {
85 if (ka->sa.sa_handler != SIG_IGN)
86 ka->sa.sa_handler = SIG_DFL;
87 ka->sa.sa_flags = 0;
88 sigemptyset(&ka->sa.sa_mask);
89 ka++;
/* Pick the lowest-numbered signal that is pending in current->signal and NOT
   set in *mask, fill *info with its siginfo, and return its number (0 if no
   eligible signal is pending). For rt signals the matching queue entry is
   removed and freed; the pending bit is cleared only when no further queued
   instance of the same signal remains ("reset" logic below).
   Per the original comment, callers must hold current->sigmask_lock. */
94 * Dequeue a signal and return the element to the caller, which is
95 * expected to free it.
97 * All callers of must be holding current->sigmask_lock.
101 dequeue_signal(sigset_t *mask, siginfo_t *info)
103 unsigned long i, *s, *m, x;
104 int sig = 0;
106 #if DEBUG_SIG
107 printk("SIG dequeue (%s:%d): %d ", current->comm, current->pid,
108 signal_pending(current));
109 #endif
/* Word-by-word scan of pending & ~mask; the switch specializes for the
   common 1- and 2-word sigset sizes. ffz(~x) finds the first set bit. */
111 /* Find the first desired signal that is pending. */
112 s = current->signal.sig;
113 m = mask->sig;
114 switch (_NSIG_WORDS) {
115 default:
116 for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
117 if ((x = *s &~ *m) != 0) {
118 sig = ffz(~x) + i*_NSIG_BPW + 1;
119 break;
121 break;
123 case 2: if ((x = s[0] &~ m[0]) != 0)
124 sig = 1;
125 else if ((x = s[1] &~ m[1]) != 0)
126 sig = _NSIG_BPW + 1;
127 else
128 break;
129 sig += ffz(~x);
130 break;
132 case 1: if ((x = *s &~ *m) != 0)
133 sig = ffz(~x) + 1;
134 break;
137 if (sig) {
138 int reset = 1;
140 /* Collect the siginfo appropriate to this signal. */
141 if (sig < SIGRTMIN) {
142 /* XXX: As an extension, support queueing exactly
143 one non-rt signal if SA_SIGINFO is set, so that
144 we can get more detailed information about the
145 cause of the signal. */
146 /* Deciding not to init these couple of fields is
147 more expensive that just initializing them. */
148 info->si_signo = sig;
149 info->si_errno = 0;
150 info->si_code = 0;
151 info->si_pid = 0;
152 info->si_uid = 0;
153 } else {
154 struct signal_queue *q, **pp;
155 pp = &current->sigqueue;
156 q = current->sigqueue;
158 /* Find the one we're interested in ... */
159 for ( ; q ; pp = &q->next, q = q->next)
160 if (q->info.si_signo == sig)
161 break;
162 if (q) {
/* Unlink q; if it was the tail, repoint sigqueue_tail at its
   predecessor's next-pointer. */
163 if ((*pp = q->next) == NULL)
164 current->sigqueue_tail = pp;
165 *info = q->info;
166 kmem_cache_free(signal_queue_cachep,q);
167 nr_queued_signals--;
/* Keep the pending bit set if another instance of the same
   rt signal is still queued. */
169 /* then see if this signal is still pending. */
170 q = *pp;
171 while (q) {
172 if (q->info.si_signo == sig) {
173 reset = 0;
174 break;
176 q = q->next;
178 } else {
179 /* Ok, it wasn't in the queue. It must have
180 been sent either by a non-rt mechanism and
181 we ran out of queue space. So zero out the
182 info. */
183 info->si_signo = sig;
184 info->si_errno = 0;
185 info->si_code = 0;
186 info->si_pid = 0;
187 info->si_uid = 0;
191 if (reset)
192 sigdelset(&current->signal, sig);
193 recalc_sigpending(current);
195 /* XXX: Once POSIX.1b timers are in, if si_code == SI_TIMER,
196 we need to xchg out the timer overrun values. */
197 } else {
198 /* XXX: Once CLONE_PID is in to join those "threads" that are
199 part of the same "process", look for signals sent to the
200 "process" as well. */
/* Defensive consistency check: sigpending flag set but nothing
   deliverable against the caller's blocked mask. */
202 /* Sanity check... */
203 if (mask == &current->blocked && signal_pending(current)) {
204 printk(KERN_CRIT "SIG: sigpending lied\n");
205 current->sigpending = 0;
209 #if DEBUG_SIG
210 printk(" %d -> %d\n", signal_pending(current), sig);
211 #endif
213 return sig;
/* Deliver signal sig with optional siginfo to task t.
   info encoding: NULL = sent from user space (kill-style), (void*)1 = sent by
   the kernel, otherwise a real siginfo pointer.
   Returns 0 on success, -EINVAL for a bad signal number, -ESRCH if t->sig is
   gone (task exiting), -EPERM on the permission check, -EAGAIN if an rt
   signal could not be queued. sig == 0 is a pure existence/permission probe.
   Side effects: SIGKILL/SIGCONT wake a stopped task and cancel pending stop
   signals; stop signals cancel a pending SIGCONT; an interruptible sleeper
   with a newly pending signal is woken at the end. */
217 send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
219 struct k_sigaction *ka;
220 unsigned long flags;
221 int ret;
223 #if DEBUG_SIG
224 printk("SIG queue (%s:%d): %d ", t->comm, t->pid, sig);
225 #endif
227 ret = -EINVAL;
228 if (sig < 0 || sig > _NSIG)
229 goto out_nolock;
231 /* If t->sig is gone, we must be trying to kill the task. So
232 pretend that it doesn't exist anymore. */
233 ret = -ESRCH;
234 if (t->sig == NULL)
235 goto out_nolock;
/* Permission: kernel-originated signals ((unsigned long)info <= 1 with
   !SI_FROMUSER) always pass; SIGCONT within the same session passes;
   otherwise one of the uid/euid pairs must match, or CAP_SYS_ADMIN.
   The `a ^ b` idiom is just `a != b`. */
237 /* The somewhat baroque permissions check... */
238 ret = -EPERM;
239 if ((!info || ((unsigned long)info != 1 && SI_FROMUSER(info)))
240 && ((sig != SIGCONT) || (current->session != t->session))
241 && (current->euid ^ t->suid) && (current->euid ^ t->uid)
242 && (current->uid ^ t->suid) && (current->uid ^ t->uid)
243 && !capable(CAP_SYS_ADMIN))
244 goto out_nolock;
246 /* The null signal is a permissions and process existance probe.
247 No signal is actually delivered. */
248 ret = 0;
249 if (!sig)
250 goto out_nolock;
252 ka = &t->sig->action[sig-1];
253 spin_lock_irqsave(&t->sigmask_lock, flags);
255 switch (sig) {
256 case SIGKILL: case SIGCONT:
257 /* Wake up the process if stopped. */
258 if (t->state == TASK_STOPPED)
259 wake_up_process(t);
260 t->exit_code = 0;
261 sigdelsetmask(&t->signal, (sigmask(SIGSTOP)|sigmask(SIGTSTP)|
262 sigmask(SIGTTOU)|sigmask(SIGTTIN)));
263 /* Inflict this corner case with recalculations, not mainline */
264 recalc_sigpending(t);
265 break;
267 case SIGSTOP: case SIGTSTP:
268 case SIGTTIN: case SIGTTOU:
269 /* If we're stopping again, cancel SIGCONT */
270 sigdelset(&t->signal, SIGCONT);
271 /* Inflict this corner case with recalculations, not mainline */
272 recalc_sigpending(t);
273 break;
276 /* Optimize away the signal, if it's a signal that can be
277 handled immediately (ie non-blocked and untraced) and
278 that is ignored (either explicitly or by default). */
280 if (!(t->flags & PF_PTRACED) && !sigismember(&t->blocked, sig)
281 /* Don't bother with ignored sigs (SIGCHLD is special) */
282 && ((ka->sa.sa_handler == SIG_IGN && sig != SIGCHLD)
283 /* Some signals are ignored by default.. (but SIGCONT
284 already did its deed) */
285 || (ka->sa.sa_handler == SIG_DFL
286 && (sig == SIGCONT || sig == SIGCHLD
287 || sig == SIGWINCH || sig == SIGURG)))) {
288 goto out;
291 if (sig < SIGRTMIN) {
292 /* Non-real-time signals are not queued. */
293 /* XXX: As an extension, support queueing exactly one
294 non-rt signal if SA_SIGINFO is set, so that we can
295 get more detailed information about the cause of
296 the signal. */
297 if (sigismember(&t->signal, sig))
298 goto out;
299 } else {
300 /* Real-time signals must be queued if sent by sigqueue, or
301 some other real-time mechanism. It is implementation
302 defined whether kill() does so. We attempt to do so, on
303 the principle of least surprise, but since kill is not
304 allowed to fail with EAGAIN when low on memory we just
305 make sure at least one signal gets delivered and don't
306 pass on the info struct. */
308 struct signal_queue *q = 0;
310 if (nr_queued_signals < max_queued_signals) {
/* NOTE(review): GFP_KERNEL allocation while holding
   t->sigmask_lock with irqs saved — may sleep; verify this is
   safe under this kernel generation's allocator semantics. */
311 q = (struct signal_queue *)
312 kmem_cache_alloc(signal_queue_cachep, GFP_KERNEL);
313 nr_queued_signals++;
316 if (q) {
/* Append at the tail to preserve FIFO delivery order. */
317 q->next = NULL;
318 *t->sigqueue_tail = q;
319 t->sigqueue_tail = &q->next;
/* Synthesize siginfo when the caller passed the 0/1 sentinels. */
320 switch ((unsigned long) info) {
321 case 0:
322 q->info.si_signo = sig;
323 q->info.si_errno = 0;
324 q->info.si_code = SI_USER;
325 q->info.si_pid = current->pid;
326 q->info.si_uid = current->uid;
327 break;
328 case 1:
329 q->info.si_signo = sig;
330 q->info.si_errno = 0;
331 q->info.si_code = SI_KERNEL;
332 q->info.si_pid = 0;
333 q->info.si_uid = 0;
334 break;
335 default:
336 q->info = *info;
337 break;
339 } else {
340 /* If this was sent by a rt mechanism, try again. */
341 if (info->si_code < 0) {
342 ret = -EAGAIN;
343 goto out;
345 /* Otherwise, mention that the signal is pending,
346 but don't queue the info. */
350 sigaddset(&t->signal, sig);
351 if (!sigismember(&t->blocked, sig))
352 t->sigpending = 1;
354 out:
355 spin_unlock_irqrestore(&t->sigmask_lock, flags);
356 if (t->state == TASK_INTERRUPTIBLE && signal_pending(t))
357 wake_up_process(t);
359 out_nolock:
360 #if DEBUG_SIG
361 printk(" %d -> %d\n", signal_pending(t), ret);
362 #endif
364 return ret;
/* Force delivery of sig to t: demote SIG_IGN to SIG_DFL and unblock the
   signal before sending, so the process cannot suppress it.
   NOTE(review): unlike send_sig_info there is no range check on sig here
   (sig-1 indexes the action table directly), and the handler/blocked-mask
   writes are not under t->sigmask_lock — confirm callers guarantee both. */
373 force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
375 if (t->sig == NULL)
376 return -ESRCH;
378 if (t->sig->action[sig-1].sa.sa_handler == SIG_IGN)
379 t->sig->action[sig-1].sa.sa_handler = SIG_DFL;
380 sigdelset(&t->blocked, sig);
382 return send_sig_info(sig, info, t);
/* Send sig to every member of process group pgrp under the tasklist read
   lock. Returns 0 if delivery succeeded for at least one member, otherwise
   the last per-task error, -ESRCH if the group is empty, or -EINVAL for a
   non-positive pgrp. */
386 * kill_pg() sends a signal to a process group: this is what the tty
387 * control characters do (^C, ^Z etc)
391 kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
393 int retval = -EINVAL;
394 if (pgrp > 0) {
395 struct task_struct *p;
396 int found = 0;
398 retval = -ESRCH;
399 read_lock(&tasklist_lock);
400 for_each_task(p) {
401 if (p->pgrp == pgrp) {
402 int err = send_sig_info(sig, info, p);
403 if (err != 0)
404 retval = err;
405 else
406 found++;
409 read_unlock(&tasklist_lock);
/* Any single success wins over accumulated errors. */
410 if (found)
411 retval = 0;
413 return retval;
/* Send sig to the session leader(s) of session sess (used e.g. for SIGHUP on
   terminal hangup). Same return convention as kill_pg_info: 0 if at least
   one leader received it, else the last error / -ESRCH / -EINVAL. */
417 * kill_sl() sends a signal to the session leader: this is used
418 * to send SIGHUP to the controlling process of a terminal when
419 * the connection is lost.
423 kill_sl_info(int sig, struct siginfo *info, pid_t sess)
425 int retval = -EINVAL;
426 if (sess > 0) {
427 struct task_struct *p;
428 int found = 0;
430 retval = -ESRCH;
431 read_lock(&tasklist_lock);
432 for_each_task(p) {
433 if (p->leader && p->session == sess) {
434 int err = send_sig_info(sig, info, p);
435 if (err)
436 retval = err;
437 else
438 found++;
441 read_unlock(&tasklist_lock);
442 if (found)
443 retval = 0;
445 return retval;
/* Send sig to the single task with the given pid. Looks the task up under
   the tasklist read lock; returns -ESRCH if no such pid, otherwise the
   result of send_sig_info. */
448 inline int
449 kill_proc_info(int sig, struct siginfo *info, pid_t pid)
451 int error;
452 struct task_struct *p;
454 read_lock(&tasklist_lock);
455 p = find_task_by_pid(pid);
456 error = -ESRCH;
457 if (p)
458 error = send_sig_info(sig, info, p);
459 read_unlock(&tasklist_lock);
460 return error;
/* Interpret pid exactly like kill(2): pid == 0 -> caller's process group;
   pid == -1 -> every task except init (pid 1) and the caller, ignoring
   per-task EPERM unless nothing was signalled; pid < -1 -> process group
   -pid; pid > 0 -> that single process. */
464 * kill_something() interprets pid in interesting ways just like kill(2).
466 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
467 * is probably wrong. Should make it like BSD or SYSV.
471 kill_something_info(int sig, struct siginfo *info, int pid)
473 if (!pid) {
474 return kill_pg_info(sig, info, current->pgrp);
475 } else if (pid == -1) {
476 int retval = 0, count = 0;
477 struct task_struct * p;
479 read_lock(&tasklist_lock);
480 for_each_task(p) {
481 if (p->pid > 1 && p != current) {
482 int err = send_sig_info(sig, info, p);
483 ++count;
/* EPERM on an individual task is not reported; any other
   error overwrites retval (last-error semantics). */
484 if (err != -EPERM)
485 retval = err;
488 read_unlock(&tasklist_lock);
489 return count ? retval : -ESRCH;
490 } else if (pid < 0) {
491 return kill_pg_info(sig, info, -pid);
492 } else {
493 return kill_proc_info(sig, info, pid);
/* Legacy wrapper: priv != 0 is encoded as the magic (void*)1 "from kernel"
   info pointer understood by send_sig_info. */
498 * These are for backward compatibility with the rest of the kernel source.
502 send_sig(int sig, struct task_struct *p, int priv)
504 return send_sig_info(sig, (void*)(long)(priv != 0), p);
/* Legacy wrapper: force sig on p as a kernel-originated signal
   ((void*)1L sentinel); return value of force_sig_info is discarded. */
507 void
508 force_sig(int sig, struct task_struct *p)
510 force_sig_info(sig, (void*)1L, p);
/* Legacy wrapper around kill_pg_info with the 0/1 info sentinel. */
514 kill_pg(pid_t pgrp, int sig, int priv)
516 return kill_pg_info(sig, (void *)(long)(priv != 0), pgrp);
/* Legacy wrapper around kill_sl_info with the 0/1 info sentinel. */
520 kill_sl(pid_t sess, int sig, int priv)
522 return kill_sl_info(sig, (void *)(long)(priv != 0), sess);
/* Legacy wrapper around kill_proc_info with the 0/1 info sentinel. */
526 kill_proc(pid_t pid, int sig, int priv)
528 return kill_proc_info(sig, (void *)(long)(priv != 0), pid);
/* Notify tsk's parent of a status change: build CLD_* siginfo from the
   child's state/exit_code, send sig (normally SIGCHLD) to the parent, and
   wake any waiters on the parent's wait_chldexit queue. */
532 * Let a parent know about a status change of a child.
535 void
536 notify_parent(struct task_struct *tsk, int sig)
538 struct siginfo info;
539 int why;
541 info.si_signo = sig;
542 info.si_errno = 0;
543 info.si_pid = tsk->pid;
545 /* FIXME: find out whether or not this is supposed to be c*time. */
546 info.si_utime = tsk->times.tms_utime;
547 info.si_stime = tsk->times.tms_stime;
549 why = SI_KERNEL; /* shouldn't happen */
550 switch (tsk->state) {
551 case TASK_ZOMBIE:
/* exit_code layout: bit 7 = core dumped, low 7 bits = killing signal. */
552 if (tsk->exit_code & 0x80)
553 why = CLD_DUMPED;
554 else if (tsk->exit_code & 0x7f)
555 why = CLD_KILLED;
556 else
557 why = CLD_EXITED;
558 break;
559 case TASK_STOPPED:
560 /* FIXME -- can we deduce CLD_TRAPPED or CLD_CONTINUED? */
561 why = CLD_STOPPED;
562 break;
564 default:
565 printk(KERN_DEBUG "eh? notify_parent with state %ld?\n",
566 tsk->state);
567 break;
569 info.si_code = why;
571 send_sig_info(sig, &info, tsk->p_pptr);
572 wake_up_interruptible(&tsk->p_pptr->wait_chldexit);
/* Symbols exported to loadable modules. */
575 EXPORT_SYMBOL(dequeue_signal);
576 EXPORT_SYMBOL(flush_signals);
577 EXPORT_SYMBOL(force_sig);
578 EXPORT_SYMBOL(force_sig_info);
579 EXPORT_SYMBOL(kill_pg);
580 EXPORT_SYMBOL(kill_pg_info);
581 EXPORT_SYMBOL(kill_proc);
582 EXPORT_SYMBOL(kill_proc_info);
583 EXPORT_SYMBOL(kill_sl);
584 EXPORT_SYMBOL(kill_sl_info);
585 EXPORT_SYMBOL(notify_parent);
586 EXPORT_SYMBOL(recalc_sigpending);
587 EXPORT_SYMBOL(send_sig);
588 EXPORT_SYMBOL(send_sig_info);
/* rt_sigprocmask(2): atomically (under sigmask_lock) apply SIG_BLOCK /
   SIG_UNBLOCK / SIG_SETMASK to current->blocked, stripping SIGKILL/SIGSTOP
   from the new mask, and optionally return the previous mask via oset.
   Returns 0, -EINVAL (bad sigsetsize or how), or -EFAULT. */
592 * System call entry points.
596 * We don't need to get the kernel lock - this is all local to this
597 * particular thread.. (and that's good, because this is _heavily_
598 * used by various programs)
601 asmlinkage int
602 sys_rt_sigprocmask(int how, sigset_t *set, sigset_t *oset, size_t sigsetsize)
604 int error = -EINVAL;
605 sigset_t old_set, new_set;
607 /* XXX: Don't preclude handling different sized sigset_t's. */
608 if (sigsetsize != sizeof(sigset_t))
609 goto out;
611 if (set) {
612 error = -EFAULT;
613 if (copy_from_user(&new_set, set, sizeof(*set)))
614 goto out;
615 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
617 spin_lock_irq(&current->sigmask_lock);
618 old_set = current->blocked;
620 error = 0;
621 switch (how) {
622 default:
623 error = -EINVAL;
624 break;
625 case SIG_BLOCK:
626 sigorsets(&new_set, &old_set, &new_set);
627 break;
628 case SIG_UNBLOCK:
629 signandsets(&new_set, &old_set, &new_set);
630 break;
631 case SIG_SETMASK:
632 break;
/* NOTE(review): on an invalid `how` the assignment below still runs, so
   current->blocked is overwritten with the sanitized user set even though
   -EINVAL is returned — looks unintended; confirm against later kernels. */
635 current->blocked = new_set;
636 recalc_sigpending(current);
637 spin_unlock_irq(&current->sigmask_lock);
638 if (error)
639 goto out;
640 if (oset)
641 goto set_old;
642 } else if (oset) {
643 spin_lock_irq(&current->sigmask_lock);
644 old_set = current->blocked;
645 spin_unlock_irq(&current->sigmask_lock);
647 set_old:
648 error = -EFAULT;
649 if (copy_to_user(oset, &old_set, sizeof(*oset)))
650 goto out;
652 error = 0;
653 out:
654 return error;
/* rt_sigpending(2): copy to user the set of signals that are both pending
   and blocked for the current task (historical Linux semantics — signals
   pending but unblocked are about to be delivered and are not reported). */
657 asmlinkage int
658 sys_rt_sigpending(sigset_t *set, size_t sigsetsize)
660 int error = -EINVAL;
661 sigset_t pending;
663 /* XXX: Don't preclude handling different sized sigset_t's. */
664 if (sigsetsize != sizeof(sigset_t))
665 goto out;
667 spin_lock_irq(&current->sigmask_lock);
668 sigandsets(&pending, &current->blocked, &current->signal);
669 spin_unlock_irq(&current->sigmask_lock);
671 error = -EFAULT;
672 if (!copy_to_user(set, &pending, sizeof(*set)))
673 error = 0;
674 out:
675 return error;
/* rt_sigtimedwait(2): wait for one of the signals in *uthese, optionally
   bounded by *uts. Internally `these` is inverted so it becomes a blocking
   mask for dequeue_signal. If nothing is pending, the wanted signals are
   temporarily removed from current->blocked, the task sleeps (bounded by
   current->timeout), then the original mask is restored.
   Returns the signal number, -EAGAIN on timeout, -EINTR if interrupted by
   some other signal, or -EINVAL/-EFAULT on bad arguments. */
678 asmlinkage int
679 sys_rt_sigtimedwait(const sigset_t *uthese, siginfo_t *uinfo,
680 const struct timespec *uts, size_t sigsetsize)
682 int ret, sig;
683 sigset_t these;
684 struct timespec ts;
685 siginfo_t info;
687 /* XXX: Don't preclude handling different sized sigset_t's. */
688 if (sigsetsize != sizeof(sigset_t))
689 return -EINVAL;
691 if (copy_from_user(&these, uthese, sizeof(these)))
692 return -EFAULT;
693 else {
694 /* Invert the set of allowed signals to get those we
695 want to block. */
696 signotset(&these);
699 if (uts) {
700 if (copy_from_user(&ts, uts, sizeof(ts)))
701 return -EFAULT;
702 if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
703 || ts.tv_sec < 0)
704 return -EINVAL;
707 spin_lock_irq(&current->sigmask_lock);
708 sig = dequeue_signal(&these, &info);
709 if (!sig) {
710 /* None ready -- temporarily unblock those we're interested
711 in so that we'll be awakened when they arrive. */
712 unsigned long expire;
713 sigset_t oldblocked = current->blocked;
714 sigandsets(&current->blocked, &current->blocked, &these);
715 recalc_sigpending(current);
716 spin_unlock_irq(&current->sigmask_lock);
718 expire = ~0UL;
719 if (uts) {
/* `+ (ts.tv_sec || ts.tv_nsec)` rounds any non-zero timeout
   up by one jiffy so we never sleep shorter than requested. */
720 expire = (timespec_to_jiffies(&ts)
721 + (ts.tv_sec || ts.tv_nsec));
722 expire += jiffies;
724 current->timeout = expire;
726 current->state = TASK_INTERRUPTIBLE;
727 schedule();
729 spin_lock_irq(&current->sigmask_lock);
730 sig = dequeue_signal(&these, &info);
731 current->blocked = oldblocked;
732 recalc_sigpending(current);
734 spin_unlock_irq(&current->sigmask_lock);
736 if (sig) {
737 ret = sig;
738 if (uinfo) {
739 if (copy_to_user(uinfo, &info, sizeof(siginfo_t)))
740 ret = -EFAULT;
742 } else {
/* Woken without a wanted signal: timeout still set means some
   other signal interrupted the sleep -> -EINTR; cleared timeout
   means the timer expired -> -EAGAIN. */
743 ret = -EAGAIN;
744 if (current->timeout != 0) {
745 current->timeout = 0;
746 ret = -EINTR;
750 return ret;
/* kill(2): build SI_USER siginfo identifying the caller, then dispatch via
   kill_something_info which interprets pid (group / all / single). */
753 asmlinkage int
754 sys_kill(int pid, int sig)
756 struct siginfo info;
758 info.si_signo = sig;
759 info.si_errno = 0;
760 info.si_code = SI_USER;
761 info.si_pid = current->pid;
762 info.si_uid = current->uid;
764 return kill_something_info(sig, &info, pid);
/* rt_sigqueueinfo(2): queue sig with caller-supplied siginfo to pid.
   si_code >= 0 is rejected with -EPERM so user space cannot impersonate
   kernel- or kill()-originated signals; si_signo is forced to sig. */
767 asmlinkage int
768 sys_rt_sigqueueinfo(int pid, int sig, siginfo_t *uinfo)
770 siginfo_t info;
772 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
773 return -EFAULT;
775 /* Not even root can pretend to send signals from the kernel.
776 Nor can they impersonate a kill(), which adds source info. */
777 if (info.si_code >= 0)
778 return -EPERM;
779 info.si_signo = sig;
781 /* POSIX.1b doesn't mention process groups. */
782 return kill_proc_info(sig, &info, pid);
/* Common worker for sigaction-style syscalls: under sigmask_lock, optionally
   read the old k_sigaction into *oact and install *act (with SIGKILL/SIGSTOP
   stripped from sa_mask). Changing SIGKILL/SIGSTOP actions is -EINVAL.
   Per POSIX 3.3.1.3, if the new disposition ignores the signal (SIG_IGN, or
   SIG_DFL for default-ignored signals), any pending instance is discarded,
   including queued rt entries. Returns 0 or -EINVAL. */
786 do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact)
788 struct k_sigaction *k;
790 if (sig < 1 || sig > _NSIG ||
791 (act && (sig == SIGKILL || sig == SIGSTOP)))
792 return -EINVAL;
794 spin_lock_irq(&current->sigmask_lock);
795 k = &current->sig->action[sig-1];
797 if (oact) *oact = *k;
799 if (act) {
800 *k = *act;
801 sigdelsetmask(&k->sa.sa_mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
804 * POSIX 3.3.1.3:
805 * "Setting a signal action to SIG_IGN for a signal that is
806 * pending shall cause the pending signal to be discarded,
807 * whether or not it is blocked."
809 * "Setting a signal action to SIG_DFL for a signal that is
810 * pending and whose default action is to ignore the signal
811 * (for example, SIGCHLD), shall cause the pending signal to
812 * be discarded, whether or not it is blocked"
814 * Note the silly behaviour of SIGCHLD: SIG_IGN means that the
815 * signal isn't actually ignored, but does automatic child
816 * reaping, while SIG_DFL is explicitly said by POSIX to force
817 * the signal to be ignored.
820 if (k->sa.sa_handler == SIG_IGN
821 || (k->sa.sa_handler == SIG_DFL
822 && (sig == SIGCONT ||
823 sig == SIGCHLD ||
824 sig == SIGWINCH))) {
825 /* So dequeue any that might be pending.
826 XXX: process-wide signals? */
827 if (sig >= SIGRTMIN &&
828 sigismember(&current->signal, sig)) {
829 struct signal_queue *q, **pp;
830 pp = &current->sigqueue;
831 q = current->sigqueue;
832 while (q) {
833 if (q->info.si_signo != sig)
834 pp = &q->next;
835 else {
836 *pp = q->next;
837 kmem_cache_free(signal_queue_cachep, q);
838 nr_queued_signals--;
840 q = *pp;
/* NOTE(review): unlike dequeue_signal, this purge loop does not
   repoint current->sigqueue_tail when the tail entry is freed —
   presumably handled elsewhere or a latent bug; confirm. */
844 sigdelset(&current->signal, sig);
845 recalc_sigpending(current);
849 spin_unlock_irq(&current->sigmask_lock);
851 return 0;
/* Common worker for sigaltstack(2). sp is the caller's current stack pointer
   (used to refuse changing the stack while executing on it -> -EPERM).
   Reads the new stack from uss (only SS_DISABLE accepted in ss_flags; a
   non-disabled stack must be at least MINSIGSTKSZ -> -ENOMEM), installs it
   in current, and writes the previous stack to uoss.
   NOTE(review): rejecting SS_ONSTACK in ss_flags (via `& ~SS_DISABLE`)
   differs from some other Unixes — presumably intentional here; confirm. */
854 int
855 do_sigaltstack (const stack_t *uss, stack_t *uoss, unsigned long sp)
857 stack_t oss;
858 int error;
860 if (uoss) {
861 oss.ss_sp = (void *) current->sas_ss_sp;
862 oss.ss_size = current->sas_ss_size;
863 oss.ss_flags = sas_ss_flags(sp);
866 if (uss) {
867 void *ss_sp;
868 size_t ss_size;
869 int ss_flags;
871 error = -EFAULT;
872 if (verify_area(VERIFY_READ, uss, sizeof(*uss))
873 || __get_user(ss_sp, &uss->ss_sp)
874 || __get_user(ss_flags, &uss->ss_flags)
875 || __get_user(ss_size, &uss->ss_size))
876 goto out;
878 error = -EPERM;
879 if (on_sig_stack (sp))
880 goto out;
882 error = -EINVAL;
883 if (ss_flags & ~SS_DISABLE)
884 goto out;
886 if (ss_flags & SS_DISABLE) {
887 ss_size = 0;
888 ss_sp = NULL;
889 } else {
890 error = -ENOMEM;
891 if (ss_size < MINSIGSTKSZ)
892 goto out;
895 current->sas_ss_sp = (unsigned long) ss_sp;
896 current->sas_ss_size = ss_size;
899 if (uoss) {
900 error = -EFAULT;
901 if (copy_to_user(uoss, &oss, sizeof(oss)))
902 goto out;
905 error = 0;
906 out:
907 return error;
/* Old-style sigprocmask(2) operating on a single-word old_sigset_t (only the
   first word of current->blocked). SIGKILL/SIGSTOP are stripped from the new
   bits. Not built on Alpha, which has its own entry points. */
910 #if !defined(__alpha__)
911 /* Alpha has its own versions with special arguments. */
913 asmlinkage int
914 sys_sigprocmask(int how, old_sigset_t *set, old_sigset_t *oset)
916 int error;
917 old_sigset_t old_set, new_set;
919 if (set) {
920 error = -EFAULT;
921 if (copy_from_user(&new_set, set, sizeof(*set)))
922 goto out;
923 new_set &= ~(sigmask(SIGKILL)|sigmask(SIGSTOP));
925 spin_lock_irq(&current->sigmask_lock);
926 old_set = current->blocked.sig[0];
928 error = 0;
929 switch (how) {
930 default:
931 error = -EINVAL;
932 break;
933 case SIG_BLOCK:
934 sigaddsetmask(&current->blocked, new_set);
935 break;
936 case SIG_UNBLOCK:
937 sigdelsetmask(&current->blocked, new_set);
938 break;
939 case SIG_SETMASK:
940 current->blocked.sig[0] = new_set;
941 break;
944 recalc_sigpending(current);
945 spin_unlock_irq(&current->sigmask_lock);
946 if (error)
947 goto out;
948 if (oset)
949 goto set_old;
950 } else if (oset) {
/* NOTE(review): this read of blocked.sig[0] is done without taking
   sigmask_lock, unlike the rt variant above — presumably fine for a
   single-word read; confirm for this architecture set. */
951 old_set = current->blocked.sig[0];
952 set_old:
953 error = -EFAULT;
954 if (copy_to_user(oset, &old_set, sizeof(*oset)))
955 goto out;
957 error = 0;
958 out:
959 return error;
/* Old-style sigpending(2): report the first word of (blocked & pending) —
   same pending-and-blocked semantics as sys_rt_sigpending above. */
962 asmlinkage int
963 sys_sigpending(old_sigset_t *set)
965 int error;
966 old_sigset_t pending;
968 spin_lock_irq(&current->sigmask_lock);
969 pending = current->blocked.sig[0] & current->signal.sig[0];
970 spin_unlock_irq(&current->sigmask_lock);
972 error = -EFAULT;
973 if (!copy_to_user(set, &pending, sizeof(*set)))
974 error = 0;
975 return error;
/* rt_sigaction(2) (generic version; sparc provides its own): copy the user
   sigaction in/out around do_sigaction. -EINVAL for a mismatched sigsetsize.
   NOTE(review): the -EFAULT paths `return` directly while -EINVAL uses
   `goto out` — inconsistent but behaviorally equivalent here. */
978 #ifndef __sparc__
979 asmlinkage int
980 sys_rt_sigaction(int sig, const struct sigaction *act, struct sigaction *oact,
981 size_t sigsetsize)
983 struct k_sigaction new_sa, old_sa;
984 int ret = -EINVAL;
986 /* XXX: Don't preclude handling different sized sigset_t's. */
987 if (sigsetsize != sizeof(sigset_t))
988 goto out;
990 if (act) {
991 if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
992 return -EFAULT;
995 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
997 if (!ret && oact) {
998 if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
999 return -EFAULT;
1001 out:
1002 return ret;
1004 #endif /* __sparc__ */
1005 #endif
/* Legacy sgetmask(2): return the first word of the blocked set. Lock-free
   single-word read (noted "SMP safe" by the original author). */
1007 #if !defined(__alpha__)
1009 * For backwards compatibility. Functionality superseded by sigprocmask.
1011 asmlinkage int
1012 sys_sgetmask(void)
1014 /* SMP safe */
1015 return current->blocked.sig[0];
/* Legacy ssetmask(2): replace the blocked set with newmask (minus
   SIGKILL/SIGSTOP, which can never be blocked) and return the old first
   word, all under sigmask_lock. */
1018 asmlinkage int
1019 sys_ssetmask(int newmask)
1021 int old;
1023 spin_lock_irq(&current->sigmask_lock);
1024 old = current->blocked.sig[0];
1026 siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
1027 sigmask(SIGSTOP)));
1028 recalc_sigpending(current);
1029 spin_unlock_irq(&current->sigmask_lock);
1031 return old;
/* Legacy signal(2): install handler with SA_ONESHOT|SA_NOMASK (System V
   semantics — handler resets to SIG_DFL on delivery and the signal is not
   masked while handling). Returns the previous handler, or a negative errno
   from do_sigaction cast into the unsigned long return. */
1035 * For backwards compatibility. Functionality superseded by sigaction.
1037 asmlinkage unsigned long
1038 sys_signal(int sig, __sighandler_t handler)
1040 struct k_sigaction new_sa, old_sa;
1041 int ret;
1043 new_sa.sa.sa_handler = handler;
1044 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
1046 ret = do_sigaction(sig, &new_sa, &old_sa);
1048 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
1050 #endif /* !alpha */