/*
 * linux/kernel/signal.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson
 */
#include <linux/config.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/unistd.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/sched.h>

#include <asm/param.h>
#include <asm/uaccess.h>
/*
 * SLAB caches for signal bits.
 */
#define DEBUG_SIG 0

#if DEBUG_SIG
#define SIG_SLAB_DEBUG	(SLAB_DEBUG_FREE | SLAB_RED_ZONE /* | SLAB_POISON */)
#else
#define SIG_SLAB_DEBUG	0
#endif
static kmem_cache_t *signal_queue_cachep;

atomic_t nr_queued_signals;
int max_queued_signals = 1024;
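
/*
 * nr_queued_signals counts every struct signal_queue currently allocated
 * from the cache above; max_queued_signals caps that count so that a
 * flood of queued signals (chiefly real-time ones, which may each carry
 * their own siginfo) cannot exhaust kernel memory.  send_sig_info()
 * checks the limit before each allocation.
 */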
void __init signals_init(void)
{
	signal_queue_cachep =
		kmem_cache_create("signal_queue",
				  sizeof(struct signal_queue),
				  __alignof__(struct signal_queue),
				  SIG_SLAB_DEBUG, NULL, NULL);
	if (!signal_queue_cachep)
		panic("signals_init(): cannot create signal_queue SLAB cache");
}
/*
 * Flush all pending signals for a task.
 */
void
flush_signals(struct task_struct *t)
{
	struct signal_queue *q, *n;

	t->sigpending = 0;
	sigemptyset(&t->signal);
	q = t->sigqueue;
	t->sigqueue = NULL;
	t->sigqueue_tail = &t->sigqueue;

	while (q) {
		n = q->next;
		kmem_cache_free(signal_queue_cachep, q);
		atomic_dec(&nr_queued_signals);
		q = n;
	}
}
/*
 * Flush all handlers for a task.
 */
void
flush_signal_handlers(struct task_struct *t)
{
	int i;
	struct k_sigaction *ka = &t->sig->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}
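
/*
 * Note that the loop above deliberately leaves SIG_IGN dispositions in
 * place while resetting everything else to SIG_DFL: that matches the
 * POSIX rule that ignored signals stay ignored across an exec, which is
 * the path this helper is meant to serve.
 */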
/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers must be holding current->sigmask_lock.
 */
int
dequeue_signal(sigset_t *mask, siginfo_t *info)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

#if DEBUG_SIG
printk("SIG dequeue (%s:%d): %d ", current->comm, current->pid,
	signal_pending(current));
#endif

	/* Find the first desired signal that is pending. */
	s = current->signal.sig;
	m = mask->sig;
	switch (_NSIG_WORDS) {
	default:
		for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
			if ((x = *s &~ *m) != 0) {
				sig = ffz(~x) + i*_NSIG_BPW + 1;
				break;
			}
		break;

	case 2: if ((x = s[0] &~ m[0]) != 0)
			sig = 1;
		else if ((x = s[1] &~ m[1]) != 0)
			sig = _NSIG_BPW + 1;
		else
			break;
		sig += ffz(~x);
		break;

	case 1: if ((x = *s &~ *m) != 0)
			sig = ffz(~x) + 1;
		break;
	}

	if (sig) {
		int reset = 1;

		/* Collect the siginfo appropriate to this signal. */
		struct signal_queue *q, **pp;
		pp = &current->sigqueue;
		q = current->sigqueue;

		/* Find the one we're interested in ... */
		for ( ; q ; pp = &q->next, q = q->next)
			if (q->info.si_signo == sig)
				break;
		if (q) {
			if ((*pp = q->next) == NULL)
				current->sigqueue_tail = pp;
			copy_siginfo(info, &q->info);
			kmem_cache_free(signal_queue_cachep,q);
			atomic_dec(&nr_queued_signals);

			/* Then see if this signal is still pending.
			   (Non rt signals may not be queued twice.) */
			if (sig >= SIGRTMIN)
				for (q = *pp; q; q = q->next)
					if (q->info.si_signo == sig) {
						reset = 0;
						break;
					}
		} else {
			/* Ok, it wasn't in the queue.  We must have
			   been out of queue space.  So zero out the
			   info. */
			info->si_signo = sig;
			info->si_errno = 0;
			info->si_code = 0;
			info->si_pid = 0;
			info->si_uid = 0;
		}

		if (reset) {
			sigdelset(&current->signal, sig);
			recalc_sigpending(current);
		}

		/* XXX: Once POSIX.1b timers are in, if si_code == SI_TIMER,
		   we need to xchg out the timer overrun values. */
	} else {
		/* XXX: Once CLONE_PID is in to join those "threads" that are
		   part of the same "process", look for signals sent to the
		   "process" as well. */

		/* Sanity check... */
		if (mask == &current->blocked && signal_pending(current)) {
			printk(KERN_CRIT "SIG: sigpending lied\n");
			current->sigpending = 0;
		}
	}

#if DEBUG_SIG
printk(" %d -> %d\n", signal_pending(current), sig);
#endif

	return sig;
}
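
/*
 * A minimal sketch of the caller-side pattern, modelled on the arch
 * do_signal() loops of this era (the surrounding function and variable
 * names are illustrative, not part of this file):
 *
 *	siginfo_t info;
 *	int signr;
 *
 *	spin_lock_irq(&current->sigmask_lock);
 *	signr = dequeue_signal(&current->blocked, &info);
 *	spin_unlock_irq(&current->sigmask_lock);
 *	if (signr)
 *		... deliver per current->sig->action[signr-1] ...
 */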
/*
 * Remove signal sig from queue and from t->signal.
 * Returns 1 if sig was found in t->signal.
 *
 * All callers must be holding t->sigmask_lock.
 */
static int rm_sig_from_queue(int sig, struct task_struct *t)
{
	struct signal_queue *q, **pp;

	if (sig >= SIGRTMIN) {
		printk(KERN_CRIT "SIG: rm_sig_from_queue() doesn't support rt signals\n");
		return 0;
	}

	if (!sigismember(&t->signal, sig))
		return 0;

	sigdelset(&t->signal, sig);

	pp = &t->sigqueue;
	q = t->sigqueue;

	/* Find the one we're interested in ...
	   It may appear only once. */
	for ( ; q ; pp = &q->next, q = q->next)
		if (q->info.si_signo == sig)
			break;
	if (q) {
		if ((*pp = q->next) == NULL)
			t->sigqueue_tail = pp;
		kmem_cache_free(signal_queue_cachep,q);
		atomic_dec(&nr_queued_signals);
	}
	return 1;
}
/*
 * Determine whether a signal should be posted or not.
 *
 * Signals with SIG_IGN can be ignored, except for the
 * special case of a SIGCHLD.
 *
 * Some signals with SIG_DFL default to a non-action.
 */
static int ignored_signal(int sig, struct task_struct *t)
{
	struct signal_struct *signals;
	struct k_sigaction *ka;

	/* Don't ignore traced or blocked signals */
	if ((t->ptrace & PT_PTRACED) || sigismember(&t->blocked, sig))
		return 0;

	signals = t->sig;
	if (!signals)
		return 1;

	ka = &signals->action[sig-1];
	switch ((unsigned long) ka->sa.sa_handler) {
	case (unsigned long) SIG_DFL:
		if (sig == SIGCONT ||
		    sig == SIGWINCH ||
		    sig == SIGCHLD ||
		    sig == SIGURG)
			break;
		return 0;

	case (unsigned long) SIG_IGN:
		if (sig != SIGCHLD)
			break;
		/* fallthrough */
	default:
		return 0;
	}
	return 1;
}
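
/*
 * The SIG_DFL cases above (SIGCONT, SIGWINCH, SIGCHLD, SIGURG) are
 * exactly the signals whose default disposition is to be discarded, so
 * posting them to a task that has not installed a handler would be
 * pure overhead.
 */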
int
send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long flags;
	int ret;
	struct signal_queue *q = 0;

#if DEBUG_SIG
printk("SIG queue (%s:%d): %d ", t->comm, t->pid, sig);
#endif

	ret = -EINVAL;
	if (sig < 0 || sig > _NSIG)
		goto out_nolock;
	/* The somewhat baroque permissions check... */
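	/* Unpacked: each (a ^ b) term is non-zero iff a != b, so the whole
	   condition fails -- and the signal is allowed -- as soon as the
	   info marks a kernel-generated signal, or this is a SIGCONT sent
	   within the same session, or one of the sender's uid/euid matches
	   the target's uid/suid, or the sender has CAP_KILL. */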
	ret = -EPERM;
	if ((!info || ((unsigned long)info != 1 && SI_FROMUSER(info)))
	    && ((sig != SIGCONT) || (current->session != t->session))
	    && (current->euid ^ t->suid) && (current->euid ^ t->uid)
	    && (current->uid ^ t->suid) && (current->uid ^ t->uid)
	    && !capable(CAP_KILL))
		goto out_nolock;

	/* The null signal is a permissions and process existence probe.
	   No signal is actually delivered.  Same goes for zombies. */
	ret = 0;
	if (!sig || !t->sig)
		goto out_nolock;

	spin_lock_irqsave(&t->sigmask_lock, flags);
	switch (sig) {
	case SIGKILL: case SIGCONT:
		/* Wake up the process if stopped. */
		if (t->state == TASK_STOPPED)
			wake_up_process(t);
		t->exit_code = 0;
		if (rm_sig_from_queue(SIGSTOP, t) || rm_sig_from_queue(SIGTSTP, t) ||
		    rm_sig_from_queue(SIGTTOU, t) || rm_sig_from_queue(SIGTTIN, t))
			recalc_sigpending(t);
		break;

	case SIGSTOP: case SIGTSTP:
	case SIGTTIN: case SIGTTOU:
		/* If we're stopping again, cancel SIGCONT */
		if (rm_sig_from_queue(SIGCONT, t))
			recalc_sigpending(t);
		break;
	}

	/* Optimize away the signal, if it's a signal that can be
	   handled immediately (ie non-blocked and untraced) and
	   that is ignored (either explicitly or by default). */
	if (ignored_signal(sig, t))
		goto out;

	/* Support queueing exactly one non-rt signal, so that we
	   can get more detailed information about the cause of
	   the signal. */
	if (sig < SIGRTMIN && sigismember(&t->signal, sig))
		goto out;

	/* Real-time signals must be queued if sent by sigqueue, or
	   some other real-time mechanism.  It is implementation
	   defined whether kill() does so.  We attempt to do so, on
	   the principle of least surprise, but since kill is not
	   allowed to fail with EAGAIN when low on memory we just
	   make sure at least one signal gets delivered and don't
	   pass on the info struct. */
	if (atomic_read(&nr_queued_signals) < max_queued_signals) {
		q = (struct signal_queue *)
			kmem_cache_alloc(signal_queue_cachep, GFP_ATOMIC);
	}

	if (q) {
		atomic_inc(&nr_queued_signals);
		q->next = NULL;
		*t->sigqueue_tail = q;
		t->sigqueue_tail = &q->next;
		switch ((unsigned long) info) {
		case 0:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = current->pid;
			q->info.si_uid = current->uid;
			break;
		case 1:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			break;
		}
	} else if (sig >= SIGRTMIN && info && (unsigned long)info != 1
		   && info->si_code != SI_USER) {
		/*
		 * Queue overflow, abort.  We may abort if the signal was rt
		 * and sent by user using something other than kill().
		 */
		ret = -EAGAIN;
		goto out;
	}

	sigaddset(&t->signal, sig);
	if (!sigismember(&t->blocked, sig)) {
		t->sigpending = 1;
#ifdef CONFIG_SMP
		/*
		 * If the task is running on a different CPU
		 * force a reschedule on the other CPU - note that
		 * the code below is a tad loose and might occasionally
		 * kick the wrong CPU if we catch the process in the
		 * process of changing - but no harm is done by that
		 * other than doing an extra (lightweight) IPI interrupt.
		 *
		 * note that we rely on the previous spin_lock to
		 * lock interrupts for us! No need to set need_resched
		 * since signal event passing goes through ->blocked.
		 */
		spin_lock(&runqueue_lock);
		if (t->has_cpu && t->processor != smp_processor_id())
			smp_send_reschedule(t->processor);
		spin_unlock(&runqueue_lock);
#endif /* CONFIG_SMP */
	}

out:
	spin_unlock_irqrestore(&t->sigmask_lock, flags);
	if ((t->state & TASK_INTERRUPTIBLE) && signal_pending(t))
		wake_up_process(t);

out_nolock:
#if DEBUG_SIG
printk(" %d -> %d\n", signal_pending(t), ret);
#endif

	return ret;
}
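
/*
 * A hedged usage sketch: a driver that wants to signal a task it has
 * recorded in "tsk" (a hypothetical variable) could do
 *
 *	struct siginfo info;
 *
 *	info.si_signo = SIGIO;
 *	info.si_errno = 0;
 *	info.si_code = SI_KERNEL;
 *	info.si_pid = 0;
 *	info.si_uid = 0;
 *	send_sig_info(SIGIO, &info, tsk);
 *
 * Passing (void *)0 or (void *)1 instead of a real siginfo pointer is
 * the shorthand used by send_sig()/force_sig() below to mean
 * "user-originated" and "kernel-originated" respectively.
 */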
/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 */
int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;

	spin_lock_irqsave(&t->sigmask_lock, flags);
	if (t->sig == NULL) {
		spin_unlock_irqrestore(&t->sigmask_lock, flags);
		return -ESRCH;
	}

	if (t->sig->action[sig-1].sa.sa_handler == SIG_IGN)
		t->sig->action[sig-1].sa.sa_handler = SIG_DFL;
	sigdelset(&t->blocked, sig);
	recalc_sigpending(t);
	spin_unlock_irqrestore(&t->sigmask_lock, flags);

	return send_sig_info(sig, info, t);
}
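
/*
 * force_sig_info() is the tool of choice for synchronous faults (for
 * example an architecture's page-fault handler raising SIGSEGV): if the
 * task were allowed to keep the signal blocked or ignored, the faulting
 * instruction would simply trap again forever.
 */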
/*
 * kill_pg_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 */
int
kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
{
	int retval = -EINVAL;
	if (pgrp > 0) {
		struct task_struct *p;
		int found = 0;

		retval = -ESRCH;
		read_lock(&tasklist_lock);
		for_each_task(p) {
			if (p->pgrp == pgrp) {
				int err = send_sig_info(sig, info, p);
				if (err != 0)
					retval = err;
				else
					found++;
			}
		}
		read_unlock(&tasklist_lock);
		if (found)
			retval = 0;
	}
	return retval;
}
/*
 * kill_sl_info() sends a signal to the session leader: this is used
 * to send SIGHUP to the controlling process of a terminal when
 * the connection is lost.
 */
int
kill_sl_info(int sig, struct siginfo *info, pid_t sess)
{
	int retval = -EINVAL;
	if (sess > 0) {
		struct task_struct *p;
		int found = 0;

		retval = -ESRCH;
		read_lock(&tasklist_lock);
		for_each_task(p) {
			if (p->leader && p->session == sess) {
				int err = send_sig_info(sig, info, p);
				if (err)
					retval = err;
				else
					found++;
			}
		}
		read_unlock(&tasklist_lock);
		if (found)
			retval = 0;
	}
	return retval;
}
inline int
kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;
	struct task_struct *p;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);
	error = -ESRCH;
	if (p)
		error = send_sig_info(sig, info, p);
	read_unlock(&tasklist_lock);
	return error;
}
/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */
static int
kill_something_info(int sig, struct siginfo *info, int pid)
{
	if (!pid) {
		return kill_pg_info(sig, info, current->pgrp);
	} else if (pid == -1) {
		int retval = 0, count = 0;
		struct task_struct * p;

		read_lock(&tasklist_lock);
		for_each_task(p) {
			if (p->pid > 1 && p != current) {
				int err = send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		read_unlock(&tasklist_lock);
		return count ? retval : -ESRCH;
	} else if (pid < 0) {
		return kill_pg_info(sig, info, -pid);
	} else {
		return kill_proc_info(sig, info, pid);
	}
}
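
/*
 * In table form, the pid cases above mirror kill(2):
 *
 *	pid > 0		signal the single process pid
 *	pid == 0	signal the caller's own process group
 *	pid == -1	signal everything except init and the caller
 *	pid < -1	signal the process group -pid
 */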
/*
 * These are for backward compatibility with the rest of the kernel source.
 */
int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, (void*)(long)(priv != 0), p);
}

void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, (void*)1L, p);
}

int
kill_pg(pid_t pgrp, int sig, int priv)
{
	return kill_pg_info(sig, (void *)(long)(priv != 0), pgrp);
}

int
kill_sl(pid_t sess, int sig, int priv)
{
	return kill_sl_info(sig, (void *)(long)(priv != 0), sess);
}

int
kill_proc(pid_t pid, int sig, int priv)
{
	return kill_proc_info(sig, (void *)(long)(priv != 0), pid);
}
/*
 * Let a parent know about a status change of a child.
 */
void
notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	int why, status;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_pid = tsk->pid;
	info.si_uid = tsk->uid;

	/* FIXME: find out whether or not this is supposed to be c*time. */
	info.si_utime = hz_to_std(tsk->times.tms_utime);
	info.si_stime = hz_to_std(tsk->times.tms_stime);

	status = tsk->exit_code & 0x7f;
	why = SI_KERNEL;	/* shouldn't happen */
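	/* exit_code follows the classic wait(2) status layout: bits 0-6
	   hold the terminating signal, bit 7 is set when a core dump was
	   written, and bits 8-15 hold the exit status for a normal
	   exit -- which is what the decoding below picks apart. */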
	switch (tsk->state) {
	case TASK_ZOMBIE:
		if (tsk->exit_code & 0x80)
			why = CLD_DUMPED;
		else if (tsk->exit_code & 0x7f)
			why = CLD_KILLED;
		else {
			why = CLD_EXITED;
			status = tsk->exit_code >> 8;
		}
		break;
	case TASK_STOPPED:
		/* FIXME -- can we deduce CLD_TRAPPED or CLD_CONTINUED? */
		if (tsk->ptrace & PT_PTRACED)
			why = CLD_TRAPPED;
		else
			why = CLD_STOPPED;
		break;

	default:
		printk(KERN_DEBUG "eh? notify_parent with state %ld?\n",
		       tsk->state);
		break;
	}
	info.si_code = why;
	info.si_status = status;

	send_sig_info(sig, &info, tsk->p_pptr);
	wake_up_interruptible(&tsk->p_pptr->wait_chldexit);
}
EXPORT_SYMBOL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(force_sig_info);
EXPORT_SYMBOL(kill_pg);
EXPORT_SYMBOL(kill_pg_info);
EXPORT_SYMBOL(kill_proc);
EXPORT_SYMBOL(kill_proc_info);
EXPORT_SYMBOL(kill_sl);
EXPORT_SYMBOL(kill_sl_info);
EXPORT_SYMBOL(notify_parent);
EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
/*
 * System call entry points.
 */
/*
 * We don't need to get the kernel lock - this is all local to this
 * particular thread.. (and that's good, because this is _heavily_
 * used by various programs)
 */
asmlinkage long
sys_rt_sigprocmask(int how, sigset_t *set, sigset_t *oset, size_t sigsetsize)
{
	int error = -EINVAL;
	sigset_t old_set, new_set;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		spin_lock_irq(&current->sigmask_lock);
		old_set = current->blocked;

		error = 0;
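		/* SIG_BLOCK adds new_set to the blocked mask, SIG_UNBLOCK
		   clears its bits from the mask, and SIG_SETMASK replaces
		   the mask outright; anything else is rejected. */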
		switch (how) {
		default:
			error = -EINVAL;
			break;
		case SIG_BLOCK:
			sigorsets(&new_set, &old_set, &new_set);
			break;
		case SIG_UNBLOCK:
			signandsets(&new_set, &old_set, &new_set);
			break;
		case SIG_SETMASK:
			break;
		}

		current->blocked = new_set;
		recalc_sigpending(current);
		spin_unlock_irq(&current->sigmask_lock);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		spin_lock_irq(&current->sigmask_lock);
		old_set = current->blocked;
		spin_unlock_irq(&current->sigmask_lock);

	set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}
asmlinkage long
sys_rt_sigpending(sigset_t *set, size_t sigsetsize)
{
	int error = -EINVAL;
	sigset_t pending;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	spin_lock_irq(&current->sigmask_lock);
	sigandsets(&pending, &current->blocked, &current->signal);
	spin_unlock_irq(&current->sigmask_lock);

	error = -EFAULT;
	if (!copy_to_user(set, &pending, sizeof(*set)))
		error = 0;
out:
	return error;
}
asmlinkage long
sys_rt_sigtimedwait(const sigset_t *uthese, siginfo_t *uinfo,
		    const struct timespec *uts, size_t sigsetsize)
{
	int ret, sig;
	sigset_t these;
	struct timespec ts;
	siginfo_t info;
	long timeout = 0;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&these, uthese, sizeof(these)))
		return -EFAULT;

	/*
	 * Invert the set of allowed signals to get those we
	 * want to block.
	 */
	sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
	signotset(&these);

	if (uts) {
		if (copy_from_user(&ts, uts, sizeof(ts)))
			return -EFAULT;
		if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
		    || ts.tv_sec < 0)
			return -EINVAL;
	}

	spin_lock_irq(&current->sigmask_lock);
	sig = dequeue_signal(&these, &info);
	if (!sig) {
		/* None ready -- temporarily unblock those we're interested
		   in so that we'll be awakened when they arrive. */
		sigset_t oldblocked = current->blocked;
		sigandsets(&current->blocked, &current->blocked, &these);
		recalc_sigpending(current);
		spin_unlock_irq(&current->sigmask_lock);

		timeout = MAX_SCHEDULE_TIMEOUT;
		if (uts)
			timeout = (timespec_to_jiffies(&ts)
				   + (ts.tv_sec || ts.tv_nsec));
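		/* The (ts.tv_sec || ts.tv_nsec) term adds one jiffy for any
		   non-zero timeout, rounding the sleep up so we never return
		   early, while a zero timespec still polls without
		   sleeping. */
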
		current->state = TASK_INTERRUPTIBLE;
		timeout = schedule_timeout(timeout);

		spin_lock_irq(&current->sigmask_lock);
		sig = dequeue_signal(&these, &info);
		current->blocked = oldblocked;
		recalc_sigpending(current);
	}
	spin_unlock_irq(&current->sigmask_lock);

	if (sig) {
		ret = sig;
		if (uinfo) {
			if (copy_siginfo_to_user(uinfo, &info))
				ret = -EFAULT;
		}
	} else {
		ret = -EAGAIN;
		if (timeout)
			ret = -EINTR;
	}

	return ret;
}
asmlinkage long
sys_kill(int pid, int sig)
{
	struct siginfo info;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_USER;
	info.si_pid = current->pid;
	info.si_uid = current->uid;

	return kill_something_info(sig, &info, pid);
}
asmlinkage long
sys_rt_sigqueueinfo(int pid, int sig, siginfo_t *uinfo)
{
	siginfo_t info;

	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;

	/* Not even root can pretend to send signals from the kernel.
	   Nor can they impersonate a kill(), which adds source info. */
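	/* User-generated si_code values (SI_QUEUE and friends) are negative
	   by convention, while SI_USER is 0 and kernel-generated codes are
	   positive -- so rejecting anything >= 0 leaves userland only the
	   codes it is entitled to. */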
	if (info.si_code >= 0)
		return -EPERM;
	info.si_signo = sig;

	/* POSIX.1b doesn't mention process groups. */
	return kill_proc_info(sig, &info, pid);
}
int
do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact)
{
	struct k_sigaction *k;

	if (sig < 1 || sig > _NSIG ||
	    (act && (sig == SIGKILL || sig == SIGSTOP)))
		return -EINVAL;

	spin_lock_irq(&current->sigmask_lock);
	k = &current->sig->action[sig-1];

	if (oact) *oact = *k;

	if (act) {
		*k = *act;
		sigdelsetmask(&k->sa.sa_mask, sigmask(SIGKILL) | sigmask(SIGSTOP));

		/*
		 * POSIX 3.3.1.3:
		 *  "Setting a signal action to SIG_IGN for a signal that is
		 *   pending shall cause the pending signal to be discarded,
		 *   whether or not it is blocked."
		 *
		 *  "Setting a signal action to SIG_DFL for a signal that is
		 *   pending and whose default action is to ignore the signal
		 *   (for example, SIGCHLD), shall cause the pending signal to
		 *   be discarded, whether or not it is blocked"
		 *
		 * Note the silly behaviour of SIGCHLD: SIG_IGN means that the
		 * signal isn't actually ignored, but does automatic child
		 * reaping, while SIG_DFL is explicitly said by POSIX to force
		 * the signal to be ignored.
		 */
		if (k->sa.sa_handler == SIG_IGN
		    || (k->sa.sa_handler == SIG_DFL
			&& (sig == SIGCONT ||
			    sig == SIGCHLD ||
			    sig == SIGWINCH))) {
			/* So dequeue any that might be pending.
			   XXX: process-wide signals? */
			if (sig >= SIGRTMIN &&
			    sigismember(&current->signal, sig)) {
				struct signal_queue *q, **pp;
				pp = &current->sigqueue;
				q = current->sigqueue;
				while (q) {
					if (q->info.si_signo != sig)
						pp = &q->next;
					else {
						if ((*pp = q->next) == NULL)
							current->sigqueue_tail = pp;
						kmem_cache_free(signal_queue_cachep, q);
						atomic_dec(&nr_queued_signals);
					}
					q = *pp;
				}
			}
			sigdelset(&current->signal, sig);
			recalc_sigpending(current);
		}
	}

	spin_unlock_irq(&current->sigmask_lock);
	return 0;
}
int
do_sigaltstack (const stack_t *uss, stack_t *uoss, unsigned long sp)
{
	stack_t oss;
	int error;

	if (uoss) {
		oss.ss_sp = (void *) current->sas_ss_sp;
		oss.ss_size = current->sas_ss_size;
		oss.ss_flags = sas_ss_flags(sp);
	}

	if (uss) {
		void *ss_sp;
		size_t ss_size;
		int ss_flags;

		error = -EFAULT;
		if (verify_area(VERIFY_READ, uss, sizeof(*uss))
		    || __get_user(ss_sp, &uss->ss_sp)
		    || __get_user(ss_flags, &uss->ss_flags)
		    || __get_user(ss_size, &uss->ss_size))
			goto out;

		error = -EPERM;
		if (on_sig_stack (sp))
			goto out;

		error = -EINVAL;
		/*
		 * Note - this code used to test ss_flags incorrectly
		 * old code may have been written using ss_flags==0
		 * to mean ss_flags==SS_ONSTACK (as this was the only
		 * way that worked) - this fix preserves that older
		 * mechanism
		 */
		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
			goto out;

		if (ss_flags == SS_DISABLE) {
			ss_size = 0;
			ss_sp = NULL;
		} else {
			error = -ENOMEM;
			if (ss_size < MINSIGSTKSZ)
				goto out;
		}

		current->sas_ss_sp = (unsigned long) ss_sp;
		current->sas_ss_size = ss_size;
	}

	if (uoss) {
		error = -EFAULT;
		if (copy_to_user(uoss, &oss, sizeof(oss)))
			goto out;
	}

	error = 0;
out:
	return error;
}
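
/*
 * The sp argument is supplied by the per-architecture sys_sigaltstack()
 * wrapper (typically the user stack pointer taken from the trap frame),
 * which is why this generic helper can check on_sig_stack(sp) without
 * touching pt_regs itself.
 */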
#if !defined(__alpha__)
/* Alpha has its own versions with special arguments. */
asmlinkage long
sys_sigprocmask(int how, old_sigset_t *set, old_sigset_t *oset)
{
	int error;
	old_sigset_t old_set, new_set;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		new_set &= ~(sigmask(SIGKILL)|sigmask(SIGSTOP));

		spin_lock_irq(&current->sigmask_lock);
		old_set = current->blocked.sig[0];

		error = 0;
		switch (how) {
		default:
			error = -EINVAL;
			break;
		case SIG_BLOCK:
			sigaddsetmask(&current->blocked, new_set);
			break;
		case SIG_UNBLOCK:
			sigdelsetmask(&current->blocked, new_set);
			break;
		case SIG_SETMASK:
			current->blocked.sig[0] = new_set;
			break;
		}

		recalc_sigpending(current);
		spin_unlock_irq(&current->sigmask_lock);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		old_set = current->blocked.sig[0];
	set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}
asmlinkage long
sys_sigpending(old_sigset_t *set)
{
	int error;
	old_sigset_t pending;

	spin_lock_irq(&current->sigmask_lock);
	pending = current->blocked.sig[0] & current->signal.sig[0];
	spin_unlock_irq(&current->sigmask_lock);

	error = -EFAULT;
	if (!copy_to_user(set, &pending, sizeof(*set)))
		error = 0;
	return error;
}
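
/*
 * Note that the old-style calls above only see sig[0], i.e. the first
 * _NSIG_BPW signals (32 or 64 depending on word size); the real-time
 * signals beyond that are reachable only through the rt_ variants.
 */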
#ifndef __sparc__
asmlinkage long
sys_rt_sigaction(int sig, const struct sigaction *act, struct sigaction *oact,
		 size_t sigsetsize)
{
	struct k_sigaction new_sa, old_sa;
	int ret = -EINVAL;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (act) {
		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);

	if (!ret && oact) {
		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
			return -EFAULT;
	}
out:
	return ret;
}
#endif /* __sparc__ */
#endif
#if !defined(__alpha__) && !defined(__ia64__)
/*
 * For backwards compatibility.  Functionality superseded by sigprocmask.
 */
asmlinkage long
sys_sgetmask(void)
{
	/* SMP safe */
	return current->blocked.sig[0];
}
asmlinkage long
sys_ssetmask(int newmask)
{
	int old;

	spin_lock_irq(&current->sigmask_lock);
	old = current->blocked.sig[0];

	siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
						  sigmask(SIGSTOP)));
	recalc_sigpending(current);
	spin_unlock_irq(&current->sigmask_lock);

	return old;
}
#endif /* !defined(__alpha__) && !defined(__ia64__) */
#if !defined(__alpha__) && !defined(__ia64__) && !defined(__mips__)
/*
 * For backwards compatibility.  Functionality superseded by sigaction.
 */
asmlinkage unsigned long
sys_signal(int sig, __sighandler_t handler)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	new_sa.sa.sa_handler = handler;
	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;

	ret = do_sigaction(sig, &new_sa, &old_sa);

	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
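
/*
 * SA_ONESHOT | SA_NOMASK reproduces historic SysV signal() semantics:
 * the handler is reset to SIG_DFL as soon as it fires, and the signal
 * is not blocked while the handler runs.
 */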
#endif /* !alpha && !__ia64__ && !defined(__mips__) */