/*
 * Provenance: imported from davej-history.git (kernel 2.2.x era),
 * kernel/signal.c, blob b6ae00178cab90868b28f88e18135f7bbf576f20.
 */
1 /*
2 * linux/kernel/signal.c
4 * Copyright (C) 1991, 1992 Linus Torvalds
6 * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson
7 */
9 #include <linux/slab.h>
10 #include <linux/module.h>
11 #include <linux/unistd.h>
12 #include <linux/smp_lock.h>
13 #include <linux/init.h>
14 #include <linux/sched.h>
16 #include <asm/uaccess.h>
19 * SLAB caches for signal bits.
22 #define DEBUG_SIG 0
24 #if DEBUG_SIG
25 #define SIG_SLAB_DEBUG (SLAB_DEBUG_FREE | SLAB_RED_ZONE /* | SLAB_POISON */)
26 #else
27 #define SIG_SLAB_DEBUG 0
28 #endif
30 static kmem_cache_t *signal_queue_cachep;
32 int nr_queued_signals;
33 int max_queued_signals = 1024;
35 void __init signals_init(void)
37 signal_queue_cachep =
38 kmem_cache_create("signal_queue",
39 sizeof(struct signal_queue),
40 __alignof__(struct signal_queue),
41 SIG_SLAB_DEBUG, NULL, NULL);
46 * Flush all pending signals for a task.
49 void
50 flush_signals(struct task_struct *t)
52 struct signal_queue *q, *n;
54 t->sigpending = 0;
55 sigemptyset(&t->signal);
56 q = t->sigqueue;
57 t->sigqueue = NULL;
58 t->sigqueue_tail = &t->sigqueue;
60 while (q) {
61 n = q->next;
62 kmem_cache_free(signal_queue_cachep, q);
63 nr_queued_signals--;
64 q = n;
69 * Flush all handlers for a task.
72 void
73 flush_signal_handlers(struct task_struct *t)
75 int i;
76 struct k_sigaction *ka = &t->sig->action[0];
77 for (i = _NSIG ; i != 0 ; i--) {
78 if (ka->sa.sa_handler != SIG_IGN)
79 ka->sa.sa_handler = SIG_DFL;
80 ka->sa.sa_flags = 0;
81 sigemptyset(&ka->sa.sa_mask);
82 ka++;
87 * Dequeue a signal and return the element to the caller, which is
88 * expected to free it.
90 * All callers of must be holding current->sigmask_lock.
93 int
94 dequeue_signal(sigset_t *mask, siginfo_t *info)
96 unsigned long i, *s, *m, x;
97 int sig = 0;
99 #if DEBUG_SIG
100 printk("SIG dequeue (%s:%d): %d ", current->comm, current->pid,
101 signal_pending(current));
102 #endif
104 /* Find the first desired signal that is pending. */
105 s = current->signal.sig;
106 m = mask->sig;
107 switch (_NSIG_WORDS) {
108 default:
109 for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
110 if ((x = *s &~ *m) != 0) {
111 sig = ffz(~x) + i*_NSIG_BPW + 1;
112 break;
114 break;
116 case 2: if ((x = s[0] &~ m[0]) != 0)
117 sig = 1;
118 else if ((x = s[1] &~ m[1]) != 0)
119 sig = _NSIG_BPW + 1;
120 else
121 break;
122 sig += ffz(~x);
123 break;
125 case 1: if ((x = *s &~ *m) != 0)
126 sig = ffz(~x) + 1;
127 break;
130 if (sig) {
131 int reset = 1;
133 /* Collect the siginfo appropriate to this signal. */
134 if (sig < SIGRTMIN) {
135 /* XXX: As an extension, support queueing exactly
136 one non-rt signal if SA_SIGINFO is set, so that
137 we can get more detailed information about the
138 cause of the signal. */
139 /* Deciding not to init these couple of fields is
140 more expensive that just initializing them. */
141 info->si_signo = sig;
142 info->si_errno = 0;
143 info->si_code = 0;
144 info->si_pid = 0;
145 info->si_uid = 0;
146 } else {
147 struct signal_queue *q, **pp;
148 pp = &current->sigqueue;
149 q = current->sigqueue;
151 /* Find the one we're interested in ... */
152 for ( ; q ; pp = &q->next, q = q->next)
153 if (q->info.si_signo == sig)
154 break;
155 if (q) {
156 if ((*pp = q->next) == NULL)
157 current->sigqueue_tail = pp;
158 *info = q->info;
159 kmem_cache_free(signal_queue_cachep,q);
160 nr_queued_signals--;
162 /* then see if this signal is still pending. */
163 q = *pp;
164 while (q) {
165 if (q->info.si_signo == sig) {
166 reset = 0;
167 break;
169 q = q->next;
171 } else {
172 /* Ok, it wasn't in the queue. It must have
173 been sent either by a non-rt mechanism and
174 we ran out of queue space. So zero out the
175 info. */
176 info->si_signo = sig;
177 info->si_errno = 0;
178 info->si_code = 0;
179 info->si_pid = 0;
180 info->si_uid = 0;
184 if (reset)
185 sigdelset(&current->signal, sig);
186 recalc_sigpending(current);
188 /* XXX: Once POSIX.1b timers are in, if si_code == SI_TIMER,
189 we need to xchg out the timer overrun values. */
190 } else {
191 /* XXX: Once CLONE_PID is in to join those "threads" that are
192 part of the same "process", look for signals sent to the
193 "process" as well. */
195 /* Sanity check... */
196 if (mask == &current->blocked && signal_pending(current)) {
197 printk(KERN_CRIT "SIG: sigpending lied\n");
198 current->sigpending = 0;
202 #if DEBUG_SIG
203 printk(" %d -> %d\n", signal_pending(current), sig);
204 #endif
206 return sig;
210 * Determine whether a signal should be posted or not.
212 * Signals with SIG_IGN can be ignored, except for the
213 * special case of a SIGCHLD.
215 * Some signals with SIG_DFL default to a non-action.
217 static int ignored_signal(int sig, struct task_struct *t)
219 struct signal_struct *signals;
220 struct k_sigaction *ka;
222 /* Don't ignore traced or blocked signals */
223 if ((t->flags & PF_PTRACED) || sigismember(&t->blocked, sig))
224 return 0;
226 signals = t->sig;
227 if (!signals)
228 return 1;
230 ka = &signals->action[sig-1];
231 switch ((unsigned long) ka->sa.sa_handler) {
232 case (unsigned long) SIG_DFL:
233 if (sig == SIGCONT ||
234 sig == SIGWINCH ||
235 sig == SIGCHLD ||
236 sig == SIGURG)
237 break;
238 return 0;
240 case (unsigned long) SIG_IGN:
241 if (sig != SIGCHLD)
242 break;
243 /* fallthrough */
244 default:
245 return 0;
247 return 1;
251 send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
253 unsigned long flags;
254 int ret;
256 #if DEBUG_SIG
257 printk("SIG queue (%s:%d): %d ", t->comm, t->pid, sig);
258 #endif
260 ret = -EINVAL;
261 if (sig < 0 || sig > _NSIG)
262 goto out_nolock;
263 /* The somewhat baroque permissions check... */
264 ret = -EPERM;
265 if ((!info || ((unsigned long)info != 1 && SI_FROMUSER(info)))
266 && ((sig != SIGCONT) || (current->session != t->session))
267 && (current->euid ^ t->suid) && (current->euid ^ t->uid)
268 && (current->uid ^ t->suid) && (current->uid ^ t->uid)
269 && !capable(CAP_KILL))
270 goto out_nolock;
272 /* The null signal is a permissions and process existance probe.
273 No signal is actually delivered. Same goes for zombies. */
274 ret = 0;
275 if (!sig || !t->sig)
276 goto out_nolock;
278 spin_lock_irqsave(&t->sigmask_lock, flags);
279 switch (sig) {
280 case SIGKILL: case SIGCONT:
281 /* Wake up the process if stopped. */
282 if (t->state == TASK_STOPPED)
283 wake_up_process(t);
284 t->exit_code = 0;
285 sigdelsetmask(&t->signal, (sigmask(SIGSTOP)|sigmask(SIGTSTP)|
286 sigmask(SIGTTOU)|sigmask(SIGTTIN)));
287 /* Inflict this corner case with recalculations, not mainline */
288 recalc_sigpending(t);
289 break;
291 case SIGSTOP: case SIGTSTP:
292 case SIGTTIN: case SIGTTOU:
293 /* If we're stopping again, cancel SIGCONT */
294 sigdelset(&t->signal, SIGCONT);
295 /* Inflict this corner case with recalculations, not mainline */
296 recalc_sigpending(t);
297 break;
300 /* Optimize away the signal, if it's a signal that can be
301 handled immediately (ie non-blocked and untraced) and
302 that is ignored (either explicitly or by default). */
304 if (ignored_signal(sig, t))
305 goto out;
307 if (sig < SIGRTMIN) {
308 /* Non-real-time signals are not queued. */
309 /* XXX: As an extension, support queueing exactly one
310 non-rt signal if SA_SIGINFO is set, so that we can
311 get more detailed information about the cause of
312 the signal. */
313 if (sigismember(&t->signal, sig))
314 goto out;
315 } else {
316 /* Real-time signals must be queued if sent by sigqueue, or
317 some other real-time mechanism. It is implementation
318 defined whether kill() does so. We attempt to do so, on
319 the principle of least surprise, but since kill is not
320 allowed to fail with EAGAIN when low on memory we just
321 make sure at least one signal gets delivered and don't
322 pass on the info struct. */
324 struct signal_queue *q = 0;
326 if (nr_queued_signals < max_queued_signals) {
327 q = (struct signal_queue *)
328 kmem_cache_alloc(signal_queue_cachep, GFP_ATOMIC);
331 if (q) {
332 nr_queued_signals++;
333 q->next = NULL;
334 *t->sigqueue_tail = q;
335 t->sigqueue_tail = &q->next;
336 switch ((unsigned long) info) {
337 case 0:
338 q->info.si_signo = sig;
339 q->info.si_errno = 0;
340 q->info.si_code = SI_USER;
341 q->info.si_pid = current->pid;
342 q->info.si_uid = current->uid;
343 break;
344 case 1:
345 q->info.si_signo = sig;
346 q->info.si_errno = 0;
347 q->info.si_code = SI_KERNEL;
348 q->info.si_pid = 0;
349 q->info.si_uid = 0;
350 break;
351 default:
352 q->info = *info;
353 break;
355 } else {
356 /* If this was sent by a rt mechanism, try again. */
357 if (info->si_code < 0) {
358 ret = -EAGAIN;
359 goto out;
361 /* Otherwise, mention that the signal is pending,
362 but don't queue the info. */
366 sigaddset(&t->signal, sig);
367 if (!sigismember(&t->blocked, sig)) {
368 t->sigpending = 1;
369 #ifdef __SMP__
371 * If the task is running on a different CPU
372 * force a reschedule on the other CPU - note that
373 * the code below is a tad loose and might occasionally
374 * kick the wrong CPU if we catch the process in the
375 * process of changing - but no harm is done by that
376 * other than doing an extra (lightweight) IPI interrupt.
378 * note that we rely on the previous spin_lock to
379 * lock interrupts for us! No need to set need_resched
380 * since signal event passing goes through ->blocked.
382 spin_lock(&runqueue_lock);
383 if (t->has_cpu && t->processor != smp_processor_id())
384 smp_send_reschedule(t->processor);
385 spin_unlock(&runqueue_lock);
386 #endif /* __SMP__ */
389 out:
390 spin_unlock_irqrestore(&t->sigmask_lock, flags);
391 if ((t->state & TASK_INTERRUPTIBLE) && signal_pending(t))
392 wake_up_process(t);
394 out_nolock:
395 #if DEBUG_SIG
396 printk(" %d -> %d\n", signal_pending(t), ret);
397 #endif
399 return ret;
403 * Force a signal that the process can't ignore: if necessary
404 * we unblock the signal and change any SIG_IGN to SIG_DFL.
408 force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
410 unsigned long int flags;
412 spin_lock_irqsave(&t->sigmask_lock, flags);
413 if (t->sig == NULL) {
414 spin_unlock_irqrestore(&t->sigmask_lock, flags);
415 return -ESRCH;
418 if (t->sig->action[sig-1].sa.sa_handler == SIG_IGN)
419 t->sig->action[sig-1].sa.sa_handler = SIG_DFL;
420 sigdelset(&t->blocked, sig);
421 recalc_sigpending(t);
422 spin_unlock_irqrestore(&t->sigmask_lock, flags);
424 return send_sig_info(sig, info, t);
428 * kill_pg() sends a signal to a process group: this is what the tty
429 * control characters do (^C, ^Z etc)
433 kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
435 int retval = -EINVAL;
436 if (pgrp > 0) {
437 struct task_struct *p;
438 int found = 0;
440 retval = -ESRCH;
441 read_lock(&tasklist_lock);
442 for_each_task(p) {
443 if (p->pgrp == pgrp) {
444 int err = send_sig_info(sig, info, p);
445 if (err != 0)
446 retval = err;
447 else
448 found++;
451 read_unlock(&tasklist_lock);
452 if (found)
453 retval = 0;
455 return retval;
459 * kill_sl() sends a signal to the session leader: this is used
460 * to send SIGHUP to the controlling process of a terminal when
461 * the connection is lost.
465 kill_sl_info(int sig, struct siginfo *info, pid_t sess)
467 int retval = -EINVAL;
468 if (sess > 0) {
469 struct task_struct *p;
470 int found = 0;
472 retval = -ESRCH;
473 read_lock(&tasklist_lock);
474 for_each_task(p) {
475 if (p->leader && p->session == sess) {
476 int err = send_sig_info(sig, info, p);
477 if (err)
478 retval = err;
479 else
480 found++;
483 read_unlock(&tasklist_lock);
484 if (found)
485 retval = 0;
487 return retval;
490 inline int
491 kill_proc_info(int sig, struct siginfo *info, pid_t pid)
493 int error;
494 struct task_struct *p;
496 read_lock(&tasklist_lock);
497 p = find_task_by_pid(pid);
498 error = -ESRCH;
499 if (p)
500 error = send_sig_info(sig, info, p);
501 read_unlock(&tasklist_lock);
502 return error;
506 * kill_something() interprets pid in interesting ways just like kill(2).
508 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
509 * is probably wrong. Should make it like BSD or SYSV.
513 kill_something_info(int sig, struct siginfo *info, int pid)
515 if (!pid) {
516 return kill_pg_info(sig, info, current->pgrp);
517 } else if (pid == -1) {
518 int retval = 0, count = 0;
519 struct task_struct * p;
521 read_lock(&tasklist_lock);
522 for_each_task(p) {
523 if (p->pid > 1 && p != current) {
524 int err = send_sig_info(sig, info, p);
525 ++count;
526 if (err != -EPERM)
527 retval = err;
530 read_unlock(&tasklist_lock);
531 return count ? retval : -ESRCH;
532 } else if (pid < 0) {
533 return kill_pg_info(sig, info, -pid);
534 } else {
535 return kill_proc_info(sig, info, pid);
/*
 * These are for backward compatibility with the rest of the kernel source.
 * The (void*)(long) cookie encodes "from kernel" (1) vs "from user" (0);
 * send_sig_info() decodes it.
 */
int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, (void*)(long)(priv != 0), p);
}
/* Compatibility wrapper: force a kernel-originated signal. */
void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, (void*)1L, p);
}
556 kill_pg(pid_t pgrp, int sig, int priv)
558 return kill_pg_info(sig, (void *)(long)(priv != 0), pgrp);
562 kill_sl(pid_t sess, int sig, int priv)
564 return kill_sl_info(sig, (void *)(long)(priv != 0), sess);
568 kill_proc(pid_t pid, int sig, int priv)
570 return kill_proc_info(sig, (void *)(long)(priv != 0), pid);
574 * Let a parent know about a status change of a child.
577 void
578 notify_parent(struct task_struct *tsk, int sig)
580 struct siginfo info;
581 int why;
583 info.si_signo = sig;
584 info.si_errno = 0;
585 info.si_pid = tsk->pid;
587 /* FIXME: find out whether or not this is supposed to be c*time. */
588 info.si_utime = tsk->times.tms_utime;
589 info.si_stime = tsk->times.tms_stime;
591 why = SI_KERNEL; /* shouldn't happen */
592 switch (tsk->state) {
593 case TASK_ZOMBIE:
594 if (tsk->exit_code & 0x80)
595 why = CLD_DUMPED;
596 else if (tsk->exit_code & 0x7f)
597 why = CLD_KILLED;
598 else
599 why = CLD_EXITED;
600 break;
601 case TASK_STOPPED:
602 /* FIXME -- can we deduce CLD_TRAPPED or CLD_CONTINUED? */
603 why = CLD_STOPPED;
604 break;
606 default:
607 printk(KERN_DEBUG "eh? notify_parent with state %ld?\n",
608 tsk->state);
609 break;
611 info.si_code = why;
613 send_sig_info(sig, &info, tsk->p_pptr);
614 wake_up_interruptible(&tsk->p_pptr->wait_chldexit);
EXPORT_SYMBOL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(force_sig_info);
EXPORT_SYMBOL(kill_pg);
EXPORT_SYMBOL(kill_pg_info);
EXPORT_SYMBOL(kill_proc);
EXPORT_SYMBOL(kill_proc_info);
EXPORT_SYMBOL(kill_sl);
EXPORT_SYMBOL(kill_sl_info);
EXPORT_SYMBOL(notify_parent);
EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
634 * System call entry points.
638 * We don't need to get the kernel lock - this is all local to this
639 * particular thread.. (and that's good, because this is _heavily_
640 * used by various programs)
643 asmlinkage int
644 sys_rt_sigprocmask(int how, sigset_t *set, sigset_t *oset, size_t sigsetsize)
646 int error = -EINVAL;
647 sigset_t old_set, new_set;
649 /* XXX: Don't preclude handling different sized sigset_t's. */
650 if (sigsetsize != sizeof(sigset_t))
651 goto out;
653 if (set) {
654 error = -EFAULT;
655 if (copy_from_user(&new_set, set, sizeof(*set)))
656 goto out;
657 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
659 spin_lock_irq(&current->sigmask_lock);
660 old_set = current->blocked;
662 error = 0;
663 switch (how) {
664 default:
665 error = -EINVAL;
666 break;
667 case SIG_BLOCK:
668 sigorsets(&new_set, &old_set, &new_set);
669 break;
670 case SIG_UNBLOCK:
671 signandsets(&new_set, &old_set, &new_set);
672 break;
673 case SIG_SETMASK:
674 break;
677 current->blocked = new_set;
678 recalc_sigpending(current);
679 spin_unlock_irq(&current->sigmask_lock);
680 if (error)
681 goto out;
682 if (oset)
683 goto set_old;
684 } else if (oset) {
685 spin_lock_irq(&current->sigmask_lock);
686 old_set = current->blocked;
687 spin_unlock_irq(&current->sigmask_lock);
689 set_old:
690 error = -EFAULT;
691 if (copy_to_user(oset, &old_set, sizeof(*oset)))
692 goto out;
694 error = 0;
695 out:
696 return error;
699 asmlinkage int
700 sys_rt_sigpending(sigset_t *set, size_t sigsetsize)
702 int error = -EINVAL;
703 sigset_t pending;
705 /* XXX: Don't preclude handling different sized sigset_t's. */
706 if (sigsetsize != sizeof(sigset_t))
707 goto out;
709 spin_lock_irq(&current->sigmask_lock);
710 sigandsets(&pending, &current->blocked, &current->signal);
711 spin_unlock_irq(&current->sigmask_lock);
713 error = -EFAULT;
714 if (!copy_to_user(set, &pending, sizeof(*set)))
715 error = 0;
716 out:
717 return error;
720 asmlinkage int
721 sys_rt_sigtimedwait(const sigset_t *uthese, siginfo_t *uinfo,
722 const struct timespec *uts, size_t sigsetsize)
724 int ret, sig;
725 sigset_t these;
726 struct timespec ts;
727 siginfo_t info;
728 long timeout = 0;
730 /* XXX: Don't preclude handling different sized sigset_t's. */
731 if (sigsetsize != sizeof(sigset_t))
732 return -EINVAL;
734 if (copy_from_user(&these, uthese, sizeof(these)))
735 return -EFAULT;
736 else {
737 /* Invert the set of allowed signals to get those we
738 want to block. */
739 signotset(&these);
742 if (uts) {
743 if (copy_from_user(&ts, uts, sizeof(ts)))
744 return -EFAULT;
745 if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
746 || ts.tv_sec < 0)
747 return -EINVAL;
750 spin_lock_irq(&current->sigmask_lock);
751 sig = dequeue_signal(&these, &info);
752 if (!sig) {
753 /* None ready -- temporarily unblock those we're interested
754 in so that we'll be awakened when they arrive. */
755 sigset_t oldblocked = current->blocked;
756 sigandsets(&current->blocked, &current->blocked, &these);
757 recalc_sigpending(current);
758 spin_unlock_irq(&current->sigmask_lock);
760 timeout = MAX_SCHEDULE_TIMEOUT;
761 if (uts)
762 timeout = (timespec_to_jiffies(&ts)
763 + (ts.tv_sec || ts.tv_nsec));
765 current->state = TASK_INTERRUPTIBLE;
766 timeout = schedule_timeout(timeout);
768 spin_lock_irq(&current->sigmask_lock);
769 sig = dequeue_signal(&these, &info);
770 current->blocked = oldblocked;
771 recalc_sigpending(current);
773 spin_unlock_irq(&current->sigmask_lock);
775 if (sig) {
776 ret = sig;
777 if (uinfo) {
778 if (copy_to_user(uinfo, &info, sizeof(siginfo_t)))
779 ret = -EFAULT;
781 } else {
782 ret = -EAGAIN;
783 if (timeout)
784 ret = -EINTR;
787 return ret;
790 asmlinkage int
791 sys_kill(int pid, int sig)
793 struct siginfo info;
795 info.si_signo = sig;
796 info.si_errno = 0;
797 info.si_code = SI_USER;
798 info.si_pid = current->pid;
799 info.si_uid = current->uid;
801 return kill_something_info(sig, &info, pid);
804 asmlinkage int
805 sys_rt_sigqueueinfo(int pid, int sig, siginfo_t *uinfo)
807 siginfo_t info;
809 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
810 return -EFAULT;
812 /* Not even root can pretend to send signals from the kernel.
813 Nor can they impersonate a kill(), which adds source info. */
814 if (info.si_code >= 0)
815 return -EPERM;
816 info.si_signo = sig;
818 /* POSIX.1b doesn't mention process groups. */
819 return kill_proc_info(sig, &info, pid);
823 do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact)
825 struct k_sigaction *k;
827 if (sig < 1 || sig > _NSIG ||
828 (act && (sig == SIGKILL || sig == SIGSTOP)))
829 return -EINVAL;
831 spin_lock_irq(&current->sigmask_lock);
832 k = &current->sig->action[sig-1];
834 if (oact) *oact = *k;
836 if (act) {
837 *k = *act;
838 sigdelsetmask(&k->sa.sa_mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
841 * POSIX 3.3.1.3:
842 * "Setting a signal action to SIG_IGN for a signal that is
843 * pending shall cause the pending signal to be discarded,
844 * whether or not it is blocked."
846 * "Setting a signal action to SIG_DFL for a signal that is
847 * pending and whose default action is to ignore the signal
848 * (for example, SIGCHLD), shall cause the pending signal to
849 * be discarded, whether or not it is blocked"
851 * Note the silly behaviour of SIGCHLD: SIG_IGN means that the
852 * signal isn't actually ignored, but does automatic child
853 * reaping, while SIG_DFL is explicitly said by POSIX to force
854 * the signal to be ignored.
857 if (k->sa.sa_handler == SIG_IGN
858 || (k->sa.sa_handler == SIG_DFL
859 && (sig == SIGCONT ||
860 sig == SIGCHLD ||
861 sig == SIGWINCH))) {
862 /* So dequeue any that might be pending.
863 XXX: process-wide signals? */
864 if (sig >= SIGRTMIN &&
865 sigismember(&current->signal, sig)) {
866 struct signal_queue *q, **pp;
867 pp = &current->sigqueue;
868 q = current->sigqueue;
869 while (q) {
870 if (q->info.si_signo != sig)
871 pp = &q->next;
872 else {
873 *pp = q->next;
874 kmem_cache_free(signal_queue_cachep, q);
875 nr_queued_signals--;
877 q = *pp;
881 sigdelset(&current->signal, sig);
882 recalc_sigpending(current);
886 spin_unlock_irq(&current->sigmask_lock);
888 return 0;
891 int
892 do_sigaltstack (const stack_t *uss, stack_t *uoss, unsigned long sp)
894 stack_t oss;
895 int error;
897 if (uoss) {
898 oss.ss_sp = (void *) current->sas_ss_sp;
899 oss.ss_size = current->sas_ss_size;
900 oss.ss_flags = sas_ss_flags(sp);
903 if (uss) {
904 void *ss_sp;
905 size_t ss_size;
906 int ss_flags;
908 error = -EFAULT;
909 if (verify_area(VERIFY_READ, uss, sizeof(*uss))
910 || __get_user(ss_sp, &uss->ss_sp)
911 || __get_user(ss_flags, &uss->ss_flags)
912 || __get_user(ss_size, &uss->ss_size))
913 goto out;
915 error = -EPERM;
916 if (on_sig_stack (sp))
917 goto out;
919 error = -EINVAL;
920 if (ss_flags & ~SS_DISABLE)
921 goto out;
923 if (ss_flags & SS_DISABLE) {
924 ss_size = 0;
925 ss_sp = NULL;
926 } else {
927 error = -ENOMEM;
928 if (ss_size < MINSIGSTKSZ)
929 goto out;
932 current->sas_ss_sp = (unsigned long) ss_sp;
933 current->sas_ss_size = ss_size;
936 if (uoss) {
937 error = -EFAULT;
938 if (copy_to_user(uoss, &oss, sizeof(oss)))
939 goto out;
942 error = 0;
943 out:
944 return error;
947 #if !defined(__alpha__)
948 /* Alpha has its own versions with special arguments. */
950 asmlinkage int
951 sys_sigprocmask(int how, old_sigset_t *set, old_sigset_t *oset)
953 int error;
954 old_sigset_t old_set, new_set;
956 if (set) {
957 error = -EFAULT;
958 if (copy_from_user(&new_set, set, sizeof(*set)))
959 goto out;
960 new_set &= ~(sigmask(SIGKILL)|sigmask(SIGSTOP));
962 spin_lock_irq(&current->sigmask_lock);
963 old_set = current->blocked.sig[0];
965 error = 0;
966 switch (how) {
967 default:
968 error = -EINVAL;
969 break;
970 case SIG_BLOCK:
971 sigaddsetmask(&current->blocked, new_set);
972 break;
973 case SIG_UNBLOCK:
974 sigdelsetmask(&current->blocked, new_set);
975 break;
976 case SIG_SETMASK:
977 current->blocked.sig[0] = new_set;
978 break;
981 recalc_sigpending(current);
982 spin_unlock_irq(&current->sigmask_lock);
983 if (error)
984 goto out;
985 if (oset)
986 goto set_old;
987 } else if (oset) {
988 old_set = current->blocked.sig[0];
989 set_old:
990 error = -EFAULT;
991 if (copy_to_user(oset, &old_set, sizeof(*oset)))
992 goto out;
994 error = 0;
995 out:
996 return error;
999 asmlinkage int
1000 sys_sigpending(old_sigset_t *set)
1002 int error;
1003 old_sigset_t pending;
1005 spin_lock_irq(&current->sigmask_lock);
1006 pending = current->blocked.sig[0] & current->signal.sig[0];
1007 spin_unlock_irq(&current->sigmask_lock);
1009 error = -EFAULT;
1010 if (!copy_to_user(set, &pending, sizeof(*set)))
1011 error = 0;
1012 return error;
1015 #ifndef __sparc__
1016 asmlinkage int
1017 sys_rt_sigaction(int sig, const struct sigaction *act, struct sigaction *oact,
1018 size_t sigsetsize)
1020 struct k_sigaction new_sa, old_sa;
1021 int ret = -EINVAL;
1023 /* XXX: Don't preclude handling different sized sigset_t's. */
1024 if (sigsetsize != sizeof(sigset_t))
1025 goto out;
1027 if (act) {
1028 if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
1029 return -EFAULT;
1032 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
1034 if (!ret && oact) {
1035 if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
1036 return -EFAULT;
1038 out:
1039 return ret;
1041 #endif /* __sparc__ */
1042 #endif
1044 #if !defined(__alpha__) && !defined(__ia64__)
1046 * For backwards compatibility. Functionality superseded by sigprocmask.
1048 asmlinkage int
1049 sys_sgetmask(void)
1051 /* SMP safe */
1052 return current->blocked.sig[0];
1055 asmlinkage int
1056 sys_ssetmask(int newmask)
1058 int old;
1060 spin_lock_irq(&current->sigmask_lock);
1061 old = current->blocked.sig[0];
1063 siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
1064 sigmask(SIGSTOP)));
1065 recalc_sigpending(current);
1066 spin_unlock_irq(&current->sigmask_lock);
1068 return old;
1072 * For backwards compatibility. Functionality superseded by sigaction.
1074 asmlinkage unsigned long
1075 sys_signal(int sig, __sighandler_t handler)
1077 struct k_sigaction new_sa, old_sa;
1078 int ret;
1080 new_sa.sa.sa_handler = handler;
1081 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
1083 ret = do_sigaction(sig, &new_sa, &old_sa);
1085 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
1087 #endif /* !alpha && !__ia64__ */