/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 */

#include <linux/config.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/unistd.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/sched.h>

#include <asm/uaccess.h>
/*
 * SLAB caches for signal bits.
 */

#define DEBUG_SIG 0

#if DEBUG_SIG
#define SIG_SLAB_DEBUG	(SLAB_DEBUG_FREE | SLAB_RED_ZONE /* | SLAB_POISON */)
#else
#define SIG_SLAB_DEBUG	0
#endif

static kmem_cache_t *sigqueue_cachep;

atomic_t nr_queued_signals;
int max_queued_signals = 1024;
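
/*
 * Boot-time initialization: create the SLAB cache that sigqueue
 * entries are allocated from.  Called once during kernel startup.
 */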
void __init signals_init(void)
{
	sigqueue_cachep =
		kmem_cache_create("sigqueue",
				  sizeof(struct sigqueue),
				  __alignof__(struct sigqueue),
				  SIG_SLAB_DEBUG, NULL, NULL);
	if (!sigqueue_cachep)
		panic("signals_init(): cannot create sigqueue SLAB cache");
}

/* Given the mask, find the first available signal that should be serviced. */
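/*
 * The scan below computes x = pending &~ blocked one word at a time;
 * ffz(~x) (find-first-zero on the complement) yields the index of the
 * lowest set bit in x, i.e. the lowest-numbered deliverable signal.
 */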

static int
next_signal(struct task_struct *tsk, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = tsk->pending.signal.sig;
	m = mask->sig;
	switch (_NSIG_WORDS) {
	default:
		for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
			if ((x = *s &~ *m) != 0) {
				sig = ffz(~x) + i*_NSIG_BPW + 1;
				break;
			}
		break;

	case 2: if ((x = s[0] &~ m[0]) != 0)
			sig = 1;
		else if ((x = s[1] &~ m[1]) != 0)
			sig = _NSIG_BPW + 1;
		else
			break;
		sig += ffz(~x);
		break;

	case 1: if ((x = *s &~ *m) != 0)
			sig = ffz(~x) + 1;
		break;
	}

	return sig;
}
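
/*
 * Free every queued siginfo entry on the given pending list, clear its
 * signal set, and return the entries to the sigqueue SLAB cache.
 */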
static void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q, *n;

	sigemptyset(&queue->signal);
	q = queue->head;
	queue->head = NULL;
	queue->tail = &queue->head;

	while (q) {
		n = q->next;
		kmem_cache_free(sigqueue_cachep, q);
		atomic_dec(&nr_queued_signals);
		q = n;
	}
}

/*
 * Flush all pending signals for a task.
 */

void
flush_signals(struct task_struct *t)
{
	t->sigpending = 0;
	flush_sigqueue(&t->pending);
}
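
/*
 * Called when a task exits: drop its reference on the shared
 * signal_struct, freeing the structure once the last user is gone,
 * and discard anything still queued.
 */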
void exit_sighand(struct task_struct *tsk)
{
	struct signal_struct * sig = tsk->sig;

	spin_lock_irq(&tsk->sigmask_lock);
	if (sig) {
		tsk->sig = NULL;
		if (atomic_dec_and_test(&sig->count))
			kmem_cache_free(sigact_cachep, sig);
	}
	tsk->sigpending = 0;
	flush_sigqueue(&tsk->pending);
	spin_unlock_irq(&tsk->sigmask_lock);
}

/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t)
{
	int i;
	struct k_sigaction *ka = &t->sig->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}

/* Notify the system that a driver wants to block all signals for this
 * process, and wants to be notified if any signals at all were to be
 * sent/acted upon.  If the notifier routine returns non-zero, then the
 * signal will be acted upon after all.  If the notifier routine returns 0,
 * then the signal will be blocked.  Only one block per process is
 * allowed.  priv is a pointer to private data that the notifier routine
 * can use to determine if the signal should be blocked or not.  */

void
block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sigmask_lock, flags);
	current->notifier_mask = mask;
	current->notifier_data = priv;
	current->notifier = notifier;
	spin_unlock_irqrestore(&current->sigmask_lock, flags);
}

/* Notify the system that blocking has ended. */

void
unblock_all_signals(void)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sigmask_lock, flags);
	current->notifier = NULL;
	current->notifier_data = NULL;
	recalc_sigpending(current);
	spin_unlock_irqrestore(&current->sigmask_lock, flags);
}
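
/*
 * Dequeue the siginfo for signal "sig" from the pending list, copying
 * it to *info.  If no queued entry is found (the queue was full when
 * the signal was posted), synthesize a minimal siginfo instead.  The
 * pending bit is cleared unless further instances of an RT signal
 * remain queued.
 */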
static int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
	if (sigismember(&list->signal, sig)) {
		/* Collect the siginfo appropriate to this signal.  */
		struct sigqueue *q, **pp;
		pp = &list->head;
		while ((q = *pp) != NULL) {
			if (q->info.si_signo == sig)
				goto found_it;
			pp = &q->next;
		}

		/* Ok, it wasn't in the queue.  We must have
		   been out of queue space.  So zero out the
		   info. */
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = 0;
		info->si_pid = 0;
		info->si_uid = 0;
		return 1;

found_it:
		if ((*pp = q->next) == NULL)
			list->tail = pp;

		/* Copy the sigqueue information and free the queue entry */
		copy_siginfo(info, &q->info);
		kmem_cache_free(sigqueue_cachep,q);
		atomic_dec(&nr_queued_signals);

		/* RT signals may be queued more than once; keep the
		   pending bit set if another instance remains. */
		if (sig >= SIGRTMIN) {
			while ((q = *pp) != NULL) {
				if (q->info.si_signo == sig)
					goto found_another;
				pp = &q->next;
			}
		}

		sigdelset(&list->signal, sig);
found_another:
		return 1;
	}
	return 0;
}

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers must be holding current->sigmask_lock.
 */

int
dequeue_signal(sigset_t *mask, siginfo_t *info)
{
	int sig = 0;

#if DEBUG_SIG
	printk("SIG dequeue (%s:%d): %d ", current->comm, current->pid,
	       signal_pending(current));
#endif

	sig = next_signal(current, mask);
	if (current->notifier) {
		if (sigismember(current->notifier_mask, sig)) {
			if (!(current->notifier)(current->notifier_data)) {
				current->sigpending = 0;
				return 0;
			}
		}
	}

	if (sig) {
		if (!collect_signal(sig, &current->pending, info))
			sig = 0;

		/* XXX: Once POSIX.1b timers are in, if si_code == SI_TIMER,
		   we need to xchg out the timer overrun values.  */
	}
	recalc_sigpending(current);

#if DEBUG_SIG
	printk(" %d -> %d\n", signal_pending(current), sig);
#endif

	return sig;
}

static int rm_from_queue(int sig, struct sigpending *s)
{
	struct sigqueue *q, **pp;

	if (!sigismember(&s->signal, sig))
		return 0;

	sigdelset(&s->signal, sig);

	pp = &s->head;

	while ((q = *pp) != NULL) {
		if (q->info.si_signo == sig) {
			if ((*pp = q->next) == NULL)
				s->tail = pp;
			kmem_cache_free(sigqueue_cachep,q);
			atomic_dec(&nr_queued_signals);
			continue;
		}
		pp = &q->next;
	}
	return 1;
}

/*
 * Remove signal sig from t->pending.
 * Returns 1 if sig was found.
 *
 * All callers must be holding t->sigmask_lock.
 */
static int rm_sig_from_queue(int sig, struct task_struct *t)
{
	return rm_from_queue(sig, &t->pending);
}

/*
 * Bad permissions for sending the signal
 */
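/*
 * The "a ^ b" terms below are simply non-zero when the two ids differ,
 * so permission is denied only when the sender shares no uid/euid with
 * the target's uid/suid, the SIGCONT same-session exception does not
 * apply, and the sender lacks CAP_KILL.
 */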
int bad_signal(int sig, struct siginfo *info, struct task_struct *t)
{
	return (!info || ((unsigned long)info != 1 && SI_FROMUSER(info)))
	    && ((sig != SIGCONT) || (current->session != t->session))
	    && (current->euid ^ t->suid) && (current->euid ^ t->uid)
	    && (current->uid ^ t->suid) && (current->uid ^ t->uid)
	    && !capable(CAP_KILL);
}

/*
 * Signal type:
 *    < 0 : global action (kill - spread to all non-blocked threads)
 *    = 0 : ignored
 *    > 0 : wake up.
 */
static int signal_type(int sig, struct signal_struct *signals)
{
	unsigned long handler;

	if (!signals)
		return 0;

	handler = (unsigned long) signals->action[sig-1].sa.sa_handler;
	if (handler > 1)
		return 1;

	/* "Ignore" handler.  SIG_IGN on SIGCHLD is illogical but implies
	   an action (automatic child reaping), so only SIGCHLD counts */
	if (handler == 1)
		return sig == SIGCHLD;

	/* Default handler. Normally lethal, but.. */
	switch (sig) {

		/* Ignored */
		case SIGCONT: case SIGWINCH:
		case SIGCHLD: case SIGURG:
			return 0;

		/* Implicit behaviour */
		case SIGTSTP: case SIGTTIN: case SIGTTOU:
			return 1;

		/* Implicit actions (kill or do special stuff) */
		default:
			return -1;
	}
}

/*
 * Determine whether a signal should be posted or not.
 *
 * Signals with SIG_IGN can be ignored, except for the
 * special case of a SIGCHLD.
 *
 * Some signals with SIG_DFL default to a non-action.
 */
static int ignored_signal(int sig, struct task_struct *t)
{
	/* Don't ignore traced or blocked signals */
	if ((t->ptrace & PT_PTRACED) || sigismember(&t->blocked, sig))
		return 0;

	return signal_type(sig, t->sig) == 0;
}

/*
 * Handle TASK_STOPPED cases etc implicit behaviour
 * of certain magical signals.
 *
 * SIGKILL gets spread out to every thread.
 */
static void handle_stop_signal(int sig, struct task_struct *t)
{
	switch (sig) {
	case SIGKILL: case SIGCONT:
		/* Wake up the process if stopped.  */
		if (t->state == TASK_STOPPED)
			wake_up_process(t);
		t->exit_code = 0;
		rm_sig_from_queue(SIGSTOP, t);
		rm_sig_from_queue(SIGTSTP, t);
		rm_sig_from_queue(SIGTTOU, t);
		rm_sig_from_queue(SIGTTIN, t);
		break;

	case SIGSTOP: case SIGTSTP:
	case SIGTTIN: case SIGTTOU:
		/* If we're stopping again, cancel SIGCONT */
		rm_sig_from_queue(SIGCONT, t);
		break;
	}
}

static int send_signal(int sig, struct siginfo *info, struct sigpending *signals)
{
	struct sigqueue * q = NULL;

	/* Real-time signals must be queued if sent by sigqueue, or
	   some other real-time mechanism.  It is implementation
	   defined whether kill() does so.  We attempt to do so, on
	   the principle of least surprise, but since kill is not
	   allowed to fail with EAGAIN when low on memory we just
	   make sure at least one signal gets delivered and don't
	   pass on the info struct.  */

	if (atomic_read(&nr_queued_signals) < max_queued_signals) {
		q = kmem_cache_alloc(sigqueue_cachep, GFP_ATOMIC);
	}

	if (q) {
		atomic_inc(&nr_queued_signals);
		q->next = NULL;
		*signals->tail = q;
		signals->tail = &q->next;
		switch ((unsigned long) info) {
			case 0:
				q->info.si_signo = sig;
				q->info.si_errno = 0;
				q->info.si_code = SI_USER;
				q->info.si_pid = current->pid;
				q->info.si_uid = current->uid;
				break;
			case 1:
				q->info.si_signo = sig;
				q->info.si_errno = 0;
				q->info.si_code = SI_KERNEL;
				q->info.si_pid = 0;
				q->info.si_uid = 0;
				break;
			default:
				copy_siginfo(&q->info, info);
				break;
		}
	} else if (sig >= SIGRTMIN && info && (unsigned long)info != 1
		   && info->si_code != SI_USER) {
		/*
		 * Queue overflow, abort.  We may abort if the signal was rt
		 * and sent by user using something other than kill().
		 */
		return -EAGAIN;
	}

	sigaddset(&signals->signal, sig);
	return 0;
}

/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "sigmask_lock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
static inline void signal_wake_up(struct task_struct *t)
{
	t->sigpending = 1;

	if (t->state & TASK_INTERRUPTIBLE) {
		wake_up_process(t);
		return;
	}

#ifdef CONFIG_SMP
	/*
	 * If the task is running on a different CPU
	 * force a reschedule on the other CPU to make
	 * it notice the new signal quickly.
	 *
	 * The code below is a tad loose and might occasionally
	 * kick the wrong CPU if we catch the process in the
	 * process of changing - but no harm is done by that
	 * other than doing an extra (lightweight) IPI interrupt.
	 */
	spin_lock(&runqueue_lock);
	if (t->has_cpu && t->processor != smp_processor_id())
		smp_send_reschedule(t->processor);
	spin_unlock(&runqueue_lock);
#endif /* CONFIG_SMP */
}
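
/*
 * Post the signal on the target's pending list, then wake the task
 * unless it is currently blocking this signal.
 */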
static int deliver_signal(int sig, struct siginfo *info, struct task_struct *t)
{
	int retval = send_signal(sig, info, &t->pending);

	if (!retval && !sigismember(&t->blocked, sig))
		signal_wake_up(t);

	return retval;
}

int
send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long flags;
	int ret;

#if DEBUG_SIG
	printk("SIG queue (%s:%d): %d ", t->comm, t->pid, sig);
#endif

	ret = -EINVAL;
	if (sig < 0 || sig > _NSIG)
		goto out_nolock;
	/* The somewhat baroque permissions check... */
	ret = -EPERM;
	if (bad_signal(sig, info, t))
		goto out_nolock;

	/* The null signal is a permissions and process existence probe.
	   No signal is actually delivered.  Same goes for zombies. */
	ret = 0;
	if (!sig || !t->sig)
		goto out_nolock;

	spin_lock_irqsave(&t->sigmask_lock, flags);
	handle_stop_signal(sig, t);

	/* Optimize away the signal, if it's a signal that can be
	   handled immediately (ie non-blocked and untraced) and
	   that is ignored (either explicitly or by default).  */

	if (ignored_signal(sig, t))
		goto out;

	/* Support queueing exactly one non-rt signal, so that we
	   can get more detailed information about the cause of
	   the signal. */
	if (sig < SIGRTMIN && sigismember(&t->pending.signal, sig))
		goto out;

	ret = deliver_signal(sig, info, t);
out:
	spin_unlock_irqrestore(&t->sigmask_lock, flags);
	if ((t->state & TASK_INTERRUPTIBLE) && signal_pending(t))
		wake_up_process(t);
out_nolock:
#if DEBUG_SIG
	printk(" %d -> %d\n", signal_pending(t), ret);
#endif

	return ret;
}

/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 */

int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;

	spin_lock_irqsave(&t->sigmask_lock, flags);
	if (t->sig == NULL) {
		spin_unlock_irqrestore(&t->sigmask_lock, flags);
		return -ESRCH;
	}

	if (t->sig->action[sig-1].sa.sa_handler == SIG_IGN)
		t->sig->action[sig-1].sa.sa_handler = SIG_DFL;
	sigdelset(&t->blocked, sig);
	recalc_sigpending(t);
	spin_unlock_irqrestore(&t->sigmask_lock, flags);

	return send_sig_info(sig, info, t);
}

/*
 * kill_pg_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 */

int
kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
{
	int retval = -EINVAL;
	if (pgrp > 0) {
		struct task_struct *p;

		retval = -ESRCH;
		read_lock(&tasklist_lock);
		for_each_task(p) {
			if (p->pgrp == pgrp) {
				int err = send_sig_info(sig, info, p);
				if (retval)
					retval = err;
			}
		}
		read_unlock(&tasklist_lock);
	}
	return retval;
}

/*
 * kill_sl_info() sends a signal to the session leader: this is used
 * to send SIGHUP to the controlling process of a terminal when
 * the connection is lost.
 */

int
kill_sl_info(int sig, struct siginfo *info, pid_t sess)
{
	int retval = -EINVAL;
	if (sess > 0) {
		struct task_struct *p;

		retval = -ESRCH;
		read_lock(&tasklist_lock);
		for_each_task(p) {
			if (p->leader && p->session == sess) {
				int err = send_sig_info(sig, info, p);
				if (retval)
					retval = err;
			}
		}
		read_unlock(&tasklist_lock);
	}
	return retval;
}

inline int
kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;
	struct task_struct *p;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);
	error = -ESRCH;
	if (p)
		error = send_sig_info(sig, info, p);
	read_unlock(&tasklist_lock);
	return error;
}

/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct siginfo *info, int pid)
{
	if (!pid) {
		return kill_pg_info(sig, info, current->pgrp);
	} else if (pid == -1) {
		int retval = 0, count = 0;
		struct task_struct * p;

		read_lock(&tasklist_lock);
		for_each_task(p) {
			if (p->pid > 1 && p != current) {
				int err = send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		read_unlock(&tasklist_lock);
		return count ? retval : -ESRCH;
	} else if (pid < 0) {
		return kill_pg_info(sig, info, -pid);
	} else {
		return kill_proc_info(sig, info, pid);
	}
}

/*
 * These are for backward compatibility with the rest of the kernel source.
 */
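/*
 * The "(void *)(long)(priv != 0)" idiom forges the magic info values
 * 0 (treat as sent by a user process, SI_USER) and 1 (sent by the
 * kernel, SI_KERNEL) that send_signal() switches on.
 */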

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, (void*)(long)(priv != 0), p);
}

void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, (void*)1L, p);
}

int
kill_pg(pid_t pgrp, int sig, int priv)
{
	return kill_pg_info(sig, (void *)(long)(priv != 0), pgrp);
}

int
kill_sl(pid_t sess, int sig, int priv)
{
	return kill_sl_info(sig, (void *)(long)(priv != 0), sess);
}

int
kill_proc(pid_t pid, int sig, int priv)
{
	return kill_proc_info(sig, (void *)(long)(priv != 0), pid);
}

/*
 * Joy. Or not. Pthread wants us to wake up every thread
 * in our parent group.
 */
static void wake_up_parent(struct task_struct *parent)
{
	struct task_struct *tsk = parent;

	do {
		wake_up_interruptible(&tsk->wait_chldexit);
		tsk = next_thread(tsk);
	} while (tsk != parent);
}

/*
 * Let a parent know about a status change of a child.
 */

void do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	int why, status;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_pid = tsk->pid;
	info.si_uid = tsk->uid;

	/* FIXME: find out whether or not this is supposed to be c*time. */
	info.si_utime = tsk->times.tms_utime;
	info.si_stime = tsk->times.tms_stime;
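
	/* Decode the classic wait(2) status word: 0x80 flags a core dump,
	   the low seven bits hold the terminating signal, and a normal
	   exit status lives in the second byte. */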
	status = tsk->exit_code & 0x7f;
	why = SI_KERNEL;	/* shouldn't happen */
	switch (tsk->state) {
	case TASK_ZOMBIE:
		if (tsk->exit_code & 0x80)
			why = CLD_DUMPED;
		else if (tsk->exit_code & 0x7f)
			why = CLD_KILLED;
		else {
			why = CLD_EXITED;
			status = tsk->exit_code >> 8;
		}
		break;
	case TASK_STOPPED:
		/* FIXME -- can we deduce CLD_TRAPPED or CLD_CONTINUED? */
		if (tsk->ptrace & PT_PTRACED)
			why = CLD_TRAPPED;
		else
			why = CLD_STOPPED;
		break;

	default:
		printk(KERN_DEBUG "eh? notify_parent with state %ld?\n",
		       tsk->state);
		break;
	}
	info.si_code = why;
	info.si_status = status;

	send_sig_info(sig, &info, tsk->p_pptr);
	wake_up_parent(tsk->p_pptr);
}

/*
 * We need the tasklist lock because it's the only
 * thing that protects our "parent" pointer.
 *
 * exit.c calls "do_notify_parent()" directly, because
 * it already has the tasklist lock.
 */
void
notify_parent(struct task_struct *tsk, int sig)
{
	read_lock(&tasklist_lock);
	do_notify_parent(tsk, sig);
	read_unlock(&tasklist_lock);
}

EXPORT_SYMBOL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(force_sig_info);
EXPORT_SYMBOL(kill_pg);
EXPORT_SYMBOL(kill_pg_info);
EXPORT_SYMBOL(kill_proc);
EXPORT_SYMBOL(kill_proc_info);
EXPORT_SYMBOL(kill_sl);
EXPORT_SYMBOL(kill_sl_info);
EXPORT_SYMBOL(notify_parent);
EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
EXPORT_SYMBOL(block_all_signals);
EXPORT_SYMBOL(unblock_all_signals);

/*
 * System call entry points.
 */

/*
 * We don't need to get the kernel lock - this is all local to this
 * particular thread.. (and that's good, because this is _heavily_
 * used by various programs)
 */

asmlinkage long
sys_rt_sigprocmask(int how, sigset_t *set, sigset_t *oset, size_t sigsetsize)
{
	int error = -EINVAL;
	sigset_t old_set, new_set;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		spin_lock_irq(&current->sigmask_lock);
		old_set = current->blocked;

		error = 0;
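		/* SIG_BLOCK ors the new bits into the mask, SIG_UNBLOCK
		   clears them (signandsets() computes old &~ new), and
		   SIG_SETMASK installs new_set verbatim. */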
		switch (how) {
		default:
			error = -EINVAL;
			break;
		case SIG_BLOCK:
			sigorsets(&new_set, &old_set, &new_set);
			break;
		case SIG_UNBLOCK:
			signandsets(&new_set, &old_set, &new_set);
			break;
		case SIG_SETMASK:
			break;
		}

		current->blocked = new_set;
		recalc_sigpending(current);
		spin_unlock_irq(&current->sigmask_lock);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		spin_lock_irq(&current->sigmask_lock);
		old_set = current->blocked;
		spin_unlock_irq(&current->sigmask_lock);

	set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}

long do_sigpending(void *set, unsigned long sigsetsize)
{
	long error = -EINVAL;
	sigset_t pending;

	if (sigsetsize > sizeof(sigset_t))
		goto out;

	spin_lock_irq(&current->sigmask_lock);
	sigandsets(&pending, &current->blocked, &current->pending.signal);
	spin_unlock_irq(&current->sigmask_lock);

	error = -EFAULT;
	if (!copy_to_user(set, &pending, sigsetsize))
		error = 0;
out:
	return error;
}

asmlinkage long
sys_rt_sigpending(sigset_t *set, size_t sigsetsize)
{
	return do_sigpending(set, sigsetsize);
}

asmlinkage long
sys_rt_sigtimedwait(const sigset_t *uthese, siginfo_t *uinfo,
		    const struct timespec *uts, size_t sigsetsize)
{
	int ret, sig;
	sigset_t these;
	struct timespec ts;
	siginfo_t info;
	long timeout = 0;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&these, uthese, sizeof(these)))
		return -EFAULT;

	/*
	 * Invert the set of allowed signals to get those we
	 * want to block.
	 */
	sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
	signotset(&these);

	if (uts) {
		if (copy_from_user(&ts, uts, sizeof(ts)))
			return -EFAULT;
		if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
		    || ts.tv_sec < 0)
			return -EINVAL;
	}

	spin_lock_irq(&current->sigmask_lock);
	sig = dequeue_signal(&these, &info);
	if (!sig) {
		/* None ready -- temporarily unblock those we're interested
		   in so that we'll be awakened when they arrive.  */
		sigset_t oldblocked = current->blocked;
		sigandsets(&current->blocked, &current->blocked, &these);
		recalc_sigpending(current);
		spin_unlock_irq(&current->sigmask_lock);
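
		/* timespec_to_jiffies() truncates; the "+ (ts.tv_sec ||
		   ts.tv_nsec)" below rounds any non-zero timeout up by one
		   jiffy so we never sleep shorter than requested. */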
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (uts)
			timeout = (timespec_to_jiffies(&ts)
				   + (ts.tv_sec || ts.tv_nsec));

		current->state = TASK_INTERRUPTIBLE;
		timeout = schedule_timeout(timeout);

		spin_lock_irq(&current->sigmask_lock);
		sig = dequeue_signal(&these, &info);
		current->blocked = oldblocked;
		recalc_sigpending(current);
	}
	spin_unlock_irq(&current->sigmask_lock);

	if (sig) {
		ret = sig;
		if (uinfo) {
			if (copy_siginfo_to_user(uinfo, &info))
				ret = -EFAULT;
		}
	} else {
		ret = -EAGAIN;
		if (timeout)
			ret = -EINTR;
	}

	return ret;
}

asmlinkage long
sys_kill(int pid, int sig)
{
	struct siginfo info;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_USER;
	info.si_pid = current->pid;
	info.si_uid = current->uid;

	return kill_something_info(sig, &info, pid);
}

asmlinkage long
sys_rt_sigqueueinfo(int pid, int sig, siginfo_t *uinfo)
{
	siginfo_t info;

	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;

	/* Not even root can pretend to send signals from the kernel.
	   Nor can they impersonate a kill(), which adds source info.  */
	if (info.si_code >= 0)
		return -EPERM;
	info.si_signo = sig;

	/* POSIX.1b doesn't mention process groups.  */
	return kill_proc_info(sig, &info, pid);
}

int
do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact)
{
	struct k_sigaction *k;

	if (sig < 1 || sig > _NSIG ||
	    (act && (sig == SIGKILL || sig == SIGSTOP)))
		return -EINVAL;

	k = &current->sig->action[sig-1];

	spin_lock(&current->sig->siglock);

	if (oact)
		*oact = *k;

	if (act) {
		*k = *act;
		sigdelsetmask(&k->sa.sa_mask, sigmask(SIGKILL) | sigmask(SIGSTOP));

		/*
		 * POSIX 3.3.1.3:
		 *  "Setting a signal action to SIG_IGN for a signal that is
		 *   pending shall cause the pending signal to be discarded,
		 *   whether or not it is blocked."
		 *
		 *  "Setting a signal action to SIG_DFL for a signal that is
		 *   pending and whose default action is to ignore the signal
		 *   (for example, SIGCHLD), shall cause the pending signal to
		 *   be discarded, whether or not it is blocked"
		 *
		 * Note the silly behaviour of SIGCHLD: SIG_IGN means that the
		 * signal isn't actually ignored, but does automatic child
		 * reaping, while SIG_DFL is explicitly said by POSIX to force
		 * the signal to be ignored.
		 */

		if (k->sa.sa_handler == SIG_IGN
		    || (k->sa.sa_handler == SIG_DFL
			&& (sig == SIGCONT ||
			    sig == SIGCHLD ||
			    sig == SIGWINCH))) {
			spin_lock_irq(&current->sigmask_lock);
			if (rm_sig_from_queue(sig, current))
				recalc_sigpending(current);
			spin_unlock_irq(&current->sigmask_lock);
		}
	}

	spin_unlock(&current->sig->siglock);
	return 0;
}

int
do_sigaltstack (const stack_t *uss, stack_t *uoss, unsigned long sp)
{
	stack_t oss;
	int error;

	if (uoss) {
		oss.ss_sp = (void *) current->sas_ss_sp;
		oss.ss_size = current->sas_ss_size;
		oss.ss_flags = sas_ss_flags(sp);
	}

	if (uss) {
		void *ss_sp;
		size_t ss_size;
		int ss_flags;

		error = -EFAULT;
		if (verify_area(VERIFY_READ, uss, sizeof(*uss))
		    || __get_user(ss_sp, &uss->ss_sp)
		    || __get_user(ss_flags, &uss->ss_flags)
		    || __get_user(ss_size, &uss->ss_size))
			goto out;

		error = -EPERM;
		if (on_sig_stack (sp))
			goto out;

		error = -EINVAL;
		/*
		 * Note - this code used to test ss_flags incorrectly:
		 * old code may have been written using ss_flags==0
		 * to mean ss_flags==SS_ONSTACK (as this was the only
		 * way that worked), so this fix preserves that older
		 * mechanism.
		 */
		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
			goto out;

		if (ss_flags == SS_DISABLE) {
			ss_size = 0;
			ss_sp = NULL;
		} else {
			error = -ENOMEM;
			if (ss_size < MINSIGSTKSZ)
				goto out;
		}

		current->sas_ss_sp = (unsigned long) ss_sp;
		current->sas_ss_size = ss_size;
	}

	if (uoss) {
		error = -EFAULT;
		if (copy_to_user(uoss, &oss, sizeof(oss)))
			goto out;
	}

	error = 0;
out:
	return error;
}

asmlinkage long
sys_sigpending(old_sigset_t *set)
{
	return do_sigpending(set, sizeof(*set));
}

#if !defined(__alpha__)
/* Alpha has its own versions with special arguments.  */

asmlinkage long
sys_sigprocmask(int how, old_sigset_t *set, old_sigset_t *oset)
{
	int error;
	old_sigset_t old_set, new_set;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		new_set &= ~(sigmask(SIGKILL)|sigmask(SIGSTOP));

		spin_lock_irq(&current->sigmask_lock);
		old_set = current->blocked.sig[0];

		error = 0;
		switch (how) {
		default:
			error = -EINVAL;
			break;
		case SIG_BLOCK:
			sigaddsetmask(&current->blocked, new_set);
			break;
		case SIG_UNBLOCK:
			sigdelsetmask(&current->blocked, new_set);
			break;
		case SIG_SETMASK:
			current->blocked.sig[0] = new_set;
			break;
		}

		recalc_sigpending(current);
		spin_unlock_irq(&current->sigmask_lock);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		old_set = current->blocked.sig[0];
	set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}

#ifndef __sparc__
asmlinkage long
sys_rt_sigaction(int sig, const struct sigaction *act, struct sigaction *oact,
		 size_t sigsetsize)
{
	struct k_sigaction new_sa, old_sa;
	int ret = -EINVAL;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (act) {
		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);

	if (!ret && oact) {
		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
			return -EFAULT;
	}
out:
	return ret;
}
#endif /* __sparc__ */
#endif /* !defined(__alpha__) */

#if !defined(__alpha__) && !defined(__ia64__)

/*
 * For backwards compatibility.  Functionality superseded by sigprocmask.
 */
asmlinkage long
sys_sgetmask(void)
{
	/* SMP safe */
	return current->blocked.sig[0];
}

asmlinkage long
sys_ssetmask(int newmask)
{
	int old;

	spin_lock_irq(&current->sigmask_lock);
	old = current->blocked.sig[0];

	siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
						  sigmask(SIGSTOP)));
	recalc_sigpending(current);
	spin_unlock_irq(&current->sigmask_lock);

	return old;
}
#endif /* !defined(__alpha__) && !defined(__ia64__) */

#if !defined(__alpha__) && !defined(__ia64__) && !defined(__mips__)
/*
 * For backwards compatibility.  Functionality superseded by sigaction.
 */
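/*
 * SA_ONESHOT | SA_NOMASK below gives classic SysV signal() semantics:
 * the handler is reset to SIG_DFL when it fires, and the signal is not
 * blocked while the handler runs.
 */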
asmlinkage unsigned long
sys_signal(int sig, __sighandler_t handler)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	new_sa.sa.sa_handler = handler;
	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;

	ret = do_sigaction(sig, &new_sa, &old_sa);

	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
#endif /* !__alpha__ && !__ia64__ && !__mips__ */