/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 */
#include <linux/config.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/unistd.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/sched.h>

#include <asm/uaccess.h>
/*
 * SLAB caches for signal bits.
 */
#define DEBUG_SIG 0

#if DEBUG_SIG
#define SIG_SLAB_DEBUG	(SLAB_DEBUG_FREE | SLAB_RED_ZONE /* | SLAB_POISON */)
#else
#define SIG_SLAB_DEBUG	0
#endif
static kmem_cache_t *sigqueue_cachep;

atomic_t nr_queued_signals;
int max_queued_signals = 1024;
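/*
 * max_queued_signals bounds the number of sigqueue entries in flight
 * system-wide; once nr_queued_signals reaches it, send_signal() below
 * falls back to delivering the bare signal without queueing its siginfo.
 */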
void __init signals_init(void)
{
	sigqueue_cachep =
		kmem_cache_create("sigqueue",
				  sizeof(struct sigqueue),
				  __alignof__(struct sigqueue),
				  SIG_SLAB_DEBUG, NULL, NULL);
	if (!sigqueue_cachep)
		panic("signals_init(): cannot create sigqueue SLAB cache");
}
/* Given the mask, find the first available signal that should be serviced. */

static int
next_signal(struct task_struct *tsk, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = tsk->pending.signal.sig;
	m = mask->sig;
	switch (_NSIG_WORDS) {
	default:
		for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
			if ((x = *s &~ *m) != 0) {
				sig = ffz(~x) + i*_NSIG_BPW + 1;
				break;
			}
		break;

	case 2: if ((x = s[0] &~ m[0]) != 0)
			sig = 1;
		else if ((x = s[1] &~ m[1]) != 0)
			sig = _NSIG_BPW + 1;
		else
			break;
		sig += ffz(~x);
		break;

	case 1: if ((x = *s &~ *m) != 0)
			sig = ffz(~x) + 1;
		break;
	}

	return sig;
}
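/*
 * Note on the scan above: x = *s &~ *m is the word of signals that are
 * pending but not masked, and ffz(~x) is the index of the lowest set
 * bit in x, so the lowest-numbered deliverable signal wins.  For
 * example, x == 0x104 in word 0 gives ffz(~x) == 2, i.e. signal 3
 * (SIGQUIT).
 */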
static void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q, *n;

	sigemptyset(&queue->signal);
	q = queue->head;
	queue->head = NULL;
	queue->tail = &queue->head;

	while (q) {
		n = q->next;
		kmem_cache_free(sigqueue_cachep, q);
		atomic_dec(&nr_queued_signals);
		q = n;
	}
}
/*
 * Flush all pending signals for a task.
 */

void
flush_signals(struct task_struct *t)
{
	t->sigpending = 0;
	flush_sigqueue(&t->pending);
}
void exit_sighand(struct task_struct *tsk)
{
	struct signal_struct * sig = tsk->sig;

	spin_lock_irq(&tsk->sigmask_lock);
	if (sig) {
		tsk->sig = NULL;
		if (atomic_dec_and_test(&sig->count))
			kmem_cache_free(sigact_cachep, sig);
	}
	tsk->sigpending = 0;
	flush_sigqueue(&tsk->pending);
	spin_unlock_irq(&tsk->sigmask_lock);
}
/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t)
{
	int i;
	struct k_sigaction *ka = &t->sig->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}
/* Notify the system that a driver wants to block all signals for this
 * process, and wants to be notified if any signals at all were to be
 * sent/acted upon.  If the notifier routine returns non-zero, then the
 * signal will be acted upon after all.  If the notifier routine returns 0,
 * then the signal will be blocked.  Only one block per process is
 * allowed.  priv is a pointer to private data that the notifier routine
 * can use to determine if the signal should be blocked or not.  */
void
block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sigmask_lock, flags);
	current->notifier_mask = mask;
	current->notifier_data = priv;
	current->notifier = notifier;
	spin_unlock_irqrestore(&current->sigmask_lock, flags);
}
/* Notify the system that blocking has ended. */

void
unblock_all_signals(void)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sigmask_lock, flags);
	current->notifier = NULL;
	current->notifier_data = NULL;
	recalc_sigpending(current);
	spin_unlock_irqrestore(&current->sigmask_lock, flags);
}
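/*
 * Illustrative sketch (not part of this file): a driver that must keep
 * signals from being acted upon while it holds a resource could use
 * the notifier hooks above roughly as below.  All names here are
 * hypothetical.
 */
#if 0
static int my_notifier(void *priv)
{
	struct my_dev *dev = priv;
	/* Return 0 to block the signal, non-zero to let it be acted upon. */
	return dev->resource_released;
}

	/* In the driver: */
	sigset_t mask;
	sigfillset(&mask);		/* watch every signal */
	block_all_signals(my_notifier, dev, &mask);
	/* ... critical region ... */
	unblock_all_signals();
#endif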
/* Collect the siginfo for one instance of sig from the pending list.
   Returns 1 (with *info filled in) if the signal was pending, else 0. */
static int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
	if (sigismember(&list->signal, sig)) {
		/* Collect the siginfo appropriate to this signal.  */
		struct sigqueue *q, **pp;
		pp = &list->head;
		while ((q = *pp) != NULL) {
			if (q->info.si_signo == sig)
				goto found_it;
			pp = &q->next;
		}

		/* Ok, it wasn't in the queue.  We must have
		   been out of queue space.  So zero out the
		   info.  */
		sigdelset(&list->signal, sig);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = 0;
		info->si_pid = 0;
		info->si_uid = 0;
		return 1;

found_it:
		if ((*pp = q->next) == NULL)
			list->tail = pp;

		/* Copy the sigqueue information and free the queue entry */
		copy_siginfo(info, &q->info);
		kmem_cache_free(sigqueue_cachep,q);
		atomic_dec(&nr_queued_signals);

		/* RT signals can be queued multiple times; if another
		   instance is still queued, leave the pending bit set. */
		if (sig >= SIGRTMIN) {
			while ((q = *pp) != NULL) {
				if (q->info.si_signo == sig)
					goto found_another;
				pp = &q->next;
			}
		}

		sigdelset(&list->signal, sig);
found_another:
		return 1;
	}
	return 0;
}
/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers must be holding current->sigmask_lock.
 */

int
dequeue_signal(sigset_t *mask, siginfo_t *info)
{
	int sig = 0;

#if DEBUG_SIG
	printk("SIG dequeue (%s:%d): %d ", current->comm, current->pid,
	       signal_pending(current));
#endif

	sig = next_signal(current, mask);
	if (current->notifier) {
		if (sigismember(current->notifier_mask, sig)) {
			if (!(current->notifier)(current->notifier_data)) {
				current->sigpending = 0;
				return 0;
			}
		}
	}

	if (sig) {
		if (!collect_signal(sig, &current->pending, info))
			sig = 0;

		/* XXX: Once POSIX.1b timers are in, if si_code == SI_TIMER,
		   we need to xchg out the timer overrun values.  */
	}
	recalc_sigpending(current);

#if DEBUG_SIG
	printk(" %d -> %d\n", signal_pending(current), sig);
#endif

	return sig;
}
static int rm_from_queue(int sig, struct sigpending *s)
{
	struct sigqueue *q, **pp;

	if (!sigismember(&s->signal, sig))
		return 0;

	sigdelset(&s->signal, sig);

	pp = &s->head;

	while ((q = *pp) != NULL) {
		if (q->info.si_signo == sig) {
			if ((*pp = q->next) == NULL)
				s->tail = pp;
			kmem_cache_free(sigqueue_cachep,q);
			atomic_dec(&nr_queued_signals);
			continue;
		}
		pp = &q->next;
	}
	return 1;
}
/*
 * Remove signal sig from t->pending.
 * Returns 1 if sig was found.
 *
 * All callers must be holding t->sigmask_lock.
 */
static int rm_sig_from_queue(int sig, struct task_struct *t)
{
	return rm_from_queue(sig, &t->pending);
}
/*
 * Bad permissions for sending the signal
 */
int bad_signal(int sig, struct siginfo *info, struct task_struct *t)
{
	return (!info || ((unsigned long)info != 1 && SI_FROMUSER(info)))
	    && ((sig != SIGCONT) || (current->session != t->session))
	    && (current->euid ^ t->suid) && (current->euid ^ t->uid)
	    && (current->uid ^ t->suid) && (current->uid ^ t->uid)
	    && !capable(CAP_KILL);
}
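/*
 * Reading the test above: "a ^ b" is non-zero exactly when a != b, so
 * each euid/uid clause means "these two IDs differ".  The signal is
 * refused only when it is user-originated, is not a SIGCONT sent
 * within the same session, none of the sender/target uid pairs match,
 * and the sender lacks CAP_KILL.
 */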
/*
 * Signal type:
 *    < 0 : global action (kill - spread to all non-blocked threads)
 *    = 0 : ignored
 *    > 0 : wake up.
 */
static int signal_type(int sig, struct signal_struct *signals)
{
	unsigned long handler;

	if (!signals)
		return 0;

	handler = (unsigned long) signals->action[sig-1].sa.sa_handler;
	if (handler > 1)
		return 1;

	/* "Ignore" handler.. Illogical, but SIGCHLD has implicit
	   behaviour (child reaping) even when ignored */
	if (handler == 1)
		return sig == SIGCHLD;

	/* Default handler.  Normally lethal, but.. */
	switch (sig) {

	/* Ignored */
	case SIGCONT: case SIGWINCH:
	case SIGCHLD: case SIGURG:
		return 0;

	/* Implicit behaviour */
	case SIGTSTP: case SIGTTIN: case SIGTTOU:
		return 1;

	/* Implicit actions (kill or do special stuff) */
	default:
		return -1;
	}
}
/*
 * Determine whether a signal should be posted or not.
 *
 * Signals with SIG_IGN can be ignored, except for the
 * special case of a SIGCHLD.
 *
 * Some signals with SIG_DFL default to a non-action.
 */
static int ignored_signal(int sig, struct task_struct *t)
{
	/* Don't ignore traced or blocked signals */
	if ((t->ptrace & PT_PTRACED) || sigismember(&t->blocked, sig))
		return 0;

	return signal_type(sig, t->sig) == 0;
}
/*
 * Handle TASK_STOPPED cases and other implicit behaviour
 * of certain magical signals.
 *
 * SIGKILL gets spread out to every thread.
 */
static void handle_stop_signal(int sig, struct task_struct *t)
{
	switch (sig) {
	case SIGKILL: case SIGCONT:
		/* Wake up the process if stopped.  */
		if (t->state == TASK_STOPPED)
			wake_up_process(t);
		t->exit_code = 0;
		rm_sig_from_queue(SIGSTOP, t);
		rm_sig_from_queue(SIGTSTP, t);
		rm_sig_from_queue(SIGTTOU, t);
		rm_sig_from_queue(SIGTTIN, t);
		break;

	case SIGSTOP: case SIGTSTP:
	case SIGTTIN: case SIGTTOU:
		/* If we're stopping again, cancel SIGCONT */
		rm_sig_from_queue(SIGCONT, t);
		break;
	}
}
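/*
 * Net effect of the above: SIGKILL/SIGCONT discard any queued stop
 * signals and wake a stopped task, while the four stop signals discard
 * a queued SIGCONT, so a stop/continue pair never remains pending at
 * the same time.
 */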
static int send_signal(int sig, struct siginfo *info, struct sigpending *signals)
{
	struct sigqueue * q = NULL;

	/* Real-time signals must be queued if sent by sigqueue, or
	   some other real-time mechanism.  It is implementation
	   defined whether kill() does so.  We attempt to do so, on
	   the principle of least surprise, but since kill is not
	   allowed to fail with EAGAIN when low on memory we just
	   make sure at least one signal gets delivered and don't
	   pass on the info struct.  */

	if (atomic_read(&nr_queued_signals) < max_queued_signals) {
		q = kmem_cache_alloc(sigqueue_cachep, GFP_ATOMIC);
	}

	if (q) {
		atomic_inc(&nr_queued_signals);
		q->next = NULL;
		*signals->tail = q;
		signals->tail = &q->next;
		switch ((unsigned long) info) {
		case 0:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = current->pid;
			q->info.si_uid = current->uid;
			break;
		case 1:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			break;
		}
	} else if (sig >= SIGRTMIN && info && (unsigned long)info != 1
		   && info->si_code != SI_USER) {
		/*
		 * Queue overflow, abort.  We may abort if the signal was rt
		 * and sent by user using something other than kill().
		 */
		return -EAGAIN;
	}

	sigaddset(&signals->signal, sig);
	return 0;
}
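/*
 * Note the encoding of the "info" argument used throughout this file:
 * 0 stands for a user-originated signal (as from kill()), 1 for a
 * kernel-originated one, and any other value is a pointer to a fully
 * filled-in siginfo_t.  The send_sig()/kill_pg()/kill_proc() wrappers
 * below map their "priv" flag onto the same 0/1 convention.
 */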
/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "sigmask_lock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
static inline void signal_wake_up(struct task_struct *t)
{
	t->sigpending = 1;

	if (t->state & TASK_INTERRUPTIBLE) {
		wake_up_process(t);
		return;
	}

#ifdef CONFIG_SMP
	/*
	 * If the task is running on a different CPU
	 * force a reschedule on the other CPU to make
	 * it notice the new signal quickly.
	 *
	 * The code below is a tad loose and might occasionally
	 * kick the wrong CPU if we catch the process in the
	 * process of changing - but no harm is done by that
	 * other than doing an extra (lightweight) IPI interrupt.
	 */
	spin_lock(&runqueue_lock);
	if (t->has_cpu && t->processor != smp_processor_id())
		smp_send_reschedule(t->processor);
	spin_unlock(&runqueue_lock);
#endif /* CONFIG_SMP */
}
static int deliver_signal(int sig, struct siginfo *info, struct task_struct *t)
{
	int retval = send_signal(sig, info, &t->pending);

	if (!retval && !sigismember(&t->blocked, sig))
		signal_wake_up(t);

	return retval;
}
int
send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long flags;
	int ret;

#if DEBUG_SIG
	printk("SIG queue (%s:%d): %d ", t->comm, t->pid, sig);
#endif

	ret = -EINVAL;
	if (sig < 0 || sig > _NSIG)
		goto out_nolock;
	/* The somewhat baroque permissions check... */
	ret = -EPERM;
	if (bad_signal(sig, info, t))
		goto out_nolock;

	/* The null signal is a permissions and process existence probe.
	   No signal is actually delivered.  Same goes for zombies. */
	ret = 0;
	if (!sig || !t->sig)
		goto out_nolock;

	spin_lock_irqsave(&t->sigmask_lock, flags);
	handle_stop_signal(sig, t);

	/* Optimize away the signal, if it's a signal that can be
	   handled immediately (ie non-blocked and untraced) and
	   that is ignored (either explicitly or by default).  */

	if (ignored_signal(sig, t))
		goto out;

	/* Support queueing exactly one non-rt signal, so that we
	   can get more detailed information about the cause of
	   the signal. */
	if (sig < SIGRTMIN && sigismember(&t->pending.signal, sig))
		goto out;

	ret = deliver_signal(sig, info, t);
out:
	spin_unlock_irqrestore(&t->sigmask_lock, flags);
	if ((t->state & TASK_INTERRUPTIBLE) && signal_pending(t))
		wake_up_process(t);
out_nolock:
#if DEBUG_SIG
	printk(" %d -> %d\n", signal_pending(t), ret);
#endif

	return ret;
}
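/*
 * Example of the null-signal probe above: "kill(pid, 0)" from user
 * space reaches this function with sig == 0; nothing is delivered, but
 * the permission and existence checks still run, which is what makes
 * the null signal usable to test whether a process exists.
 */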
/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 */

int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;

	spin_lock_irqsave(&t->sigmask_lock, flags);
	if (t->sig == NULL) {
		spin_unlock_irqrestore(&t->sigmask_lock, flags);
		return -ESRCH;
	}

	if (t->sig->action[sig-1].sa.sa_handler == SIG_IGN)
		t->sig->action[sig-1].sa.sa_handler = SIG_DFL;
	sigdelset(&t->blocked, sig);
	recalc_sigpending(t);
	spin_unlock_irqrestore(&t->sigmask_lock, flags);

	return send_sig_info(sig, info, t);
}
/*
 * kill_pg_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 */

int
kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
{
	int retval = -EINVAL;
	if (pgrp > 0) {
		struct task_struct *p;

		retval = -ESRCH;
		read_lock(&tasklist_lock);
		for_each_task(p) {
			if (p->pgrp == pgrp) {
				int err = send_sig_info(sig, info, p);
				if (retval)
					retval = err;
			}
		}
		read_unlock(&tasklist_lock);
	}
	return retval;
}
/*
 * kill_sl_info() sends a signal to the session leader: this is used
 * to send SIGHUP to the controlling process of a terminal when
 * the connection is lost.
 */

int
kill_sl_info(int sig, struct siginfo *info, pid_t sess)
{
	int retval = -EINVAL;
	if (sess > 0) {
		struct task_struct *p;

		retval = -ESRCH;
		read_lock(&tasklist_lock);
		for_each_task(p) {
			if (p->leader && p->session == sess) {
				int err = send_sig_info(sig, info, p);
				if (retval)
					retval = err;
			}
		}
		read_unlock(&tasklist_lock);
	}
	return retval;
}
inline int
kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;
	struct task_struct *p;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);
	error = -ESRCH;
	if (p)
		error = send_sig_info(sig, info, p);
	read_unlock(&tasklist_lock);
	return error;
}
/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct siginfo *info, int pid)
{
	if (!pid) {
		return kill_pg_info(sig, info, current->pgrp);
	} else if (pid == -1) {
		int retval = 0, count = 0;
		struct task_struct * p;

		read_lock(&tasklist_lock);
		for_each_task(p) {
			if (p->pid > 1 && p != current) {
				int err = send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		read_unlock(&tasklist_lock);
		return count ? retval : -ESRCH;
	} else if (pid < 0) {
		return kill_pg_info(sig, info, -pid);
	} else {
		return kill_proc_info(sig, info, pid);
	}
}
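/*
 * Summary of the pid interpretation above, matching kill(2):
 *	pid > 0		signal the single process with that pid
 *	pid == 0	signal every process in the caller's process group
 *	pid == -1	signal every process except the idle and init
 *			tasks and the caller itself
 *	pid < -1	signal every process in process group -pid
 */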
/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, (void*)(long)(priv != 0), p);
}

void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, (void*)1L, p);
}

int
kill_pg(pid_t pgrp, int sig, int priv)
{
	return kill_pg_info(sig, (void *)(long)(priv != 0), pgrp);
}

int
kill_sl(pid_t sess, int sig, int priv)
{
	return kill_sl_info(sig, (void *)(long)(priv != 0), sess);
}

int
kill_proc(pid_t pid, int sig, int priv)
{
	return kill_proc_info(sig, (void *)(long)(priv != 0), pid);
}
/*
 * Joy. Or not. Pthread wants us to wake up every thread
 * in our parent group.
 */
static void wake_up_parent(struct task_struct *parent)
{
	struct task_struct *tsk = parent;

	do {
		wake_up_interruptible(&tsk->wait_chldexit);
		tsk = next_thread(tsk);
	} while (tsk != parent);
}
/*
 * Let a parent know about a status change of a child.
 */

void do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	int why, status;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_pid = tsk->pid;
	info.si_uid = tsk->uid;

	/* FIXME: find out whether or not this is supposed to be c*time. */
	info.si_utime = tsk->times.tms_utime;
	info.si_stime = tsk->times.tms_stime;

	status = tsk->exit_code & 0x7f;
	why = SI_KERNEL;	/* shouldn't happen */
	switch (tsk->state) {
	case TASK_ZOMBIE:
		if (tsk->exit_code & 0x80)
			why = CLD_DUMPED;
		else if (tsk->exit_code & 0x7f)
			why = CLD_KILLED;
		else {
			why = CLD_EXITED;
			status = tsk->exit_code >> 8;
		}
		break;
	case TASK_STOPPED:
		/* FIXME -- can we deduce CLD_TRAPPED or CLD_CONTINUED? */
		if (tsk->ptrace & PT_PTRACED)
			why = CLD_TRAPPED;
		else
			why = CLD_STOPPED;
		break;

	default:
		printk(KERN_DEBUG "eh? notify_parent with state %ld?\n",
		       tsk->state);
		break;
	}
	info.si_code = why;
	info.si_status = status;

	send_sig_info(sig, &info, tsk->p_pptr);
	wake_up_parent(tsk->p_pptr);
}
/*
 * We need the tasklist lock because it's the only
 * thing that protects our "parent" pointer.
 *
 * exit.c calls "do_notify_parent()" directly, because
 * it already has the tasklist lock.
 */
void
notify_parent(struct task_struct *tsk, int sig)
{
	read_lock(&tasklist_lock);
	do_notify_parent(tsk, sig);
	read_unlock(&tasklist_lock);
}
EXPORT_SYMBOL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(force_sig_info);
EXPORT_SYMBOL(kill_pg);
EXPORT_SYMBOL(kill_pg_info);
EXPORT_SYMBOL(kill_proc);
EXPORT_SYMBOL(kill_proc_info);
EXPORT_SYMBOL(kill_sl);
EXPORT_SYMBOL(kill_sl_info);
EXPORT_SYMBOL(notify_parent);
EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
EXPORT_SYMBOL(block_all_signals);
EXPORT_SYMBOL(unblock_all_signals);
/*
 * System call entry points.
 */

/*
 * We don't need to get the kernel lock - this is all local to this
 * particular thread.. (and that's good, because this is _heavily_
 * used by various programs)
 */
asmlinkage long
sys_rt_sigprocmask(int how, sigset_t *set, sigset_t *oset, size_t sigsetsize)
{
	int error = -EINVAL;
	sigset_t old_set, new_set;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		spin_lock_irq(&current->sigmask_lock);
		old_set = current->blocked;

		error = 0;
		switch (how) {
		default:
			error = -EINVAL;
			break;
		case SIG_BLOCK:
			sigorsets(&new_set, &old_set, &new_set);
			break;
		case SIG_UNBLOCK:
			signandsets(&new_set, &old_set, &new_set);
			break;
		case SIG_SETMASK:
			break;
		}

		current->blocked = new_set;
		recalc_sigpending(current);
		spin_unlock_irq(&current->sigmask_lock);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		spin_lock_irq(&current->sigmask_lock);
		old_set = current->blocked;
		spin_unlock_irq(&current->sigmask_lock);

	set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}
long do_sigpending(void *set, unsigned long sigsetsize)
{
	long error = -EINVAL;
	sigset_t pending;

	if (sigsetsize > sizeof(sigset_t))
		goto out;

	spin_lock_irq(&current->sigmask_lock);
	sigandsets(&pending, &current->blocked, &current->pending.signal);
	spin_unlock_irq(&current->sigmask_lock);

	error = -EFAULT;
	if (!copy_to_user(set, &pending, sigsetsize))
		error = 0;
out:
	return error;
}
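/*
 * Only signals that are both pending and blocked are reported here;
 * a pending signal that is not blocked would ordinarily have been
 * delivered before the caller could observe the set.
 */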
asmlinkage long
sys_rt_sigpending(sigset_t *set, size_t sigsetsize)
{
	return do_sigpending(set, sigsetsize);
}
asmlinkage long
sys_rt_sigtimedwait(const sigset_t *uthese, siginfo_t *uinfo,
		    const struct timespec *uts, size_t sigsetsize)
{
	int ret, sig;
	sigset_t these;
	struct timespec ts;
	siginfo_t info;
	long timeout = 0;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&these, uthese, sizeof(these)))
		return -EFAULT;

	/*
	 * Invert the set of allowed signals to get those we
	 * want to block.
	 */
	sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
	signotset(&these);

	if (uts) {
		if (copy_from_user(&ts, uts, sizeof(ts)))
			return -EFAULT;
		if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
		    || ts.tv_sec < 0)
			return -EINVAL;
	}

	spin_lock_irq(&current->sigmask_lock);
	sig = dequeue_signal(&these, &info);
	if (!sig) {
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (uts)
			/* Round up: any non-zero timespec sleeps for at
			   least one jiffy; only a zero timespec (a pure
			   poll) skips the sleep below.  */
			timeout = (timespec_to_jiffies(&ts)
				   + (ts.tv_sec || ts.tv_nsec));

		if (timeout) {
			/* None ready -- temporarily unblock those we're
			 * interested in while we are sleeping, so that
			 * we'll be awakened when they arrive.  */
			sigset_t oldblocked = current->blocked;
			sigandsets(&current->blocked, &current->blocked, &these);
			recalc_sigpending(current);
			spin_unlock_irq(&current->sigmask_lock);

			current->state = TASK_INTERRUPTIBLE;
			timeout = schedule_timeout(timeout);

			spin_lock_irq(&current->sigmask_lock);
			sig = dequeue_signal(&these, &info);
			current->blocked = oldblocked;
			recalc_sigpending(current);
		}
	}
	spin_unlock_irq(&current->sigmask_lock);

	if (sig) {
		ret = sig;
		if (uinfo) {
			if (copy_siginfo_to_user(uinfo, &info))
				ret = -EFAULT;
		}
	} else {
		ret = -EAGAIN;
		if (timeout)
			ret = -EINTR;
	}

	return ret;
}
asmlinkage long
sys_kill(int pid, int sig)
{
	struct siginfo info;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_USER;
	info.si_pid = current->pid;
	info.si_uid = current->uid;

	return kill_something_info(sig, &info, pid);
}
asmlinkage long
sys_rt_sigqueueinfo(int pid, int sig, siginfo_t *uinfo)
{
	siginfo_t info;

	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;

	/* Not even root can pretend to send signals from the kernel.
	   Nor can they impersonate a kill(), which adds source info.  */
	if (info.si_code >= 0)
		return -EPERM;
	info.si_signo = sig;

	/* POSIX.1b doesn't mention process groups.  */
	return kill_proc_info(sig, &info, pid);
}
int
do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact)
{
	struct k_sigaction *k;

	if (sig < 1 || sig > _NSIG ||
	    (act && (sig == SIGKILL || sig == SIGSTOP)))
		return -EINVAL;

	k = &current->sig->action[sig-1];

	spin_lock(&current->sig->siglock);

	if (oact)
		*oact = *k;

	if (act) {
		*k = *act;
		sigdelsetmask(&k->sa.sa_mask, sigmask(SIGKILL) | sigmask(SIGSTOP));

		/*
		 * POSIX 3.3.1.3:
		 *  "Setting a signal action to SIG_IGN for a signal that is
		 *   pending shall cause the pending signal to be discarded,
		 *   whether or not it is blocked."
		 *
		 *  "Setting a signal action to SIG_DFL for a signal that is
		 *   pending and whose default action is to ignore the signal
		 *   (for example, SIGCHLD), shall cause the pending signal to
		 *   be discarded, whether or not it is blocked"
		 *
		 * Note the silly behaviour of SIGCHLD: SIG_IGN means that the
		 * signal isn't actually ignored, but does automatic child
		 * reaping, while SIG_DFL is explicitly said by POSIX to force
		 * the signal to be ignored.
		 */

		if (k->sa.sa_handler == SIG_IGN
		    || (k->sa.sa_handler == SIG_DFL
			&& (sig == SIGCONT ||
			    sig == SIGCHLD ||
			    sig == SIGWINCH))) {
			spin_lock_irq(&current->sigmask_lock);
			if (rm_sig_from_queue(sig, current))
				recalc_sigpending(current);
			spin_unlock_irq(&current->sigmask_lock);
		}
	}

	spin_unlock(&current->sig->siglock);
	return 0;
}
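/*
 * Example of the POSIX rule above: if SIGWINCH is pending (even while
 * blocked) and the process now installs SIG_IGN for it, or SIG_DFL
 * (whose default action is to ignore it), the pending instance is
 * discarded here rather than delivered later.
 */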
int
do_sigaltstack (const stack_t *uss, stack_t *uoss, unsigned long sp)
{
	stack_t oss;
	int error;

	if (uoss) {
		oss.ss_sp = (void *) current->sas_ss_sp;
		oss.ss_size = current->sas_ss_size;
		oss.ss_flags = sas_ss_flags(sp);
	}

	if (uss) {
		void *ss_sp;
		size_t ss_size;
		int ss_flags;

		error = -EFAULT;
		if (verify_area(VERIFY_READ, uss, sizeof(*uss))
		    || __get_user(ss_sp, &uss->ss_sp)
		    || __get_user(ss_flags, &uss->ss_flags)
		    || __get_user(ss_size, &uss->ss_size))
			goto out;

		error = -EPERM;
		if (on_sig_stack (sp))
			goto out;

		error = -EINVAL;
		/*
		 * Note: this code used to test ss_flags incorrectly.
		 * Old code may have been written using ss_flags == 0
		 * to mean ss_flags == SS_ONSTACK (as this was the only
		 * way that worked), so this fix preserves that older
		 * mechanism.
		 */
		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
			goto out;

		if (ss_flags == SS_DISABLE) {
			ss_size = 0;
			ss_sp = NULL;
		} else {
			error = -ENOMEM;
			if (ss_size < MINSIGSTKSZ)
				goto out;
		}

		current->sas_ss_sp = (unsigned long) ss_sp;
		current->sas_ss_size = ss_size;
	}

	if (uoss) {
		error = -EFAULT;
		if (copy_to_user(uoss, &oss, sizeof(oss)))
			goto out;
	}

	error = 0;
out:
	return error;
}
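/*
 * User-space view (illustrative): a process typically installs an
 * alternate stack once and then asks for it per handler:
 *
 *	stack_t ss;
 *	ss.ss_sp = buf;
 *	ss.ss_size = SIGSTKSZ;
 *	ss.ss_flags = 0;
 *	sigaltstack(&ss, NULL);
 *	sa.sa_flags |= SA_ONSTACK;
 */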
asmlinkage long
sys_sigpending(old_sigset_t *set)
{
	return do_sigpending(set, sizeof(*set));
}
#if !defined(__alpha__)
/* Alpha has its own versions with special arguments.  */

asmlinkage long
sys_sigprocmask(int how, old_sigset_t *set, old_sigset_t *oset)
{
	int error;
	old_sigset_t old_set, new_set;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		new_set &= ~(sigmask(SIGKILL)|sigmask(SIGSTOP));

		spin_lock_irq(&current->sigmask_lock);
		old_set = current->blocked.sig[0];

		error = 0;
		switch (how) {
		default:
			error = -EINVAL;
			break;
		case SIG_BLOCK:
			sigaddsetmask(&current->blocked, new_set);
			break;
		case SIG_UNBLOCK:
			sigdelsetmask(&current->blocked, new_set);
			break;
		case SIG_SETMASK:
			current->blocked.sig[0] = new_set;
			break;
		}

		recalc_sigpending(current);
		spin_unlock_irq(&current->sigmask_lock);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		old_set = current->blocked.sig[0];
	set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}
#ifndef __sparc__
asmlinkage long
sys_rt_sigaction(int sig, const struct sigaction *act, struct sigaction *oact,
		 size_t sigsetsize)
{
	struct k_sigaction new_sa, old_sa;
	int ret = -EINVAL;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (act) {
		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);

	if (!ret && oact) {
		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
			return -EFAULT;
	}
out:
	return ret;
}
#endif /* __sparc__ */
#endif /* !defined(__alpha__) */
#if !defined(__alpha__) && !defined(__ia64__)

/*
 * For backwards compatibility.  Functionality superseded by sigprocmask.
 */
asmlinkage long
sys_sgetmask(void)
{
	/* SMP safe */
	return current->blocked.sig[0];
}

asmlinkage long
sys_ssetmask(int newmask)
{
	int old;

	spin_lock_irq(&current->sigmask_lock);
	old = current->blocked.sig[0];

	siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
						  sigmask(SIGSTOP)));
	recalc_sigpending(current);
	spin_unlock_irq(&current->sigmask_lock);

	return old;
}

#endif /* !defined(__alpha__) && !defined(__ia64__) */
#if !defined(__alpha__) && !defined(__ia64__) && !defined(__mips__)
/*
 * For backwards compatibility.  Functionality superseded by sigaction.
 */
asmlinkage unsigned long
sys_signal(int sig, __sighandler_t handler)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	new_sa.sa.sa_handler = handler;
	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;

	ret = do_sigaction(sig, &new_sa, &old_sa);

	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
#endif /* !alpha && !__ia64__ && !defined(__mips__) */
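/*
 * Note: the SA_ONESHOT | SA_NOMASK combination above is what gives
 * signal() its historical System V semantics: the handler is reset to
 * SIG_DFL when the signal is delivered, and the signal is not blocked
 * while its handler runs.
 */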