Userland threading stage 2.20/4
[dragonfly.git] / sys / kern / kern_sig.c
blob 0cc89b6353364cc1b7f43e34f553faed462f7114
1 /*
2 * Copyright (c) 1982, 1986, 1989, 1991, 1993
3 * The Regents of the University of California. All rights reserved.
4 * (c) UNIX System Laboratories, Inc.
5 * All or some portions of this file are derived from material licensed
6 * to the University of California by American Telephone and Telegraph
7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8 * the permission of UNIX System Laboratories, Inc.
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the University of
21 * California, Berkeley and its contributors.
22 * 4. Neither the name of the University nor the names of its contributors
23 * may be used to endorse or promote products derived from this software
24 * without specific prior written permission.
26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * SUCH DAMAGE.
38 * @(#)kern_sig.c 8.7 (Berkeley) 4/18/94
39 * $FreeBSD: src/sys/kern/kern_sig.c,v 1.72.2.17 2003/05/16 16:34:34 obrien Exp $
40 * $DragonFly: src/sys/kern/kern_sig.c,v 1.68 2007/02/21 15:46:48 corecode Exp $
43 #include "opt_ktrace.h"
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/kernel.h>
48 #include <sys/sysproto.h>
49 #include <sys/signalvar.h>
50 #include <sys/resourcevar.h>
51 #include <sys/vnode.h>
52 #include <sys/event.h>
53 #include <sys/proc.h>
54 #include <sys/nlookup.h>
55 #include <sys/pioctl.h>
56 #include <sys/systm.h>
57 #include <sys/acct.h>
58 #include <sys/fcntl.h>
59 #include <sys/lock.h>
60 #include <sys/wait.h>
61 #include <sys/ktrace.h>
62 #include <sys/syslog.h>
63 #include <sys/stat.h>
64 #include <sys/sysent.h>
65 #include <sys/sysctl.h>
66 #include <sys/malloc.h>
67 #include <sys/interrupt.h>
68 #include <sys/unistd.h>
69 #include <sys/kern_syscall.h>
70 #include <sys/vkernel.h>
71 #include <sys/thread2.h>
73 #include <machine/cpu.h>
74 #include <machine/smp.h>
76 static int coredump(struct lwp *, int);
77 static char *expand_name(const char *, uid_t, pid_t);
78 static int dokillpg(int sig, int pgid, int all);
79 static int sig_ffs(sigset_t *set);
80 static int sigprop(int sig);
81 #ifdef SMP
82 static void signotify_remote(void *arg);
83 #endif
84 static int kern_sigtimedwait(sigset_t set, siginfo_t *info,
85 struct timespec *timeout);
87 static int filt_sigattach(struct knote *kn);
88 static void filt_sigdetach(struct knote *kn);
89 static int filt_signal(struct knote *kn, long hint);
91 struct filterops sig_filtops =
92 { 0, filt_sigattach, filt_sigdetach, filt_signal };
94 static int kern_logsigexit = 1;
95 SYSCTL_INT(_kern, KERN_LOGSIGEXIT, logsigexit, CTLFLAG_RW,
96 &kern_logsigexit, 0,
97 "Log processes quitting on abnormal signals to syslog(3)");
100 * Can the current process (curproc) send the signal sig to process q?
102 #define CANSIGNAL(q, sig) \
103 (!p_trespass(curproc->p_ucred, (q)->p_ucred) || \
104 ((sig) == SIGCONT && (q)->p_session == curproc->p_session))
107 * Policy -- Can real uid ruid with ucred uc send a signal to process q?
109 #define CANSIGIO(ruid, uc, q) \
110 ((uc)->cr_uid == 0 || \
111 (ruid) == (q)->p_ucred->cr_ruid || \
112 (uc)->cr_uid == (q)->p_ucred->cr_ruid || \
113 (ruid) == (q)->p_ucred->cr_uid || \
114 (uc)->cr_uid == (q)->p_ucred->cr_uid)
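/*
 * Worked example of the CANSIGIO policy above (uids illustrative): if the
 * async-I/O owner was registered with real uid 1001, SIGIO/SIGURG may be
 * delivered to any process whose real or effective uid is 1001; if the
 * registered credentials belong to root (cr_uid == 0), any process
 * qualifies.
 */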
116 int sugid_coredump;
117 SYSCTL_INT(_kern, OID_AUTO, sugid_coredump, CTLFLAG_RW,
118 &sugid_coredump, 0, "Enable coredumping set user/group ID processes");
120 static int do_coredump = 1;
121 SYSCTL_INT(_kern, OID_AUTO, coredump, CTLFLAG_RW,
122 &do_coredump, 0, "Enable/Disable coredumps");
125 * Signal properties and actions.
126 * The array below categorizes the signals and their default actions
127 * according to the following properties:
129 #define SA_KILL 0x01 /* terminates process by default */
130 #define SA_CORE 0x02 /* ditto and coredumps */
131 #define SA_STOP 0x04 /* suspend process */
132 #define SA_TTYSTOP 0x08 /* ditto, from tty */
133 #define SA_IGNORE 0x10 /* ignore by default */
134 #define SA_CONT 0x20 /* continue if suspended */
135 #define SA_CANTMASK 0x40 /* non-maskable, catchable */
136 #define SA_CKPT 0x80 /* checkpoint process */
139 static int sigproptbl[NSIG] = {
140 SA_KILL, /* SIGHUP */
141 SA_KILL, /* SIGINT */
142 SA_KILL|SA_CORE, /* SIGQUIT */
143 SA_KILL|SA_CORE, /* SIGILL */
144 SA_KILL|SA_CORE, /* SIGTRAP */
145 SA_KILL|SA_CORE, /* SIGABRT */
146 SA_KILL|SA_CORE, /* SIGEMT */
147 SA_KILL|SA_CORE, /* SIGFPE */
148 SA_KILL, /* SIGKILL */
149 SA_KILL|SA_CORE, /* SIGBUS */
150 SA_KILL|SA_CORE, /* SIGSEGV */
151 SA_KILL|SA_CORE, /* SIGSYS */
152 SA_KILL, /* SIGPIPE */
153 SA_KILL, /* SIGALRM */
154 SA_KILL, /* SIGTERM */
155 SA_IGNORE, /* SIGURG */
156 SA_STOP, /* SIGSTOP */
157 SA_STOP|SA_TTYSTOP, /* SIGTSTP */
158 SA_IGNORE|SA_CONT, /* SIGCONT */
159 SA_IGNORE, /* SIGCHLD */
160 SA_STOP|SA_TTYSTOP, /* SIGTTIN */
161 SA_STOP|SA_TTYSTOP, /* SIGTTOU */
162 SA_IGNORE, /* SIGIO */
163 SA_KILL, /* SIGXCPU */
164 SA_KILL, /* SIGXFSZ */
165 SA_KILL, /* SIGVTALRM */
166 SA_KILL, /* SIGPROF */
167 SA_IGNORE, /* SIGWINCH */
168 SA_IGNORE, /* SIGINFO */
169 SA_KILL, /* SIGUSR1 */
170 SA_KILL, /* SIGUSR2 */
171 SA_IGNORE, /* SIGTHR */
172 SA_CKPT, /* SIGCKPT */
173 SA_KILL|SA_CKPT, /* SIGCKPTEXIT */
174 SA_IGNORE,
175 SA_IGNORE,
176 SA_IGNORE,
177 SA_IGNORE,
178 SA_IGNORE,
179 SA_IGNORE,
180 SA_IGNORE,
181 SA_IGNORE,
182 SA_IGNORE,
183 SA_IGNORE,
184 SA_IGNORE,
185 SA_IGNORE,
186 SA_IGNORE,
187 SA_IGNORE,
188 SA_IGNORE,
189 SA_IGNORE,
190 SA_IGNORE,
191 SA_IGNORE,
192 SA_IGNORE,
193 SA_IGNORE,
194 SA_IGNORE,
195 SA_IGNORE,
196 SA_IGNORE,
197 SA_IGNORE,
198 SA_IGNORE,
199 SA_IGNORE,
200 SA_IGNORE,
201 SA_IGNORE,
202 SA_IGNORE,
203 SA_IGNORE,
207 static __inline int
208 sigprop(int sig)
211 if (sig > 0 && sig < NSIG)
212 return (sigproptbl[_SIG_IDX(sig)]);
213 return (0);
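/*
 * A small worked example (values read straight from sigproptbl above) of
 * how sigprop() is consulted:
 *
 *	sigprop(SIGSEGV) -> SA_KILL|SA_CORE	terminate and dump core
 *	sigprop(SIGTSTP) -> SA_STOP|SA_TTYSTOP	tty-generated stop
 *	sigprop(SIGCHLD) -> SA_IGNORE		discarded by default
 *	sigprop(0)       -> 0			out of range, no properties
 */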
216 static __inline int
217 sig_ffs(sigset_t *set)
219 int i;
221 for (i = 0; i < _SIG_WORDS; i++)
222 if (set->__bits[i])
223 return (ffs(set->__bits[i]) + (i * 32));
224 return (0);
228 kern_sigaction(int sig, struct sigaction *act, struct sigaction *oact)
230 struct thread *td = curthread;
231 struct proc *p = td->td_proc;
232 struct lwp *lp;
233 struct sigacts *ps = p->p_sigacts;
235 if (sig <= 0 || sig > _SIG_MAXSIG)
236 return (EINVAL);
238 if (oact) {
239 oact->sa_handler = ps->ps_sigact[_SIG_IDX(sig)];
240 oact->sa_mask = ps->ps_catchmask[_SIG_IDX(sig)];
241 oact->sa_flags = 0;
242 if (SIGISMEMBER(ps->ps_sigonstack, sig))
243 oact->sa_flags |= SA_ONSTACK;
244 if (!SIGISMEMBER(ps->ps_sigintr, sig))
245 oact->sa_flags |= SA_RESTART;
246 if (SIGISMEMBER(ps->ps_sigreset, sig))
247 oact->sa_flags |= SA_RESETHAND;
248 if (SIGISMEMBER(ps->ps_signodefer, sig))
249 oact->sa_flags |= SA_NODEFER;
250 if (SIGISMEMBER(ps->ps_siginfo, sig))
251 oact->sa_flags |= SA_SIGINFO;
252 if (SIGISMEMBER(ps->ps_sigmailbox, sig))
253 oact->sa_flags |= SA_MAILBOX;
254 if (sig == SIGCHLD && p->p_procsig->ps_flag & PS_NOCLDSTOP)
255 oact->sa_flags |= SA_NOCLDSTOP;
256 if (sig == SIGCHLD && p->p_procsig->ps_flag & PS_NOCLDWAIT)
257 oact->sa_flags |= SA_NOCLDWAIT;
259 if (act) {
261 * Check for invalid requests. KILL and STOP cannot be
262 * caught.
264 if (sig == SIGKILL || sig == SIGSTOP) {
265 if (act->sa_handler != SIG_DFL)
266 return (EINVAL);
267 #if 0
268 /* (not needed, SIG_DFL forces action to occur) */
269 if (act->sa_flags & SA_MAILBOX)
270 return (EINVAL);
271 #endif
275 * Change setting atomically.
277 crit_enter();
279 ps->ps_catchmask[_SIG_IDX(sig)] = act->sa_mask;
280 SIG_CANTMASK(ps->ps_catchmask[_SIG_IDX(sig)]);
281 if (act->sa_flags & SA_SIGINFO) {
282 ps->ps_sigact[_SIG_IDX(sig)] =
283 (__sighandler_t *)act->sa_sigaction;
284 SIGADDSET(ps->ps_siginfo, sig);
285 } else {
286 ps->ps_sigact[_SIG_IDX(sig)] = act->sa_handler;
287 SIGDELSET(ps->ps_siginfo, sig);
289 if (!(act->sa_flags & SA_RESTART))
290 SIGADDSET(ps->ps_sigintr, sig);
291 else
292 SIGDELSET(ps->ps_sigintr, sig);
293 if (act->sa_flags & SA_ONSTACK)
294 SIGADDSET(ps->ps_sigonstack, sig);
295 else
296 SIGDELSET(ps->ps_sigonstack, sig);
297 if (act->sa_flags & SA_RESETHAND)
298 SIGADDSET(ps->ps_sigreset, sig);
299 else
300 SIGDELSET(ps->ps_sigreset, sig);
301 if (act->sa_flags & SA_NODEFER)
302 SIGADDSET(ps->ps_signodefer, sig);
303 else
304 SIGDELSET(ps->ps_signodefer, sig);
305 if (act->sa_flags & SA_MAILBOX)
306 SIGADDSET(ps->ps_sigmailbox, sig);
307 else
308 SIGDELSET(ps->ps_sigmailbox, sig);
309 if (sig == SIGCHLD) {
310 if (act->sa_flags & SA_NOCLDSTOP)
311 p->p_procsig->ps_flag |= PS_NOCLDSTOP;
312 else
313 p->p_procsig->ps_flag &= ~PS_NOCLDSTOP;
314 if (act->sa_flags & SA_NOCLDWAIT) {
316 * Paranoia: since SA_NOCLDWAIT is implemented
317 * by reparenting the dying child to PID 1 (and
318 * trusting it to reap the zombie), PID 1 itself
319 * is forbidden to set SA_NOCLDWAIT.
321 if (p->p_pid == 1)
322 p->p_procsig->ps_flag &= ~PS_NOCLDWAIT;
323 else
324 p->p_procsig->ps_flag |= PS_NOCLDWAIT;
325 } else {
326 p->p_procsig->ps_flag &= ~PS_NOCLDWAIT;
330 * Set bit in p_sigignore for signals that are set to SIG_IGN,
331 * and for signals set to SIG_DFL where the default is to
332 * ignore. However, don't put SIGCONT in p_sigignore, as we
333 * have to restart the process.
335 if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
336 (sigprop(sig) & SA_IGNORE &&
337 ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL)) {
338 /* never to be seen again */
339 SIGDELSET(p->p_siglist, sig);
341 * Remove the signal also from the thread lists.
343 FOREACH_LWP_IN_PROC(lp, p) {
344 SIGDELSET(lp->lwp_siglist, sig);
346 if (sig != SIGCONT)
347 /* easier in ksignal */
348 SIGADDSET(p->p_sigignore, sig);
349 SIGDELSET(p->p_sigcatch, sig);
350 } else {
351 SIGDELSET(p->p_sigignore, sig);
352 if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL)
353 SIGDELSET(p->p_sigcatch, sig);
354 else
355 SIGADDSET(p->p_sigcatch, sig);
358 crit_exit();
360 return (0);
364 sys_sigaction(struct sigaction_args *uap)
366 struct sigaction act, oact;
367 struct sigaction *actp, *oactp;
368 int error;
370 actp = (uap->act != NULL) ? &act : NULL;
371 oactp = (uap->oact != NULL) ? &oact : NULL;
372 if (actp) {
373 error = copyin(uap->act, actp, sizeof(act));
374 if (error)
375 return (error);
377 error = kern_sigaction(uap->sig, actp, oactp);
378 if (oactp && !error) {
379 error = copyout(oactp, uap->oact, sizeof(oact));
381 return (error);
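/*
 * A minimal userland sketch (not part of this file, handler name
 * illustrative) of the sigaction(2) path served by kern_sigaction() above.
 * SA_RESTART clears the signal from ps_sigintr, and SA_SIGINFO would select
 * the sa_sigaction form recorded in ps_siginfo.
 *
 *	static void
 *	handle_term(int sig)
 *	{
 *		// async-signal-safe work only
 *	}
 *
 *	struct sigaction sa;
 *
 *	memset(&sa, 0, sizeof(sa));
 *	sa.sa_handler = handle_term;
 *	sigemptyset(&sa.sa_mask);
 *	sa.sa_flags = SA_RESTART;
 *	sigaction(SIGTERM, &sa, NULL);
 */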
385 * Initialize signal state for process 0;
386 * set to ignore signals that are ignored by default.
388 void
389 siginit(struct proc *p)
391 int i;
393 for (i = 1; i <= NSIG; i++)
394 if (sigprop(i) & SA_IGNORE && i != SIGCONT)
395 SIGADDSET(p->p_sigignore, i);
399 * Reset signals for an exec of the specified process.
401 void
402 execsigs(struct proc *p)
404 struct sigacts *ps = p->p_sigacts;
405 struct lwp *lp;
406 int sig;
408 lp = ONLY_LWP_IN_PROC(p);
411 * Reset caught signals. Held signals remain held
412 * through p_sigmask (unless they were caught,
413 * and are now ignored by default).
415 while (SIGNOTEMPTY(p->p_sigcatch)) {
416 sig = sig_ffs(&p->p_sigcatch);
417 SIGDELSET(p->p_sigcatch, sig);
418 if (sigprop(sig) & SA_IGNORE) {
419 if (sig != SIGCONT)
420 SIGADDSET(p->p_sigignore, sig);
421 SIGDELSET(p->p_siglist, sig);
422 SIGDELSET(lp->lwp_siglist, sig);
424 ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
428 * Reset stack state to the user stack.
429 * Clear set of signals caught on the signal stack.
431 lp->lwp_sigstk.ss_flags = SS_DISABLE;
432 lp->lwp_sigstk.ss_size = 0;
433 lp->lwp_sigstk.ss_sp = 0;
434 lp->lwp_flag &= ~LWP_ALTSTACK;
436 * Reset the "no zombies if child dies" flag (PS_NOCLDWAIT), as Solaris does.
438 p->p_procsig->ps_flag &= ~PS_NOCLDWAIT;
442 * kern_sigprocmask() - MP SAFE ONLY IF p == curproc
444 * Manipulate signal mask. This routine is MP SAFE *ONLY* if
445 * p == curproc.
448 kern_sigprocmask(int how, sigset_t *set, sigset_t *oset)
450 struct thread *td = curthread;
451 struct lwp *lp = td->td_lwp;
452 int error;
454 if (oset != NULL)
455 *oset = lp->lwp_sigmask;
457 error = 0;
458 if (set != NULL) {
459 switch (how) {
460 case SIG_BLOCK:
461 SIG_CANTMASK(*set);
462 SIGSETOR(lp->lwp_sigmask, *set);
463 break;
464 case SIG_UNBLOCK:
465 SIGSETNAND(lp->lwp_sigmask, *set);
466 break;
467 case SIG_SETMASK:
468 SIG_CANTMASK(*set);
469 lp->lwp_sigmask = *set;
470 break;
471 default:
472 error = EINVAL;
473 break;
476 return (error);
480 * sigprocmask() - MP SAFE
483 sys_sigprocmask(struct sigprocmask_args *uap)
485 sigset_t set, oset;
486 sigset_t *setp, *osetp;
487 int error;
489 setp = (uap->set != NULL) ? &set : NULL;
490 osetp = (uap->oset != NULL) ? &oset : NULL;
491 if (setp) {
492 error = copyin(uap->set, setp, sizeof(set));
493 if (error)
494 return (error);
496 error = kern_sigprocmask(uap->how, setp, osetp);
497 if (osetp && !error) {
498 error = copyout(osetp, uap->oset, sizeof(oset));
500 return (error);
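/*
 * Userland sketch (illustrative, not compiled here) of the mask handling
 * in kern_sigprocmask() above.  SIGKILL and SIGSTOP are stripped by
 * SIG_CANTMASK for SIG_BLOCK and SIG_SETMASK, so they can never be
 * blocked.
 *
 *	sigset_t set, oset;
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGINT);
 *	sigprocmask(SIG_BLOCK, &set, &oset);	// block SIGINT
 *	// ... critical region ...
 *	sigprocmask(SIG_SETMASK, &oset, NULL);	// restore the previous mask
 */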
504 kern_sigpending(struct __sigset *set)
506 struct lwp *lp = curthread->td_lwp;
508 *set = lwp_sigpend(lp);
510 return (0);
514 sys_sigpending(struct sigpending_args *uap)
516 sigset_t set;
517 int error;
519 error = kern_sigpending(&set);
521 if (error == 0)
522 error = copyout(&set, uap->set, sizeof(set));
523 return (error);
527 * Suspend process until signal, providing mask to be set
528 * in the meantime.
531 kern_sigsuspend(struct __sigset *set)
533 struct thread *td = curthread;
534 struct lwp *lp = td->td_lwp;
535 struct proc *p = td->td_proc;
536 struct sigacts *ps = p->p_sigacts;
539 * When returning from sigsuspend, we want
540 * the old mask to be restored after the
541 * signal handler has finished. Thus, we
542 * save it here and mark the sigacts structure
543 * to indicate this.
545 lp->lwp_oldsigmask = lp->lwp_sigmask;
546 lp->lwp_flag |= LWP_OLDMASK;
548 SIG_CANTMASK(*set);
549 lp->lwp_sigmask = *set;
550 while (tsleep(ps, PCATCH, "pause", 0) == 0)
551 /* void */;
552 /* always return EINTR rather than ERESTART... */
553 return (EINTR);
557 * Historically the libc stub passed the mask by value rather than a
558 * pointer to save a copyin; here the mask is copied in from user space.
561 sys_sigsuspend(struct sigsuspend_args *uap)
563 sigset_t mask;
564 int error;
566 error = copyin(uap->sigmask, &mask, sizeof(mask));
567 if (error)
568 return (error);
570 error = kern_sigsuspend(&mask);
572 return (error);
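/*
 * Sketch of the usual sigsuspend(2) idiom served by kern_sigsuspend()
 * above (names illustrative; got_usr1 would be a volatile sig_atomic_t set
 * by the handler).  Since kern_sigsuspend() always returns EINTR, userland
 * sees -1/EINTR once a handler has run.
 *
 *	sigset_t block, old;
 *
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &block, &old);
 *	while (!got_usr1)
 *		sigsuspend(&old);	// sleep with the pre-block mask installed
 *	sigprocmask(SIG_SETMASK, &old, NULL);
 */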
576 kern_sigaltstack(struct sigaltstack *ss, struct sigaltstack *oss)
578 struct thread *td = curthread;
579 struct lwp *lp = td->td_lwp;
580 struct proc *p = td->td_proc;
582 if ((lp->lwp_flag & LWP_ALTSTACK) == 0)
583 lp->lwp_sigstk.ss_flags |= SS_DISABLE;
585 if (oss)
586 *oss = lp->lwp_sigstk;
588 if (ss) {
589 if (ss->ss_flags & SS_DISABLE) {
590 if (lp->lwp_sigstk.ss_flags & SS_ONSTACK)
591 return (EINVAL);
592 lp->lwp_flag &= ~LWP_ALTSTACK;
593 lp->lwp_sigstk.ss_flags = ss->ss_flags;
594 } else {
595 if (ss->ss_size < p->p_sysent->sv_minsigstksz)
596 return (ENOMEM);
597 lp->lwp_flag |= LWP_ALTSTACK;
598 lp->lwp_sigstk = *ss;
602 return (0);
606 sys_sigaltstack(struct sigaltstack_args *uap)
608 stack_t ss, oss;
609 int error;
611 if (uap->ss) {
612 error = copyin(uap->ss, &ss, sizeof(ss));
613 if (error)
614 return (error);
617 error = kern_sigaltstack(uap->ss ? &ss : NULL,
618 uap->oss ? &oss : NULL);
620 if (error == 0 && uap->oss)
621 error = copyout(&oss, uap->oss, sizeof(*uap->oss));
622 return (error);
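/*
 * Illustrative userland setup of an alternate signal stack so the
 * LWP_ALTSTACK/SA_ONSTACK logic above is exercised, e.g. to handle SIGSEGV
 * after a stack overflow.  An ss_size below sv_minsigstksz is rejected
 * with ENOMEM.
 *
 *	static char stk[SIGSTKSZ];
 *	stack_t ss;
 *	struct sigaction sa;
 *
 *	ss.ss_sp = stk;
 *	ss.ss_size = sizeof(stk);
 *	ss.ss_flags = 0;
 *	sigaltstack(&ss, NULL);
 *
 *	memset(&sa, 0, sizeof(sa));
 *	sa.sa_handler = handle_segv;	// illustrative handler
 *	sa.sa_flags = SA_ONSTACK;
 *	sigemptyset(&sa.sa_mask);
 *	sigaction(SIGSEGV, &sa, NULL);
 */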
626 * Common code for kill process group/broadcast kill.
627 * cp is calling process.
629 struct killpg_info {
630 int nfound;
631 int sig;
634 static int killpg_all_callback(struct proc *p, void *data);
636 static int
637 dokillpg(int sig, int pgid, int all)
639 struct killpg_info info;
640 struct proc *cp = curproc;
641 struct proc *p;
642 struct pgrp *pgrp;
644 info.nfound = 0;
645 info.sig = sig;
647 if (all) {
649 * broadcast
651 allproc_scan(killpg_all_callback, &info);
652 } else {
653 if (pgid == 0) {
655 * zero pgid means send to my process group.
657 pgrp = cp->p_pgrp;
658 } else {
659 pgrp = pgfind(pgid);
660 if (pgrp == NULL)
661 return (ESRCH);
663 lockmgr(&pgrp->pg_lock, LK_EXCLUSIVE);
664 LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
665 if (p->p_pid <= 1 ||
666 p->p_stat == SZOMB ||
667 (p->p_flag & P_SYSTEM) ||
668 !CANSIGNAL(p, sig)) {
669 continue;
671 ++info.nfound;
672 if (sig)
673 ksignal(p, sig);
675 lockmgr(&pgrp->pg_lock, LK_RELEASE);
677 return (info.nfound ? 0 : ESRCH);
680 static int
681 killpg_all_callback(struct proc *p, void *data)
683 struct killpg_info *info = data;
685 if (p->p_pid <= 1 || (p->p_flag & P_SYSTEM) ||
686 p == curproc || !CANSIGNAL(p, info->sig)) {
687 return (0);
689 ++info->nfound;
690 if (info->sig)
691 ksignal(p, info->sig);
692 return(0);
696 kern_kill(int sig, int pid)
698 struct thread *td = curthread;
699 struct proc *p = td->td_proc;
701 if ((u_int)sig > _SIG_MAXSIG)
702 return (EINVAL);
703 if (pid > 0) {
704 /* kill single process */
705 if ((p = pfind(pid)) == NULL)
706 return (ESRCH);
707 if (!CANSIGNAL(p, sig))
708 return (EPERM);
709 if (sig)
710 ksignal(p, sig);
711 return (0);
713 switch (pid) {
714 case -1: /* broadcast signal */
715 return (dokillpg(sig, 0, 1));
716 case 0: /* signal own process group */
717 return (dokillpg(sig, 0, 0));
718 default: /* negative explicit process group */
719 return (dokillpg(sig, -pid, 0));
721 /* NOTREACHED */
725 sys_kill(struct kill_args *uap)
727 int error;
729 error = kern_kill(uap->signum, uap->pid);
731 return (error);
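/*
 * The pid decoding above gives kill(2) its usual semantics; a short
 * userland illustration (not compiled here):
 *
 *	kill(1234, SIGTERM);	// pid > 0: signal that single process
 *	kill(0, SIGHUP);	// pid == 0: signal the caller's process group
 *	kill(-1234, SIGTERM);	// pid < -1: signal process group 1234
 *	kill(-1, SIGUSR1);	// pid == -1: broadcast, subject to CANSIGNAL
 *	kill(1234, 0);		// sig == 0: existence/permission check only
 */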
735 * Send a signal to a process group.
737 void
738 gsignal(int pgid, int sig)
740 struct pgrp *pgrp;
742 if (pgid && (pgrp = pgfind(pgid)))
743 pgsignal(pgrp, sig, 0);
747 * Send a signal to a process group. If checkctty is 1,
748 * limit to members which have a controlling terminal.
750 * pg_lock interlocks against a fork that might be in progress, to
751 * ensure that the new child process picks up the signal.
753 void
754 pgsignal(struct pgrp *pgrp, int sig, int checkctty)
756 struct proc *p;
758 if (pgrp) {
759 lockmgr(&pgrp->pg_lock, LK_EXCLUSIVE);
760 LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
761 if (checkctty == 0 || p->p_flag & P_CONTROLT)
762 ksignal(p, sig);
764 lockmgr(&pgrp->pg_lock, LK_RELEASE);
769 * Send a signal caused by a trap to the current process.
770 * If it will be caught immediately, deliver it with correct code.
771 * Otherwise, post it normally.
773 void
774 trapsignal(struct lwp *lp, int sig, u_long code)
776 struct proc *p = lp->lwp_proc;
777 struct sigacts *ps = p->p_sigacts;
780 * If we are a virtual kernel running an emulated user process
781 * context, switch back to the virtual kernel context before
782 * trying to post the signal.
784 if (p->p_vkernel && p->p_vkernel->vk_current) {
785 struct trapframe *tf = curthread->td_lwp->lwp_md.md_regs;
786 tf->tf_trapno = 0;
787 vkernel_trap(p, tf);
791 if ((p->p_flag & P_TRACED) == 0 && SIGISMEMBER(p->p_sigcatch, sig) &&
792 !SIGISMEMBER(lp->lwp_sigmask, sig)) {
793 lp->lwp_ru.ru_nsignals++;
794 #ifdef KTRACE
795 if (KTRPOINT(lp->lwp_thread, KTR_PSIG))
796 ktrpsig(p, sig, ps->ps_sigact[_SIG_IDX(sig)],
797 &lp->lwp_sigmask, code);
798 #endif
799 (*p->p_sysent->sv_sendsig)(ps->ps_sigact[_SIG_IDX(sig)], sig,
800 &lp->lwp_sigmask, code);
801 SIGSETOR(lp->lwp_sigmask, ps->ps_catchmask[_SIG_IDX(sig)]);
802 if (!SIGISMEMBER(ps->ps_signodefer, sig))
803 SIGADDSET(lp->lwp_sigmask, sig);
804 if (SIGISMEMBER(ps->ps_sigreset, sig)) {
806 * See kern_sigaction() for origin of this code.
808 SIGDELSET(p->p_sigcatch, sig);
809 if (sig != SIGCONT &&
810 sigprop(sig) & SA_IGNORE)
811 SIGADDSET(p->p_sigignore, sig);
812 ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
814 } else {
815 lp->lwp_code = code; /* XXX for core dump/debugger */
816 lp->lwp_sig = sig; /* XXX to verify code */
817 ksignal(p, sig);
822 * Send the signal to the process. If the signal has an action, the action
823 * is usually performed by the target process rather than the caller; we add
824 * the signal to the set of pending signals for the process.
826 * Exceptions:
827 * o When a stop signal is sent to a sleeping process that takes the
828 * default action, the process is stopped without awakening it.
829 * o SIGCONT restarts stopped processes (or puts them back to sleep)
830 * regardless of the signal action (eg, blocked or ignored).
832 * Other ignored signals are discarded immediately.
834 void
835 ksignal(struct proc *p, int sig)
837 /* XXX lwp more intelligent lwp choice needed */
838 struct lwp *lp = FIRST_LWP_IN_PROC(p);
839 int prop;
840 sig_t action;
842 if (sig > _SIG_MAXSIG || sig <= 0) {
843 kprintf("ksignal: signal %d\n", sig);
844 panic("ksignal signal number");
847 crit_enter();
848 KNOTE(&p->p_klist, NOTE_SIGNAL | sig);
849 crit_exit();
851 prop = sigprop(sig);
854 * If proc is traced, always give parent a chance;
855 * if signal event is tracked by procfs, give *that*
856 * a chance, as well.
858 if ((p->p_flag & P_TRACED) || (p->p_stops & S_SIG)) {
859 action = SIG_DFL;
860 } else {
862 * If the signal is being ignored,
863 * then we forget about it immediately.
864 * (Note: we don't set SIGCONT in p_sigignore,
865 * and if it is set to SIG_IGN,
866 * action will be SIG_DFL here.)
868 if (SIGISMEMBER(p->p_sigignore, sig) || (p->p_flag & P_WEXIT))
869 return;
870 if (SIGISMEMBER(lp->lwp_sigmask, sig))
871 action = SIG_HOLD;
872 else if (SIGISMEMBER(p->p_sigcatch, sig))
873 action = SIG_CATCH;
874 else
875 action = SIG_DFL;
878 if (p->p_nice > NZERO && action == SIG_DFL && (prop & SA_KILL) &&
879 (p->p_flag & P_TRACED) == 0) {
880 p->p_nice = NZERO;
884 * If continuing, clear any pending STOP signals.
886 if (prop & SA_CONT)
887 SIG_STOPSIGMASK(p->p_siglist);
889 if (prop & SA_STOP) {
891 * If sending a tty stop signal to a member of an orphaned
892 * process group, discard the signal here if the action
893 * is default; don't stop the process below if sleeping,
894 * and don't clear any pending SIGCONT.
896 if (prop & SA_TTYSTOP && p->p_pgrp->pg_jobc == 0 &&
897 action == SIG_DFL) {
898 return;
900 SIG_CONTSIGMASK(p->p_siglist);
902 SIGADDSET(p->p_siglist, sig);
905 * Defer further processing for signals which are held,
906 * except that stopped processes must be continued by SIGCONT.
908 if (action == SIG_HOLD) {
909 if ((prop & SA_CONT) == 0 || p->p_stat != SSTOP)
910 return;
913 crit_enter();
916 /* XXX lwp handle stop/continue */
919 * LWP is in tsleep and not stopped
921 if (lp->lwp_stat == LSSLEEP && p->p_stat != SSTOP) {
923 * If the process is sleeping uninterruptibly
924 * we can't interrupt the sleep... the signal will
925 * be noticed when the process returns through
926 * trap() or syscall().
928 if ((lp->lwp_flag & LWP_SINTR) == 0)
929 goto out;
932 * If the process is sleeping and traced, make it runnable
933 * so it can discover the signal in issignal() and stop
934 * for the parent.
936 * If the process is stopped and traced, no further action
937 * is necessary.
939 if (p->p_flag & P_TRACED)
940 goto run;
943 * If the process is sleeping and SA_CONT, and the signal
944 * mode is SIG_DFL, then make the process runnable.
946 * However, do *NOT* set LWP_BREAKTSLEEP. We do not want
947 * a SIGCONT to terminate an interruptible tsleep early
948 * and generate a spurious EINTR.
950 if ((prop & SA_CONT) && action == SIG_DFL) {
951 SIGDELSET(p->p_siglist, sig);
952 goto run_no_break;
956 * If the process is sleeping and receives a STOP signal,
957 * process immediately if possible. All other (caught or
958 * default) signals cause the process to run.
960 if (prop & SA_STOP) {
961 if (action != SIG_DFL)
962 goto run;
965 * If a child holding parent blocked, stopping
966 * could cause deadlock. Take no action at this
967 * time.
969 if (p->p_flag & P_PPWAIT)
970 goto out;
973 * Do not actually try to manipulate the process while
974 * it is sleeping, simply set SSTOP to indicate that
975 * lwps should stop as soon as they safely can.
977 SIGDELSET(p->p_siglist, sig);
978 p->p_xstat = sig;
979 proc_stop(p, 1);
980 goto out;
984 * Otherwise the signal can interrupt the sleep.
986 goto run;
990 * Process is in tsleep and is stopped
992 if (lp->lwp_stat == LSSLEEP && p->p_stat == SSTOP) {
994 * If the process is stopped and is being traced, then no
995 * further action is necessary.
997 if (p->p_flag & P_TRACED)
998 goto out;
1001 * If the process is stopped and receives a KILL signal,
1002 * make the process runnable.
1004 if (sig == SIGKILL) {
1005 proc_unstop(p);
1006 goto out;
1010 * If the process is stopped and receives a CONT signal,
1011 * then try to make the process runnable again.
1013 if (prop & SA_CONT) {
1015 * If SIGCONT is default (or ignored), we continue the
1016 * process but don't leave the signal in p_siglist, as
1017 * it has no further action. If SIGCONT is held, we
1018 * continue the process and leave the signal in
1019 * p_siglist. If the process catches SIGCONT, let it
1020 * handle the signal itself.
1022 if (action == SIG_DFL)
1023 SIGDELSET(p->p_siglist, sig);
1024 proc_unstop(p);
1025 if (action == SIG_CATCH)
1026 goto run;
1027 goto out;
1031 * If the process is stopped and receives another STOP
1032 * signal, we do not need to stop it again. If we did
1033 * the shell could get confused.
1035 if (prop & SA_STOP) {
1036 SIGDELSET(p->p_siglist, sig);
1037 goto out;
1041 * Otherwise the process is sleeping interruptibly but
1042 * is stopped, just set the LWP_BREAKTSLEEP flag and take
1043 * no further action. The next runnable action will wake
1044 * the process up.
1046 lp->lwp_flag |= LWP_BREAKTSLEEP;
1047 goto out;
1051 * Otherwise the process is running
1053 * SRUN, SIDL, SZOMB do nothing with the signal,
1054 * other than kicking ourselves if we are running.
1055 * It will either never be noticed, or noticed very soon.
1057 * Note that p_thread may be NULL or may not be completely
1058 * initialized if the process is in the SIDL or SZOMB state.
1060 * For SMP we may have to forward the request to another cpu.
1061 * YYY the MP lock prevents the target process from moving
1062 * to another cpu, see kern/kern_switch.c
1064 * If the target thread is waiting on its message port,
1065 * wakeup the target thread so it can check (or ignore)
1066 * the new signal. YYY needs cleanup.
1068 if (lp == lwkt_preempted_proc()) {
1069 signotify();
1070 } else if (lp->lwp_stat == LSRUN) {
1071 struct thread *td = lp->lwp_thread;
1073 KASSERT(td != NULL,
1074 ("pid %d/%d NULL lwp_thread stat %d flags %08x/%08x",
1075 p->p_pid, lp->lwp_tid, lp->lwp_stat, p->p_flag, lp->lwp_flag));
1077 #ifdef SMP
1078 if (td->td_gd != mycpu)
1079 lwkt_send_ipiq(td->td_gd, signotify_remote, lp);
1080 else
1081 #endif
1082 if (td->td_msgport.mp_flags & MSGPORTF_WAITING)
1083 lwkt_schedule(td);
1085 goto out;
1086 /*NOTREACHED*/
1087 run:
1089 * Make runnable and break out of any tsleep as well.
1091 lp->lwp_flag |= LWP_BREAKTSLEEP;
1092 run_no_break:
1093 setrunnable(lp);
1094 out:
1095 crit_exit();
1098 #ifdef SMP
1101 * This function is called via an IPI. We will be in a critical section but
1102 * the MP lock will NOT be held. Also note that by the time the ipi message
1103 * gets to us the process 'p' (arg) may no longer be scheduled or even valid.
1105 static void
1106 signotify_remote(void *arg)
1108 struct lwp *lp = arg;
1110 if (lp == lwkt_preempted_proc()) {
1111 signotify();
1112 } else {
1113 struct thread *td = lp->lwp_thread;
1114 if (td->td_msgport.mp_flags & MSGPORTF_WAITING)
1115 lwkt_schedule(td);
1119 #endif
1121 void
1122 proc_stop(struct proc *p, int notify)
1124 /* XXX lwp */
1125 p->p_stat = SSTOP;
1126 p->p_flag &= ~P_WAITED;
1127 wakeup(p->p_pptr);
1128 if (notify > 1 ||
1129 (notify && (p->p_pptr->p_procsig->ps_flag & PS_NOCLDSTOP) == 0))
1130 ksignal(p->p_pptr, SIGCHLD);
1133 void
1134 proc_unstop(struct proc *p)
1136 struct lwp *lp = FIRST_LWP_IN_PROC(p); /* XXX lwp */
1138 p->p_stat = SACTIVE;
1139 setrunnable(lp);
1142 static int
1143 kern_sigtimedwait(sigset_t waitset, siginfo_t *info, struct timespec *timeout)
1145 sigset_t savedmask, set;
1146 struct proc *p = curproc;
1147 struct lwp *lp = curthread->td_lwp;
1148 int error, sig, hz, timevalid = 0;
1149 struct timespec rts, ets, ts;
1150 struct timeval tv;
1152 error = 0;
1153 sig = 0;
1154 SIG_CANTMASK(waitset);
1155 savedmask = lp->lwp_sigmask;
1157 if (timeout) {
1158 if (timeout->tv_sec >= 0 && timeout->tv_nsec >= 0 &&
1159 timeout->tv_nsec < 1000000000) {
1160 timevalid = 1;
1161 getnanouptime(&rts);
1162 ets = rts;
1163 timespecadd(&ets, timeout);
1167 for (;;) {
1168 set = lwp_sigpend(lp);
1169 SIGSETAND(set, waitset);
1170 if ((sig = sig_ffs(&set)) != 0) {
1171 SIGFILLSET(lp->lwp_sigmask);
1172 SIGDELSET(lp->lwp_sigmask, sig);
1173 SIG_CANTMASK(lp->lwp_sigmask);
1174 sig = issignal(lp);
1176 * It may be a STOP signal; in that case issignal
1177 * returns 0 because we may stop there and a new
1178 * signal can arrive, so we should restart if we got
1179 * nothing.
1181 if (sig == 0)
1182 continue;
1183 else
1184 break;
1188 * The previous check found nothing, and the retry also found
1189 * nothing, so return the error status.
1191 if (error)
1192 break;
1195 * POSIX says this must be checked after looking for pending
1196 * signals.
1198 if (timeout) {
1199 if (!timevalid) {
1200 error = EINVAL;
1201 break;
1203 getnanouptime(&rts);
1204 if (timespeccmp(&rts, &ets, >=)) {
1205 error = EAGAIN;
1206 break;
1208 ts = ets;
1209 timespecsub(&ts, &rts);
1210 TIMESPEC_TO_TIMEVAL(&tv, &ts);
1211 hz = tvtohz_high(&tv);
1212 } else
1213 hz = 0;
1215 lp->lwp_sigmask = savedmask;
1216 SIGSETNAND(lp->lwp_sigmask, waitset);
1217 error = tsleep(&p->p_sigacts, PCATCH, "sigwt", hz);
1218 if (timeout) {
1219 if (error == ERESTART) {
1220 /* cannot restart a timed wait. */
1221 error = EINTR;
1222 } else if (error == EAGAIN) {
1223 /* we recalculate the timeout ourselves. */
1224 error = 0;
1227 /* Retry ... */
1230 lp->lwp_sigmask = savedmask;
1231 if (sig) {
1232 error = 0;
1233 bzero(info, sizeof(*info));
1234 info->si_signo = sig;
1235 lwp_delsig(lp, sig); /* take the signal! */
1237 if (sig == SIGKILL)
1238 sigexit(p, sig);
1240 return (error);
1244 sys_sigtimedwait(struct sigtimedwait_args *uap)
1246 struct timespec ts;
1247 struct timespec *timeout;
1248 sigset_t set;
1249 siginfo_t info;
1250 int error;
1252 if (uap->timeout) {
1253 error = copyin(uap->timeout, &ts, sizeof(ts));
1254 if (error)
1255 return (error);
1256 timeout = &ts;
1257 } else {
1258 timeout = NULL;
1260 error = copyin(uap->set, &set, sizeof(set));
1261 if (error)
1262 return (error);
1263 error = kern_sigtimedwait(set, &info, timeout);
1264 if (error)
1265 return (error);
1266 if (uap->info)
1267 error = copyout(&info, uap->info, sizeof(info));
1268 /* Repost if we got an error. */
1269 if (error)
1270 ksignal(curproc, info.si_signo);
1271 else
1272 uap->sysmsg_result = info.si_signo;
1273 return (error);
1277 sys_sigwaitinfo(struct sigwaitinfo_args *uap)
1279 siginfo_t info;
1280 sigset_t set;
1281 int error;
1283 error = copyin(uap->set, &set, sizeof(set));
1284 if (error)
1285 return (error);
1286 error = kern_sigtimedwait(set, &info, NULL);
1287 if (error)
1288 return (error);
1289 if (uap->info)
1290 error = copyout(&info, uap->info, sizeof(info));
1291 /* Repost if we got an error. */
1292 if (error)
1293 ksignal(curproc, info.si_signo);
1294 else
1295 uap->sysmsg_result = info.si_signo;
1296 return (error);
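/*
 * Userland sketch (illustrative) of the synchronous wait implemented by
 * kern_sigtimedwait() above.  The signal is blocked first so it stays
 * pending instead of running a handler, then collected with a timeout;
 * EAGAIN is returned when the timeout expires.
 *
 *	sigset_t set;
 *	siginfo_t info;
 *	struct timespec ts = { 5, 0 };	// five seconds
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	if (sigtimedwait(&set, &info, &ts) == SIGUSR1)
 *		;	// info.si_signo identifies the collected signal
 */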
1300 * If the current process has received a signal that would interrupt a
1301 * system call, return EINTR or ERESTART as appropriate.
1304 iscaught(struct lwp *lp)
1306 struct proc *p = lp->lwp_proc;
1307 int sig;
1309 if (p) {
1310 if ((sig = CURSIG(lp)) != 0) {
1311 if (SIGISMEMBER(p->p_sigacts->ps_sigintr, sig))
1312 return (EINTR);
1313 return (ERESTART);
1316 return(EWOULDBLOCK);
1320 * If the current process has received a signal (should be caught or cause
1321 * termination, should interrupt current syscall), return the signal number.
1322 * Stop signals with default action are processed immediately, then cleared;
1323 * they aren't returned. This is checked after each entry to the system for
1324 * a syscall or trap (though this can usually be done without calling issignal
1325 * by checking the pending signal masks in the CURSIG macro.) The normal call
1326 * sequence is
1328 * This routine is called via CURSIG/__cursig and the MP lock might not be
1329 * held. Obtain the MP lock for the duration of the operation.
1331 * while (sig = CURSIG(curproc))
1332 * postsig(sig);
1335 issignal(struct lwp *lp)
1337 struct proc *p = lp->lwp_proc;
1338 sigset_t mask;
1339 int sig, prop;
1341 get_mplock();
1342 for (;;) {
1343 int traced = (p->p_flag & P_TRACED) || (p->p_stops & S_SIG);
1345 mask = lwp_sigpend(lp);
1346 SIGSETNAND(mask, lp->lwp_sigmask);
1347 if (p->p_flag & P_PPWAIT)
1348 SIG_STOPSIGMASK(mask);
1349 if (!SIGNOTEMPTY(mask)) { /* no signal to send */
1350 rel_mplock();
1351 return (0);
1353 sig = sig_ffs(&mask);
1355 STOPEVENT(p, S_SIG, sig);
1358 * We should see pending but ignored signals
1359 * only if P_TRACED was on when they were posted.
1361 if (SIGISMEMBER(p->p_sigignore, sig) && (traced == 0)) {
1362 lwp_delsig(lp, sig);
1363 continue;
1365 if ((p->p_flag & P_TRACED) && (p->p_flag & P_PPWAIT) == 0) {
1367 * If traced, always stop, and stay stopped until
1368 * released by the parent.
1370 * NOTE: SSTOP may get cleared during the loop,
1371 * but we do not re-notify the parent if we have
1372 * to loop several times waiting for the parent
1373 * to let us continue.
1375 * XXX not sure if this is still true
1377 p->p_xstat = sig;
1378 proc_stop(p, 2);
1379 do {
1380 tstop();
1381 } while (!trace_req(p) && (p->p_flag & P_TRACED));
1384 * If parent wants us to take the signal,
1385 * then it will leave it in p->p_xstat;
1386 * otherwise we just look for signals again.
1388 lwp_delsig(lp, sig); /* clear old signal */
1389 sig = p->p_xstat;
1390 if (sig == 0)
1391 continue;
1394 * Put the new signal into p_siglist. If the
1395 * signal is being masked, look for other signals.
1397 /* XXX should run via ksignal? */
1398 SIGADDSET(p->p_siglist, sig);
1399 if (SIGISMEMBER(lp->lwp_sigmask, sig))
1400 continue;
1403 * If the traced bit got turned off, go back up
1404 * to the top to rescan signals. This ensures
1405 * that p_sig* and ps_sigact are consistent.
1407 if ((p->p_flag & P_TRACED) == 0)
1408 continue;
1411 prop = sigprop(sig);
1414 * Decide whether the signal should be returned.
1415 * Return the signal's number, or fall through
1416 * to clear it from the pending mask.
1418 switch ((int)(intptr_t)p->p_sigacts->ps_sigact[_SIG_IDX(sig)]) {
1419 case (int)SIG_DFL:
1421 * Don't take default actions on system processes.
1423 if (p->p_pid <= 1) {
1424 #ifdef DIAGNOSTIC
1426 * Are you sure you want to ignore SIGSEGV
1427 * in init? XXX
1429 kprintf("Process (pid %lu) got signal %d\n",
1430 (u_long)p->p_pid, sig);
1431 #endif
1432 break; /* == ignore */
1436 * Handle the in-kernel checkpoint action
1438 if (prop & SA_CKPT) {
1439 checkpoint_signal_handler(lp);
1440 break;
1444 * If there is a pending stop signal to process
1445 * with default action, stop here,
1446 * then clear the signal. However,
1447 * if process is member of an orphaned
1448 * process group, ignore tty stop signals.
1450 if (prop & SA_STOP) {
1451 if (p->p_flag & P_TRACED ||
1452 (p->p_pgrp->pg_jobc == 0 &&
1453 prop & SA_TTYSTOP))
1454 break; /* == ignore */
1455 p->p_xstat = sig;
1456 proc_stop(p, 1);
1457 while (p->p_stat == SSTOP) {
1458 tstop();
1460 break;
1461 } else if (prop & SA_IGNORE) {
1463 * Except for SIGCONT, shouldn't get here.
1464 * Default action is to ignore; drop it.
1466 break; /* == ignore */
1467 } else {
1468 rel_mplock();
1469 return (sig);
1472 /*NOTREACHED*/
1474 case (int)SIG_IGN:
1476 * Masking above should prevent us ever trying
1477 * to take action on an ignored signal other
1478 * than SIGCONT, unless process is traced.
1480 if ((prop & SA_CONT) == 0 &&
1481 (p->p_flag & P_TRACED) == 0)
1482 kprintf("issignal\n");
1483 break; /* == ignore */
1485 default:
1487 * This signal has an action, let
1488 * postsig() process it.
1490 rel_mplock();
1491 return (sig);
1493 lwp_delsig(lp, sig); /* take the signal! */
1495 /* NOTREACHED */
1499 * Take the action for the specified signal
1500 * from the current set of pending signals.
1502 void
1503 postsig(int sig)
1505 struct lwp *lp = curthread->td_lwp;
1506 struct proc *p = lp->lwp_proc;
1507 struct sigacts *ps = p->p_sigacts;
1508 sig_t action;
1509 sigset_t returnmask;
1510 int code;
1512 KASSERT(sig != 0, ("postsig"));
1515 * If we are a virtual kernel running an emulated user process
1516 * context, switch back to the virtual kernel context before
1517 * trying to post the signal.
1519 if (p->p_vkernel && p->p_vkernel->vk_current) {
1520 struct trapframe *tf = curthread->td_lwp->lwp_md.md_regs;
1521 tf->tf_trapno = 0;
1522 vkernel_trap(p, tf);
1525 lwp_delsig(lp, sig);
1526 action = ps->ps_sigact[_SIG_IDX(sig)];
1527 #ifdef KTRACE
1528 if (KTRPOINT(lp->lwp_thread, KTR_PSIG))
1529 ktrpsig(p, sig, action, lp->lwp_flag & LWP_OLDMASK ?
1530 &lp->lwp_oldsigmask : &lp->lwp_sigmask, 0);
1531 #endif
1532 STOPEVENT(p, S_SIG, sig);
1534 if (action == SIG_DFL) {
1536 * Default action, where the default is to kill
1537 * the process. (Other cases were ignored above.)
1539 sigexit(p, sig);
1540 /* NOTREACHED */
1541 } else {
1543 * If we get here, the signal must be caught.
1545 KASSERT(action != SIG_IGN && !SIGISMEMBER(lp->lwp_sigmask, sig),
1546 ("postsig action"));
1548 crit_enter();
1551 * Reset the signal handler if asked to
1553 if (SIGISMEMBER(ps->ps_sigreset, sig)) {
1555 * See kern_sigaction() for origin of this code.
1557 SIGDELSET(p->p_sigcatch, sig);
1558 if (sig != SIGCONT &&
1559 sigprop(sig) & SA_IGNORE)
1560 SIGADDSET(p->p_sigignore, sig);
1561 ps->ps_sigact[_SIG_IDX(sig)] = SIG_DFL;
1565 * Handle the mailbox case. Copyout to the appropriate
1566 * location but do not generate a signal frame. The system
1567 * call simply returns EINTR and the user is responsible for
1568 * polling the mailbox.
1570 if (SIGISMEMBER(ps->ps_sigmailbox, sig)) {
1571 int sig_copy = sig;
1572 copyout(&sig_copy, (void *)action, sizeof(int));
1573 curproc->p_flag |= P_MAILBOX;
1574 crit_exit();
1575 goto done;
1579 * Set the signal mask and calculate the mask to restore
1580 * when the signal function returns.
1582 * Special case: user has done a sigsuspend. Here the
1583 * current mask is not of interest, but rather the
1584 * mask from before the sigsuspend is what we want
1585 * restored after the signal processing is completed.
1587 if (lp->lwp_flag & LWP_OLDMASK) {
1588 returnmask = lp->lwp_oldsigmask;
1589 lp->lwp_flag &= ~LWP_OLDMASK;
1590 } else {
1591 returnmask = lp->lwp_sigmask;
1594 SIGSETOR(lp->lwp_sigmask, ps->ps_catchmask[_SIG_IDX(sig)]);
1595 if (!SIGISMEMBER(ps->ps_signodefer, sig))
1596 SIGADDSET(lp->lwp_sigmask, sig);
1598 crit_exit();
1599 lp->lwp_ru.ru_nsignals++;
1600 if (lp->lwp_sig != sig) {
1601 code = 0;
1602 } else {
1603 code = lp->lwp_code;
1604 lp->lwp_code = 0;
1605 lp->lwp_sig = 0;
1607 (*p->p_sysent->sv_sendsig)(action, sig, &returnmask, code);
1609 done:
1614 * Kill the current process for stated reason.
1616 void
1617 killproc(struct proc *p, char *why)
1619 log(LOG_ERR, "pid %d (%s), uid %d, was killed: %s\n",
1620 p->p_pid, p->p_comm,
1621 p->p_ucred ? p->p_ucred->cr_uid : -1, why);
1622 ksignal(p, SIGKILL);
1626 * Force the current process to exit with the specified signal, dumping core
1627 * if appropriate. We bypass the normal tests for masked and caught signals,
1628 * allowing unrecoverable failures to terminate the process without changing
1629 * signal state. Mark the accounting record with the signal termination.
1630 * If dumping core, save the signal number for the debugger. Calls exit and
1631 * does not return.
1633 void
1634 sigexit(struct proc *p, int sig)
1636 struct lwp *lp = FIRST_LWP_IN_PROC(p); /* XXX lwp */
1638 p->p_acflag |= AXSIG;
1639 if (sigprop(sig) & SA_CORE) {
1640 lp->lwp_sig = sig;
1642 * Log signals which would cause core dumps
1643 * (Log as LOG_INFO to appease those who don't want
1644 * these messages.)
1645 * XXX : Todo, as well as euid, write out ruid too
1647 if (coredump(lp, sig) == 0)
1648 sig |= WCOREFLAG;
1649 if (kern_logsigexit)
1650 log(LOG_INFO,
1651 "pid %d (%s), uid %d: exited on signal %d%s\n",
1652 p->p_pid, p->p_comm,
1653 p->p_ucred ? p->p_ucred->cr_uid : -1,
1654 sig &~ WCOREFLAG,
1655 sig & WCOREFLAG ? " (core dumped)" : "");
1657 exit1(W_EXITCODE(0, sig));
1658 /* NOTREACHED */
1661 static char corefilename[MAXPATHLEN+1] = {"%N.core"};
1662 SYSCTL_STRING(_kern, OID_AUTO, corefile, CTLFLAG_RW, corefilename,
1663 sizeof(corefilename), "process corefile name format string");
1666 * expand_name(name, uid, pid)
1667 * Expand the name described in corefilename, using name, uid, and pid.
1668 * corefilename is a kprintf-like string, with three format specifiers:
1669 * %N name of process ("name")
1670 * %P process id (pid)
1671 * %U user id (uid)
1672 * For example, "%N.core" is the default; they can be disabled completely
1673 * by using "/dev/null", or all core files can be stored in "/cores/%U/%N-%P".
1674 * This is controlled by the sysctl variable kern.corefile (see above).
1677 static char *
1678 expand_name(const char *name, uid_t uid, pid_t pid)
1680 char *temp;
1681 char buf[11]; /* room for a 32-bit pid/uid in decimal */
1682 int i, n;
1683 char *format = corefilename;
1684 size_t namelen;
1686 temp = kmalloc(MAXPATHLEN + 1, M_TEMP, M_NOWAIT);
1687 if (temp == NULL)
1688 return NULL;
1689 namelen = strlen(name);
1690 for (i = 0, n = 0; n < MAXPATHLEN && format[i]; i++) {
1691 int l;
1692 switch (format[i]) {
1693 case '%': /* Format character */
1694 i++;
1695 switch (format[i]) {
1696 case '%':
1697 temp[n++] = '%';
1698 break;
1699 case 'N': /* process name */
1700 if ((n + namelen) > MAXPATHLEN) {
1701 log(LOG_ERR, "pid %d (%s), uid (%u): Path `%s%s' is too long\n",
1702 pid, name, uid, temp, name);
1703 kfree(temp, M_TEMP);
1704 return NULL;
1706 memcpy(temp+n, name, namelen);
1707 n += namelen;
1708 break;
1709 case 'P': /* process id */
1710 l = ksprintf(buf, "%u", pid);
1711 if ((n + l) > MAXPATHLEN) {
1712 log(LOG_ERR, "pid %d (%s), uid (%u): Path `%s%s' is too long\n",
1713 pid, name, uid, temp, name);
1714 kfree(temp, M_TEMP);
1715 return NULL;
1717 memcpy(temp+n, buf, l);
1718 n += l;
1719 break;
1720 case 'U': /* user id */
1721 l = ksprintf(buf, "%u", uid);
1722 if ((n + l) > MAXPATHLEN) {
1723 log(LOG_ERR, "pid %d (%s), uid (%u): Path `%s%s' is too long\n",
1724 pid, name, uid, temp, name);
1725 kfree(temp, M_TEMP);
1726 return NULL;
1728 memcpy(temp+n, buf, l);
1729 n += l;
1730 break;
1731 default:
1732 log(LOG_ERR, "Unknown format character %c in `%s'\n", format[i], format);
1734 break;
1735 default:
1736 temp[n++] = format[i];
1739 temp[n] = '\0';
1740 return temp;
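/*
 * A worked example of the expansion above (values illustrative): with
 * kern.corefile set to "/cores/%U/%N-%P", a process named "vi" running as
 * uid 1001 with pid 4242 dumps to "/cores/1001/vi-4242"; the default
 * "%N.core" simply yields "vi.core" in the current directory.
 */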
1744 * Dump a process' core. The main routine does some
1745 * policy checking, and creates the name of the coredump;
1746 * then it passes on a vnode and a size limit to the process-specific
1747 * coredump routine if there is one; if there _is not_ one, it returns
1748 * ENOSYS; otherwise it returns the error from the process-specific routine.
1750 * The parameter `lp' is the lwp which triggered the coredump.
1753 static int
1754 coredump(struct lwp *lp, int sig)
1756 struct proc *p = lp->lwp_proc;
1757 struct vnode *vp;
1758 struct ucred *cred = p->p_ucred;
1759 struct flock lf;
1760 struct nlookupdata nd;
1761 struct vattr vattr;
1762 int error, error1;
1763 char *name; /* name of corefile */
1764 off_t limit;
1766 STOPEVENT(p, S_CORE, 0);
1768 if (((sugid_coredump == 0) && p->p_flag & P_SUGID) || do_coredump == 0)
1769 return (EFAULT);
1772 * Note that the bulk of limit checking is done after
1773 * the corefile is created. The exception is if the limit
1774 * for corefiles is 0, in which case we don't bother
1775 * creating the corefile at all. This layout means that
1776 * a corefile is truncated instead of not being created,
1777 * if it is larger than the limit.
1779 limit = p->p_rlimit[RLIMIT_CORE].rlim_cur;
1780 if (limit == 0)
1781 return EFBIG;
1783 name = expand_name(p->p_comm, p->p_ucred->cr_uid, p->p_pid);
1784 if (name == NULL)
1785 return (EINVAL);
1786 error = nlookup_init(&nd, name, UIO_SYSSPACE, NLC_LOCKVP);
1787 if (error == 0)
1788 error = vn_open(&nd, NULL, O_CREAT | FWRITE | O_NOFOLLOW, S_IRUSR | S_IWUSR);
1789 kfree(name, M_TEMP);
1790 if (error) {
1791 nlookup_done(&nd);
1792 return (error);
1794 vp = nd.nl_open_vp;
1795 nd.nl_open_vp = NULL;
1796 nlookup_done(&nd);
1798 vn_unlock(vp);
1799 lf.l_whence = SEEK_SET;
1800 lf.l_start = 0;
1801 lf.l_len = 0;
1802 lf.l_type = F_WRLCK;
1803 error = VOP_ADVLOCK(vp, (caddr_t)p, F_SETLK, &lf, 0);
1804 if (error)
1805 goto out2;
1807 /* Don't dump to non-regular files or files with links. */
1808 if (vp->v_type != VREG ||
1809 VOP_GETATTR(vp, &vattr) || vattr.va_nlink != 1) {
1810 error = EFAULT;
1811 goto out1;
1814 VATTR_NULL(&vattr);
1815 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1816 vattr.va_size = 0;
1817 VOP_SETATTR(vp, &vattr, cred);
1818 p->p_acflag |= ACORE;
1819 vn_unlock(vp);
1821 error = p->p_sysent->sv_coredump ?
1822 p->p_sysent->sv_coredump(lp, sig, vp, limit) : ENOSYS;
1824 out1:
1825 lf.l_type = F_UNLCK;
1826 VOP_ADVLOCK(vp, (caddr_t)p, F_UNLCK, &lf, 0);
1827 out2:
1828 error1 = vn_close(vp, FWRITE);
1829 if (error == 0)
1830 error = error1;
1831 return (error);
1835 * Nonexistent system call-- signal process (may want to handle it).
1836 * Flag error in case process won't see signal immediately (blocked or ignored).
1838 /* ARGSUSED */
1840 sys_nosys(struct nosys_args *args)
1842 ksignal(curproc, SIGSYS);
1843 return (EINVAL);
1847 * Send a SIGIO or SIGURG signal to a process or process group using
1848 * stored credentials rather than those of the current process.
1850 void
1851 pgsigio(struct sigio *sigio, int sig, int checkctty)
1853 if (sigio == NULL)
1854 return;
1856 if (sigio->sio_pgid > 0) {
1857 if (CANSIGIO(sigio->sio_ruid, sigio->sio_ucred,
1858 sigio->sio_proc))
1859 ksignal(sigio->sio_proc, sig);
1860 } else if (sigio->sio_pgid < 0) {
1861 struct proc *p;
1863 lockmgr(&sigio->sio_pgrp->pg_lock, LK_EXCLUSIVE);
1864 LIST_FOREACH(p, &sigio->sio_pgrp->pg_members, p_pglist) {
1865 if (CANSIGIO(sigio->sio_ruid, sigio->sio_ucred, p) &&
1866 (checkctty == 0 || (p->p_flag & P_CONTROLT)))
1867 ksignal(p, sig);
1869 lockmgr(&sigio->sio_pgrp->pg_lock, LK_RELEASE);
1873 static int
1874 filt_sigattach(struct knote *kn)
1876 struct proc *p = curproc;
1878 kn->kn_ptr.p_proc = p;
1879 kn->kn_flags |= EV_CLEAR; /* automatically set */
1881 /* XXX lock the proc here while adding to the list? */
1882 SLIST_INSERT_HEAD(&p->p_klist, kn, kn_selnext);
1884 return (0);
1887 static void
1888 filt_sigdetach(struct knote *kn)
1890 struct proc *p = kn->kn_ptr.p_proc;
1892 SLIST_REMOVE(&p->p_klist, kn, knote, kn_selnext);
1896 * signal knotes are shared with proc knotes, so we apply a mask to
1897 * the hint in order to differentiate them from process hints. This
1898 * could be avoided by using a signal-specific knote list, but probably
1899 * isn't worth the trouble.
1901 static int
1902 filt_signal(struct knote *kn, long hint)
1904 if (hint & NOTE_SIGNAL) {
1905 hint &= ~NOTE_SIGNAL;
1907 if (kn->kn_id == hint)
1908 kn->kn_data++;
1910 return (kn->kn_data != 0);
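/*
 * Userland sketch (illustrative) of the EVFILT_SIGNAL consumer these
 * filterops serve.  Since the knote is EV_CLEAR, kev.data reports how many
 * times the signal was posted since the previous retrieval.
 *
 *	struct kevent kev;
 *	int kq = kqueue();
 *
 *	signal(SIGUSR1, SIG_IGN);	// delivery not required for kqueue to see it
 *	EV_SET(&kev, SIGUSR1, EVFILT_SIGNAL, EV_ADD, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);	// register
 *	kevent(kq, NULL, 0, &kev, 1, NULL);	// wait; kev.data = count of SIGUSR1
 */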