4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
23 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
25 * Copyright (c) 2014, Joyent, Inc. All rights reserved.
28 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
29 /* All Rights Reserved */
31 #include <sys/param.h>
32 #include <sys/types.h>
33 #include <sys/bitmap.h>
34 #include <sys/sysmacros.h>
35 #include <sys/systm.h>
38 #include <sys/errno.h>
40 #include <sys/poll_impl.h> /* only needed for kludge in sigwaiting_send() */
41 #include <sys/signal.h>
42 #include <sys/siginfo.h>
43 #include <sys/fault.h>
44 #include <sys/ucontext.h>
45 #include <sys/procfs.h>
47 #include <sys/class.h>
49 #include <sys/procset.h>
51 #include <sys/cpuvar.h>
52 #include <sys/prsystm.h>
53 #include <sys/debug.h>
55 #include <sys/bitmap.h>
58 #include <sys/schedctl.h>
59 #include <sys/contract/process_impl.h>
60 #include <sys/cyclic.h>
61 #include <sys/dtrace.h>
64 const k_sigset_t nullsmask
= {0, 0, 0};
66 const k_sigset_t fillset
= /* MUST be contiguous */
67 {FILLSET0
, FILLSET1
, FILLSET2
};
69 const k_sigset_t cantmask
=
70 {CANTMASK0
, CANTMASK1
, CANTMASK2
};
72 const k_sigset_t cantreset
=
73 {(sigmask(SIGILL
)|sigmask(SIGTRAP
)|sigmask(SIGPWR
)), 0, 0};
75 const k_sigset_t ignoredefault
=
76 {(sigmask(SIGCONT
)|sigmask(SIGCLD
)|sigmask(SIGPWR
)
77 |sigmask(SIGWINCH
)|sigmask(SIGURG
)|sigmask(SIGWAITING
)),
78 (sigmask(SIGLWP
)|sigmask(SIGCANCEL
)|sigmask(SIGFREEZE
)
79 |sigmask(SIGTHAW
)|sigmask(SIGXRES
)|sigmask(SIGJVM1
)
80 |sigmask(SIGJVM2
)|sigmask(SIGINFO
)), 0};
82 const k_sigset_t stopdefault
=
83 {(sigmask(SIGSTOP
)|sigmask(SIGTSTP
)|sigmask(SIGTTOU
)|sigmask(SIGTTIN
)),
86 const k_sigset_t coredefault
=
87 {(sigmask(SIGQUIT
)|sigmask(SIGILL
)|sigmask(SIGTRAP
)|sigmask(SIGIOT
)
88 |sigmask(SIGEMT
)|sigmask(SIGFPE
)|sigmask(SIGBUS
)|sigmask(SIGSEGV
)
89 |sigmask(SIGSYS
)|sigmask(SIGXCPU
)|sigmask(SIGXFSZ
)), 0, 0};
91 const k_sigset_t holdvfork
=
92 {(sigmask(SIGTTOU
)|sigmask(SIGTTIN
)|sigmask(SIGTSTP
)), 0, 0};
/* Forward declarations of helpers defined later in this file. */
94 static int isjobstop(int);
95 static void post_sigcld(proc_t
*, sigqueue_t
*);
/*
 * Internal variable counting the number of user thread stop requests
 * posted.  It may not be accurate in some special situations, such as
 * when a virtually stopped thread starts to run.
 */
102 static int num_utstop
;
/*
 * Condition variable broadcast when all thread stop requests have
 * drained (see del_one_utstop()/utstop_timedwait()), and the mutex
 * protecting num_utstop and utstop_cv.
 */
107 static kcondvar_t utstop_cv
;
109 static kmutex_t thread_stop_lock
;
110 void del_one_utstop(void);
113 * Send the specified signal to the specified process.
116 psignal(proc_t
*p
, int sig
)
118 mutex_enter(&p
->p_lock
);
119 sigtoproc(p
, NULL
, sig
);
120 mutex_exit(&p
->p_lock
);
124 * Send the specified signal to the specified thread.
127 tsignal(kthread_t
*t
, int sig
)
129 proc_t
*p
= ttoproc(t
);
131 mutex_enter(&p
->p_lock
);
132 sigtoproc(p
, t
, sig
);
133 mutex_exit(&p
->p_lock
);
137 signal_is_blocked(kthread_t
*t
, int sig
)
139 return (sigismember(&t
->t_hold
, sig
) ||
140 (schedctl_sigblock(t
) && !sigismember(&cantmask
, sig
)));
144 * Return true if the signal can safely be discarded on generation.
145 * That is, if there is no need for the signal on the receiving end.
146 * The answer is true if the process is a zombie or
147 * if all of these conditions are true:
148 * the signal is being ignored
149 * the process is single-threaded
150 * the signal is not being traced by /proc
151 * the signal is not blocked by the process
152 * the signal is not being accepted via sigwait()
155 sig_discardable(proc_t
*p
, int sig
)
157 kthread_t
*t
= p
->p_tlist
;
159 return (t
== NULL
|| /* if zombie or ... */
160 (sigismember(&p
->p_ignore
, sig
) && /* signal is ignored */
161 t
->t_forw
== t
&& /* and single-threaded */
162 !tracing(p
, sig
) && /* and no /proc tracing */
163 !signal_is_blocked(t
, sig
) && /* and signal not blocked */
164 !sigismember(&t
->t_sigwait
, sig
))); /* and not being accepted */
168 * Return true if this thread is going to eat this signal soon.
169 * Note that, if the signal is SIGKILL, we force stopped threads to be
170 * set running (to make SIGKILL be a sure kill), but only if the process
171 * is not currently locked by /proc (the P_PR_LOCK flag). Code in /proc
172 * relies on the fact that a process will not change shape while P_PR_LOCK
173 * is set (it drops and reacquires p->p_lock while leaving P_PR_LOCK set).
174 * We wish that we could simply call prbarrier() below, in sigtoproc(), to
175 * ensure that the process is not locked by /proc, but prbarrier() drops
176 * and reacquires p->p_lock and dropping p->p_lock here would be damaging.
/*
 * eat_signal(): decide whether thread t will field signal sig soon and
 * nudge it appropriately (see the comment block above this function).
 * Called with t's thread lock held (see the ASSERT below).
 *
 * NOTE(review): this extraction is missing several interior lines of
 * the function (the wakeup/setrun calls, the result variable, and the
 * return); only the surviving lines are annotated below, verbatim.
 */
179 eat_signal(kthread_t
*t
, int sig
)
182 ASSERT(THREAD_LOCK_HELD(t
));
/*
 * Do not do anything if the target thread has the signal blocked.
 */
187 if (!signal_is_blocked(t
, sig
)) {
188 t
->t_sig_check
= 1; /* have thread do an issig */
189 if (ISWAKEABLE(t
) || ISWAITING(t
)) {
/* stopped thread + SIGKILL and not /proc-locked: force it runnable */
192 } else if (t
->t_state
== TS_STOPPED
&& sig
== SIGKILL
&&
193 !(ttoproc(t
)->p_proc_flag
& P_PR_LOCK
)) {
194 ttoproc(t
)->p_stopsig
= 0;
195 t
->t_dtrace_stop
= 0;
196 t
->t_schedflag
|= TS_XSTART
| TS_PSTART
;
/* thread on another CPU: poke it so it notices t_sig_check */
198 } else if (t
!= curthread
&& t
->t_state
== TS_ONPROC
) {
199 aston(t
); /* make it do issig promptly */
201 poke_cpu(t
->t_cpu
->cpu_id
);
203 } else if (t
->t_state
== TS_RUN
) {
213 * If a non-null thread pointer is passed, then post the signal
214 * to the thread/lwp, otherwise post the signal to the process.
217 sigtoproc(proc_t
*p
, kthread_t
*t
, int sig
)
220 int ext
= !(curproc
->p_flag
& SSYS
) &&
221 (curproc
->p_ct_process
!= p
->p_ct_process
);
223 ASSERT(MUTEX_HELD(&p
->p_lock
));
225 /* System processes don't get signals */
226 if (sig
<= 0 || sig
>= NSIG
|| (p
->p_flag
& SSYS
))
230 * Regardless of origin or directedness,
231 * SIGKILL kills all lwps in the process immediately
232 * and jobcontrol signals affect all lwps in the process.
234 if (sig
== SIGKILL
) {
235 p
->p_flag
|= SKILLED
| (ext
? SEXTKILLED
: 0);
237 } else if (sig
== SIGCONT
) {
239 * The SSCONT flag will remain set until a stopping
240 * signal comes in (below). This is harmless.
243 sigdelq(p
, NULL
, SIGSTOP
);
244 sigdelq(p
, NULL
, SIGTSTP
);
245 sigdelq(p
, NULL
, SIGTTOU
);
246 sigdelq(p
, NULL
, SIGTTIN
);
247 sigdiffset(&p
->p_sig
, &stopdefault
);
248 sigdiffset(&p
->p_extsig
, &stopdefault
);
250 if ((tt
= p
->p_tlist
) != NULL
) {
252 sigdelq(p
, tt
, SIGSTOP
);
253 sigdelq(p
, tt
, SIGTSTP
);
254 sigdelq(p
, tt
, SIGTTOU
);
255 sigdelq(p
, tt
, SIGTTIN
);
256 sigdiffset(&tt
->t_sig
, &stopdefault
);
257 sigdiffset(&tt
->t_extsig
, &stopdefault
);
258 } while ((tt
= tt
->t_forw
) != p
->p_tlist
);
260 if ((tt
= p
->p_tlist
) != NULL
) {
263 if (tt
->t_state
== TS_STOPPED
&&
264 tt
->t_whystop
== PR_JOBCONTROL
) {
265 tt
->t_schedflag
|= TS_XSTART
;
269 } while ((tt
= tt
->t_forw
) != p
->p_tlist
);
271 } else if (sigismember(&stopdefault
, sig
)) {
273 * This test has a race condition which we can't fix:
274 * By the time the stopping signal is received by
275 * the target process/thread, the signal handler
276 * and/or the detached state might have changed.
278 if (PTOU(p
)->u_signal
[sig
-1] == SIG_DFL
&&
279 (sig
== SIGSTOP
|| !p
->p_pgidp
->pid_pgorphaned
))
280 p
->p_flag
&= ~SSCONT
;
281 sigdelq(p
, NULL
, SIGCONT
);
282 sigdelset(&p
->p_sig
, SIGCONT
);
283 sigdelset(&p
->p_extsig
, SIGCONT
);
284 if ((tt
= p
->p_tlist
) != NULL
) {
286 sigdelq(p
, tt
, SIGCONT
);
287 sigdelset(&tt
->t_sig
, SIGCONT
);
288 sigdelset(&tt
->t_extsig
, SIGCONT
);
289 } while ((tt
= tt
->t_forw
) != p
->p_tlist
);
293 if (sig_discardable(p
, sig
)) {
294 DTRACE_PROC3(signal__discard
, kthread_t
*, p
->p_tlist
,
295 proc_t
*, p
, int, sig
);
301 * This is a directed signal, wake up the lwp.
303 sigaddset(&t
->t_sig
, sig
);
305 sigaddset(&t
->t_extsig
, sig
);
307 (void) eat_signal(t
, sig
);
309 DTRACE_PROC2(signal__send
, kthread_t
*, t
, int, sig
);
310 } else if ((tt
= p
->p_tlist
) != NULL
) {
312 * Make sure that some lwp that already exists
313 * in the process fields the signal soon.
314 * Wake up an interruptibly sleeping lwp if necessary.
315 * For SIGKILL make all of the lwps see the signal;
316 * This is needed to guarantee a sure kill for processes
317 * with a mix of realtime and non-realtime threads.
321 sigaddset(&p
->p_sig
, sig
);
323 sigaddset(&p
->p_extsig
, sig
);
326 if (eat_signal(tt
, sig
) && sig
!= SIGKILL
) {
333 } while ((tt
= tt
->t_forw
) != p
->p_tlist
);
335 * If the process is deadlocked, make somebody run and die.
337 if (sig
== SIGKILL
&& p
->p_stat
!= SIDL
&&
338 p
->p_lwprcnt
== 0 && p
->p_lwpcnt
== su
&&
339 !(p
->p_proc_flag
& P_PR_LOCK
)) {
342 tt
->t_schedflag
|= TS_CSTART
;
347 DTRACE_PROC2(signal__send
, kthread_t
*, tt
, int, sig
);
354 proc_t
*p
= ttoproc(curthread
);
356 ASSERT(MUTEX_HELD(&p
->p_lock
));
358 if (PTOU(curproc
)->u_signal
[sig
-1] == SIG_DFL
&&
359 sigismember(&stopdefault
, sig
)) {
361 * If SIGCONT has been posted since we promoted this signal
362 * from pending to current, then don't do a jobcontrol stop.
364 if (!(p
->p_flag
& SSCONT
) &&
365 (sig
== SIGSTOP
|| !p
->p_pgidp
->pid_pgorphaned
) &&
366 curthread
!= p
->p_agenttp
) {
369 stop(PR_JOBCONTROL
, sig
);
370 mutex_exit(&p
->p_lock
);
371 sqp
= kmem_zalloc(sizeof (sigqueue_t
), KM_SLEEP
);
372 mutex_enter(&pidlock
);
374 * Only the first lwp to continue notifies the parent.
376 if (p
->p_pidflag
& CLDCONT
)
379 p
->p_pidflag
|= CLDCONT
;
380 p
->p_wcode
= CLD_CONTINUED
;
381 p
->p_wdata
= SIGCONT
;
384 mutex_exit(&pidlock
);
385 mutex_enter(&p
->p_lock
);
393 * Returns true if the current process has a signal to process, and
394 * the signal is not held. The signal to process is put in p_cursig.
395 * This is asked at least once each time a process enters the system
396 * (though this can usually be done without actually calling issig by
397 * checking the pending signal masks). A signal does not do anything
398 * directly to a process; it sets a flag that asks the process to do
399 * something to itself.
401 * The "why" argument indicates the allowable side-effects of the call:
403 * FORREAL: Extract the next pending signal from p_sig into p_cursig;
404 * stop the process if a stop has been requested or if a traced signal
407 * JUSTLOOKING: Don't stop the process, just indicate whether or not
408 * a signal might be pending (FORREAL is needed to tell for sure).
410 * XXX: Changes to the logic in these routines should be propagated
411 * to lm_sigispending(). See bug 1201594.
414 static int issig_forreal(void);
415 static int issig_justlooking(void);
420 ASSERT(why
== FORREAL
|| why
== JUSTLOOKING
);
422 return ((why
== FORREAL
)? issig_forreal() : issig_justlooking());
427 issig_justlooking(void)
429 kthread_t
*t
= curthread
;
430 klwp_t
*lwp
= ttolwp(t
);
431 proc_t
*p
= ttoproc(t
);
435 * This function answers the question:
436 * "Is there any reason to call issig_forreal()?"
438 * We have to answer the question w/o grabbing any locks
439 * because we are (most likely) being called after we
440 * put ourselves on the sleep queue.
443 if (t
->t_dtrace_stop
| t
->t_dtrace_sig
)
447 * Another piece of complexity in this process. When single-stepping a
448 * process, we don't want an intervening signal or TP_PAUSE request to
449 * suspend the current thread. Otherwise, the controlling process will
450 * hang because we will be stopped with TS_PSTART set in t_schedflag.
451 * We will trigger any remaining signals when we re-enter the kernel on
452 * the single step trap.
454 if (lwp
->lwp_pcb
.pcb_flags
& NORMAL_STEP
)
457 if ((lwp
->lwp_asleep
&& MUSTRETURN(p
, t
)) ||
458 (p
->p_flag
& (SEXITLWPS
|SKILLED
)) ||
459 (lwp
->lwp_nostop
== 0 &&
460 (p
->p_stopsig
| (p
->p_flag
& (SHOLDFORK1
|SHOLDWATCH
)) |
462 (TP_PRSTOP
|TP_HOLDLWP
|TP_CHKPT
|TP_PAUSE
)))) ||
466 if (p
->p_flag
& SVFWAIT
)
469 sigorset(&set
, &t
->t_sig
);
470 if (schedctl_sigblock(t
)) /* all blockable signals blocked */
471 sigandset(&set
, &cantmask
);
473 sigdiffset(&set
, &t
->t_hold
);
474 if (p
->p_flag
& SVFORK
)
475 sigdiffset(&set
, &holdvfork
);
477 if (!sigisempty(&set
)) {
480 for (sig
= 1; sig
< NSIG
; sig
++) {
481 if (sigismember(&set
, sig
) &&
483 sigismember(&t
->t_sigwait
, sig
) ||
484 !sigismember(&p
->p_ignore
, sig
))) {
486 * Don't promote a signal that will stop
487 * the process when lwp_nostop is set.
489 if (!lwp
->lwp_nostop
||
490 PTOU(p
)->u_signal
[sig
-1] != SIG_DFL
||
491 !sigismember(&stopdefault
, sig
))
503 int sig
= 0, ext
= 0;
504 kthread_t
*t
= curthread
;
505 klwp_t
*lwp
= ttolwp(t
);
506 proc_t
*p
= ttoproc(t
);
508 int sigcld_found
= 0;
509 int nostop_break
= 0;
511 ASSERT(t
->t_state
== TS_ONPROC
);
513 mutex_enter(&p
->p_lock
);
514 schedctl_finish_sigblock(t
);
516 if (t
->t_dtrace_stop
| t
->t_dtrace_sig
) {
517 if (t
->t_dtrace_stop
) {
519 * If DTrace's "stop" action has been invoked on us,
522 t
->t_proc_flag
|= TP_PRSTOP
;
525 if (t
->t_dtrace_sig
!= 0) {
529 * Post the signal generated as the result of
530 * DTrace's "raise" action as a normal signal before
531 * the full-fledged signal checking begins.
533 bzero(&info
, sizeof (info
));
534 info
.si_signo
= t
->t_dtrace_sig
;
535 info
.si_code
= SI_DTRACE
;
537 sigaddq(p
, NULL
, &info
, KM_NOSLEEP
);
544 if (p
->p_flag
& (SEXITLWPS
|SKILLED
)) {
545 lwp
->lwp_cursig
= sig
= SIGKILL
;
546 lwp
->lwp_extsig
= ext
= (p
->p_flag
& SEXTKILLED
) != 0;
552 * Another piece of complexity in this process. When
553 * single-stepping a process, we don't want an intervening
554 * signal or TP_PAUSE request to suspend the current thread.
555 * Otherwise, the controlling process will hang because we will
556 * be stopped with TS_PSTART set in t_schedflag. We will
557 * trigger any remaining signals when we re-enter the kernel on
558 * the single step trap.
560 if (lwp
->lwp_pcb
.pcb_flags
& NORMAL_STEP
) {
566 * Hold the lwp here for watchpoint manipulation.
568 if ((t
->t_proc_flag
& TP_PAUSE
) && !lwp
->lwp_nostop
) {
569 stop(PR_SUSPENDED
, SUSPEND_PAUSE
);
573 if (lwp
->lwp_asleep
&& MUSTRETURN(p
, t
)) {
574 if ((sig
= lwp
->lwp_cursig
) != 0) {
576 * Make sure we call ISSIG() in post_syscall()
577 * to re-validate this current signal.
585 * If the request is PR_CHECKPOINT, ignore the rest of signals
586 * or requests. Honor other stop requests or signals later.
587 * Go back to top of loop here to check if an exit or hold
588 * event has occurred while stopped.
590 if ((t
->t_proc_flag
& TP_CHKPT
) && !lwp
->lwp_nostop
) {
591 stop(PR_CHECKPOINT
, 0);
596 * Honor SHOLDFORK1, SHOLDWATCH, and TP_HOLDLWP before dealing
597 * with signals or /proc. Another lwp is executing fork1(),
598 * or is undergoing watchpoint activity (remapping a page),
599 * or is executing lwp_suspend() on this lwp.
600 * Again, go back to top of loop to check if an exit
601 * or hold event has occurred while stopped.
603 if (((p
->p_flag
& (SHOLDFORK1
|SHOLDWATCH
)) ||
604 (t
->t_proc_flag
& TP_HOLDLWP
)) && !lwp
->lwp_nostop
) {
605 stop(PR_SUSPENDED
, SUSPEND_NORMAL
);
610 * Honor requested stop before dealing with the
611 * current signal; a debugger may change it.
612 * Do not want to go back to loop here since this is a special
613 * stop that means: make incremental progress before the next
614 * stop. The danger is that returning to top of loop would most
615 * likely drop the thread right back here to stop soon after it
616 * was continued, violating the incremental progress request.
618 if ((t
->t_proc_flag
& TP_PRSTOP
) && !lwp
->lwp_nostop
)
619 stop(PR_REQUESTED
, 0);
622 * If a debugger wants us to take a signal it will have
623 * left it in lwp->lwp_cursig. If lwp_cursig has been cleared
624 * or if it's being ignored, we continue on looking for another
625 * signal. Otherwise we return the specified signal, provided
626 * it's not a signal that causes a job control stop.
628 * When stopped on PR_JOBCONTROL, there is no current
629 * signal; we cancel lwp->lwp_cursig temporarily before
630 * calling isjobstop(). The current signal may be reset
631 * by a debugger while we are stopped in isjobstop().
633 * If the current thread is accepting the signal
634 * (via sigwait(), sigwaitinfo(), or sigtimedwait()),
635 * we allow the signal to be accepted, even if it is
636 * being ignored, and without causing a job control stop.
638 if ((sig
= lwp
->lwp_cursig
) != 0) {
639 ext
= lwp
->lwp_extsig
;
642 if (sigismember(&t
->t_sigwait
, sig
) ||
643 (!sigismember(&p
->p_ignore
, sig
) &&
645 if (p
->p_flag
& (SEXITLWPS
|SKILLED
)) {
647 ext
= (p
->p_flag
& SEXTKILLED
) != 0;
649 lwp
->lwp_cursig
= (uchar_t
)sig
;
650 lwp
->lwp_extsig
= (uchar_t
)ext
;
654 * The signal is being ignored or it caused a
655 * job-control stop. If another current signal
656 * has not been established, return the current
657 * siginfo, if any, to the memory manager.
659 if (lwp
->lwp_cursig
== 0 && lwp
->lwp_curinfo
!= NULL
) {
660 siginfofree(lwp
->lwp_curinfo
);
661 lwp
->lwp_curinfo
= NULL
;
664 * Loop around again in case we were stopped
665 * on a job control signal and a /proc stop
666 * request was posted or another current signal
667 * was established while we were stopped.
672 if (p
->p_stopsig
&& !lwp
->lwp_nostop
&&
673 curthread
!= p
->p_agenttp
) {
675 * Some lwp in the process has already stopped
676 * showing PR_JOBCONTROL. This is a stop in
677 * sympathy with the other lwp, even if this
678 * lwp is blocking the stopping signal.
680 stop(PR_JOBCONTROL
, p
->p_stopsig
);
685 * Loop on the pending signals until we find a
686 * non-held signal that is traced or not ignored.
687 * First check the signals pending for the lwp,
688 * then the signals pending for the process as a whole.
691 if ((sig
= fsig(&t
->t_sig
, t
)) != 0) {
693 if (tracing(p
, sig
) ||
694 sigismember(&t
->t_sigwait
, sig
) ||
695 !sigismember(&p
->p_ignore
, sig
)) {
696 if (sigismember(&t
->t_extsig
, sig
))
700 sigdelset(&t
->t_sig
, sig
);
701 sigdelset(&t
->t_extsig
, sig
);
703 } else if ((sig
= fsig(&p
->p_sig
, t
)) != 0) {
707 if (tracing(p
, sig
) ||
708 sigismember(&t
->t_sigwait
, sig
) ||
709 !sigismember(&p
->p_ignore
, sig
)) {
710 if (sigismember(&p
->p_extsig
, sig
))
714 sigdelset(&p
->p_sig
, sig
);
715 sigdelset(&p
->p_extsig
, sig
);
716 sigdelq(p
, NULL
, sig
);
718 /* no signal was found */
723 if (sig
== 0) { /* no signal was found */
724 if (p
->p_flag
& (SEXITLWPS
|SKILLED
)) {
725 lwp
->lwp_cursig
= SIGKILL
;
727 ext
= (p
->p_flag
& SEXTKILLED
) != 0;
733 * If we have been informed not to stop (i.e., we are being
734 * called from within a network operation), then don't promote
735 * the signal at this time, just return the signal number.
736 * We will call issig() again later when it is safe.
738 * fsig() does not return a jobcontrol stopping signal
739 * with a default action of stopping the process if
740 * lwp_nostop is set, so we won't be causing a bogus
741 * EINTR by this action. (Such a signal is eaten by
742 * isjobstop() when we loop around to do final checks.)
744 if (lwp
->lwp_nostop
) {
750 * Promote the signal from pending to current.
752 * Note that sigdeq() will set lwp->lwp_curinfo to NULL
753 * if no siginfo_t exists for this signal.
755 lwp
->lwp_cursig
= (uchar_t
)sig
;
756 lwp
->lwp_extsig
= (uchar_t
)ext
;
757 t
->t_sig_check
= 1; /* so post_syscall will see signal */
758 ASSERT(lwp
->lwp_curinfo
== NULL
);
759 sigdeq(p
, toproc
? NULL
: t
, sig
, &lwp
->lwp_curinfo
);
762 stop(PR_SIGNALLED
, sig
);
765 * Loop around to check for requested stop before
766 * performing the usual current-signal actions.
770 mutex_exit(&p
->p_lock
);
773 * If SIGCLD was dequeued from the process's signal queue,
774 * search for other pending SIGCLD's from the list of children.
780 (void) undo_watch_step(NULL
);
783 * If we have been blocked since the p_lock was dropped off
784 * above, then this promoted signal might have been handled
785 * already when we were on the way back from sleep queue, so
787 * If we have been informed not to stop, just return the signal
788 * number. Also see comments above.
791 sig
= lwp
->lwp_cursig
;
798 * Return true if the process is currently stopped showing PR_JOBCONTROL.
799 * This is true only if all of the process's lwp's are so stopped.
800 * If this is asked by one of the lwps in the process, exclude that lwp.
/*
 * jobstopped(): return true iff the whole process is stopped showing
 * PR_JOBCONTROL (see the comment block above this function).  The
 * caller (possibly one of p's own lwps) is excluded from the test.
 * Caller must hold p->p_lock.
 *
 * NOTE(review): the extraction dropped interior lines (the do-loop
 * header and the return statements); surviving lines kept verbatim.
 */
803 jobstopped(proc_t
*p
)
807 ASSERT(MUTEX_HELD(&p
->p_lock
));
/* no lwp list at all: cannot be jobstopped */
809 if ((t
= p
->p_tlist
) == NULL
)
/* ignore current, zombie and suspended lwps in the test */
815 if (!(t
== curthread
|| t
->t_state
== TS_ZOMB
||
817 (t
->t_state
!= TS_STOPPED
||
818 t
->t_whystop
!= PR_JOBCONTROL
)) {
823 } while ((t
= t
->t_forw
) != p
->p_tlist
);
829 * Put ourself (curthread) into the stopped state and notify tracers.
832 stop(int why
, int what
)
834 kthread_t
*t
= curthread
;
835 proc_t
*p
= ttoproc(t
);
836 klwp_t
*lwp
= ttolwp(t
);
840 int flags
= TS_ALLSTART
;
844 * Can't stop a system process.
846 if (p
== NULL
|| lwp
== NULL
|| (p
->p_flag
& SSYS
) || p
->p_as
== &kas
)
849 ASSERT(MUTEX_HELD(&p
->p_lock
));
851 if (why
!= PR_SUSPENDED
&& why
!= PR_CHECKPOINT
) {
853 * Don't stop an lwp with SIGKILL pending.
854 * Don't stop if the process or lwp is exiting.
856 if (lwp
->lwp_cursig
== SIGKILL
||
857 sigismember(&t
->t_sig
, SIGKILL
) ||
858 sigismember(&p
->p_sig
, SIGKILL
) ||
859 (t
->t_proc_flag
& TP_LWPEXIT
) ||
860 (p
->p_flag
& (SEXITLWPS
|SKILLED
))) {
862 t
->t_proc_flag
&= ~(TP_PRSTOP
|TP_PRVSTOP
);
868 * Make sure we don't deadlock on a recursive call to prstop().
869 * prstop() sets the lwp_nostop flag.
875 * Make sure the lwp is in an orderly state for inspection
876 * by a debugger through /proc or for dumping via core().
878 schedctl_finish_sigblock(t
);
879 t
->t_proc_flag
|= TP_STOPPING
; /* must set before dropping p_lock */
880 mutex_exit(&p
->p_lock
);
881 stoptime
= gethrtime();
883 (void) undo_watch_step(NULL
);
884 mutex_enter(&p
->p_lock
);
885 ASSERT(t
->t_state
== TS_ONPROC
);
890 * The situation may have changed since we dropped
891 * and reacquired p->p_lock. Double-check now
892 * whether we should stop or not.
894 if (!(t
->t_proc_flag
& TP_CHKPT
)) {
895 t
->t_proc_flag
&= ~TP_STOPPING
;
898 t
->t_proc_flag
&= ~TP_CHKPT
;
903 ASSERT(what
== SIGSTOP
|| what
== SIGTSTP
||
904 what
== SIGTTIN
|| what
== SIGTTOU
);
909 ASSERT(what
== SUSPEND_NORMAL
|| what
== SUSPEND_PAUSE
);
911 * The situation may have changed since we dropped
912 * and reacquired p->p_lock. Double-check now
913 * whether we should stop or not.
915 if (what
== SUSPEND_PAUSE
) {
916 if (!(t
->t_proc_flag
& TP_PAUSE
)) {
917 t
->t_proc_flag
&= ~TP_STOPPING
;
920 flags
&= ~TS_UNPAUSE
;
922 if (!((t
->t_proc_flag
& TP_HOLDLWP
) ||
923 (p
->p_flag
& (SHOLDFORK
|SHOLDFORK1
|SHOLDWATCH
)))) {
924 t
->t_proc_flag
&= ~TP_STOPPING
;
928 * If SHOLDFORK is in effect and we are stopping
929 * while asleep (not at the top of the stack),
930 * we return now to allow the hold to take effect
931 * when we reach the top of the kernel stack.
933 if (lwp
->lwp_asleep
&& (p
->p_flag
& SHOLDFORK
)) {
934 t
->t_proc_flag
&= ~TP_STOPPING
;
941 default: /* /proc stop */
944 * Do synchronous stop unless the async-stop flag is set.
945 * If why is PR_REQUESTED and t->t_dtrace_stop flag is set,
946 * then no debugger is present and we also do synchronous stop.
948 if ((why
!= PR_REQUESTED
|| t
->t_dtrace_stop
) &&
949 !(p
->p_proc_flag
& P_PR_ASYNC
)) {
952 for (tx
= t
->t_forw
; tx
!= t
; tx
= tx
->t_forw
) {
956 (tx
->t_proc_flag
& TP_PRSTOP
)) {
960 tx
->t_proc_flag
|= TP_PRSTOP
;
962 if (tx
->t_state
== TS_SLEEP
&&
963 (tx
->t_flag
& T_WAKEABLE
)) {
965 * Don't actually wake it up if it's
966 * in one of the lwp_*() syscalls.
967 * Mark it virtually stopped and
968 * notify /proc waiters (below).
970 if (tx
->t_wchan0
== NULL
)
973 tx
->t_proc_flag
|= TP_PRVSTOP
;
974 tx
->t_stoptime
= stoptime
;
979 /* Move waiting thread to run queue */
984 * force the thread into the kernel
985 * if it is not already there.
987 if (tx
->t_state
== TS_ONPROC
&&
989 poke_cpu(tx
->t_cpu
->cpu_id
);
991 lep
= p
->p_lwpdir
[tx
->t_dslot
].ld_entry
;
992 if (notify
&& lep
->le_trace
)
993 prnotify(lep
->le_trace
);
996 * We do this just in case one of the threads we asked
997 * to stop is in holdlwps() (called from cfork()) or
1000 cv_broadcast(&p
->p_holdlwps
);
1005 t
->t_stoptime
= stoptime
;
1007 if (why
== PR_JOBCONTROL
|| (why
== PR_SUSPENDED
&& p
->p_stopsig
)) {
1009 * Determine if the whole process is jobstopped.
1011 if (jobstopped(p
)) {
1015 if ((sig
= p
->p_stopsig
) == 0)
1016 p
->p_stopsig
= (uchar_t
)(sig
= what
);
1017 mutex_exit(&p
->p_lock
);
1018 sqp
= kmem_zalloc(sizeof (sigqueue_t
), KM_SLEEP
);
1019 mutex_enter(&pidlock
);
1021 * The last lwp to stop notifies the parent.
1022 * Turn off the CLDCONT flag now so the first
1023 * lwp to continue knows what to do.
1025 p
->p_pidflag
&= ~CLDCONT
;
1026 p
->p_wcode
= CLD_STOPPED
;
1030 * Grab p->p_lock before releasing pidlock so the
1031 * parent and the child don't have a race condition.
1033 mutex_enter(&p
->p_lock
);
1034 mutex_exit(&pidlock
);
1036 } else if (why
== PR_JOBCONTROL
&& p
->p_stopsig
== 0) {
1038 * Set p->p_stopsig and wake up sleeping lwps
1039 * so they will stop in sympathy with this lwp.
1041 p
->p_stopsig
= (uchar_t
)what
;
1044 * We do this just in case one of the threads we asked
1045 * to stop is in holdlwps() (called from cfork()) or
1048 cv_broadcast(&p
->p_holdlwps
);
1052 if (why
!= PR_JOBCONTROL
&& why
!= PR_CHECKPOINT
) {
1054 * Do process-level notification when all lwps are
1055 * either stopped on events of interest to /proc
1056 * or are stopped showing PR_SUSPENDED or are zombies.
1059 for (tx
= t
->t_forw
; procstop
&& tx
!= t
; tx
= tx
->t_forw
) {
1063 switch (tx
->t_state
) {
1067 /* neither ISTOPPED nor SUSPENDED? */
1068 if ((tx
->t_schedflag
&
1069 (TS_CSTART
| TS_UNPAUSE
| TS_PSTART
)) ==
1070 (TS_CSTART
| TS_UNPAUSE
| TS_PSTART
))
1074 /* not paused for watchpoints? */
1075 if (!(tx
->t_flag
& T_WAKEABLE
) ||
1076 tx
->t_wchan0
== NULL
||
1077 !(tx
->t_proc_flag
& TP_PAUSE
))
1087 /* there must not be any remapped watched pages now */
1088 ASSERT(p
->p_mapcnt
== 0);
1089 if (p
->p_proc_flag
& P_PR_PTRACE
) {
1090 /* ptrace() compatibility */
1091 mutex_exit(&p
->p_lock
);
1092 mutex_enter(&pidlock
);
1093 p
->p_wcode
= CLD_TRAPPED
;
1094 p
->p_wdata
= (why
== PR_SIGNALLED
)?
1096 cv_broadcast(&p
->p_parent
->p_cv
);
1098 * Grab p->p_lock before releasing pidlock so
1099 * parent and child don't have a race condition.
1101 mutex_enter(&p
->p_lock
);
1102 mutex_exit(&pidlock
);
1104 if (p
->p_trace
) /* /proc */
1105 prnotify(p
->p_trace
);
1106 cv_broadcast(&pr_pid_cv
[p
->p_slot
]); /* pauselwps() */
1107 cv_broadcast(&p
->p_holdlwps
); /* holdwatch() */
1109 if (why
!= PR_SUSPENDED
) {
1110 lep
= p
->p_lwpdir
[t
->t_dslot
].ld_entry
;
1111 if (lep
->le_trace
) /* /proc */
1112 prnotify(lep
->le_trace
);
1114 * Special notification for creation of the agent lwp.
1116 if (t
== p
->p_agenttp
&&
1117 (t
->t_proc_flag
& TP_PRSTOP
) &&
1119 prnotify(p
->p_trace
);
1121 * The situation may have changed since we dropped
1122 * and reacquired p->p_lock. Double-check now
1123 * whether we should stop or not.
1125 if (!(t
->t_proc_flag
& TP_STOPPING
)) {
1126 if (t
->t_proc_flag
& TP_PRSTOP
)
1127 t
->t_proc_flag
|= TP_STOPPING
;
1129 t
->t_proc_flag
&= ~(TP_PRSTOP
|TP_PRVSTOP
);
1134 if (why
== PR_SUSPENDED
) {
1137 * We always broadcast in the case of SUSPEND_PAUSE. This is
1138 * because checks for TP_PAUSE take precedence over checks for
1139 * SHOLDWATCH. If a thread is trying to stop because of
1140 * SUSPEND_PAUSE and tries to do a holdwatch(), it will be
1141 * waiting for the rest of the threads to enter a stopped state.
1142 * If we are stopping for a SUSPEND_PAUSE, we may be the last
1143 * lwp and not know it, so broadcast just in case.
1145 if (what
== SUSPEND_PAUSE
||
1146 --p
->p_lwprcnt
== 0 || (t
->t_proc_flag
& TP_HOLDLWP
))
1147 cv_broadcast(&p
->p_holdlwps
);
1152 * Need to do this here (rather than after the thread is officially
1153 * stopped) because we can't call mutex_enter from a stopped thread.
1155 if (why
== PR_CHECKPOINT
)
1159 ASSERT((t
->t_schedflag
& TS_ALLSTART
) == 0);
1160 t
->t_schedflag
|= flags
;
1161 t
->t_whystop
= (short)why
;
1162 t
->t_whatstop
= (short)what
;
1163 CL_STOP(t
, why
, what
);
1164 (void) new_mstate(t
, LMS_STOPPED
);
1165 thread_stop(t
); /* set stop state and drop lock */
1167 if (why
!= PR_SUSPENDED
&& why
!= PR_CHECKPOINT
) {
1169 * We may have gotten a SIGKILL or a SIGCONT when
1170 * we released p->p_lock; make one last check.
1171 * Also check for a /proc run-on-last-close.
1173 if (sigismember(&t
->t_sig
, SIGKILL
) ||
1174 sigismember(&p
->p_sig
, SIGKILL
) ||
1175 (t
->t_proc_flag
& TP_LWPEXIT
) ||
1176 (p
->p_flag
& (SEXITLWPS
|SKILLED
))) {
1179 t
->t_schedflag
|= TS_XSTART
| TS_PSTART
;
1181 thread_unlock_nopreempt(t
);
1182 } else if (why
== PR_JOBCONTROL
) {
1183 if (p
->p_flag
& SSCONT
) {
1185 * This resulted from a SIGCONT posted
1186 * while we were not holding p->p_lock.
1190 t
->t_schedflag
|= TS_XSTART
;
1192 thread_unlock_nopreempt(t
);
1194 } else if (!(t
->t_proc_flag
& TP_STOPPING
)) {
1196 * This resulted from a /proc run-on-last-close.
1199 t
->t_schedflag
|= TS_PSTART
;
1201 thread_unlock_nopreempt(t
);
1205 t
->t_proc_flag
&= ~TP_STOPPING
;
1206 mutex_exit(&p
->p_lock
);
1209 setallwatch(); /* reestablish any watchpoints set while stopped */
1210 mutex_enter(&p
->p_lock
);
1211 prbarrier(p
); /* barrier against /proc locking */
1214 /* Interface for resetting user thread stop count. */
1218 mutex_enter(&thread_stop_lock
);
1220 mutex_exit(&thread_stop_lock
);
/*
 * Interface for registering a user thread stop request.
 * NOTE(review): the increment of num_utstop is missing from this
 * extraction; only the lock acquire/release remains visible.
 */
1225 add_one_utstop(void)
1227 mutex_enter(&thread_stop_lock
);
1229 mutex_exit(&thread_stop_lock
);
/*
 * Interface for cancelling a user thread stop request.  Wakes any
 * utstop_timedwait() waiters once the count reaches zero.
 * NOTE(review): the decrement of num_utstop is missing from this
 * extraction; only the zero-check/broadcast remains visible.
 */
1234 del_one_utstop(void)
1236 mutex_enter(&thread_stop_lock
);
1238 if (num_utstop
== 0)
1239 cv_broadcast(&utstop_cv
);
1240 mutex_exit(&thread_stop_lock
);
/*
 * Interface to wait (up to `ticks` clock ticks) for all user threads
 * to be stopped, sleeping on utstop_cv under thread_stop_lock.
 * NOTE(review): the surrounding wait-loop condition and the final
 * cv_reltimedwait() argument are missing from this extraction.
 */
1245 utstop_timedwait(clock_t ticks
)
1247 mutex_enter(&thread_stop_lock
);
1249 (void) cv_reltimedwait(&utstop_cv
, &thread_stop_lock
, ticks
,
1251 mutex_exit(&thread_stop_lock
);
1255 * Perform the action specified by the current signal.
1256 * The usual sequence is:
1259 * The signal bit has already been cleared by issig(),
1260 * the current signal number has been stored in lwp_cursig,
1261 * and the current siginfo is now referenced by lwp_curinfo.
1266 kthread_t
*t
= curthread
;
1267 proc_t
*p
= ttoproc(t
);
1268 klwp_t
*lwp
= ttolwp(t
);
1270 int sig
, rc
, code
, ext
;
1273 zoneid_t zoneid
= -1;
1274 sigqueue_t
*sqp
= NULL
;
1275 uint32_t auditing
= AU_AUDITING();
1277 mutex_enter(&p
->p_lock
);
1278 schedctl_finish_sigblock(t
);
1281 if (p
->p_flag
& SEXITLWPS
) {
1283 return; /* not reached */
1285 sig
= lwp
->lwp_cursig
;
1286 ext
= lwp
->lwp_extsig
;
1291 * Re-check lwp_cursig after we acquire p_lock. Since p_lock was
1292 * dropped between issig() and psig(), a debugger may have cleared
1293 * lwp_cursig via /proc in the intervening window.
1296 if (lwp
->lwp_curinfo
) {
1297 siginfofree(lwp
->lwp_curinfo
);
1298 lwp
->lwp_curinfo
= NULL
;
1300 if (t
->t_flag
& T_TOMASK
) { /* sigsuspend or pollsys */
1301 t
->t_flag
&= ~T_TOMASK
;
1302 t
->t_hold
= lwp
->lwp_sigoldmask
;
1304 mutex_exit(&p
->p_lock
);
1307 func
= PTOU(curproc
)->u_signal
[sig
-1];
1310 * The signal disposition could have changed since we promoted
1311 * this signal from pending to current (we dropped p->p_lock).
1312 * This can happen only in a multi-threaded process.
1314 if (sigismember(&p
->p_ignore
, sig
) ||
1315 (func
== SIG_DFL
&& sigismember(&stopdefault
, sig
))) {
1316 lwp
->lwp_cursig
= 0;
1317 lwp
->lwp_extsig
= 0;
1318 if (lwp
->lwp_curinfo
) {
1319 siginfofree(lwp
->lwp_curinfo
);
1320 lwp
->lwp_curinfo
= NULL
;
1322 if (t
->t_flag
& T_TOMASK
) { /* sigsuspend or pollsys */
1323 t
->t_flag
&= ~T_TOMASK
;
1324 t
->t_hold
= lwp
->lwp_sigoldmask
;
1326 mutex_exit(&p
->p_lock
);
1331 * We check lwp_curinfo first since pr_setsig can actually
1332 * stuff a sigqueue_t there for SIGKILL.
1334 if (lwp
->lwp_curinfo
) {
1335 sqp
= lwp
->lwp_curinfo
;
1336 } else if (sig
== SIGKILL
&& p
->p_killsqp
) {
1341 if (SI_FROMUSER(&sqp
->sq_info
)) {
1342 pid
= sqp
->sq_info
.si_pid
;
1343 ctid
= sqp
->sq_info
.si_ctid
;
1344 zoneid
= sqp
->sq_info
.si_zoneid
;
1347 * If we have a sigqueue_t, its sq_external value
1348 * trumps the lwp_extsig value. It is theoretically
1349 * possible to make lwp_extsig reflect reality, but it
1350 * would unnecessarily complicate things elsewhere.
1352 ext
= sqp
->sq_external
;
1355 if (func
== SIG_DFL
) {
1356 mutex_exit(&p
->p_lock
);
1357 DTRACE_PROC3(signal__handle
, int, sig
, k_siginfo_t
*,
1358 NULL
, void (*)(void), func
);
1360 k_siginfo_t
*sip
= NULL
;
1363 * If DTrace user-land tracing is active, give DTrace a
1364 * chance to defer the signal until after tracing is
1367 if (t
->t_dtrace_on
&& dtrace_safe_defer_signal()) {
1368 mutex_exit(&p
->p_lock
);
1373 * save siginfo pointer here, in case the
1374 * the signal's reset bit is on
1376 * The presence of a current signal prevents paging
1377 * from succeeding over a network. We copy the current
1378 * signal information to the side and cancel the current
1379 * signal so that sendsig() will succeed.
1381 if (sigismember(&p
->p_siginfo
, sig
)) {
1382 sip
= &lwp
->lwp_siginfo
;
1384 bcopy(&sqp
->sq_info
, sip
, sizeof (*sip
));
1386 * If we were interrupted out of a system call
1387 * due to pthread_cancel(), inform libc.
1389 if (sig
== SIGCANCEL
&&
1390 sip
->si_code
== SI_LWP
&&
1392 schedctl_cancel_eintr();
1393 } else if (sig
== SIGPROF
&& sip
->si_signo
== SIGPROF
&&
1394 t
->t_rprof
!= NULL
&& t
->t_rprof
->rp_anystate
) {
1397 bzero(sip
, sizeof (*sip
));
1398 sip
->si_signo
= sig
;
1399 sip
->si_code
= SI_NOINFO
;
1403 if (t
->t_flag
& T_TOMASK
)
1404 t
->t_flag
&= ~T_TOMASK
;
1406 lwp
->lwp_sigoldmask
= t
->t_hold
;
1407 sigorset(&t
->t_hold
, &PTOU(curproc
)->u_sigmask
[sig
-1]);
1408 if (!sigismember(&PTOU(curproc
)->u_signodefer
, sig
))
1409 sigaddset(&t
->t_hold
, sig
);
1410 if (sigismember(&PTOU(curproc
)->u_sigresethand
, sig
))
1411 setsigact(sig
, SIG_DFL
, &nullsmask
, 0);
1413 DTRACE_PROC3(signal__handle
, int, sig
, k_siginfo_t
*,
1414 sip
, void (*)(void), func
);
1416 lwp
->lwp_cursig
= 0;
1417 lwp
->lwp_extsig
= 0;
1418 if (lwp
->lwp_curinfo
) {
1419 /* p->p_killsqp is freed by freeproc */
1420 siginfofree(lwp
->lwp_curinfo
);
1421 lwp
->lwp_curinfo
= NULL
;
1423 mutex_exit(&p
->p_lock
);
1424 lwp
->lwp_ru
.nsignals
++;
1426 if (p
->p_model
== DATAMODEL_NATIVE
)
1427 rc
= sendsig(sig
, sip
, func
);
1428 #ifdef _SYSCALL32_IMPL
1430 rc
= sendsig32(sig
, sip
, func
);
1431 #endif /* _SYSCALL32_IMPL */
1434 sig
= lwp
->lwp_cursig
= SIGSEGV
;
1435 ext
= 0; /* lwp_extsig was set above */
1440 if (sigismember(&coredefault
, sig
)) {
1442 * Terminate all LWPs but don't discard them.
1443 * If another lwp beat us to the punch by calling exit(),
1447 if (exitlwps(1) != 0) {
1448 mutex_enter(&p
->p_lock
);
1451 /* if we got a SIGKILL from anywhere, no core dump */
1452 if (p
->p_flag
& SKILLED
) {
1454 ext
= (p
->p_flag
& SEXTKILLED
) != 0;
1456 if (auditing
) /* audit core dump */
1457 audit_core_start(sig
);
1458 if (core(sig
, ext
) == 0)
1460 if (auditing
) /* audit core dump */
1461 audit_core_finish(code
);
1466 * Generate a contract event once if the process is killed
1471 if (exitlwps(0) != 0) {
1472 mutex_enter(&p
->p_lock
);
1475 contract_process_sig(p
->p_ct_process
, p
, sig
, pid
, ctid
,
1483 * Find next unheld signal in ssp for thread t.
1486 fsig(k_sigset_t
*ssp
, kthread_t
*t
)
1488 proc_t
*p
= ttoproc(t
);
1489 user_t
*up
= PTOU(p
);
1493 ASSERT(MUTEX_HELD(&p
->p_lock
));
1496 * Don't promote any signals for the parent of a vfork()d
1497 * child that hasn't yet released the parent's memory.
1499 if (p
->p_flag
& SVFWAIT
)
1503 sigdiffset(&temp
, &t
->t_hold
);
1506 * Don't promote stopping signals (except SIGSTOP) for a child
1507 * of vfork() that hasn't yet released the parent's memory.
1509 if (p
->p_flag
& SVFORK
)
1510 sigdiffset(&temp
, &holdvfork
);
1513 * Don't promote a signal that will stop
1514 * the process when lwp_nostop is set.
1516 if (ttolwp(t
)->lwp_nostop
) {
1517 sigdelset(&temp
, SIGSTOP
);
1518 if (!p
->p_pgidp
->pid_pgorphaned
) {
1519 if (up
->u_signal
[SIGTSTP
-1] == SIG_DFL
)
1520 sigdelset(&temp
, SIGTSTP
);
1521 if (up
->u_signal
[SIGTTIN
-1] == SIG_DFL
)
1522 sigdelset(&temp
, SIGTTIN
);
1523 if (up
->u_signal
[SIGTTOU
-1] == SIG_DFL
)
1524 sigdelset(&temp
, SIGTTOU
);
1529 * Choose SIGKILL and SIGPROF before all other pending signals.
1530 * The rest are promoted in signal number order.
1532 if (sigismember(&temp
, SIGKILL
))
1534 if (sigismember(&temp
, SIGPROF
))
1537 for (i
= 0; i
< sizeof (temp
) / sizeof (temp
.__sigbits
[0]); i
++) {
1538 if (temp
.__sigbits
[i
])
1539 return ((i
* NBBY
* sizeof (temp
.__sigbits
[0])) +
1540 lowbit(temp
.__sigbits
[i
]));
1547 setsigact(int sig
, void (*disp
)(), const k_sigset_t
*mask
, int flags
)
1549 proc_t
*p
= ttoproc(curthread
);
1552 ASSERT(MUTEX_HELD(&p
->p_lock
));
1554 PTOU(curproc
)->u_signal
[sig
- 1] = disp
;
1557 * Honor the SA_SIGINFO flag if the signal is being caught.
1558 * Force the SA_SIGINFO flag if the signal is not being caught.
1559 * This is necessary to make sigqueue() and sigwaitinfo() work
1560 * properly together when the signal is set to default or is
1561 * being temporarily ignored.
1563 if ((flags
& SA_SIGINFO
) || disp
== SIG_DFL
|| disp
== SIG_IGN
)
1564 sigaddset(&p
->p_siginfo
, sig
);
1566 sigdelset(&p
->p_siginfo
, sig
);
1568 if (disp
!= SIG_DFL
&& disp
!= SIG_IGN
) {
1569 sigdelset(&p
->p_ignore
, sig
);
1570 PTOU(curproc
)->u_sigmask
[sig
- 1] = *mask
;
1571 if (!sigismember(&cantreset
, sig
)) {
1572 if (flags
& SA_RESETHAND
)
1573 sigaddset(&PTOU(curproc
)->u_sigresethand
, sig
);
1575 sigdelset(&PTOU(curproc
)->u_sigresethand
, sig
);
1577 if (flags
& SA_NODEFER
)
1578 sigaddset(&PTOU(curproc
)->u_signodefer
, sig
);
1580 sigdelset(&PTOU(curproc
)->u_signodefer
, sig
);
1581 if (flags
& SA_RESTART
)
1582 sigaddset(&PTOU(curproc
)->u_sigrestart
, sig
);
1584 sigdelset(&PTOU(curproc
)->u_sigrestart
, sig
);
1585 if (flags
& SA_ONSTACK
)
1586 sigaddset(&PTOU(curproc
)->u_sigonstack
, sig
);
1588 sigdelset(&PTOU(curproc
)->u_sigonstack
, sig
);
1589 } else if (disp
== SIG_IGN
||
1590 (disp
== SIG_DFL
&& sigismember(&ignoredefault
, sig
))) {
1592 * Setting the signal action to SIG_IGN results in the
1593 * discarding of all pending signals of that signal number.
1594 * Setting the signal action to SIG_DFL does the same *only*
1595 * if the signal's default behavior is to be ignored.
1597 sigaddset(&p
->p_ignore
, sig
);
1598 sigdelset(&p
->p_sig
, sig
);
1599 sigdelset(&p
->p_extsig
, sig
);
1600 sigdelq(p
, NULL
, sig
);
1603 sigdelset(&t
->t_sig
, sig
);
1604 sigdelset(&t
->t_extsig
, sig
);
1606 } while ((t
= t
->t_forw
) != p
->p_tlist
);
1609 * The signal action is being set to SIG_DFL and the default
1610 * behavior is to do something: make sure it is not ignored.
1612 sigdelset(&p
->p_ignore
, sig
);
1615 if (sig
== SIGCLD
) {
1616 if (flags
& SA_NOCLDWAIT
)
1617 p
->p_flag
|= SNOWAIT
;
1619 p
->p_flag
&= ~SNOWAIT
;
1621 if (flags
& SA_NOCLDSTOP
)
1622 p
->p_flag
&= ~SJCTL
;
1626 if ((p
->p_flag
& SNOWAIT
) || disp
== SIG_IGN
) {
1629 mutex_exit(&p
->p_lock
);
1630 mutex_enter(&pidlock
);
1631 for (cp
= p
->p_child
; cp
!= NULL
; cp
= tp
) {
1633 if (cp
->p_stat
== SZOMB
&&
1634 !(cp
->p_pidflag
& CLDWAITPID
))
1637 mutex_exit(&pidlock
);
1638 mutex_enter(&p
->p_lock
);
1644 * Set all signal actions not already set to SIG_DFL or SIG_IGN to SIG_DFL.
1645 * Called from exec_common() for a process undergoing execve()
1646 * and from cfork() for a newly-created child of vfork().
1647 * In the vfork() case, 'p' is not the current process.
1648 * In both cases, there is only one thread in the process.
1651 sigdefault(proc_t
*p
)
1653 kthread_t
*t
= p
->p_tlist
;
1654 struct user
*up
= PTOU(p
);
1657 ASSERT(MUTEX_HELD(&p
->p_lock
));
1659 for (sig
= 1; sig
< NSIG
; sig
++) {
1660 if (up
->u_signal
[sig
- 1] != SIG_DFL
&&
1661 up
->u_signal
[sig
- 1] != SIG_IGN
) {
1662 up
->u_signal
[sig
- 1] = SIG_DFL
;
1663 sigemptyset(&up
->u_sigmask
[sig
- 1]);
1664 if (sigismember(&ignoredefault
, sig
)) {
1665 sigdelq(p
, NULL
, sig
);
1669 p
->p_flag
&= ~(SNOWAIT
|SJCTL
);
1672 sigorset(&p
->p_ignore
, &ignoredefault
);
1673 sigfillset(&p
->p_siginfo
);
1674 sigdiffset(&p
->p_siginfo
, &cantmask
);
1675 sigdiffset(&p
->p_sig
, &ignoredefault
);
1676 sigdiffset(&p
->p_extsig
, &ignoredefault
);
1677 sigdiffset(&t
->t_sig
, &ignoredefault
);
1678 sigdiffset(&t
->t_extsig
, &ignoredefault
);
1682 sigcld(proc_t
*cp
, sigqueue_t
*sqp
)
1684 proc_t
*pp
= cp
->p_parent
;
1686 ASSERT(MUTEX_HELD(&pidlock
));
1688 switch (cp
->p_wcode
) {
1692 ASSERT(cp
->p_stat
== SZOMB
);
1694 * The broadcast on p_srwchan_cv is a kludge to
1695 * wakeup a possible thread in uadmin(A_SHUTDOWN).
1697 cv_broadcast(&cp
->p_srwchan_cv
);
1700 * Add to newstate list of the parent
1704 cv_broadcast(&pp
->p_cv
);
1705 if ((pp
->p_flag
& SNOWAIT
) ||
1706 PTOU(pp
)->u_signal
[SIGCLD
- 1] == SIG_IGN
) {
1707 if (!(cp
->p_pidflag
& CLDWAITPID
))
1709 } else if (!(cp
->p_pidflag
& CLDNOSIGCHLD
)) {
1710 post_sigcld(cp
, sqp
);
1717 cv_broadcast(&pp
->p_cv
);
1718 if (pp
->p_flag
& SJCTL
) {
1719 post_sigcld(cp
, sqp
);
1730 * Common code called from sigcld() and from
1731 * waitid() and issig_forreal() via sigcld_repost().
1732 * Give the parent process a SIGCLD if it does not have one pending,
1733 * else mark the child process so a SIGCLD can be posted later.
1736 post_sigcld(proc_t
*cp
, sigqueue_t
*sqp
)
1738 proc_t
*pp
= cp
->p_parent
;
1741 ASSERT(MUTEX_HELD(&pidlock
));
1742 mutex_enter(&pp
->p_lock
);
1745 * If a SIGCLD is pending, then just mark the child process
1746 * so that its SIGCLD will be posted later, when the first
1747 * SIGCLD is taken off the queue or when the parent is ready
1748 * to receive it or accept it, if ever.
1750 if (sigismember(&pp
->p_sig
, SIGCLD
)) {
1751 cp
->p_pidflag
|= CLDPEND
;
1753 cp
->p_pidflag
&= ~CLDPEND
;
1756 * This can only happen when the parent is init.
1757 * (See call to sigcld(q, NULL) in exit().)
1758 * Use KM_NOSLEEP to avoid deadlock.
1760 ASSERT(pp
== proc_init
);
1761 winfo(cp
, &info
, 0);
1762 sigaddq(pp
, NULL
, &info
, KM_NOSLEEP
);
1764 winfo(cp
, &sqp
->sq_info
, 0);
1765 sigaddqa(pp
, NULL
, sqp
);
1770 mutex_exit(&pp
->p_lock
);
1777 * Search for a child that has a pending SIGCLD for us, the parent.
1778 * The queue of SIGCLD signals is implied by the list of children.
1779 * We post the SIGCLD signals one at a time so they don't get lost.
1780 * When one is dequeued, another is enqueued, until there are no more.
1785 proc_t
*pp
= curproc
;
1789 sqp
= kmem_zalloc(sizeof (sigqueue_t
), KM_SLEEP
);
1790 mutex_enter(&pidlock
);
1791 for (cp
= pp
->p_child
; cp
; cp
= cp
->p_sibling
) {
1792 if (cp
->p_pidflag
& CLDPEND
) {
1793 post_sigcld(cp
, sqp
);
1794 mutex_exit(&pidlock
);
1798 mutex_exit(&pidlock
);
1799 kmem_free(sqp
, sizeof (sigqueue_t
));
1803 * count number of sigqueue send by sigaddqa()
1806 sigqsend(int cmd
, proc_t
*p
, kthread_t
*t
, sigqueue_t
*sigqp
)
1810 sqh
= (sigqhdr_t
*)sigqp
->sq_backptr
;
1813 mutex_enter(&sqh
->sqb_lock
);
1815 mutex_exit(&sqh
->sqb_lock
);
1818 sigaddqa(p
, t
, sigqp
);
1824 sigsendproc(proc_t
*p
, sigsend_t
*pv
)
1827 proc_t
*myprocp
= curproc
;
1829 ASSERT(MUTEX_HELD(&pidlock
));
1831 if (p
->p_pid
== 1 && pv
->sig
&& sigismember(&cantmask
, pv
->sig
))
1836 if (pv
->checkperm
== 0 ||
1837 (pv
->sig
== SIGCONT
&& p
->p_sessp
== myprocp
->p_sessp
) ||
1838 prochasprocperm(p
, myprocp
, cr
)) {
1841 /* Make sure we should be setting si_pid and friends */
1842 ASSERT(pv
->sicode
<= 0);
1843 if (SI_CANQUEUE(pv
->sicode
)) {
1846 mutex_enter(&myprocp
->p_lock
);
1847 sqp
= sigqalloc(myprocp
->p_sigqhdr
);
1848 mutex_exit(&myprocp
->p_lock
);
1851 sqp
->sq_info
.si_signo
= pv
->sig
;
1852 sqp
->sq_info
.si_code
= pv
->sicode
;
1853 sqp
->sq_info
.si_pid
= myprocp
->p_pid
;
1854 sqp
->sq_info
.si_ctid
= PRCTID(myprocp
);
1855 sqp
->sq_info
.si_zoneid
= getzoneid();
1856 sqp
->sq_info
.si_uid
= crgetruid(cr
);
1857 sqp
->sq_info
.si_value
= pv
->value
;
1858 mutex_enter(&p
->p_lock
);
1859 sigqsend(SN_SEND
, p
, NULL
, sqp
);
1860 mutex_exit(&p
->p_lock
);
1863 bzero(&info
, sizeof (info
));
1864 info
.si_signo
= pv
->sig
;
1865 info
.si_code
= pv
->sicode
;
1866 info
.si_pid
= myprocp
->p_pid
;
1867 info
.si_ctid
= PRCTID(myprocp
);
1868 info
.si_zoneid
= getzoneid();
1869 info
.si_uid
= crgetruid(cr
);
1870 mutex_enter(&p
->p_lock
);
1872 * XXX: Should be KM_SLEEP but
1873 * we have to avoid deadlock.
1875 sigaddq(p
, NULL
, &info
, KM_NOSLEEP
);
1876 mutex_exit(&p
->p_lock
);
1885 sigsendset(procset_t
*psp
, sigsend_t
*pv
)
1889 error
= dotoprocs(psp
, sigsendproc
, (char *)pv
);
1890 if (error
== 0 && pv
->perm
== 0)
1897 * Dequeue a queued siginfo structure.
1898 * If a non-null thread pointer is passed then dequeue from
1899 * the thread queue, otherwise dequeue from the process queue.
1902 sigdeq(proc_t
*p
, kthread_t
*t
, int sig
, sigqueue_t
**qpp
)
1904 sigqueue_t
**psqp
, *sqp
;
1906 ASSERT(MUTEX_HELD(&p
->p_lock
));
1911 sigdelset(&t
->t_sig
, sig
);
1912 sigdelset(&t
->t_extsig
, sig
);
1913 psqp
= &t
->t_sigqueue
;
1915 sigdelset(&p
->p_sig
, sig
);
1916 sigdelset(&p
->p_extsig
, sig
);
1917 psqp
= &p
->p_sigqueue
;
1921 if ((sqp
= *psqp
) == NULL
)
1923 if (sqp
->sq_info
.si_signo
== sig
)
1926 psqp
= &sqp
->sq_next
;
1929 *psqp
= sqp
->sq_next
;
1930 for (sqp
= *psqp
; sqp
; sqp
= sqp
->sq_next
) {
1931 if (sqp
->sq_info
.si_signo
== sig
) {
1932 if (t
!= (kthread_t
*)NULL
) {
1933 sigaddset(&t
->t_sig
, sig
);
1936 sigaddset(&p
->p_sig
, sig
);
1945 * Delete a queued SIGCLD siginfo structure matching the k_siginfo_t argument.
1948 sigcld_delete(k_siginfo_t
*ip
)
1950 proc_t
*p
= curproc
;
1951 int another_sigcld
= 0;
1952 sigqueue_t
**psqp
, *sqp
;
1954 ASSERT(ip
->si_signo
== SIGCLD
);
1956 mutex_enter(&p
->p_lock
);
1958 if (!sigismember(&p
->p_sig
, SIGCLD
)) {
1959 mutex_exit(&p
->p_lock
);
1963 psqp
= &p
->p_sigqueue
;
1965 if ((sqp
= *psqp
) == NULL
) {
1966 mutex_exit(&p
->p_lock
);
1969 if (sqp
->sq_info
.si_signo
== SIGCLD
) {
1970 if (sqp
->sq_info
.si_pid
== ip
->si_pid
&&
1971 sqp
->sq_info
.si_code
== ip
->si_code
&&
1972 sqp
->sq_info
.si_status
== ip
->si_status
)
1976 psqp
= &sqp
->sq_next
;
1978 *psqp
= sqp
->sq_next
;
1982 for (sqp
= *psqp
; !another_sigcld
&& sqp
; sqp
= sqp
->sq_next
) {
1983 if (sqp
->sq_info
.si_signo
== SIGCLD
)
1987 if (!another_sigcld
) {
1988 sigdelset(&p
->p_sig
, SIGCLD
);
1989 sigdelset(&p
->p_extsig
, SIGCLD
);
1992 mutex_exit(&p
->p_lock
);
1996 * Delete queued siginfo structures.
1997 * If a non-null thread pointer is passed then delete from
1998 * the thread queue, otherwise delete from the process queue.
2001 sigdelq(proc_t
*p
, kthread_t
*t
, int sig
)
2003 sigqueue_t
**psqp
, *sqp
;
2006 * We must be holding p->p_lock unless the process is
2007 * being reaped or has failed to get started on fork.
2009 ASSERT(MUTEX_HELD(&p
->p_lock
) ||
2010 p
->p_stat
== SIDL
|| p
->p_stat
== SZOMB
);
2012 if (t
!= (kthread_t
*)NULL
)
2013 psqp
= &t
->t_sigqueue
;
2015 psqp
= &p
->p_sigqueue
;
2019 if (sig
== 0 || sqp
->sq_info
.si_signo
== sig
) {
2020 *psqp
= sqp
->sq_next
;
2023 psqp
= &sqp
->sq_next
;
2028 * Insert a siginfo structure into a queue.
2029 * If a non-null thread pointer is passed then add to the thread queue,
2030 * otherwise add to the process queue.
2032 * The function sigaddqins() is called with sigqueue already allocated.
2033 * It is called from sigaddqa() and sigaddq() below.
2035 * The value of si_code implicitly indicates whether sigp is to be
2036 * explicitly queued, or to be queued to depth one.
2039 sigaddqins(proc_t
*p
, kthread_t
*t
, sigqueue_t
*sigqp
)
2042 int sig
= sigqp
->sq_info
.si_signo
;
2044 sigqp
->sq_external
= (curproc
!= &p0
) &&
2045 (curproc
->p_ct_process
!= p
->p_ct_process
);
2048 * issig_forreal() doesn't bother dequeueing signals if SKILLED
2049 * is set, and even if it did, we would want to avoid situation
2050 * (which would be unique to SIGKILL) where one thread dequeued
2051 * the sigqueue_t and another executed psig(). So we create a
2052 * separate stash for SIGKILL's sigqueue_t. Because a second
2053 * SIGKILL can set SEXTKILLED, we overwrite the existing entry
2054 * if (and only if) it was non-extracontractual.
2056 if (sig
== SIGKILL
) {
2057 if (p
->p_killsqp
== NULL
|| !p
->p_killsqp
->sq_external
) {
2058 if (p
->p_killsqp
!= NULL
)
2059 siginfofree(p
->p_killsqp
);
2060 p
->p_killsqp
= sigqp
;
2061 sigqp
->sq_next
= NULL
;
2068 ASSERT(sig
>= 1 && sig
< NSIG
);
2069 if (t
!= NULL
) /* directed to a thread */
2070 psqp
= &t
->t_sigqueue
;
2071 else /* directed to a process */
2072 psqp
= &p
->p_sigqueue
;
2073 if (SI_CANQUEUE(sigqp
->sq_info
.si_code
) &&
2074 sigismember(&p
->p_siginfo
, sig
)) {
2075 for (; *psqp
!= NULL
; psqp
= &(*psqp
)->sq_next
)
2078 for (; *psqp
!= NULL
; psqp
= &(*psqp
)->sq_next
) {
2079 if ((*psqp
)->sq_info
.si_signo
== sig
) {
2086 sigqp
->sq_next
= NULL
;
2090 * The function sigaddqa() is called with sigqueue already allocated.
2091 * If signal is ignored, discard but guarantee KILL and generation semantics.
2092 * It is called from sigqueue() and other places.
2095 sigaddqa(proc_t
*p
, kthread_t
*t
, sigqueue_t
*sigqp
)
2097 int sig
= sigqp
->sq_info
.si_signo
;
2099 ASSERT(MUTEX_HELD(&p
->p_lock
));
2100 ASSERT(sig
>= 1 && sig
< NSIG
);
2102 if (sig_discardable(p
, sig
))
2105 sigaddqins(p
, t
, sigqp
);
2107 sigtoproc(p
, t
, sig
);
2111 * Allocate the sigqueue_t structure and call sigaddqins().
2114 sigaddq(proc_t
*p
, kthread_t
*t
, k_siginfo_t
*infop
, int km_flags
)
2117 int sig
= infop
->si_signo
;
2119 ASSERT(MUTEX_HELD(&p
->p_lock
));
2120 ASSERT(sig
>= 1 && sig
< NSIG
);
2123 * If the signal will be discarded by sigtoproc() or
2124 * if the process isn't requesting siginfo and it isn't
2125 * blocking the signal (it *could* change it's mind while
2126 * the signal is pending) then don't bother creating one.
2128 if (!sig_discardable(p
, sig
) &&
2129 (sigismember(&p
->p_siginfo
, sig
) ||
2130 (curproc
->p_ct_process
!= p
->p_ct_process
) ||
2131 (sig
== SIGCLD
&& SI_FROMKERNEL(infop
))) &&
2132 ((sqp
= kmem_alloc(sizeof (sigqueue_t
), km_flags
)) != NULL
)) {
2133 bcopy(infop
, &sqp
->sq_info
, sizeof (k_siginfo_t
));
2134 sqp
->sq_func
= NULL
;
2135 sqp
->sq_next
= NULL
;
2136 sigaddqins(p
, t
, sqp
);
2138 sigtoproc(p
, t
, sig
);
2142 * Handle stop-on-fault processing for the debugger. Returns 0
2143 * if the fault is cleared during the stop, nonzero if it isn't.
2146 stop_on_fault(uint_t fault
, k_siginfo_t
*sip
)
2148 proc_t
*p
= ttoproc(curthread
);
2149 klwp_t
*lwp
= ttolwp(curthread
);
2151 ASSERT(prismember(&p
->p_fltmask
, fault
));
2154 * Record current fault and siginfo structure so debugger can
2157 mutex_enter(&p
->p_lock
);
2158 lwp
->lwp_curflt
= (uchar_t
)fault
;
2159 lwp
->lwp_siginfo
= *sip
;
2161 stop(PR_FAULTED
, fault
);
2163 fault
= lwp
->lwp_curflt
;
2164 lwp
->lwp_curflt
= 0;
2165 mutex_exit(&p
->p_lock
);
2170 sigorset(k_sigset_t
*s1
, const k_sigset_t
*s2
)
2172 s1
->__sigbits
[0] |= s2
->__sigbits
[0];
2173 s1
->__sigbits
[1] |= s2
->__sigbits
[1];
2174 s1
->__sigbits
[2] |= s2
->__sigbits
[2];
2178 sigandset(k_sigset_t
*s1
, const k_sigset_t
*s2
)
2180 s1
->__sigbits
[0] &= s2
->__sigbits
[0];
2181 s1
->__sigbits
[1] &= s2
->__sigbits
[1];
2182 s1
->__sigbits
[2] &= s2
->__sigbits
[2];
2186 sigdiffset(k_sigset_t
*s1
, const k_sigset_t
*s2
)
2188 s1
->__sigbits
[0] &= ~(s2
->__sigbits
[0]);
2189 s1
->__sigbits
[1] &= ~(s2
->__sigbits
[1]);
2190 s1
->__sigbits
[2] &= ~(s2
->__sigbits
[2]);
2194 * Return non-zero if curthread->t_sig_check should be set to 1, that is,
2195 * if there are any signals the thread might take on return from the kernel.
2196 * If ksigset_t's were a single word, we would do:
2197 * return (((p->p_sig | t->t_sig) & ~t->t_hold) & fillset);
2200 sigcheck(proc_t
*p
, kthread_t
*t
)
2202 sc_shared_t
*tdp
= t
->t_schedctl
;
2205 * If signals are blocked via the schedctl interface
2206 * then we only check for the unmaskable signals.
2207 * The unmaskable signal numbers should all be contained
2208 * in __sigbits[0] and we assume this for speed.
2210 #if (CANTMASK1 == 0 && CANTMASK2 == 0)
2211 if (tdp
!= NULL
&& tdp
->sc_sigblock
)
2212 return ((p
->p_sig
.__sigbits
[0] | t
->t_sig
.__sigbits
[0]) &
2215 #error "fix me: CANTMASK1 and CANTMASK2 are not zero"
2218 /* see uts/common/sys/signal.h for why this must be true */
2219 #if ((MAXSIG > (2 * 32)) && (MAXSIG <= (3 * 32)))
2220 return (((p
->p_sig
.__sigbits
[0] | t
->t_sig
.__sigbits
[0]) &
2221 ~t
->t_hold
.__sigbits
[0]) |
2222 ((p
->p_sig
.__sigbits
[1] | t
->t_sig
.__sigbits
[1]) &
2223 ~t
->t_hold
.__sigbits
[1]) |
2224 (((p
->p_sig
.__sigbits
[2] | t
->t_sig
.__sigbits
[2]) &
2225 ~t
->t_hold
.__sigbits
[2]) & FILLSET2
));
2227 #error "fix me: MAXSIG out of bounds"
2232 sigintr(k_sigset_t
*smask
, int intable
)
2236 k_sigset_t lmask
; /* local copy of cantmask */
2237 klwp_t
*lwp
= ttolwp(curthread
);
2240 * Mask out all signals except SIGHUP, SIGINT, SIGQUIT
2241 * and SIGTERM. (Preserving the existing masks).
2242 * This function supports the -intr nfs and ufs mount option.
2246 * don't do kernel threads
2252 * get access to signal mask
2254 p
= ttoproc(curthread
);
2255 owned
= mutex_owned(&p
->p_lock
); /* this is filthy */
2257 mutex_enter(&p
->p_lock
);
2260 * remember the current mask
2262 schedctl_finish_sigblock(curthread
);
2263 *smask
= curthread
->t_hold
;
2266 * mask out all signals
2268 sigfillset(&curthread
->t_hold
);
2271 * Unmask the non-maskable signals (e.g., KILL), as long as
2272 * they aren't already masked (which could happen at exit).
2273 * The first sigdiffset sets lmask to (cantmask & ~curhold). The
2274 * second sets the current hold mask to (~0 & ~lmask), which reduces
2275 * to (~cantmask | curhold).
2278 sigdiffset(&lmask
, smask
);
2279 sigdiffset(&curthread
->t_hold
, &lmask
);
2282 * Re-enable HUP, QUIT, and TERM iff they were originally enabled
2283 * Re-enable INT if it's originally enabled and the NFS mount option
2284 * nointr is not set.
2286 if (!sigismember(smask
, SIGHUP
))
2287 sigdelset(&curthread
->t_hold
, SIGHUP
);
2288 if (!sigismember(smask
, SIGINT
) && intable
)
2289 sigdelset(&curthread
->t_hold
, SIGINT
);
2290 if (!sigismember(smask
, SIGQUIT
))
2291 sigdelset(&curthread
->t_hold
, SIGQUIT
);
2292 if (!sigismember(smask
, SIGTERM
))
2293 sigdelset(&curthread
->t_hold
, SIGTERM
);
2296 * release access to signal mask
2299 mutex_exit(&p
->p_lock
);
2302 * Indicate that this lwp is not to be stopped.
2309 sigunintr(k_sigset_t
*smask
)
2313 klwp_t
*lwp
= ttolwp(curthread
);
2316 * Reset previous mask (See sigintr() above)
2319 lwp
->lwp_nostop
--; /* restore lwp stoppability */
2320 p
= ttoproc(curthread
);
2321 owned
= mutex_owned(&p
->p_lock
); /* this is filthy */
2323 mutex_enter(&p
->p_lock
);
2324 curthread
->t_hold
= *smask
;
2325 /* so unmasked signals will be seen */
2326 curthread
->t_sig_check
= 1;
2328 mutex_exit(&p
->p_lock
);
2333 sigreplace(k_sigset_t
*newmask
, k_sigset_t
*oldmask
)
2338 * Save current signal mask in oldmask, then
2339 * set it to newmask.
2341 if (ttolwp(curthread
) != NULL
) {
2342 p
= ttoproc(curthread
);
2343 owned
= mutex_owned(&p
->p_lock
); /* this is filthy */
2345 mutex_enter(&p
->p_lock
);
2346 schedctl_finish_sigblock(curthread
);
2347 if (oldmask
!= NULL
)
2348 *oldmask
= curthread
->t_hold
;
2349 curthread
->t_hold
= *newmask
;
2350 curthread
->t_sig_check
= 1;
2352 mutex_exit(&p
->p_lock
);
2357 * Return true if the signal number is in range
2358 * and the signal code specifies signal queueing.
2361 sigwillqueue(int sig
, int code
)
2363 if (sig
>= 0 && sig
< NSIG
) {
2376 * The pre-allocated pool (with _SIGQUEUE_PREALLOC entries) is
2377 * allocated at the first sigqueue/signotify call.
2380 sigqhdralloc(size_t size
, uint_t maxcount
)
2383 sigqueue_t
*sq
, *next
;
2387 * Before the introduction of process.max-sigqueue-size
2388 * _SC_SIGQUEUE_MAX had this static value.
2390 #define _SIGQUEUE_PREALLOC 32
2392 i
= (_SIGQUEUE_PREALLOC
* size
) + sizeof (sigqhdr_t
);
2393 ASSERT(maxcount
<= INT_MAX
);
2394 sqh
= kmem_alloc(i
, KM_SLEEP
);
2395 sqh
->sqb_count
= maxcount
;
2396 sqh
->sqb_maxcount
= maxcount
;
2398 sqh
->sqb_pexited
= 0;
2400 sqh
->sqb_free
= sq
= (sigqueue_t
*)(sqh
+ 1);
2401 for (i
= _SIGQUEUE_PREALLOC
- 1; i
!= 0; i
--) {
2402 next
= (sigqueue_t
*)((uintptr_t)sq
+ size
);
2407 cv_init(&sqh
->sqb_cv
, NULL
, CV_DEFAULT
, NULL
);
2408 mutex_init(&sqh
->sqb_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
2412 static void sigqrel(sigqueue_t
*);
2415 * Allocate a sigqueue/signotify structure from the per process
2416 * pre-allocated pool or allocate a new sigqueue/signotify structure
2417 * if the pre-allocated pool is exhausted.
2420 sigqalloc(sigqhdr_t
*sqh
)
2422 sigqueue_t
*sq
= NULL
;
2424 ASSERT(MUTEX_HELD(&curproc
->p_lock
));
2427 mutex_enter(&sqh
->sqb_lock
);
2428 if (sqh
->sqb_count
> 0) {
2430 if (sqh
->sqb_free
== NULL
) {
2432 * The pre-allocated pool is exhausted.
2434 sq
= kmem_alloc(sizeof (sigqueue_t
), KM_SLEEP
);
2438 sq
->sq_func
= sigqrel
;
2439 sqh
->sqb_free
= sq
->sq_next
;
2441 mutex_exit(&sqh
->sqb_lock
);
2442 bzero(&sq
->sq_info
, sizeof (k_siginfo_t
));
2443 sq
->sq_backptr
= sqh
;
2445 sq
->sq_external
= 0;
2447 mutex_exit(&sqh
->sqb_lock
);
2454 * Return a sigqueue structure back to the pre-allocated pool.
2457 sigqrel(sigqueue_t
*sq
)
2461 /* make sure that p_lock of the affected process is held */
2463 sqh
= (sigqhdr_t
*)sq
->sq_backptr
;
2464 mutex_enter(&sqh
->sqb_lock
);
2465 if (sqh
->sqb_pexited
&& sqh
->sqb_sent
== 1) {
2466 mutex_exit(&sqh
->sqb_lock
);
2467 cv_destroy(&sqh
->sqb_cv
);
2468 mutex_destroy(&sqh
->sqb_lock
);
2469 kmem_free(sqh
, sqh
->sqb_size
);
2473 sq
->sq_next
= sqh
->sqb_free
;
2474 sq
->sq_backptr
= NULL
;
2476 cv_signal(&sqh
->sqb_cv
);
2477 mutex_exit(&sqh
->sqb_lock
);
2482 * Free up the pre-allocated sigqueue headers of sigqueue pool
2483 * and signotify pool, if possible.
2484 * Called only by the owning process during exec() and exit().
2489 ASSERT(MUTEX_HELD(&p
->p_lock
));
2491 if (p
->p_sigqhdr
!= NULL
) { /* sigqueue pool */
2492 sigqhdrfree(p
->p_sigqhdr
);
2493 p
->p_sigqhdr
= NULL
;
2495 if (p
->p_signhdr
!= NULL
) { /* signotify pool */
2496 sigqhdrfree(p
->p_signhdr
);
2497 p
->p_signhdr
= NULL
;
2502 * Free up the pre-allocated header and sigq pool if possible.
2505 sigqhdrfree(sigqhdr_t
*sqh
)
2507 mutex_enter(&sqh
->sqb_lock
);
2508 if (sqh
->sqb_sent
== 0) {
2509 mutex_exit(&sqh
->sqb_lock
);
2510 cv_destroy(&sqh
->sqb_cv
);
2511 mutex_destroy(&sqh
->sqb_lock
);
2512 kmem_free(sqh
, sqh
->sqb_size
);
2514 sqh
->sqb_pexited
= 1;
2515 mutex_exit(&sqh
->sqb_lock
);
2520 * Free up a single sigqueue structure.
2521 * No other code should free a sigqueue directly.
2524 siginfofree(sigqueue_t
*sqp
)
2527 if (sqp
->sq_func
!= NULL
)
2528 (sqp
->sq_func
)(sqp
);
2530 kmem_free(sqp
, sizeof (sigqueue_t
));
2535 * Generate a synchronous signal caused by a hardware
2536 * condition encountered by an lwp. Called from trap().
2539 trapsig(k_siginfo_t
*ip
, int restartable
)
2541 proc_t
*p
= ttoproc(curthread
);
2542 int sig
= ip
->si_signo
;
2543 sigqueue_t
*sqp
= kmem_zalloc(sizeof (sigqueue_t
), KM_SLEEP
);
2545 ASSERT(sig
> 0 && sig
< NSIG
);
2547 if (curthread
->t_dtrace_on
)
2548 dtrace_safe_synchronous_signal();
2550 mutex_enter(&p
->p_lock
);
2551 schedctl_finish_sigblock(curthread
);
2553 * Avoid a possible infinite loop if the lwp is holding the
2554 * signal generated by a trap of a restartable instruction or
2555 * if the signal so generated is being ignored by the process.
2558 (sigismember(&curthread
->t_hold
, sig
) ||
2559 p
->p_user
.u_signal
[sig
-1] == SIG_IGN
)) {
2560 sigdelset(&curthread
->t_hold
, sig
);
2561 p
->p_user
.u_signal
[sig
-1] = SIG_DFL
;
2562 sigdelset(&p
->p_ignore
, sig
);
2564 bcopy(ip
, &sqp
->sq_info
, sizeof (k_siginfo_t
));
2565 sigaddqa(p
, curthread
, sqp
);
2566 mutex_exit(&p
->p_lock
);
2570 * Dispatch the real time profiling signal in the traditional way,
2571 * honoring all of the /proc tracing mechanism built into issig().
2574 realsigprof_slow(int sysnum
, int nsysarg
, int error
)
2576 kthread_t
*t
= curthread
;
2577 proc_t
*p
= ttoproc(t
);
2578 klwp_t
*lwp
= ttolwp(t
);
2579 k_siginfo_t
*sip
= &lwp
->lwp_siginfo
;
2582 mutex_enter(&p
->p_lock
);
2583 func
= PTOU(p
)->u_signal
[SIGPROF
- 1];
2584 if (p
->p_rprof_cyclic
== CYCLIC_NONE
||
2585 func
== SIG_DFL
|| func
== SIG_IGN
) {
2586 bzero(t
->t_rprof
, sizeof (*t
->t_rprof
));
2587 mutex_exit(&p
->p_lock
);
2590 if (sigismember(&t
->t_hold
, SIGPROF
)) {
2591 mutex_exit(&p
->p_lock
);
2594 sip
->si_signo
= SIGPROF
;
2595 sip
->si_code
= PROF_SIG
;
2596 sip
->si_errno
= error
;
2597 hrt2ts(gethrtime(), &sip
->si_tstamp
);
2598 sip
->si_syscall
= sysnum
;
2599 sip
->si_nsysarg
= nsysarg
;
2600 sip
->si_fault
= lwp
->lwp_lastfault
;
2601 sip
->si_faddr
= lwp
->lwp_lastfaddr
;
2602 lwp
->lwp_lastfault
= 0;
2603 lwp
->lwp_lastfaddr
= NULL
;
2604 sigtoproc(p
, t
, SIGPROF
);
2605 mutex_exit(&p
->p_lock
);
2606 ASSERT(lwp
->lwp_cursig
== 0);
2610 bzero(t
->t_rprof
, sizeof (*t
->t_rprof
));
2614 * We are not tracing the SIGPROF signal, or doing any other unnatural
2615 * acts, like watchpoints, so dispatch the real time profiling signal
2616 * directly, bypassing all of the overhead built into issig().
2619 realsigprof_fast(int sysnum
, int nsysarg
, int error
)
2621 kthread_t
*t
= curthread
;
2622 proc_t
*p
= ttoproc(t
);
2623 klwp_t
*lwp
= ttolwp(t
);
2624 k_siginfo_t
*sip
= &lwp
->lwp_siginfo
;
2630 * We don't need to acquire p->p_lock here;
2631 * we are manipulating thread-private data.
2633 func
= PTOU(p
)->u_signal
[SIGPROF
- 1];
2634 if (p
->p_rprof_cyclic
== CYCLIC_NONE
||
2635 func
== SIG_DFL
|| func
== SIG_IGN
) {
2636 bzero(t
->t_rprof
, sizeof (*t
->t_rprof
));
2639 if (lwp
->lwp_cursig
!= 0 ||
2640 lwp
->lwp_curinfo
!= NULL
||
2641 sigismember(&t
->t_hold
, SIGPROF
)) {
2644 sip
->si_signo
= SIGPROF
;
2645 sip
->si_code
= PROF_SIG
;
2646 sip
->si_errno
= error
;
2647 hrt2ts(gethrtime(), &sip
->si_tstamp
);
2648 sip
->si_syscall
= sysnum
;
2649 sip
->si_nsysarg
= nsysarg
;
2650 sip
->si_fault
= lwp
->lwp_lastfault
;
2651 sip
->si_faddr
= lwp
->lwp_lastfaddr
;
2652 lwp
->lwp_lastfault
= 0;
2653 lwp
->lwp_lastfaddr
= NULL
;
2654 if (t
->t_flag
& T_TOMASK
)
2655 t
->t_flag
&= ~T_TOMASK
;
2657 lwp
->lwp_sigoldmask
= t
->t_hold
;
2658 sigorset(&t
->t_hold
, &PTOU(p
)->u_sigmask
[SIGPROF
- 1]);
2659 if (!sigismember(&PTOU(p
)->u_signodefer
, SIGPROF
))
2660 sigaddset(&t
->t_hold
, SIGPROF
);
2661 lwp
->lwp_extsig
= 0;
2662 lwp
->lwp_ru
.nsignals
++;
2663 if (p
->p_model
== DATAMODEL_NATIVE
)
2664 rc
= sendsig(SIGPROF
, sip
, func
);
2665 #ifdef _SYSCALL32_IMPL
2667 rc
= sendsig32(SIGPROF
, sip
, func
);
2668 #endif /* _SYSCALL32_IMPL */
2670 bzero(t
->t_rprof
, sizeof (*t
->t_rprof
));
2673 * sendsig() failed; we must dump core with a SIGSEGV.
2674 * See psig(). This code is copied from there.
2676 lwp
->lwp_cursig
= SIGSEGV
;
2679 if (exitlwps(1) != 0) {
2680 mutex_enter(&p
->p_lock
);
2683 if (audit_active
== C2AUDIT_LOADED
)
2684 audit_core_start(SIGSEGV
);
2685 if (core(SIGSEGV
, 0) == 0)
2687 if (audit_active
== C2AUDIT_LOADED
)
2688 audit_core_finish(code
);
2689 exit(code
, SIGSEGV
);
2694 * Arrange for the real time profiling signal to be dispatched.
2697 realsigprof(int sysnum
, int nsysarg
, int error
)
2699 kthread_t
*t
= curthread
;
2700 proc_t
*p
= ttoproc(t
);
2702 if (t
->t_rprof
->rp_anystate
== 0)
2705 schedctl_finish_sigblock(t
);
2707 /* test for any activity that requires p->p_lock */
2708 if (tracing(p
, SIGPROF
) || pr_watch_active(p
) ||
2709 sigismember(&PTOU(p
)->u_sigresethand
, SIGPROF
)) {
2710 /* do it the classic slow way */
2711 realsigprof_slow(sysnum
, nsysarg
, error
);
2713 /* do it the cheating-a-little fast way */
2714 realsigprof_fast(sysnum
, nsysarg
, error
);
2718 #ifdef _SYSCALL32_IMPL
2721 * It's tricky to transmit a sigval between 32-bit and 64-bit
2722 * process, since in the 64-bit world, a pointer and an integer
2723 * are different sizes. Since we're constrained by the standards
2724 * world not to change the types, and it's unclear how useful it is
2725 * to send pointers between address spaces this way, we preserve
2726 * the 'int' interpretation for 32-bit processes interoperating
2727 * with 64-bit processes. The full semantics (pointers or integers)
2728 * are available for N-bit processes interoperating with N-bit
2732 siginfo_kto32(const k_siginfo_t
*src
, siginfo32_t
*dest
)
2734 bzero(dest
, sizeof (*dest
));
2737 * The absolute minimum content is si_signo and si_code.
2739 dest
->si_signo
= src
->si_signo
;
2740 if ((dest
->si_code
= src
->si_code
) == SI_NOINFO
)
2744 * A siginfo generated by user level is structured
2745 * differently from one generated by the kernel.
2747 if (SI_FROMUSER(src
)) {
2748 dest
->si_pid
= src
->si_pid
;
2749 dest
->si_ctid
= src
->si_ctid
;
2750 dest
->si_zoneid
= src
->si_zoneid
;
2751 dest
->si_uid
= src
->si_uid
;
2752 if (SI_CANQUEUE(src
->si_code
))
2753 dest
->si_value
.sival_int
=
2754 (int32_t)src
->si_value
.sival_int
;
2758 dest
->si_errno
= src
->si_errno
;
2760 switch (src
->si_signo
) {
2762 dest
->si_pid
= src
->si_pid
;
2763 dest
->si_ctid
= src
->si_ctid
;
2764 dest
->si_zoneid
= src
->si_zoneid
;
2765 dest
->si_uid
= src
->si_uid
;
2766 dest
->si_value
.sival_int
= (int32_t)src
->si_value
.sival_int
;
2769 dest
->si_pid
= src
->si_pid
;
2770 dest
->si_ctid
= src
->si_ctid
;
2771 dest
->si_zoneid
= src
->si_zoneid
;
2772 dest
->si_status
= src
->si_status
;
2773 dest
->si_stime
= src
->si_stime
;
2774 dest
->si_utime
= src
->si_utime
;
2782 dest
->si_addr
= (caddr32_t
)(uintptr_t)src
->si_addr
;
2783 dest
->si_trapno
= src
->si_trapno
;
2784 dest
->si_pc
= (caddr32_t
)(uintptr_t)src
->si_pc
;
2788 dest
->si_fd
= src
->si_fd
;
2789 dest
->si_band
= src
->si_band
;
2792 dest
->si_faddr
= (caddr32_t
)(uintptr_t)src
->si_faddr
;
2793 dest
->si_tstamp
.tv_sec
= src
->si_tstamp
.tv_sec
;
2794 dest
->si_tstamp
.tv_nsec
= src
->si_tstamp
.tv_nsec
;
2795 dest
->si_syscall
= src
->si_syscall
;
2796 dest
->si_nsysarg
= src
->si_nsysarg
;
2797 dest
->si_fault
= src
->si_fault
;
2803 siginfo_32tok(const siginfo32_t
*src
, k_siginfo_t
*dest
)
2805 bzero(dest
, sizeof (*dest
));
2808 * The absolute minimum content is si_signo and si_code.
2810 dest
->si_signo
= src
->si_signo
;
2811 if ((dest
->si_code
= src
->si_code
) == SI_NOINFO
)
2815 * A siginfo generated by user level is structured
2816 * differently from one generated by the kernel.
2818 if (SI_FROMUSER(src
)) {
2819 dest
->si_pid
= src
->si_pid
;
2820 dest
->si_ctid
= src
->si_ctid
;
2821 dest
->si_zoneid
= src
->si_zoneid
;
2822 dest
->si_uid
= src
->si_uid
;
2823 if (SI_CANQUEUE(src
->si_code
))
2824 dest
->si_value
.sival_int
=
2825 (int)src
->si_value
.sival_int
;
2829 dest
->si_errno
= src
->si_errno
;
2831 switch (src
->si_signo
) {
2833 dest
->si_pid
= src
->si_pid
;
2834 dest
->si_ctid
= src
->si_ctid
;
2835 dest
->si_zoneid
= src
->si_zoneid
;
2836 dest
->si_uid
= src
->si_uid
;
2837 dest
->si_value
.sival_int
= (int)src
->si_value
.sival_int
;
2840 dest
->si_pid
= src
->si_pid
;
2841 dest
->si_ctid
= src
->si_ctid
;
2842 dest
->si_zoneid
= src
->si_zoneid
;
2843 dest
->si_status
= src
->si_status
;
2844 dest
->si_stime
= src
->si_stime
;
2845 dest
->si_utime
= src
->si_utime
;
2853 dest
->si_addr
= (void *)(uintptr_t)src
->si_addr
;
2854 dest
->si_trapno
= src
->si_trapno
;
2855 dest
->si_pc
= (void *)(uintptr_t)src
->si_pc
;
2859 dest
->si_fd
= src
->si_fd
;
2860 dest
->si_band
= src
->si_band
;
2863 dest
->si_faddr
= (void *)(uintptr_t)src
->si_faddr
;
2864 dest
->si_tstamp
.tv_sec
= src
->si_tstamp
.tv_sec
;
2865 dest
->si_tstamp
.tv_nsec
= src
->si_tstamp
.tv_nsec
;
2866 dest
->si_syscall
= src
->si_syscall
;
2867 dest
->si_nsysarg
= src
->si_nsysarg
;
2868 dest
->si_fault
= src
->si_fault
;
2873 #endif /* _SYSCALL32_IMPL */