4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
23 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
25 * Copyright (c) 2016 by Delphix. All rights reserved.
/* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
/*	  All Rights Reserved	*/
#include <sys/param.h>
#include <sys/types.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/prsystm.h>
#include <sys/cred.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/thread.h>
#include <sys/signal.h>
#include <sys/kmem.h>
#include <sys/unistd.h>
#include <sys/cmn_err.h>
#include <sys/schedctl.h>
#include <sys/debug.h>
#include <sys/contract/process_impl.h>
47 idtot(proc_t
*p
, id_t lwpid
)
51 if ((ldp
= lwp_hash_lookup(p
, lwpid
)) != NULL
)
52 return (ldp
->ld_entry
->le_thread
);
57 * Same as idtot(), but acquire and return
58 * the tid hash table entry lock on success.
59 * This allows lwp_unpark() to do its job without acquiring
60 * p->p_lock (and thereby causing congestion problems when
61 * the application calls lwp_unpark() too often).
64 idtot_and_lock(proc_t
*p
, id_t lwpid
, kmutex_t
**mpp
)
69 if ((ldp
= lwp_hash_lookup_and_lock(p
, lwpid
, mpp
)) != NULL
) {
70 if ((t
= ldp
->ld_entry
->le_thread
) == NULL
)
78 * Stop an lwp of the current process
81 syslwp_suspend(id_t lwpid
)
85 proc_t
*p
= ttoproc(curthread
);
87 mutex_enter(&p
->p_lock
);
88 if ((t
= idtot(p
, lwpid
)) == NULL
)
91 error
= lwp_suspend(t
);
92 mutex_exit(&p
->p_lock
);
94 return (set_errno(error
));
99 syslwp_continue(id_t lwpid
)
102 proc_t
*p
= ttoproc(curthread
);
104 mutex_enter(&p
->p_lock
);
105 if ((t
= idtot(p
, lwpid
)) == NULL
) {
106 mutex_exit(&p
->p_lock
);
107 return (set_errno(ESRCH
));
110 mutex_exit(&p
->p_lock
);
115 lwp_kill(id_t lwpid
, int sig
)
119 proc_t
*p
= ttoproc(curthread
);
121 if (sig
< 0 || sig
>= NSIG
)
122 return (set_errno(EINVAL
));
124 sqp
= kmem_zalloc(sizeof (sigqueue_t
), KM_SLEEP
);
125 mutex_enter(&p
->p_lock
);
126 if ((t
= idtot(p
, lwpid
)) == NULL
) {
127 mutex_exit(&p
->p_lock
);
129 kmem_free(sqp
, sizeof (sigqueue_t
));
130 return (set_errno(ESRCH
));
133 mutex_exit(&p
->p_lock
);
136 sqp
->sq_info
.si_signo
= sig
;
137 sqp
->sq_info
.si_code
= SI_LWP
;
138 sqp
->sq_info
.si_pid
= p
->p_pid
;
139 sqp
->sq_info
.si_ctid
= PRCTID(p
);
140 sqp
->sq_info
.si_zoneid
= getzoneid();
141 sqp
->sq_info
.si_uid
= crgetruid(CRED());
143 mutex_exit(&p
->p_lock
);
148 * This is the specification of lwp_wait() from the _lwp_wait(2) manual page:
150 * The lwp_wait() function blocks the current lwp until the lwp specified
151 * by 'lwpid' terminates. If the specified lwp terminated prior to the call
152 * to lwp_wait(), then lwp_wait() returns immediately. If 'lwpid' is zero,
153 * then lwp_wait() waits for any undetached lwp in the current process.
154 * If 'lwpid' is not zero, then it must specify an undetached lwp in the
155 * current process. If 'departed' is not NULL, then it points to a location
156 * where the id of the exited lwp is stored.
158 * When an lwp exits and there are one or more lwps in the process waiting
159 * for this specific lwp to exit, then one of the waiting lwps is unblocked
160 * and it returns from lwp_wait() successfully. Any other lwps waiting for
161 * this same lwp to exit are also unblocked, however, they return from
162 * lwp_wait() with the error ESRCH. If there are no lwps in the process
163 * waiting for this specific lwp to exit but there are one or more lwps
164 * waiting for any lwp to exit, then one of the waiting lwps is unblocked
165 * and it returns from lwp_wait() successfully.
167 * If an lwp is waiting for any lwp to exit, it blocks until an undetached
168 * lwp for which no other lwp is waiting terminates, at which time it returns
169 * successfully, or until all other lwps in the process are either daemon
170 * lwps or lwps waiting in lwp_wait(), in which case it returns EDEADLK.
173 lwp_wait(id_t lwpid
, id_t
*departed
)
175 proc_t
*p
= ttoproc(curthread
);
177 int daemon
= (curthread
->t_proc_flag
& TP_DAEMON
)? 1 : 0;
178 lwpent_t
*target_lep
;
183 * lwp_wait() is not supported for the /proc agent lwp.
185 if (curthread
== p
->p_agenttp
)
186 return (set_errno(ENOTSUP
));
188 mutex_enter(&p
->p_lock
);
191 curthread
->t_waitfor
= lwpid
;
193 p
->p_lwpdwait
+= daemon
;
196 if ((ldp
= lwp_hash_lookup(p
, lwpid
)) == NULL
)
199 target_lep
= ldp
->ld_entry
;
200 target_lep
->le_waiters
++;
201 target_lep
->le_dwaiters
+= daemon
;
212 * Look for a specific zombie lwp.
214 if (target_lep
== NULL
)
216 else if ((t
= target_lep
->le_thread
) != NULL
) {
217 if (!(t
->t_proc_flag
& TP_TWAIT
))
221 * We found the zombie we are waiting for.
223 ASSERT(p
->p_zombcnt
> 0);
226 p
->p_lwpdwait
-= daemon
;
227 curthread
->t_waitfor
= -1;
228 lwp_hash_out(p
, lwpid
);
229 mutex_exit(&p
->p_lock
);
230 if (departed
!= NULL
&&
231 copyout(&lwpid
, departed
, sizeof (id_t
)))
232 return (set_errno(EFAULT
));
237 * Look for any zombie lwp.
239 int some_non_daemon_will_return
= 0;
241 /* for each entry in the lwp directory... */
243 for (i
= 0; i
< p
->p_lwpdir_sz
; i
++, ldp
++) {
245 if ((lep
= ldp
->ld_entry
) == NULL
||
246 lep
->le_thread
!= NULL
)
250 * We found a zombie lwp. If there is some
251 * other thread waiting specifically for the
252 * zombie we just found, then defer to the other
253 * waiting thread and continue searching for
254 * another zombie. Also check to see if there
255 * is some non-daemon thread sleeping here in
256 * lwp_wait() that will succeed and return when
257 * we drop p->p_lock. This is tested below.
260 if (lep
->le_waiters
!= 0) {
261 if (lep
->le_waiters
- lep
->le_dwaiters
)
262 some_non_daemon_will_return
= 1;
267 * We found a zombie that no one else
268 * is specifically waiting for.
270 ASSERT(p
->p_zombcnt
> 0);
273 p
->p_lwpdwait
-= daemon
;
274 curthread
->t_waitfor
= -1;
275 lwp_hash_out(p
, tid
);
276 mutex_exit(&p
->p_lock
);
277 if (departed
!= NULL
&&
278 copyout(&tid
, departed
, sizeof (id_t
)))
279 return (set_errno(EFAULT
));
284 * We are waiting for anyone. If all non-daemon lwps
285 * are waiting here, and if we determined above that
286 * no non-daemon lwp will return, we have deadlock.
288 if (!some_non_daemon_will_return
&&
289 p
->p_lwpcnt
== p
->p_lwpdaemon
+
290 (p
->p_lwpwait
- p
->p_lwpdwait
))
294 if (error
== 0 && lwpid
!= 0) {
296 * We are waiting for a specific non-zombie lwp.
297 * Fail if there is a deadlock loop.
300 if (t
== curthread
) {
304 /* who are they waiting for? */
305 if ((tid
= t
->t_waitfor
) == -1)
309 * The lwp we are waiting for is
310 * waiting for anyone (transitively).
311 * If there are no zombies right now
312 * and if we would have deadlock due
313 * to all non-daemon lwps waiting here,
314 * wake up the lwp that is waiting for
315 * anyone so it can return EDEADLK.
317 if (p
->p_zombcnt
== 0 &&
318 p
->p_lwpcnt
== p
->p_lwpdaemon
+
319 p
->p_lwpwait
- p
->p_lwpdwait
)
320 cv_broadcast(&p
->p_lwpexit
);
323 if ((ldp
= lwp_hash_lookup(p
, tid
)) == NULL
||
324 (t
= ldp
->ld_entry
->le_thread
) == NULL
)
333 * Wait for some lwp to terminate.
335 if (!cv_wait_sig(&p
->p_lwpexit
, &p
->p_lock
))
340 if ((ldp
= lwp_hash_lookup(p
, lwpid
)) == NULL
)
343 target_lep
= ldp
->ld_entry
;
347 if (lwpid
!= 0 && target_lep
!= NULL
) {
348 target_lep
->le_waiters
--;
349 target_lep
->le_dwaiters
-= daemon
;
352 p
->p_lwpdwait
-= daemon
;
353 curthread
->t_waitfor
= -1;
354 mutex_exit(&p
->p_lock
);
355 return (set_errno(error
));
359 lwp_detach(id_t lwpid
)
362 proc_t
*p
= ttoproc(curthread
);
366 mutex_enter(&p
->p_lock
);
368 if ((ldp
= lwp_hash_lookup(p
, lwpid
)) == NULL
)
370 else if ((t
= ldp
->ld_entry
->le_thread
) != NULL
) {
371 if (!(t
->t_proc_flag
& TP_TWAIT
))
374 t
->t_proc_flag
&= ~TP_TWAIT
;
375 cv_broadcast(&p
->p_lwpexit
);
378 ASSERT(p
->p_zombcnt
> 0);
380 lwp_hash_out(p
, lwpid
);
382 mutex_exit(&p
->p_lock
);
385 return (set_errno(error
));
390 * Unpark the specified lwp.
393 lwp_unpark(id_t lwpid
)
395 proc_t
*p
= ttoproc(curthread
);
400 if ((t
= idtot_and_lock(p
, lwpid
, &mp
)) == NULL
) {
403 mutex_enter(&t
->t_delay_lock
);
405 cv_signal(&t
->t_delay_cv
);
406 mutex_exit(&t
->t_delay_lock
);
413 * Cancel a previous unpark for the specified lwp.
415 * This interface exists ONLY to support older versions of libthread, which
416 * called lwp_unpark(self) to force calls to lwp_park(self) to return
417 * immediately. These older libthreads required a mechanism to cancel the
420 * libc does not call this interface. Instead, the sc_park flag in the
421 * schedctl page is cleared to force calls to lwp_park() to return
425 lwp_unpark_cancel(id_t lwpid
)
427 proc_t
*p
= ttoproc(curthread
);
432 if ((t
= idtot_and_lock(p
, lwpid
, &mp
)) == NULL
) {
435 mutex_enter(&t
->t_delay_lock
);
437 mutex_exit(&t
->t_delay_lock
);
444 * Sleep until we are set running by lwp_unpark() or until we are
445 * interrupted by a signal or until we exhaust our timeout.
446 * timeoutp is an in/out parameter. On entry, it contains the relative
447 * time until timeout. On exit, we copyout the residual time left to it.
450 lwp_park(timespec_t
*timeoutp
, id_t lwpid
)
455 timespec_t
*rqtp
= NULL
;
456 kthread_t
*t
= curthread
;
459 model_t datamodel
= ttoproc(t
)->p_model
;
461 if (lwpid
!= 0) /* unpark the other lwp, if any */
462 (void) lwp_unpark(lwpid
);
465 timecheck
= timechanged
;
467 if (datamodel
== DATAMODEL_NATIVE
) {
468 if (copyin(timeoutp
, &rqtime
, sizeof (timespec_t
))) {
473 timespec32_t timeout32
;
475 if (copyin(timeoutp
, &timeout32
, sizeof (timeout32
))) {
479 TIMESPEC32_TO_TIMESPEC(&rqtime
, &timeout32
)
482 if (itimerspecfix(&rqtime
)) {
487 * Convert the timespec value into absolute time.
489 timespecadd(&rqtime
, &now
);
493 (void) new_mstate(t
, LMS_USER_LOCK
);
495 mutex_enter(&t
->t_delay_lock
);
496 if (!schedctl_is_park())
498 while (error
== 0 && t
->t_unpark
== 0) {
499 switch (cv_waituntil_sig(&t
->t_delay_cv
,
500 &t
->t_delay_lock
, rqtp
, timecheck
)) {
510 mutex_exit(&t
->t_delay_lock
);
512 if (timeoutp
!= NULL
) {
513 rmtime
.tv_sec
= rmtime
.tv_nsec
= 0;
514 if (error
!= ETIME
) {
516 if ((now
.tv_sec
< rqtime
.tv_sec
) ||
517 ((now
.tv_sec
== rqtime
.tv_sec
) &&
518 (now
.tv_nsec
< rqtime
.tv_nsec
))) {
520 timespecsub(&rmtime
, &now
);
523 if (datamodel
== DATAMODEL_NATIVE
) {
524 if (copyout(&rmtime
, timeoutp
, sizeof (rmtime
)))
527 timespec32_t rmtime32
;
529 TIMESPEC_TO_TIMESPEC32(&rmtime32
, &rmtime
);
530 if (copyout(&rmtime32
, timeoutp
, sizeof (rmtime32
)))
536 if (t
->t_mstate
== LMS_USER_LOCK
)
537 (void) new_mstate(t
, LMS_SYSTEM
);
541 #define MAXLWPIDS 1024
544 * Unpark all of the specified lwps.
545 * Do it in chunks of MAXLWPIDS to avoid allocating too much memory.
548 lwp_unpark_all(id_t
*lwpidp
, int nids
)
550 proc_t
*p
= ttoproc(curthread
);
562 lwpidsz
= MIN(nids
, MAXLWPIDS
) * sizeof (id_t
);
563 lwpid
= kmem_alloc(lwpidsz
, KM_SLEEP
);
565 n
= MIN(nids
, MAXLWPIDS
);
566 if (copyin(lwpidp
, lwpid
, n
* sizeof (id_t
))) {
570 for (i
= 0; i
< n
; i
++) {
571 if ((t
= idtot_and_lock(p
, lwpid
[i
], &mp
)) == NULL
) {
574 mutex_enter(&t
->t_delay_lock
);
576 cv_signal(&t
->t_delay_cv
);
577 mutex_exit(&t
->t_delay_lock
);
584 kmem_free(lwpid
, lwpidsz
);
589 * SYS_lwp_park() system call.
592 syslwp_park(int which
, uintptr_t arg1
, uintptr_t arg2
)
598 error
= lwp_park((timespec_t
*)arg1
, (id_t
)arg2
);
601 error
= lwp_unpark((id_t
)arg1
);
604 error
= lwp_unpark_all((id_t
*)arg1
, (int)arg2
);
608 * This subcode is not used by libc. It exists ONLY to
609 * support older versions of libthread which do not use
610 * the sc_park flag in the schedctl page.
612 * These versions of libthread need to be modifed or emulated
613 * to change calls to syslwp_park(1, tid, 0) to
614 * syslwp_park(3, tid).
616 error
= lwp_unpark_cancel((id_t
)arg1
);
620 * This subcode is not used by libc. It exists ONLY to
621 * support older versions of libthread which do not use
622 * the sc_park flag in the schedctl page.
624 * These versions of libthread need to be modified or emulated
625 * to change calls to syslwp_park(0, ts, tid) to
626 * syslwp_park(4, ts, tid).
629 error
= lwp_park((timespec_t
*)arg1
, (id_t
)arg2
);
637 return (set_errno(error
));