/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
 */

#include <sys/param.h>
#include <sys/types.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/task.h>
#include <sys/project.h>
#include <sys/signal.h>
#include <sys/errno.h>
#include <sys/vmparam.h>
#include <sys/stack.h>
#include <sys/procfs.h>
#include <sys/prsystm.h>
#include <sys/cpuvar.h>
#include <sys/kmem.h>
#include <sys/vtrace.h>
#include <sys/door.h>
#include <vm/seg_kp.h>
#include <sys/debug.h>
#include <sys/tnf.h>
#include <sys/schedctl.h>
#include <sys/poll.h>
#include <sys/copyops.h>
#include <sys/lwp_upimutex_impl.h>
#include <sys/cpupart.h>
#include <sys/lgrp.h>
#include <sys/rctl.h>
#include <sys/contract_impl.h>
#include <sys/cpc_impl.h>
#include <sys/sdt.h>
#include <sys/cmn_err.h>
#include <sys/brand.h>
#include <sys/cyclic.h>
#include <sys/pool.h>

/* hash function for the lwpid hash table, p->p_tidhash[] */
#define	TIDHASH(tid, hash_sz)	((tid) & ((hash_sz) - 1))
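
/*
 * Illustrative note (editor's sketch, not part of the original source):
 * TIDHASH() relies on hash_sz always being a power of two, so the mask
 * (hash_sz - 1) selects the low-order bits of the tid.  For example,
 * with hash_sz == 16, tid 67 (binary 1000011) hashes to bucket
 * 67 & 15 == 3.
 */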

void *segkp_lwp;		/* cookie for pool of segkp resources */
extern void reapq_move_lq_to_tq(kthread_t *);
extern void freectx_ctx(struct ctxop *);

/*
 * Create a kernel thread associated with a particular system process.  Give
 * it an LWP so that microstate accounting will be available for it.
 */
kthread_t *
lwp_kernel_create(proc_t *p, void (*proc)(), void *arg, int state, pri_t pri)
{
	klwp_t *lwp;

	VERIFY((p->p_flag & SSYS) != 0);

	lwp = lwp_create(proc, arg, 0, p, state, pri, &t0.t_hold, syscid, 0);

	VERIFY(lwp != NULL);

	return (lwptot(lwp));
}
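
/*
 * Illustrative usage sketch (hypothetical, not in the original source):
 * a system process typically starts its worker threads through this
 * wrapper so that they carry an LWP, e.g.
 *
 *	kthread_t *t = lwp_kernel_create(p, worker_func, NULL,
 *	    TS_RUN, minclsyspri);
 *
 * where worker_func and p are supplied by the caller; lwp_kernel_create()
 * VERIFYs that p is an SSYS process.
 */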

/*
 * Create a thread that appears to be stopped at sys_rtt.
 */
klwp_t *
lwp_create(void (*proc)(), caddr_t arg, size_t len, proc_t *p,
    int state, int pri, const k_sigset_t *smask, int cid, id_t lwpid)
{
	klwp_t *lwp = NULL;
	kthread_t *t;
	kthread_t *tx;
	cpupart_t *oldpart = NULL;
	size_t stksize;
	caddr_t lwpdata = NULL;
	processorid_t binding;
	int err = 0;
	kproject_t *oldkpj, *newkpj;
	void *bufp = NULL;
	klwp_t *curlwp;
	lwpent_t *lep;
	lwpdir_t *old_dir = NULL;
	uint_t old_dirsz = 0;
	tidhash_t *old_hash = NULL;
	uint_t old_hashsz = 0;
	ret_tidhash_t *ret_tidhash = NULL;
	int i;
	int rctlfail = 0;
	boolean_t branded = 0;
	struct ctxop *ctx = NULL;

	ASSERT(cid != sysdccid);	/* system threads must start in SYS */

	ASSERT(p != &p0);		/* No new LWPs in p0. */

	mutex_enter(&p->p_lock);
	mutex_enter(&p->p_zone->zone_nlwps_lock);
	/*
	 * don't enforce rctl limits on system processes
	 */
	if (!CLASS_KERNEL(cid)) {
		if (p->p_task->tk_nlwps >= p->p_task->tk_nlwps_ctl)
			if (rctl_test(rc_task_lwps, p->p_task->tk_rctls, p,
			    1, 0) & RCT_DENY)
				rctlfail = 1;
		if (p->p_task->tk_proj->kpj_nlwps >=
		    p->p_task->tk_proj->kpj_nlwps_ctl)
			if (rctl_test(rc_project_nlwps,
			    p->p_task->tk_proj->kpj_rctls, p, 1, 0)
			    & RCT_DENY)
				rctlfail = 1;
		if (p->p_zone->zone_nlwps >= p->p_zone->zone_nlwps_ctl)
			if (rctl_test(rc_zone_nlwps, p->p_zone->zone_rctls, p,
			    1, 0) & RCT_DENY)
				rctlfail = 1;
	}
	if (rctlfail) {
		mutex_exit(&p->p_zone->zone_nlwps_lock);
		mutex_exit(&p->p_lock);
		atomic_inc_32(&p->p_zone->zone_ffcap);
		return (NULL);
	}
	p->p_task->tk_nlwps++;
	p->p_task->tk_proj->kpj_nlwps++;
	p->p_zone->zone_nlwps++;
	mutex_exit(&p->p_zone->zone_nlwps_lock);
	mutex_exit(&p->p_lock);

	curlwp = ttolwp(curthread);
	if (curlwp == NULL || (stksize = curlwp->lwp_childstksz) == 0)
		stksize = lwp_default_stksize;

	if (CLASS_KERNEL(cid)) {
		/*
		 * Since we are creating an LWP in an SSYS process, we do not
		 * inherit anything from the current thread's LWP.  We set
		 * stksize and lwpdata to 0 in order to let thread_create()
		 * allocate a regular kernel thread stack for this thread.
		 */
		curlwp = NULL;
		stksize = 0;
		lwpdata = NULL;

	} else if (stksize == lwp_default_stksize) {
		/*
		 * Try to reuse an <lwp,stack> from the LWP deathrow.
		 */
		if (lwp_reapcnt > 0) {
			mutex_enter(&reaplock);
			if ((t = lwp_deathrow) != NULL) {
				ASSERT(t->t_swap);
				lwp_deathrow = t->t_forw;
				lwp_reapcnt--;
				lwpdata = t->t_swap;
				lwp = t->t_lwp;
				ctx = t->t_ctx;
				t->t_swap = NULL;
				t->t_lwp = NULL;
				t->t_ctx = NULL;
				reapq_move_lq_to_tq(t);
			}
			mutex_exit(&reaplock);
			if (lwp != NULL) {
				lwp_stk_fini(lwp);
			}
			if (ctx != NULL) {
				freectx_ctx(ctx);
			}
		}
		if (lwpdata == NULL &&
		    (lwpdata = (caddr_t)segkp_cache_get(segkp_lwp)) == NULL) {
			mutex_enter(&p->p_lock);
			mutex_enter(&p->p_zone->zone_nlwps_lock);
			p->p_task->tk_nlwps--;
			p->p_task->tk_proj->kpj_nlwps--;
			p->p_zone->zone_nlwps--;
			mutex_exit(&p->p_zone->zone_nlwps_lock);
			mutex_exit(&p->p_lock);
			atomic_inc_32(&p->p_zone->zone_ffnomem);
			return (NULL);
		}
	} else {
		stksize = roundup(stksize, PAGESIZE);
		if ((lwpdata = (caddr_t)segkp_get(segkp, stksize,
		    (KPD_NOWAIT | KPD_HASREDZONE | KPD_LOCKED))) == NULL) {
			mutex_enter(&p->p_lock);
			mutex_enter(&p->p_zone->zone_nlwps_lock);
			p->p_task->tk_nlwps--;
			p->p_task->tk_proj->kpj_nlwps--;
			p->p_zone->zone_nlwps--;
			mutex_exit(&p->p_zone->zone_nlwps_lock);
			mutex_exit(&p->p_lock);
			atomic_inc_32(&p->p_zone->zone_ffnomem);
			return (NULL);
		}
	}

	/*
	 * Create a thread, initializing the stack pointer
	 */
	t = thread_create(lwpdata, stksize, NULL, NULL, 0, p, TS_STOPPED, pri);

	/*
	 * If a non-NULL stack base is passed in, thread_create() assumes
	 * that the stack might be statically allocated (as opposed to being
	 * allocated from segkp), and so it does not set t_swap.  Since
	 * the lwpdata was allocated from segkp, we must set t_swap to point
	 * to it ourselves.
	 *
	 * This would be less confusing if t_swap had a better name; it really
	 * indicates that the stack is allocated from segkp, regardless of
	 * whether or not it is swappable.
	 */
	if (lwpdata != NULL) {
		ASSERT(!CLASS_KERNEL(cid));
		ASSERT(t->t_swap == NULL);
		t->t_swap = lwpdata;	/* Start of page-able data */
	}

	/*
	 * If the stack and lwp can be reused, mark the thread as such.
	 * When we get to reapq_add() from resume_from_zombie(), these
	 * threads will go onto lwp_deathrow instead of thread_deathrow.
	 */
	if (!CLASS_KERNEL(cid) && stksize == lwp_default_stksize)
		t->t_flag |= T_LWPREUSE;

	if (lwp == NULL)
		lwp = kmem_cache_alloc(lwp_cache, KM_SLEEP);
	bzero(lwp, sizeof (*lwp));
	t->t_lwp = lwp;

	t->t_hold = *smask;
	lwp->lwp_thread = t;
	lwp->lwp_procp = p;
	lwp->lwp_sigaltstack.ss_flags = SS_DISABLE;
	if (curlwp != NULL && curlwp->lwp_childstksz != 0)
		lwp->lwp_childstksz = curlwp->lwp_childstksz;

	t->t_stk = lwp_stk_init(lwp, t->t_stk);
	thread_load(t, proc, arg, len);

	/*
	 * Allocate the SIGPROF buffer if ITIMER_REALPROF is in effect.
	 */
	if (p->p_rprof_cyclic != CYCLIC_NONE)
		t->t_rprof = kmem_zalloc(sizeof (struct rprof), KM_SLEEP);

	if (cid != NOCLASS)
		(void) CL_ALLOC(&bufp, cid, KM_SLEEP);

	/*
	 * Allocate an lwp directory entry for the new lwp.
	 */
	lep = kmem_zalloc(sizeof (*lep), KM_SLEEP);

	mutex_enter(&p->p_lock);
grow:
	/*
	 * Grow the lwp (thread) directory and lwpid hash table if necessary.
	 * A note on the growth algorithm:
	 *	The new lwp directory size is computed as:
	 *		new = 2 * old + 2
	 *	Starting with an initial size of 2 (see exec_common()),
	 *	this yields numbers that are a power of two minus 2:
	 *		2, 6, 14, 30, 62, 126, 254, 510, 1022, ...
	 *	The size of the lwpid hash table must be a power of two
	 *	and must be commensurate in size with the lwp directory
	 *	so that hash bucket chains remain short.  Therefore,
	 *	the lwpid hash table size is computed as:
	 *		hashsz = (dirsz + 2) / 2
	 *	which leads to these hash table sizes corresponding to
	 *	the above directory sizes:
	 *		2, 4, 8, 16, 32, 64, 128, 256, 512, ...
	 * A note on growing the hash table:
	 *	For performance reasons, code in lwp_unpark() does not
	 *	acquire curproc->p_lock when searching the hash table.
	 *	Rather, it calls lwp_hash_lookup_and_lock() which
	 *	acquires only the individual hash bucket lock, taking
	 *	care to deal with reallocation of the hash table
	 *	during the time it takes to acquire the lock.
	 *
	 *	This is sufficient to protect the integrity of the
	 *	hash table, but it requires us to acquire all of the
	 *	old hash bucket locks before growing the hash table
	 *	and to release them afterwards.  It also requires us
	 *	not to free the old hash table because some thread
	 *	in lwp_hash_lookup_and_lock() might still be trying
	 *	to acquire the old bucket lock.
	 *
	 *	So we adopt the tactic of keeping all of the retired
	 *	hash tables on a linked list, so they can be safely
	 *	freed when the process exits or execs.
	 *
	 *	Because the hash table grows in powers of two, the
	 *	total size of all of the hash tables will be slightly
	 *	less than twice the size of the largest hash table.
	 */
	while (p->p_lwpfree == NULL) {
		uint_t dirsz = p->p_lwpdir_sz;
		lwpdir_t *new_dir;
		uint_t new_dirsz;
		lwpdir_t *ldp;
		tidhash_t *new_hash;
		uint_t new_hashsz;

		mutex_exit(&p->p_lock);

		/*
		 * Prepare to remember the old p_tidhash for later
		 * kmem_free()ing when the process exits or execs.
		 */
		if (ret_tidhash == NULL)
			ret_tidhash = kmem_zalloc(sizeof (ret_tidhash_t),
			    KM_SLEEP);
		if (old_dir != NULL)
			kmem_free(old_dir, old_dirsz * sizeof (*old_dir));
		if (old_hash != NULL)
			kmem_free(old_hash, old_hashsz * sizeof (*old_hash));

		new_dirsz = 2 * dirsz + 2;
		new_dir = kmem_zalloc(new_dirsz * sizeof (lwpdir_t), KM_SLEEP);
		for (ldp = new_dir, i = 1; i < new_dirsz; i++, ldp++)
			ldp->ld_next = ldp + 1;
		new_hashsz = (new_dirsz + 2) / 2;
		new_hash = kmem_zalloc(new_hashsz * sizeof (tidhash_t),
		    KM_SLEEP);

		mutex_enter(&p->p_lock);
		if (p == curproc)
			prbarrier(p);

		if (dirsz != p->p_lwpdir_sz || p->p_lwpfree != NULL) {
			/*
			 * Someone else beat us to it or some lwp exited.
			 * Set up to free our memory and take a lap.
			 */
			old_dir = new_dir;
			old_dirsz = new_dirsz;
			old_hash = new_hash;
			old_hashsz = new_hashsz;
		} else {
			/*
			 * For the benefit of lwp_hash_lookup_and_lock(),
			 * called from lwp_unpark(), which searches the
			 * tid hash table without acquiring p->p_lock,
			 * we must acquire all of the tid hash table
			 * locks before replacing p->p_tidhash.
			 */
			old_hash = p->p_tidhash;
			old_hashsz = p->p_tidhash_sz;
			for (i = 0; i < old_hashsz; i++) {
				mutex_enter(&old_hash[i].th_lock);
				mutex_enter(&new_hash[i].th_lock);
			}

			/*
			 * We simply hash in all of the old directory entries.
			 * This works because the old directory has no empty
			 * slots and the new hash table starts out empty.
			 * This reproduces the original directory ordering
			 * (required for /proc directory semantics).
			 */
			old_dir = p->p_lwpdir;
			old_dirsz = p->p_lwpdir_sz;
			p->p_lwpdir = new_dir;
			p->p_lwpfree = new_dir;
			p->p_lwpdir_sz = new_dirsz;
			for (ldp = old_dir, i = 0; i < old_dirsz; i++, ldp++)
				lwp_hash_in(p, ldp->ld_entry,
				    new_hash, new_hashsz, 0);

			/*
			 * Remember the old hash table along with all
			 * of the previously-remembered hash tables.
			 * We will free them at process exit or exec.
			 */
			ret_tidhash->rth_tidhash = old_hash;
			ret_tidhash->rth_tidhash_sz = old_hashsz;
			ret_tidhash->rth_next = p->p_ret_tidhash;
			p->p_ret_tidhash = ret_tidhash;

			/*
			 * Now establish the new tid hash table.
			 * As soon as we assign p->p_tidhash,
			 * code in lwp_unpark() can start using it.
			 */
			membar_producer();
			p->p_tidhash = new_hash;

			/*
			 * It is necessary that p_tidhash reach global
			 * visibility before p_tidhash_sz.  Otherwise,
			 * code in lwp_hash_lookup_and_lock() could
			 * index into the old p_tidhash using the new
			 * p_tidhash_sz and thereby access invalid data.
			 */
			membar_producer();
			p->p_tidhash_sz = new_hashsz;

			/*
			 * Release the locks; allow lwp_unpark() to carry on.
			 */
			for (i = 0; i < old_hashsz; i++) {
				mutex_exit(&old_hash[i].th_lock);
				mutex_exit(&new_hash[i].th_lock);
			}

			/*
			 * Avoid freeing these objects below.
			 */
			ret_tidhash = NULL;
			old_hash = NULL;
			old_hashsz = 0;
		}
	}

	/*
	 * Block the process against /proc while we manipulate p->p_tlist,
	 * unless lwp_create() was called by /proc for the PCAGENT operation.
	 * We want to do this early enough so that we don't drop p->p_lock
	 * until the thread is put on the p->p_tlist.
	 */
	if (p == curproc) {
		prbarrier(p);
		/*
		 * If the current lwp has been requested to stop, do so now.
		 * Otherwise we have a race condition between /proc attempting
		 * to stop the process and this thread creating a new lwp
		 * that was not seen when the /proc PCSTOP request was issued.
		 * We rely on stop() to call prbarrier(p) before returning.
		 */
		while ((curthread->t_proc_flag & TP_PRSTOP) &&
		    !ttolwp(curthread)->lwp_nostop) {
			/*
			 * We called pool_barrier_enter() before calling
			 * here to lwp_create().  We have to call
			 * pool_barrier_exit() before stopping.
			 */
			pool_barrier_exit();
			prbarrier(p);
			stop(PR_REQUESTED, 0);
			/*
			 * And we have to repeat the call to
			 * pool_barrier_enter after stopping.
			 */
			pool_barrier_enter();
			prbarrier(p);
		}

		/*
		 * If process is exiting, there could be a race between
		 * the agent lwp creation and the new lwp currently being
		 * created.  So to prevent this race lwp creation is failed
		 * if the process is exiting.
		 */
		if (p->p_flag & (SEXITLWPS|SKILLED)) {
			err = 1;
			goto error;
		}

		/*
		 * Since we might have dropped p->p_lock, the
		 * lwp directory free list might have changed.
		 */
		if (p->p_lwpfree == NULL)
			goto grow;
	}

	kpreempt_disable();	/* can't grab cpu_lock here */

	/*
	 * Inherit processor and processor set bindings from curthread.
	 *
	 * For kernel LWPs, we do not inherit processor set bindings at
	 * process creation time (i.e. when p != curproc).  After the
	 * kernel process is created, any subsequent LWPs must be created
	 * by threads in the kernel process, at which point we *will*
	 * inherit processor set bindings.
	 */
	if (CLASS_KERNEL(cid) && p != curproc) {
		t->t_bind_cpu = binding = PBIND_NONE;
		t->t_cpupart = oldpart = &cp_default;
		t->t_bind_pset = PS_NONE;
		t->t_bindflag = (uchar_t)default_binding_mode;
	} else {
		binding = curthread->t_bind_cpu;
		t->t_bind_cpu = binding;
		oldpart = t->t_cpupart;
		t->t_cpupart = curthread->t_cpupart;
		t->t_bind_pset = curthread->t_bind_pset;
		t->t_bindflag = curthread->t_bindflag |
		    (uchar_t)default_binding_mode;
	}

	/*
	 * thread_create() initializes this thread's home lgroup to the root.
	 * Choose a more suitable lgroup, since this thread is associated
	 * with an lwp.
	 */
	ASSERT(oldpart != NULL);
	if (binding != PBIND_NONE && t->t_affinitycnt == 0) {
		t->t_bound_cpu = cpu[binding];
		if (t->t_lpl != t->t_bound_cpu->cpu_lpl)
			lgrp_move_thread(t, t->t_bound_cpu->cpu_lpl, 1);
	} else if (CLASS_KERNEL(cid)) {
		/*
		 * Kernel threads are always in the root lgrp.
		 */
		lgrp_move_thread(t,
		    &t->t_cpupart->cp_lgrploads[LGRP_ROOTID], 1);
	} else {
		lgrp_move_thread(t, lgrp_choose(t, t->t_cpupart), 1);
	}

	kpreempt_enable();

	/*
	 * make sure lpl points to our own partition
	 */
	ASSERT(t->t_lpl >= t->t_cpupart->cp_lgrploads);
	ASSERT(t->t_lpl < t->t_cpupart->cp_lgrploads +
	    t->t_cpupart->cp_nlgrploads);

	/*
	 * It is safe to point the thread to the new project without holding it
	 * since we're holding the target process' p_lock here and therefore
	 * we're guaranteed that it will not move to another project.
	 */
	newkpj = p->p_task->tk_proj;
	oldkpj = ttoproj(t);
	if (newkpj != oldkpj) {
		t->t_proj = newkpj;
		(void) project_hold(newkpj);
		project_rele(oldkpj);
	}

	if (cid != NOCLASS) {
		/*
		 * If the lwp is being created in the current process
		 * and matches the current thread's scheduling class,
		 * we should propagate the current thread's scheduling
		 * parameters by calling CL_FORK.  Otherwise just use
		 * the defaults by calling CL_ENTERCLASS.
		 */
		if (p != curproc || curthread->t_cid != cid) {
			err = CL_ENTERCLASS(t, cid, NULL, NULL, bufp);
			t->t_pri = pri;	/* CL_ENTERCLASS may have changed it */
			/*
			 * We don't call schedctl_set_cidpri(t) here
			 * because the schedctl data is not yet set
			 * up for the newly-created lwp.
			 */
		} else {
			t->t_clfuncs = &(sclass[cid].cl_funcs->thread);
			err = CL_FORK(curthread, t, bufp);
			t->t_cid = cid;
		}
		if (err) {
			atomic_inc_32(&p->p_zone->zone_ffmisc);
			goto error;
		} else {
			bufp = NULL;
		}
	}

	/*
	 * If we were given an lwpid then use it, else allocate one.
	 */
	if (lwpid != 0)
		t->t_tid = lwpid;
	else {
		/*
		 * lwp/thread id 0 is never valid; reserved for special checks.
		 * lwp/thread id 1 is reserved for the main thread.
		 * Start again at 2 when INT_MAX has been reached
		 * (id_t is a signed 32-bit integer).
		 */
		id_t prev_id = p->p_lwpid;	/* last allocated tid */

		do {			/* avoid lwpid duplication */
			if (p->p_lwpid == INT_MAX) {
				p->p_flag |= SLWPWRAP;
				p->p_lwpid = 1;
			}
			if ((t->t_tid = ++p->p_lwpid) == prev_id) {
				/*
				 * All lwpids are allocated; fail the request.
				 */
				err = 1;
				atomic_inc_32(&p->p_zone->zone_ffnoproc);
				goto error;
			}
			/*
			 * We only need to worry about colliding with an id
			 * that's already in use if this process has
			 * cycled through all available lwp ids.
			 */
			if ((p->p_flag & SLWPWRAP) == 0)
				break;
		} while (lwp_hash_lookup(p, t->t_tid) != NULL);
	}

	/*
	 * If this is a branded process, let the brand do any necessary lwp
	 * initialization.
	 */
	if (PROC_IS_BRANDED(p)) {
		if (BROP(p)->b_initlwp(lwp)) {
			err = 1;
			atomic_inc_32(&p->p_zone->zone_ffmisc);
			goto error;
		}
		branded = 1;
	}

	if (t->t_tid == 1) {
		kpreempt_disable();
		ASSERT(t->t_lpl != NULL);
		p->p_t1_lgrpid = t->t_lpl->lpl_lgrpid;
		kpreempt_enable();
		if (p->p_tr_lgrpid != LGRP_NONE &&
		    p->p_tr_lgrpid != p->p_t1_lgrpid) {
			lgrp_update_trthr_migrations(1);
		}
	}

	p->p_lwpcnt++;
	t->t_waitfor = -1;

	/*
	 * Turn microstate accounting on for thread if on for process.
	 */
	if (p->p_flag & SMSACCT)
		t->t_proc_flag |= TP_MSACCT;

	/*
	 * If the process has watchpoints, mark the new thread as such.
	 */
	if (pr_watch_active(p))
		watch_enable(t);

	/*
	 * The lwp is being created in the stopped state.
	 * We set all the necessary flags to indicate that fact here.
	 * We omit the TS_CREATE flag from t_schedflag so that the lwp
	 * cannot be set running until the caller is finished with it,
	 * even if lwp_continue() is called on it after we drop p->p_lock.
	 * When the caller is finished with the newly-created lwp,
	 * the caller must call lwp_create_done() to allow the lwp
	 * to be set running.  If the TP_HOLDLWP is left set, the
	 * lwp will suspend itself after reaching system call exit.
	 */
	init_mstate(t, LMS_STOPPED);
	t->t_proc_flag |= TP_HOLDLWP;
	t->t_schedflag |= (TS_ALLSTART & ~(TS_CSTART | TS_CREATE));
	t->t_whystop = PR_SUSPENDED;
	t->t_whatstop = SUSPEND_NORMAL;
	t->t_sig_check = 1;	/* ensure that TP_HOLDLWP is honored */

	/*
	 * Set system call processing flags in case tracing or profiling
	 * is set.  The first system call will evaluate these and turn
	 * them off if they aren't needed.
	 */
	t->t_pre_sys = 1;
	t->t_post_sys = 1;

	/*
	 * Insert the new thread into the list of all threads.
	 */
	if ((tx = p->p_tlist) == NULL) {
		t->t_back = t;
		t->t_forw = t;
		p->p_tlist = t;
	} else {
		t->t_forw = tx;
		t->t_back = tx->t_back;
		tx->t_back->t_forw = t;
		tx->t_back = t;
	}

	/*
	 * Insert the new lwp into an lwp directory slot position
	 * and into the lwpid hash table.
	 */
	lep->le_thread = t;
	lep->le_lwpid = t->t_tid;
	lep->le_start = t->t_start;
	lwp_hash_in(p, lep, p->p_tidhash, p->p_tidhash_sz, 1);

	if (state == TS_RUN) {
		/*
		 * We set the new lwp running immediately.
		 */
		t->t_proc_flag &= ~TP_HOLDLWP;
		lwp_create_done(t);
	}

error:
	if (err) {
		if (CLASS_KERNEL(cid)) {
			/*
			 * This should only happen if a system process runs
			 * out of lwpids, which shouldn't occur.
			 */
			panic("Failed to create a system LWP");
		}
		/*
		 * We have failed to create an lwp, so decrement the number
		 * of lwps in the task and let the lgroup load averages know
		 * that this thread isn't going to show up.
		 */
		kpreempt_disable();
		lgrp_move_thread(t, NULL, 1);
		kpreempt_enable();

		ASSERT(MUTEX_HELD(&p->p_lock));
		mutex_enter(&p->p_zone->zone_nlwps_lock);
		p->p_task->tk_nlwps--;
		p->p_task->tk_proj->kpj_nlwps--;
		p->p_zone->zone_nlwps--;
		mutex_exit(&p->p_zone->zone_nlwps_lock);
		if (cid != NOCLASS && bufp != NULL)
			CL_FREE(cid, bufp);

		if (branded)
			BROP(p)->b_freelwp(lwp);

		mutex_exit(&p->p_lock);
		t->t_state = TS_FREE;
		thread_rele(t);

		/*
		 * We need to remove t from the list of all threads
		 * because thread_exit()/lwp_exit() isn't called on t.
		 */
		mutex_enter(&pidlock);
		ASSERT(t != t->t_next);		/* t0 never exits */
		t->t_next->t_prev = t->t_prev;
		t->t_prev->t_next = t->t_next;
		mutex_exit(&pidlock);

		thread_free(t);
		kmem_free(lep, sizeof (*lep));
		lwp = NULL;
	} else {
		mutex_exit(&p->p_lock);
	}

	if (old_dir != NULL)
		kmem_free(old_dir, old_dirsz * sizeof (*old_dir));
	if (old_hash != NULL)
		kmem_free(old_hash, old_hashsz * sizeof (*old_hash));
	if (ret_tidhash != NULL)
		kmem_free(ret_tidhash, sizeof (ret_tidhash_t));

	DTRACE_PROC1(lwp__create, kthread_t *, t);
	return (lwp);
}

/*
 * lwp_create_done() is called by the caller of lwp_create() to set the
 * newly-created lwp running after the caller has finished manipulating it.
 */
void
lwp_create_done(kthread_t *t)
{
	proc_t *p = ttoproc(t);

	ASSERT(MUTEX_HELD(&p->p_lock));

	/*
	 * We set the TS_CREATE and TS_CSTART flags and call setrun_locked().
	 * (The absence of the TS_CREATE flag prevents the lwp from running
	 * until we are finished with it, even if lwp_continue() is called on
	 * it by some other lwp in the process or elsewhere in the kernel.)
	 */
	thread_lock(t);
	ASSERT(t->t_state == TS_STOPPED && !(t->t_schedflag & TS_CREATE));
	/*
	 * If TS_CSTART is set, lwp_continue(t) has been called and
	 * has already incremented p_lwprcnt; avoid doing this twice.
	 */
	if (!(t->t_schedflag & TS_CSTART))
		p->p_lwprcnt++;
	t->t_schedflag |= (TS_CSTART | TS_CREATE);
	setrun_locked(t);
	thread_unlock(t);
}
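
/*
 * Illustrative sketch (not from the original source): a typical caller
 * pairs lwp_create() and lwp_create_done(), taking p->p_lock around the
 * final step since lwp_create_done() asserts it is held:
 *
 *	klwp_t *lwp = lwp_create(func, arg, 0, p, TS_STOPPED, pri,
 *	    &curthread->t_hold, cid, 0);
 *	if (lwp != NULL) {
 *		... manipulate the stopped lwp ...
 *		mutex_enter(&p->p_lock);
 *		lwp_create_done(lwptot(lwp));
 *		mutex_exit(&p->p_lock);
 *	}
 *
 * func, arg, pri, and cid are placeholders supplied by the caller.
 */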

/*
 * Copy an LWP's active templates, and clear the latest contracts.
 */
void
lwp_ctmpl_copy(klwp_t *dst, klwp_t *src)
{
	int i;

	for (i = 0; i < ct_ntypes; i++) {
		dst->lwp_ct_active[i] = ctmpl_dup(src->lwp_ct_active[i]);
		dst->lwp_ct_latest[i] = NULL;
	}
}

/*
 * Clear an LWP's contract template state.
 */
void
lwp_ctmpl_clear(klwp_t *lwp)
{
	ct_template_t *tmpl;
	int i;

	for (i = 0; i < ct_ntypes; i++) {
		if ((tmpl = lwp->lwp_ct_active[i]) != NULL) {
			ctmpl_free(tmpl);
			lwp->lwp_ct_active[i] = NULL;
		}

		if (lwp->lwp_ct_latest[i] != NULL) {
			contract_rele(lwp->lwp_ct_latest[i]);
			lwp->lwp_ct_latest[i] = NULL;
		}
	}
}

/*
 * Individual lwp exit.
 * If this is the last lwp, exit the whole process.
 */
void
lwp_exit(void)
{
	kthread_t *t = curthread;
	klwp_t *lwp = ttolwp(t);
	proc_t *p = ttoproc(t);

	ASSERT(MUTEX_HELD(&p->p_lock));

	mutex_exit(&p->p_lock);

	tsd_exit();		/* free thread specific data */

	kcpc_passivate();	/* Clean up performance counter state */

	pollcleanup();

	if (t->t_door)
		door_slam();

	if (t->t_schedctl != NULL)
		schedctl_lwp_cleanup(t);

	if (t->t_upimutex != NULL)
		upimutex_cleanup();

	/*
	 * Perform any brand specific exit processing, then release any
	 * brand data associated with the lwp
	 */
	if (PROC_IS_BRANDED(p))
		BROP(p)->b_lwpexit(lwp);

	lwp_pcb_exit();

	mutex_enter(&p->p_lock);
	lwp_cleanup();

	/*
	 * When this process is dumping core, its lwps are held here
	 * until the core dump is finished.  Then exitlwps() is called
	 * again to release these lwps so that they can finish exiting.
	 */
	if (p->p_flag & SCOREDUMP)
		stop(PR_SUSPENDED, SUSPEND_NORMAL);

	/*
	 * Block the process against /proc now that we have really acquired
	 * p->p_lock (to decrement p_lwpcnt and manipulate p_tlist at least).
	 */
	prbarrier(p);

	/*
	 * Call proc_exit() if this is the last non-daemon lwp in the process.
	 */
	if (!(t->t_proc_flag & TP_DAEMON) &&
	    p->p_lwpcnt == p->p_lwpdaemon + 1) {
		mutex_exit(&p->p_lock);
		if (proc_exit(CLD_EXITED, 0) == 0) {
			/* Restarting init. */
			return;
		}

		/*
		 * proc_exit() returns a non-zero value when some other
		 * lwp got there first.  We just have to continue in
		 * lwp_exit().
		 */
		mutex_enter(&p->p_lock);
		ASSERT(curproc->p_flag & SEXITLWPS);
		prbarrier(p);
	}

	DTRACE_PROC(lwp__exit);

	/*
	 * If the lwp is a detached lwp or if the process is exiting,
	 * remove (lwp_hash_out()) the lwp from the lwp directory.
	 * Otherwise null out the lwp's le_thread pointer in the lwp
	 * directory so that other threads will see it as a zombie lwp.
	 */
	prlwpexit(t);		/* notify /proc */
	if (!(t->t_proc_flag & TP_TWAIT) || (p->p_flag & SEXITLWPS))
		lwp_hash_out(p, t->t_tid);
	else {
		ASSERT(!(t->t_proc_flag & TP_DAEMON));
		p->p_lwpdir[t->t_dslot].ld_entry->le_thread = NULL;
		p->p_zombcnt++;
		cv_broadcast(&p->p_lwpexit);
	}
	if (t->t_proc_flag & TP_DAEMON) {
		p->p_lwpdaemon--;
		t->t_proc_flag &= ~TP_DAEMON;
	}
	t->t_proc_flag &= ~TP_TWAIT;

	/*
	 * Maintain accurate lwp count for task.max-lwps resource control.
	 */
	mutex_enter(&p->p_zone->zone_nlwps_lock);
	p->p_task->tk_nlwps--;
	p->p_task->tk_proj->kpj_nlwps--;
	p->p_zone->zone_nlwps--;
	mutex_exit(&p->p_zone->zone_nlwps_lock);

	CL_EXIT(t);		/* tell the scheduler that t is exiting */
	ASSERT(p->p_lwpcnt != 0);
	p->p_lwpcnt--;

	/*
	 * If all remaining non-daemon lwps are waiting in lwp_wait(),
	 * wake them up so someone can return EDEADLK.
	 * (See the block comment preceding lwp_wait().)
	 */
	if (p->p_lwpcnt == p->p_lwpdaemon + (p->p_lwpwait - p->p_lwpdwait))
		cv_broadcast(&p->p_lwpexit);

	t->t_proc_flag |= TP_LWPEXIT;
	term_mstate(t);

#ifndef NPROBE
	/* Kernel probe */
	if (t->t_tnf_tpdp)
		tnf_thread_exit();
#endif /* NPROBE */

	t->t_forw->t_back = t->t_back;
	t->t_back->t_forw = t->t_forw;
	if (t == p->p_tlist)
		p->p_tlist = t->t_forw;

	/*
	 * Clean up the signal state.
	 */
	if (t->t_sigqueue != NULL)
		sigdelq(p, t, 0);
	if (lwp->lwp_curinfo != NULL) {
		siginfofree(lwp->lwp_curinfo);
		lwp->lwp_curinfo = NULL;
	}

	/*
	 * If we have spymaster information (that is, if we're an agent LWP),
	 * free that now.
	 */
	if (lwp->lwp_spymaster != NULL) {
		kmem_free(lwp->lwp_spymaster, sizeof (psinfo_t));
		lwp->lwp_spymaster = NULL;
	}

	thread_rele(t);

	/*
	 * Terminated lwps are associated with process zero and are put onto
	 * death-row by resume().  Avoid preemption after resetting t->t_procp.
	 */
	t->t_preempt++;

	if (t->t_ctx != NULL)
		exitctx(t);
	if (p->p_pctx != NULL)
		exitpctx(p);

	t->t_procp = &p0;

	/*
	 * Notify the HAT about the change of address space
	 */
	hat_thread_exit(t);
	/*
	 * When this is the last running lwp in this process and some lwp is
	 * waiting for this condition to become true, or this thread was being
	 * suspended, then the waiting lwp is awakened.
	 *
	 * Also, if the process is exiting, we may have a thread waiting in
	 * exitlwps() that needs to be notified.
	 */
	if (--p->p_lwprcnt == 0 || (t->t_proc_flag & TP_HOLDLWP) ||
	    (p->p_flag & SEXITLWPS))
		cv_broadcast(&p->p_holdlwps);

	/*
	 * Need to drop p_lock so we can reacquire pidlock.
	 */
	mutex_exit(&p->p_lock);
	mutex_enter(&pidlock);

	ASSERT(t != t->t_next);		/* t0 never exits */
	t->t_next->t_prev = t->t_prev;
	t->t_prev->t_next = t->t_next;
	cv_broadcast(&t->t_joincv);	/* wake up anyone in thread_join */
	mutex_exit(&pidlock);

	t->t_state = TS_ZOMB;
	swtch_from_zombie();
	/* never returns */
}
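
/*
 * Illustrative note (not from the original source): lwp_exit() must be
 * entered with the exiting thread's own p->p_lock held, and it never
 * returns; control leaves through swtch_from_zombie().  A minimal
 * sketch of a caller:
 *
 *	mutex_enter(&curproc->p_lock);
 *	lwp_exit();	(does not return)
 */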

/*
 * Cleanup function for an exiting lwp.
 * Called both from lwp_exit() and from proc_exit().
 * p->p_lock is repeatedly released and grabbed in this function.
 */
void
lwp_cleanup(void)
{
	kthread_t *t = curthread;
	proc_t *p = ttoproc(t);

	ASSERT(MUTEX_HELD(&p->p_lock));

	/* untimeout any lwp-bound realtime timers */
	if (p->p_itimer != NULL)
		timer_lwpexit();

	/*
	 * If this is the /proc agent lwp that is exiting, readjust p_lwpid
	 * so it appears that the agent never existed, and clear p_agenttp.
	 */
	if (t == p->p_agenttp) {
		ASSERT(t->t_tid == p->p_lwpid);
		p->p_lwpid--;
		p->p_agenttp = NULL;
	}

	/*
	 * Do lgroup bookkeeping to account for thread exiting.
	 */
	kpreempt_disable();
	lgrp_move_thread(t, NULL, 1);
	if (t->t_tid == 1) {
		p->p_t1_lgrpid = LGRP_NONE;
	}
	kpreempt_enable();

	lwp_ctmpl_clear(ttolwp(t));
}

int
lwp_suspend(kthread_t *t)
{
	int tid;
	proc_t *p = ttoproc(t);

	ASSERT(MUTEX_HELD(&p->p_lock));

	/*
	 * Set the thread's TP_HOLDLWP flag so it will stop in holdlwp().
	 * If an lwp is stopping itself, there is no need to wait.
	 */
top:
	t->t_proc_flag |= TP_HOLDLWP;
	if (t == curthread) {
		t->t_sig_check = 1;
	} else {
		/*
		 * Make sure the lwp stops promptly.
		 */
		thread_lock(t);
		t->t_sig_check = 1;
		/*
		 * XXX Should use virtual stop like /proc does instead of
		 * XXX waking the thread to get it to stop.
		 */
		if (ISWAKEABLE(t) || ISWAITING(t)) {
			setrun_locked(t);
		} else if (t->t_state == TS_ONPROC && t->t_cpu != CPU) {
			poke_cpu(t->t_cpu->cpu_id);
		}

		tid = t->t_tid;		/* remember thread ID */
		/*
		 * Wait for lwp to stop
		 */
		while (!SUSPENDED(t)) {
			/*
			 * Drop the thread lock before waiting and reacquire it
			 * afterwards, so the thread can change its t_state
			 * field.
			 */
			thread_unlock(t);

			/*
			 * Check if aborted by exitlwps().
			 */
			if (p->p_flag & SEXITLWPS)
				lwp_exit();

			/*
			 * Cooperate with jobcontrol signals and /proc stopping
			 * by calling cv_wait_sig() to wait for the target
			 * lwp to stop.  Just using cv_wait() can lead to
			 * deadlock because, if some other lwp has stopped
			 * by either of these mechanisms, then p_lwprcnt will
			 * never become zero if we do a cv_wait().
			 */
			if (!cv_wait_sig(&p->p_holdlwps, &p->p_lock))
				return (EINTR);

			/*
			 * Check to see if thread died while we were
			 * waiting for it to suspend.
			 */
			if (idtot(p, tid) == NULL)
				return (ESRCH);

			thread_lock(t);
			/*
			 * If the TP_HOLDLWP flag went away, lwp_continue()
			 * or vfork() must have been called while we were
			 * waiting, so start over again.
			 */
			if ((t->t_proc_flag & TP_HOLDLWP) == 0) {
				thread_unlock(t);
				goto top;
			}
		}
		thread_unlock(t);
	}
	return (0);
}

/*
 * continue a lwp that's been stopped by lwp_suspend().
 */
void
lwp_continue(kthread_t *t)
{
	proc_t *p = ttoproc(t);
	int was_suspended = t->t_proc_flag & TP_HOLDLWP;

	ASSERT(MUTEX_HELD(&p->p_lock));

	t->t_proc_flag &= ~TP_HOLDLWP;
	thread_lock(t);
	if (SUSPENDED(t) &&
	    !(p->p_flag & (SHOLDFORK | SHOLDFORK1 | SHOLDWATCH))) {
		p->p_lwprcnt++;
		t->t_schedflag |= TS_CSTART;
		setrun_locked(t);
	}
	thread_unlock(t);
	/*
	 * Wakeup anyone waiting for this thread to be suspended
	 */
	if (was_suspended)
		cv_broadcast(&p->p_holdlwps);
}
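
/*
 * Illustrative sketch (not from the original source): lwp_suspend() and
 * lwp_continue() are both called with p->p_lock held, e.g.
 *
 *	mutex_enter(&p->p_lock);
 *	error = lwp_suspend(t);
 *	...
 *	lwp_continue(t);
 *	mutex_exit(&p->p_lock);
 *
 * where t is a thread in process p; lwp_suspend() may return EINTR or
 * ESRCH, which a real caller must handle.
 */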

/*
 * ********************************
 *  Miscellaneous lwp routines	  *
 * ********************************
 */
/*
 * When a process is undergoing a forkall(), its p_flag is set to SHOLDFORK.
 * This will cause the process's lwps to stop at a hold point.  A hold
 * point is where a kernel thread has a flat stack.  This is at the
 * return from a system call and at the return from a user level trap.
 *
 * When a process is undergoing a fork1() or vfork(), its p_flag is set to
 * SHOLDFORK1.  This will cause the process's lwps to stop at a modified
 * hold point.  The lwps in the process are not being cloned, so they
 * are held at the usual hold points and also within issig_forreal().
 * This has the side-effect that their system calls do not return
 * showing EINTR.
 *
 * An lwp can also be held.  This is identified by the TP_HOLDLWP flag on
 * the thread.  The TP_HOLDLWP flag is set in lwp_suspend(), where the active
 * lwp is waiting for the target lwp to be stopped.
 */
void
holdlwp(void)
{
	proc_t *p = curproc;
	kthread_t *t = curthread;

	mutex_enter(&p->p_lock);
	/*
	 * Don't terminate immediately if the process is dumping core.
	 * Once the process has dumped core, all lwps are terminated.
	 */
	if (!(p->p_flag & SCOREDUMP)) {
		if ((p->p_flag & SEXITLWPS) || (t->t_proc_flag & TP_EXITLWP))
			lwp_exit();
	}
	if (!(ISHOLD(p)) && !(p->p_flag & (SHOLDFORK1 | SHOLDWATCH))) {
		mutex_exit(&p->p_lock);
		return;
	}
	/*
	 * stop() decrements p->p_lwprcnt and cv_signal()s &p->p_holdlwps
	 * when p->p_lwprcnt becomes zero.
	 */
	stop(PR_SUSPENDED, SUSPEND_NORMAL);
	if (p->p_flag & SEXITLWPS)
		lwp_exit();
	mutex_exit(&p->p_lock);
}

/*
 * Have all lwps within the process hold at a point where they are
 * cloneable (SHOLDFORK) or just safe w.r.t. fork1 (SHOLDFORK1).
 */
int
holdlwps(int holdflag)
{
	proc_t *p = curproc;

	ASSERT(holdflag == SHOLDFORK || holdflag == SHOLDFORK1);
	mutex_enter(&p->p_lock);
	schedctl_finish_sigblock(curthread);
again:
	while (p->p_flag & (SEXITLWPS | SHOLDFORK | SHOLDFORK1 | SHOLDWATCH)) {
		/*
		 * If another lwp is doing a forkall() or proc_exit(), bail out.
		 */
		if (p->p_flag & (SEXITLWPS | SHOLDFORK)) {
			mutex_exit(&p->p_lock);
			return (0);
		}
		/*
		 * Another lwp is doing a fork1() or is undergoing
		 * watchpoint activity.  We hold here for it to complete.
		 */
		stop(PR_SUSPENDED, SUSPEND_NORMAL);
	}
	p->p_flag |= holdflag;
	pokelwps(p);
	--p->p_lwprcnt;
	/*
	 * Wait for the process to become quiescent (p->p_lwprcnt == 0).
	 */
	while (p->p_lwprcnt > 0) {
		/*
		 * Check if aborted by exitlwps().
		 * Also check if SHOLDWATCH is set; it takes precedence.
		 */
		if (p->p_flag & (SEXITLWPS | SHOLDWATCH)) {
			p->p_lwprcnt++;
			p->p_flag &= ~holdflag;
			cv_broadcast(&p->p_holdlwps);
			goto again;
		}
		/*
		 * Cooperate with jobcontrol signals and /proc stopping.
		 * If some other lwp has stopped by either of these
		 * mechanisms, then p_lwprcnt will never become zero
		 * and the process will appear deadlocked unless we
		 * stop here in sympathy with the other lwp before
		 * doing the cv_wait() below.
		 *
		 * If the other lwp stops after we do the cv_wait(), it
		 * will wake us up to loop around and do the sympathy stop.
		 *
		 * Since stop() drops p->p_lock, we must start from
		 * the top again on returning from stop().
		 */
		if (p->p_stopsig | (curthread->t_proc_flag & TP_PRSTOP)) {
			int whystop = p->p_stopsig? PR_JOBCONTROL :
			    PR_REQUESTED;
			p->p_lwprcnt++;
			p->p_flag &= ~holdflag;
			stop(whystop, p->p_stopsig);
			goto again;
		}
		cv_wait(&p->p_holdlwps, &p->p_lock);
	}
	p->p_lwprcnt++;
	p->p_flag &= ~holdflag;
	mutex_exit(&p->p_lock);
	return (1);
}

/*
 * See comments for holdwatch(), below.
 */
static int
holdcheck(int clearflags)
{
	proc_t *p = curproc;

	/*
	 * If we are trying to exit, that takes precedence over anything else.
	 */
	if (p->p_flag & SEXITLWPS) {
		p->p_lwprcnt++;
		p->p_flag &= ~clearflags;
		lwp_exit();
	}

	/*
	 * If another thread is calling fork1(), stop the current thread so the
	 * other can complete.
	 */
	if (p->p_flag & SHOLDFORK1) {
		p->p_lwprcnt++;
		stop(PR_SUSPENDED, SUSPEND_NORMAL);
		if (p->p_flag & SEXITLWPS) {
			p->p_flag &= ~clearflags;
			lwp_exit();
		}
		return (-1);
	}

	/*
	 * If another thread is calling fork(), then indicate we are doing
	 * watchpoint activity.  This will cause holdlwps() above to stop the
	 * forking thread, at which point we can continue with watchpoint
	 * activity.
	 */
	if (p->p_flag & SHOLDFORK) {
		p->p_lwprcnt++;
		while (p->p_flag & SHOLDFORK) {
			p->p_flag |= SHOLDWATCH;
			cv_broadcast(&p->p_holdlwps);
			cv_wait(&p->p_holdlwps, &p->p_lock);
			p->p_flag &= ~SHOLDWATCH;
		}
		return (-1);
	}

	return (0);
}

/*
 * Stop all lwps within the process, holding themselves in the kernel while the
 * active lwp undergoes watchpoint activity.  This is more complicated than
 * expected because stop() relies on calling holdwatch() in order to copyin
 * data from the user's address space.  A double barrier is used to prevent an
 * infinite loop.
 *
 *	o The first thread into holdwatch() is the 'master' thread and does
 *	  the following:
 *
 *		- Sets SHOLDWATCH on the current process
 *		- Sets TP_WATCHSTOP on the current thread
 *		- Waits for all threads to be either stopped or have
 *		  TP_WATCHSTOP set.
 *		- Sets the SWATCHOK flag on the process
 *		- Unsets TP_WATCHSTOP
 *		- Waits for the other threads to completely stop
 *		- Unsets SWATCHOK
 *
 *	o If SHOLDWATCH is already set when we enter this function, then
 *	  another thread is already trying to stop this thread.  This 'slave'
 *	  thread does the following:
 *
 *		- Sets TP_WATCHSTOP on the current thread
 *		- Waits for SWATCHOK flag to be set
 *		- Calls stop()
 *
 *	o If SWATCHOK is set on the process, then this function immediately
 *	  returns, as we must have been called via stop().
 *
 * In addition, there are other flags that take precedence over SHOLDWATCH:
 *
 *	o If SEXITLWPS is set, exit immediately.
 *
 *	o If SHOLDFORK1 is set, wait for fork1() to complete.
 *
 *	o If SHOLDFORK is set, then watchpoint activity takes precedence.  In
 *	  this case, set SHOLDWATCH, signalling the forking thread to stop
 *	  first.
 *
 *	o If the process is being stopped via /proc (TP_PRSTOP is set), then
 *	  we stop the current thread.
 *
 * Returns 0 if all threads have been quiesced.  Returns non-zero if not all
 * threads were stopped, or the list of watched pages has changed.
 */
int
holdwatch(void)
{
	proc_t *p = curproc;
	kthread_t *t = curthread;
	int ret = 0;

	mutex_enter(&p->p_lock);

	p->p_lwprcnt--;

	/*
	 * Check for bail-out conditions as outlined above.
	 */
	if (holdcheck(0) != 0) {
		mutex_exit(&p->p_lock);
		return (-1);
	}

	if (!(p->p_flag & SHOLDWATCH)) {
		/*
		 * We are the master watchpoint thread.  Set SHOLDWATCH and
		 * poke the other threads.
		 */
		p->p_flag |= SHOLDWATCH;
		pokelwps(p);

		/*
		 * Wait for all threads to be stopped or have TP_WATCHSTOP set.
		 */
		while (pr_allstopped(p, 1) > 0) {
			if (holdcheck(SHOLDWATCH) != 0) {
				p->p_flag &= ~SHOLDWATCH;
				mutex_exit(&p->p_lock);
				return (-1);
			}

			cv_wait(&p->p_holdlwps, &p->p_lock);
		}

		/*
		 * All threads are now stopped or in the process of stopping.
		 * Set SWATCHOK and let them stop completely.
		 */
		p->p_flag |= SWATCHOK;
		t->t_proc_flag &= ~TP_WATCHSTOP;
		cv_broadcast(&p->p_holdlwps);

		while (pr_allstopped(p, 0) > 0) {
			/*
			 * At first glance, it may appear that we don't need a
			 * call to holdcheck() here.  But if the process gets a
			 * SIGKILL signal, one of our stopped threads may have
			 * been awakened and is waiting in exitlwps(), which
			 * takes precedence over watchpoints.
			 */
			if (holdcheck(SHOLDWATCH | SWATCHOK) != 0) {
				p->p_flag &= ~(SHOLDWATCH | SWATCHOK);
				mutex_exit(&p->p_lock);
				return (-1);
			}

			cv_wait(&p->p_holdlwps, &p->p_lock);
		}

		/*
		 * All threads are now completely stopped.
		 */
		p->p_flag &= ~SWATCHOK;
		p->p_flag &= ~SHOLDWATCH;
		p->p_lwprcnt++;

	} else if (!(p->p_flag & SWATCHOK)) {

		/*
		 * SHOLDWATCH is set, so another thread is trying to do
		 * watchpoint activity.  Indicate this thread is stopping, and
		 * wait for the OK from the master thread.
		 */
		t->t_proc_flag |= TP_WATCHSTOP;
		cv_broadcast(&p->p_holdlwps);

		while (!(p->p_flag & SWATCHOK)) {
			if (holdcheck(0) != 0) {
				t->t_proc_flag &= ~TP_WATCHSTOP;
				mutex_exit(&p->p_lock);
				return (-1);
			}

			cv_wait(&p->p_holdlwps, &p->p_lock);
		}

		/*
		 * Once the master thread has given the OK, this thread can
		 * actually call stop().
		 */
		t->t_proc_flag &= ~TP_WATCHSTOP;
		p->p_lwprcnt++;

		stop(PR_SUSPENDED, SUSPEND_NORMAL);

		/*
		 * It's not OK to do watchpoint activity, notify caller to
		 * retry.
		 */
		ret = -1;

	} else {

		/*
		 * The only way we can hit the case where SHOLDWATCH is set and
		 * SWATCHOK is set is if we are triggering this from within a
		 * stop() call.  Assert that this is the case.
		 */

		ASSERT(t->t_proc_flag & TP_STOPPING);
		p->p_lwprcnt++;
	}

	mutex_exit(&p->p_lock);

	return (ret);
}

/*
 * force all interruptible lwps to trap into the kernel.
 */
void
pokelwps(proc_t *p)
{
	kthread_t *t;

	ASSERT(MUTEX_HELD(&p->p_lock));

	t = p->p_tlist;
	do {
		if (t == curthread)
			continue;
		thread_lock(t);
		aston(t);	/* make thread trap or do post_syscall */
		if (ISWAKEABLE(t) || ISWAITING(t)) {
			setrun_locked(t);
		} else if (t->t_state == TS_STOPPED) {
			/*
			 * Ensure that proc_exit() is not blocked by lwps
			 * that were stopped via jobcontrol or /proc.
			 */
			if (p->p_flag & SEXITLWPS) {
				p->p_stopsig = 0;
				t->t_schedflag |= (TS_XSTART | TS_PSTART);
				setrun_locked(t);
			}
			/*
			 * If we are holding lwps for a forkall(),
			 * force lwps that have been suspended via
			 * lwp_suspend() and are suspended inside
			 * of a system call to proceed to their
			 * holdlwp() points where they are clonable.
			 */
			if ((p->p_flag & SHOLDFORK) && SUSPENDED(t)) {
				if ((t->t_schedflag & TS_CSTART) == 0) {
					p->p_lwprcnt++;
					t->t_schedflag |= TS_CSTART;
					setrun_locked(t);
				}
			}
		} else if (t->t_state == TS_ONPROC) {
			if (t->t_cpu != CPU)
				poke_cpu(t->t_cpu->cpu_id);
		}
		thread_unlock(t);
	} while ((t = t->t_forw) != p->p_tlist);
}

/*
 * undo the effects of holdlwps() or holdwatch().
 */
void
continuelwps(proc_t *p)
{
	kthread_t *t;

	/*
	 * If this flag is set, then the original holdwatch() didn't actually
	 * stop the process.  See comments for holdwatch().
	 */
	if (p->p_flag & SWATCHOK) {
		ASSERT(curthread->t_proc_flag & TP_STOPPING);
		return;
	}

	ASSERT(MUTEX_HELD(&p->p_lock));
	ASSERT((p->p_flag & (SHOLDFORK | SHOLDFORK1 | SHOLDWATCH)) == 0);

	t = p->p_tlist;
	do {
		thread_lock(t);		/* SUSPENDED looks at t_schedflag */
		if (SUSPENDED(t) && !(t->t_proc_flag & TP_HOLDLWP)) {
			p->p_lwprcnt++;
			t->t_schedflag |= TS_CSTART;
			setrun_locked(t);
		}
		thread_unlock(t);
	} while ((t = t->t_forw) != p->p_tlist);
}

/*
 * Force all other LWPs in the current process other than the caller to exit,
 * and then cv_wait() on p_holdlwps for them to exit.  The exitlwps() function
 * is typically used in these situations:
 *
 *   (a) prior to an exec() system call
 *   (b) prior to dumping a core file
 *   (c) prior to a uadmin() shutdown
 *
 * If the 'coredump' flag is set, other LWPs are quiesced but not destroyed.
 * Multiple threads in the process can call this function at one time by
 * triggering execs or core dumps simultaneously, so the SEXITLWPS bit is used
 * to declare one particular thread the winner who gets to kill the others.
 * If a thread wins the exitlwps() dance, zero is returned; otherwise an
 * appropriate errno value is returned to caller for its system call to return.
 */
int
exitlwps(int coredump)
{
	proc_t *p = curproc;
	int heldcnt;

	if (curthread->t_door)
		door_slam();
	if (p->p_door_list)
		door_revoke_all();
	if (curthread->t_schedctl != NULL)
		schedctl_lwp_cleanup(curthread);

	/*
	 * Ensure that before starting to wait for other lwps to exit,
	 * cleanup all upimutexes held by curthread.  Otherwise, some other
	 * lwp could be waiting (uninterruptibly) for a upimutex held by
	 * curthread, and the call to pokelwps() below would deadlock.
	 * Even if a blocked upimutex_lock is made interruptible,
	 * curthread's upimutexes need to be unlocked: do it here.
	 */
	if (curthread->t_upimutex != NULL)
		upimutex_cleanup();

	/*
	 * Grab p_lock in order to check and set SEXITLWPS to declare a winner.
	 * We must also block any further /proc access from this point forward.
	 */
	mutex_enter(&p->p_lock);
	prbarrier(p);

	if (p->p_flag & SEXITLWPS) {
		mutex_exit(&p->p_lock);
		aston(curthread);	/* force a trip through post_syscall */
		return (set_errno(EINTR));
	}

	p->p_flag |= SEXITLWPS;
	if (coredump)		/* tell other lwps to stop, not exit */
		p->p_flag |= SCOREDUMP;

	/*
	 * Give precedence to exitlwps() if a holdlwps() is
	 * in progress. The lwp doing the holdlwps() operation
	 * is aborted when it is awakened.
	 */
	while (p->p_flag & (SHOLDFORK | SHOLDFORK1 | SHOLDWATCH)) {
		cv_broadcast(&p->p_holdlwps);
		cv_wait(&p->p_holdlwps, &p->p_lock);
		prbarrier(p);
	}
	p->p_flag |= SHOLDFORK;
	pokelwps(p);

	/*
	 * Wait for process to become quiescent.
	 */
	--p->p_lwprcnt;
	while (p->p_lwprcnt > 0) {
		cv_wait(&p->p_holdlwps, &p->p_lock);
		prbarrier(p);
	}
	p->p_lwprcnt++;
	ASSERT(p->p_lwprcnt == 1);

	/*
	 * The SCOREDUMP flag puts the process into a quiescent
	 * state.  The process's lwps remain attached to this
	 * process until exitlwps() is called again without the
	 * 'coredump' flag set, then the lwps are terminated
	 * and the process can exit.
	 */
	if (coredump) {
		p->p_flag &= ~(SCOREDUMP | SHOLDFORK | SEXITLWPS);
		goto out;
	}

	/*
	 * Determine if there are any lwps left dangling in
	 * the stopped state.  This happens when exitlwps()
	 * aborts a holdlwps() operation.
	 */
	p->p_flag &= ~SHOLDFORK;
	if ((heldcnt = p->p_lwpcnt) > 1) {
		kthread_t *t;
		for (t = curthread->t_forw; --heldcnt > 0; t = t->t_forw) {
			t->t_proc_flag &= ~TP_TWAIT;
			lwp_continue(t);
		}
	}

	/*
	 * Wait for all other lwps to exit.
	 */
	--p->p_lwprcnt;
	while (p->p_lwpcnt > 1) {
		cv_wait(&p->p_holdlwps, &p->p_lock);
		prbarrier(p);
	}
	++p->p_lwprcnt;
	ASSERT(p->p_lwpcnt == 1 && p->p_lwprcnt == 1);

	p->p_flag &= ~SEXITLWPS;
	curthread->t_proc_flag &= ~TP_TWAIT;

out:
	if (!coredump && p->p_zombcnt) {	/* cleanup the zombie lwps */
		lwpdir_t *ldp;
		lwpent_t *lep;
		int i;

		for (ldp = p->p_lwpdir, i = 0; i < p->p_lwpdir_sz; i++, ldp++) {
			lep = ldp->ld_entry;
			if (lep != NULL && lep->le_thread != curthread) {
				ASSERT(lep->le_thread == NULL);
				p->p_zombcnt--;
				lwp_hash_out(p, lep->le_lwpid);
			}
		}
		ASSERT(p->p_zombcnt == 0);
	}

	/*
	 * If some other LWP in the process wanted us to suspend ourself,
	 * then we will not do it.  The other LWP is now terminated and
	 * no one will ever continue us again if we suspend ourself.
	 */
	curthread->t_proc_flag &= ~TP_HOLDLWP;
	p->p_flag &= ~(SHOLDFORK | SHOLDFORK1 | SHOLDWATCH | SLWPWRAP);
	mutex_exit(&p->p_lock);
	return (0);
}
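
/*
 * Illustrative sketch (not from the original source): exec() and core
 * dump paths use exitlwps() roughly as follows.
 *
 *	if (exitlwps(0) != 0)
 *		return (EINTR);	(lost the SEXITLWPS race; fail the syscall)
 *
 * A core dump instead calls exitlwps(1) to quiesce the lwps, dumps core,
 * and later calls exitlwps(0) to let them finish exiting.
 */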

/*
 * duplicate a lwp.
 */
klwp_t *
forklwp(klwp_t *lwp, proc_t *cp, id_t lwpid)
{
	klwp_t *clwp;
	void *tregs, *tfpu;
	kthread_t *t = lwptot(lwp);
	kthread_t *ct;
	proc_t *p = lwptoproc(lwp);
	int cid;
	void *bufp;
	void *brand_data;
	int val;

	ASSERT(p == curproc);
	ASSERT(t == curthread || (SUSPENDED(t) && lwp->lwp_asleep == 0));

	if (t == curthread)
		/* copy args out of registers first */
		(void) save_syscall_args();

	clwp = lwp_create(cp->p_lwpcnt == 0 ? lwp_rtt_initial : lwp_rtt,
	    NULL, 0, cp, TS_STOPPED, t->t_pri, &t->t_hold, NOCLASS, lwpid);
	if (clwp == NULL)
		return (NULL);

	/*
	 * most of the parent's lwp can be copied to its duplicate,
	 * except for the fields that are unique to each lwp, like
	 * lwp_thread, lwp_procp, lwp_regs, and lwp_ap.
	 */
	ct = clwp->lwp_thread;
	tregs = clwp->lwp_regs;
	tfpu = clwp->lwp_fpu;
	brand_data = clwp->lwp_brand;

	/*
	 * Copy parent lwp to child lwp.  Hold child's p_lock to prevent
	 * mstate_aggr_state() from reading stale mstate entries copied
	 * from lwp to clwp.
	 */
	mutex_enter(&cp->p_lock);
	*clwp = *lwp;

	/* clear microstate and resource usage data in new lwp */
	init_mstate(ct, LMS_STOPPED);
	bzero(&clwp->lwp_ru, sizeof (clwp->lwp_ru));
	mutex_exit(&cp->p_lock);

	/* fix up child's lwp */

	clwp->lwp_pcb.pcb_flags = 0;
	clwp->lwp_cursig = 0;
	clwp->lwp_extsig = 0;
	clwp->lwp_curinfo = NULL;
	clwp->lwp_thread = ct;
	ct->t_sysnum = t->t_sysnum;
	clwp->lwp_regs = tregs;
	clwp->lwp_fpu = tfpu;
	clwp->lwp_brand = brand_data;
	clwp->lwp_ap = clwp->lwp_arg;
	clwp->lwp_procp = cp;
	bzero(clwp->lwp_timer, sizeof (clwp->lwp_timer));
	clwp->lwp_lastfault = 0;
	clwp->lwp_lastfaddr = 0;

	/* copy parent's struct regs to child. */
	lwp_forkregs(lwp, clwp);

	/*
	 * Fork thread context ops, if any.
	 */
	if (t->t_ctx)
		forkctx(t, ct);

	/* fix door state in the child */
	if (t->t_door)
		door_fork(t, ct);

	/* copy current contract templates, clear latest contracts */
	lwp_ctmpl_copy(clwp, lwp);

	mutex_enter(&cp->p_lock);
	/* lwp_create() set the TP_HOLDLWP flag */
	if (!(t->t_proc_flag & TP_HOLDLWP))
		ct->t_proc_flag &= ~TP_HOLDLWP;
	if (cp->p_flag & SMSACCT)
		ct->t_proc_flag |= TP_MSACCT;
	mutex_exit(&cp->p_lock);

	/* Allow brand to propagate brand-specific state */
	if (PROC_IS_BRANDED(p))
		BROP(p)->b_forklwp(lwp, clwp);

retry:
	cid = t->t_cid;

	val = CL_ALLOC(&bufp, cid, KM_SLEEP);
	ASSERT(val == 0);

	mutex_enter(&p->p_lock);
	if (cid != t->t_cid) {
		/*
		 * Someone just changed this thread's scheduling class,
		 * so try pre-allocating the buffer again.  Hopefully we
		 * don't hit this often.
		 */
		mutex_exit(&p->p_lock);
		CL_FREE(cid, bufp);
		goto retry;
	}

	ct->t_unpark = t->t_unpark;
	ct->t_clfuncs = t->t_clfuncs;
	CL_FORK(t, ct, bufp);
	ct->t_cid = t->t_cid;	/* after data allocated so prgetpsinfo works */
	mutex_exit(&p->p_lock);

	return (clwp);
}

/*
 * Add a new lwp entry to the lwp directory and to the lwpid hash table.
 */
void
lwp_hash_in(proc_t *p, lwpent_t *lep, tidhash_t *tidhash, uint_t tidhash_sz,
    int do_lock)
{
	tidhash_t *thp = &tidhash[TIDHASH(lep->le_lwpid, tidhash_sz)];
	lwpdir_t **ldpp;
	lwpdir_t *ldp;
	kthread_t *t;

	/*
	 * Allocate a directory element from the free list.
	 * Code elsewhere guarantees a free slot.
	 */
	ldp = p->p_lwpfree;
	p->p_lwpfree = ldp->ld_next;
	ASSERT(ldp->ld_entry == NULL);
	ldp->ld_entry = lep;

	if (do_lock)
		mutex_enter(&thp->th_lock);

	/*
	 * Insert it into the lwpid hash table.
	 */
	ldpp = &thp->th_list;
	ldp->ld_next = *ldpp;
	*ldpp = ldp;

	/*
	 * Set the active thread's directory slot entry.
	 */
	if ((t = lep->le_thread) != NULL) {
		ASSERT(lep->le_lwpid == t->t_tid);
		t->t_dslot = (int)(ldp - p->p_lwpdir);
	}

	if (do_lock)
		mutex_exit(&thp->th_lock);
}

/*
 * Remove an lwp from the lwpid hash table and free its directory entry.
 * This is done when a detached lwp exits in lwp_exit() or
 * when a non-detached lwp is waited for in lwp_wait() or
 * when a zombie lwp is detached in lwp_detach().
 */
void
lwp_hash_out(proc_t *p, id_t lwpid)
{
	tidhash_t *thp = &p->p_tidhash[TIDHASH(lwpid, p->p_tidhash_sz)];
	lwpdir_t **ldpp;
	lwpdir_t *ldp;
	lwpent_t *lep;

	mutex_enter(&thp->th_lock);
	for (ldpp = &thp->th_list;
	    (ldp = *ldpp) != NULL; ldpp = &ldp->ld_next) {
		lep = ldp->ld_entry;
		if (lep->le_lwpid == lwpid) {
			prlwpfree(p, lep);	/* /proc deals with le_trace */
			*ldpp = ldp->ld_next;
			ldp->ld_entry = NULL;
			ldp->ld_next = p->p_lwpfree;
			p->p_lwpfree = ldp;
			kmem_free(lep, sizeof (*lep));
			break;
		}
	}
	mutex_exit(&thp->th_lock);
}

/*
 * Lookup an lwp in the lwpid hash table by lwpid.
 */
lwpdir_t *
lwp_hash_lookup(proc_t *p, id_t lwpid)
{
	tidhash_t *thp;
	lwpdir_t *ldp;

	/*
	 * The process may be exiting, after p_tidhash has been set to NULL in
	 * proc_exit() but before prfree() has been called.  Return failure in
	 * this case.
	 */
	if (p->p_tidhash == NULL)
		return (NULL);

	thp = &p->p_tidhash[TIDHASH(lwpid, p->p_tidhash_sz)];
	for (ldp = thp->th_list; ldp != NULL; ldp = ldp->ld_next) {
		if (ldp->ld_entry->le_lwpid == lwpid)
			return (ldp);
	}

	return (NULL);
}

/*
 * Same as lwp_hash_lookup(), but acquire and return
 * the tid hash table entry lock on success.
 */
lwpdir_t *
lwp_hash_lookup_and_lock(proc_t *p, id_t lwpid, kmutex_t **mpp)
{
	tidhash_t *tidhash;
	uint_t tidhash_sz;
	tidhash_t *thp;
	lwpdir_t *ldp;

top:
	tidhash_sz = p->p_tidhash_sz;
	membar_consumer();
	if ((tidhash = p->p_tidhash) == NULL)
		return (NULL);

	thp = &tidhash[TIDHASH(lwpid, tidhash_sz)];
	mutex_enter(&thp->th_lock);

	/*
	 * Since we are not holding p->p_lock, the tid hash table
	 * may have changed.  If so, start over.  If not, then
	 * it cannot change until after we drop &thp->th_lock.
	 */
	if (tidhash != p->p_tidhash || tidhash_sz != p->p_tidhash_sz) {
		mutex_exit(&thp->th_lock);
		goto top;
	}

	for (ldp = thp->th_list; ldp != NULL; ldp = ldp->ld_next) {
		if (ldp->ld_entry->le_lwpid == lwpid) {
			*mpp = &thp->th_lock;
			return (ldp);
		}
	}

	mutex_exit(&thp->th_lock);
	return (NULL);
}
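
/*
 * Illustrative sketch (not from the original source): a caller such as
 * lwp_unpark() uses the returned bucket lock instead of p->p_lock:
 *
 *	kmutex_t *mp;
 *	lwpdir_t *ldp = lwp_hash_lookup_and_lock(p, lwpid, &mp);
 *	if (ldp != NULL) {
 *		... use ldp->ld_entry while holding the bucket lock ...
 *		mutex_exit(mp);
 *	}
 */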

/*
 * Update the indicated LWP usage statistic for the current LWP.
 */
void
lwp_stat_update(lwp_stat_id_t lwp_stat_id, long inc)
{
	klwp_t *lwp = ttolwp(curthread);

	if (lwp == NULL)
		return;

	switch (lwp_stat_id) {
	case LWP_STAT_INBLK:
		lwp->lwp_ru.inblock += inc;
		break;
	case LWP_STAT_OUBLK:
		lwp->lwp_ru.oublock += inc;
		break;
	case LWP_STAT_MSGRCV:
		lwp->lwp_ru.msgrcv += inc;
		break;
	case LWP_STAT_MSGSND:
		lwp->lwp_ru.msgsnd += inc;
		break;
	default:
		panic("lwp_stat_update: invalid lwp_stat_id 0x%x", lwp_stat_id);
	}
}
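
/*
 * Illustrative usage (not from the original source): an I/O path might
 * account a block read against the current LWP with
 *
 *	lwp_stat_update(LWP_STAT_INBLK, 1);
 *
 * The call is a no-op for threads that have no LWP.
 */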