/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 1991, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2018 Joyent, Inc.
 */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/sysmacros.h>
#include <sys/signal.h>
#include <sys/stack.h>
#include <sys/systm.h>
#include <sys/sysinfo.h>
#include <sys/errno.h>
#include <sys/cmn_err.h>
#include <sys/resource.h>
#include <sys/project.h>
#include <sys/debug.h>
#include <sys/class.h>
#include <vm/seg_kmem.h>
#include <vm/seg_kp.h>
#include <sys/machlock.h>
#include <sys/varargs.h>
#include <sys/turnstile.h>
#include <sys/vtrace.h>
#include <sys/callb.h>
#include <sys/sobject.h>
#include <sys/cpupart.h>
#include <sys/copyops.h>
#include <sys/brand.h>
#include <sys/tsol/label.h>
#include <sys/tsol/tndb.h>
#include <sys/cpc_impl.h>
#include <sys/reboot.h>
#include <sys/schedctl.h>
#include <sys/waitq.h>
#include <sys/cpucaps.h>
#include <sys/kiconv.h>
#include <sys/ctype.h>
struct kmem_cache *thread_cache;	/* cache of free threads */
struct kmem_cache *lwp_cache;		/* cache of free lwps */
struct kmem_cache *turnstile_cache;	/* cache of free turnstiles */
/*
 * allthreads is only for use by kmem_readers.  All kernel loops can use
 * the current thread as a start/end point.
 */
kthread_t *allthreads = &t0;	/* circular list of all threads */
static kcondvar_t reaper_cv;		/* synchronization var */
kthread_t	*thread_deathrow;	/* circular list of reapable threads */
kthread_t	*lwp_deathrow;		/* circular list of reapable threads */
kmutex_t	reaplock;		/* protects lwp and thread deathrows */
int	thread_reapcnt = 0;		/* number of threads on deathrow */
int	lwp_reapcnt = 0;		/* number of lwps on deathrow */
int	reaplimit = 16;			/* delay reaping until reaplimit */

thread_free_lock_t	*thread_free_lock;
					/* protects tick thread from reaper */
/* System Scheduling classes. */
id_t	syscid;				/* system scheduling class ID */
id_t	sysdccid = CLASS_UNUSED;	/* reset when SDC loads */
void	*segkp_thread;		/* cookie for segkp pool */

int lwp_cache_sz = 32;
int t_cache_sz = 8;

static kt_did_t next_t_id = 1;
/* Default mode for thread binding to CPUs and processor sets */
int default_binding_mode = TB_ALLHARD;
/*
 * Min/Max stack sizes for stack size parameters
 */
#define	MAX_STKSIZE	(32 * DEFAULTSTKSZ)
#define	MIN_STKSIZE	DEFAULTSTKSZ
/*
 * default_stksize overrides lwp_default_stksize if it is set.
 */
int	default_stksize;
int	lwp_default_stksize;
static zone_key_t zone_thread_key;
unsigned int kmem_stackinfo;		/* stackinfo feature on-off */
kmem_stkinfo_t *kmem_stkinfo_log;	/* stackinfo circular log */
static kmutex_t kmem_stkinfo_lock;	/* protects kmem_stkinfo_log */
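/*
 * Usage note (illustrative, not from the original file): the stackinfo
 * feature is normally enabled by setting the tunable before boot, e.g. in
 * /etc/system:
 *
 *	set kmem_stackinfo = 1
 *
 * While enabled, stkinfo_begin() fills each new thread stack with a pattern
 * and stkinfo_end() logs the stack high-water mark at thread exit (see the
 * stackinfo routines near the end of this file).
 */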
/*
 * forward declarations for internal thread specific data (tsd)
 */
static void *tsd_realloc(void *, size_t, size_t);

void thread_reaper(void);
/* forward declarations for stackinfo feature */
static void stkinfo_begin(kthread_t *);
static void stkinfo_end(kthread_t *);
static size_t stkinfo_percent(caddr_t, caddr_t, caddr_t);
/*ARGSUSED*/
static int
turnstile_constructor(void *buf, void *cdrarg, int kmflags)
{
	bzero(buf, sizeof (turnstile_t));
	return (0);
}
/*ARGSUSED*/
static void
turnstile_destructor(void *buf, void *cdrarg)
{
	turnstile_t *ts = buf;

	ASSERT(ts->ts_free == NULL);
	ASSERT(ts->ts_waiters == 0);
	ASSERT(ts->ts_inheritor == NULL);
	ASSERT(ts->ts_sleepq[0].sq_first == NULL);
	ASSERT(ts->ts_sleepq[1].sq_first == NULL);
}
void
thread_init(void)
{
	kthread_t *tp;
	extern char sys_name[];
	extern void idle();
	struct cpu *cpu = CPU;
	int i;
	kmutex_t *lp;

	mutex_init(&reaplock, NULL, MUTEX_SPIN, (void *)ipltospl(DISP_LEVEL));
	thread_free_lock =
	    kmem_alloc(sizeof (thread_free_lock_t) * THREAD_FREE_NUM, KM_SLEEP);
	for (i = 0; i < THREAD_FREE_NUM; i++) {
		lp = &thread_free_lock[i].tf_lock;
		mutex_init(lp, NULL, MUTEX_DEFAULT, NULL);
	}
#if defined(__i386) || defined(__amd64)
	thread_cache = kmem_cache_create("thread_cache", sizeof (kthread_t),
	    PTR24_ALIGN, NULL, NULL, NULL, NULL, NULL, 0);

	/*
	 * "struct _klwp" includes a "struct pcb", which includes a
	 * "struct fpu", which needs to be 64-byte aligned on amd64
	 * (and even on i386) for xsave/xrstor.
	 */
	lwp_cache = kmem_cache_create("lwp_cache", sizeof (klwp_t),
	    64, NULL, NULL, NULL, NULL, NULL, 0);
#else
	/*
	 * Allocate thread structures from static_arena.  This prevents
	 * issues where a thread tries to relocate its own thread
	 * structure and touches it after the mapping has been suspended.
	 */
	thread_cache = kmem_cache_create("thread_cache", sizeof (kthread_t),
	    PTR24_ALIGN, NULL, NULL, NULL, NULL, static_arena, 0);

	lwp_stk_cache_init();

	lwp_cache = kmem_cache_create("lwp_cache", sizeof (klwp_t),
	    0, NULL, NULL, NULL, NULL, NULL, 0);
#endif

	turnstile_cache = kmem_cache_create("turnstile_cache",
	    sizeof (turnstile_t), 0,
	    turnstile_constructor, turnstile_destructor, NULL, NULL, NULL, 0);
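	/*
	 * Illustrative sketch (not part of the original file): because
	 * turnstile_cache is created with a constructor/destructor pair,
	 * callers must return objects to the cache in their freshly
	 * constructed state, which is exactly what turnstile_destructor's
	 * ASSERTs verify on every free:
	 *
	 *	turnstile_t *ts = kmem_cache_alloc(turnstile_cache, KM_SLEEP);
	 *	... use ts, then detach all waiters and clear the inheritor ...
	 *	kmem_cache_free(turnstile_cache, ts);
	 */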
	/*
	 * Initialize various resource management facilities.
	 */

	/*
	 * Zone_init() should be called before project_init() so that the
	 * project ID for the first project is initialized correctly.
	 */

	curthread->t_ts = kmem_cache_alloc(turnstile_cache, KM_SLEEP);
	/*
	 * Originally, we had two parameters to set default stack
	 * size: one for lwp's (lwp_default_stksize), and one for
	 * kernel-only threads (DEFAULTSTKSZ, a.k.a. _defaultstksz).
	 * Now we have a third parameter that overrides both if it is
	 * set to a legal stack size, called default_stksize.
	 */
	if (default_stksize == 0) {
		default_stksize = DEFAULTSTKSZ;
	} else if (default_stksize % PAGESIZE != 0 ||
	    default_stksize > MAX_STKSIZE ||
	    default_stksize < MIN_STKSIZE) {
		cmn_err(CE_WARN, "Illegal stack size. Using %d",
		    (int)DEFAULTSTKSZ);
		default_stksize = DEFAULTSTKSZ;
	} else {
		lwp_default_stksize = default_stksize;
	}

	if (lwp_default_stksize == 0) {
		lwp_default_stksize = default_stksize;
	} else if (lwp_default_stksize % PAGESIZE != 0 ||
	    lwp_default_stksize > MAX_STKSIZE ||
	    lwp_default_stksize < MIN_STKSIZE) {
		cmn_err(CE_WARN, "Illegal stack size. Using %d",
		    default_stksize);
		lwp_default_stksize = default_stksize;
	}
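	/*
	 * Worked example (illustrative, assuming PAGESIZE = 0x1000 and
	 * DEFAULTSTKSZ = 0x5000): MIN_STKSIZE is then 0x5000 and MAX_STKSIZE
	 * is 0xa0000.  A tunable setting of default_stksize = 0x6000 is page
	 * aligned and within range, so it is accepted and also becomes
	 * lwp_default_stksize; a setting of 0x6100 fails the PAGESIZE check
	 * and both values fall back to DEFAULTSTKSZ.
	 */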
	segkp_lwp = segkp_cache_init(segkp, lwp_cache_sz,
	    lwp_default_stksize,
	    (KPD_NOWAIT | KPD_HASREDZONE | KPD_LOCKED));

	segkp_thread = segkp_cache_init(segkp, t_cache_sz,
	    default_stksize, KPD_HASREDZONE | KPD_LOCKED | KPD_NO_ANON);

	(void) getcid(sys_name, &syscid);
	curthread->t_cid = syscid;	/* current thread is t0 */
	/*
	 * Set up the first CPU's idle thread.
	 * It runs whenever the CPU has nothing worthwhile to do.
	 */
	tp = thread_create(NULL, 0, idle, NULL, 0, &p0, TS_STOPPED, -1);
	cpu->cpu_idle_thread = tp;
	tp->t_disp_queue = cpu->cpu_disp;
	ASSERT(tp->t_disp_queue != NULL);
	tp->t_bound_cpu = cpu;
	tp->t_affinitycnt = 1;
	/*
	 * Registering a thread in the callback table is usually
	 * done in the initialization code of the thread.  In this
	 * case, we do it right after thread creation to avoid
	 * blocking the idle thread while it registers itself.  It also
	 * avoids the possibility of reregistration in case a CPU
	 * restarts its idle thread.
	 */
	CALLB_CPR_INIT_SAFE(tp, "idle");
	/*
	 * Create the thread_reaper daemon.  From this point on, exited
	 * threads will get reaped.
	 */
	(void) thread_create(NULL, 0, (void (*)())thread_reaper,
	    NULL, 0, &p0, TS_RUN, minclsyspri);
	/*
	 * Finish initializing the kernel memory allocator now that
	 * thread_create() is available.
	 */
	kmem_thread_init();

	if (boothowto & RB_DEBUG)
		kdi_dvec_thravail();
}
/*
 * thread_create() blocks for memory if necessary.  It never fails.
 *
 * If stk is NULL, the thread is created at the base of the stack
 * and cannot be swapped.
 */
kthread_t *
thread_create(
	caddr_t	stk,
	size_t	stksize,
	void	(*proc)(),
	void	*arg,
	size_t	len,
	proc_t	*pp,
	int	state,
	pri_t	pri)
{
	kthread_t *t;
	extern struct classfuncs sys_classfuncs;
	turnstile_t *ts;
	/*
	 * Every thread keeps a turnstile around in case it needs to block.
	 * The only reason the turnstile is not simply part of the thread
	 * structure is that we may have to break the association whenever
	 * more than one thread blocks on a given synchronization object.
	 * From a memory-management standpoint, turnstiles are like the
	 * "attached mblks" that hang off dblks in the streams allocator.
	 */
	ts = kmem_cache_alloc(turnstile_cache, KM_SLEEP);
	if (stk == NULL) {
		/*
		 * alloc both thread and stack in segkp chunk
		 */

		if (stksize < default_stksize)
			stksize = default_stksize;

		if (stksize == default_stksize) {
			stk = (caddr_t)segkp_cache_get(segkp_thread);
		} else {
			stksize = roundup(stksize, PAGESIZE);
			stk = (caddr_t)segkp_get(segkp, stksize,
			    (KPD_HASREDZONE | KPD_NO_ANON | KPD_LOCKED));
		}
		/*
		 * The machine-dependent mutex code may require that
		 * thread pointers (since they may be used for mutex owner
		 * fields) have certain alignment requirements.
		 * PTR24_ALIGN is the size of the alignment quanta.
		 * XXX - assumes stack grows toward low addresses.
		 */
		if (stksize <= sizeof (kthread_t) + PTR24_ALIGN)
			cmn_err(CE_PANIC, "thread_create: proposed stack size"
			    " too small to hold thread.");
#ifdef STACK_GROWTH_DOWN
		stksize -= SA(sizeof (kthread_t) + PTR24_ALIGN - 1);
		stksize &= -PTR24_ALIGN;	/* make thread aligned */
		t = (kthread_t *)(stk + stksize);
		bzero(t, sizeof (kthread_t));
		if (audit_active)
			audit_thread_create(t);
		t->t_stk = stk + stksize;
		t->t_stkbase = stk;
#else	/* stack grows to larger addresses */
		stksize -= SA(sizeof (kthread_t));
		t = (kthread_t *)(stk);
		bzero(t, sizeof (kthread_t));
		t->t_stk = stk + sizeof (kthread_t);
		t->t_stkbase = stk + stksize + sizeof (kthread_t);
#endif	/* STACK_GROWTH_DOWN */
		t->t_flag |= T_TALLOCSTK;
		t->t_swap = stk;
	} else {
		t = kmem_cache_alloc(thread_cache, KM_SLEEP);
		bzero(t, sizeof (kthread_t));
		ASSERT(((uintptr_t)t & (PTR24_ALIGN - 1)) == 0);
		if (audit_active)
			audit_thread_create(t);
		/*
		 * Initialize t_stk to the kernel stack pointer to use
		 * upon entry to the kernel
		 */
#ifdef STACK_GROWTH_DOWN
		t->t_stk = stk + stksize;
		t->t_stkbase = stk;
#else
		t->t_stk = stk;			/* 3b2-like */
		t->t_stkbase = stk + stksize;
#endif /* STACK_GROWTH_DOWN */
	}
	if (kmem_stackinfo != 0) {
		stkinfo_begin(t);
	}

	t->t_ts = ts;

	/*
	 * p_cred could be NULL if thread_create is called before cred_init
	 * is called in main.
	 */
	mutex_enter(&pp->p_crlock);
	if (pp->p_cred)
		crhold(t->t_cred = pp->p_cred);
	mutex_exit(&pp->p_crlock);
	t->t_start = gethrestime_sec();
	t->t_clfuncs = &sys_classfuncs.thread;
	t->t_stime = ddi_get_lbolt();
	t->t_schedflag = TS_LOAD | TS_DONT_SWAP;
	t->t_bind_cpu = PBIND_NONE;
	t->t_bindflag = (uchar_t)default_binding_mode;
	t->t_bind_pset = PS_NONE;
	t->t_plockp = &pp->p_lock;

	t->t_dtrace_vtime = 1;	/* assure vtimestamp is always non-zero */

	CPU_STATS_ADDQ(CPU, sys, nthreads, 1);
	tnf_thread_create(t);
	LOCK_INIT_CLEAR(&t->t_lock);
	/*
	 * Callers who give us a NULL proc must do their own
	 * stack initialization.  e.g. lwp_create()
	 */
	if (proc != NULL) {
		t->t_stk = thread_stk_init(t->t_stk);
		thread_load(t, proc, arg, len);
	}
	/*
	 * Put a hold on project0.  If this thread is actually in a
	 * different project, then t_proj will be changed later in
	 * lwp_create().  All kernel-only threads must be in project 0.
	 */
	t->t_proj = project_hold(proj0p);

	lgrp_affinity_init(&t->t_lgrp_affinity);

	mutex_enter(&pidlock);
	t->t_did = next_t_id++;
	t->t_prev = curthread->t_prev;
	t->t_next = curthread;
	/*
	 * Add the thread to the list of all threads, and initialize
	 * its t_cpu pointer.  We need to block preemption since
	 * cpu_offline walks the thread list looking for threads
	 * with t_cpu pointing to the CPU being offlined.  We want
	 * to make sure that the list is consistent and that if t_cpu
	 * is set, the thread is on the list.
	 */
	kpreempt_disable();
	curthread->t_prev->t_next = t;
	curthread->t_prev = t;
	/*
	 * Threads should never have a NULL t_cpu pointer so assign it
	 * here.  If the thread is being created with state TS_RUN a
	 * better CPU may be chosen when it is placed on the run queue.
	 *
	 * We need to keep kernel preemption disabled when setting all
	 * three fields to keep them in sync.  Also, always create in
	 * the default partition since that's where kernel threads go
	 * (if this isn't a kernel thread, t_cpupart will be changed
	 * in lwp_create before setting the thread runnable).
	 */
	t->t_cpupart = &cp_default;
	/*
	 * For now, affiliate this thread with the root lgroup.
	 * Since the kernel does not (presently) allocate its memory
	 * in a locality aware fashion, the root is an appropriate home.
	 * If this thread is later associated with an lwp, it will have
	 * its lgroup re-assigned at that time.
	 */
	lgrp_move_thread(t, &cp_default.cp_lgrploads[LGRP_ROOTID], 1);
	/*
	 * Inherit the current cpu.  If this cpu isn't part of the chosen
	 * lgroup, a new cpu will be chosen by cpu_choose when the thread
	 * is ready to run.
	 */
	if (CPU->cpu_part == &cp_default)
		t->t_cpu = CPU;
	else
		t->t_cpu = disp_lowpri_cpu(cp_default.cp_cpulist, t->t_lpl,
		    t->t_pri, NULL);

	t->t_disp_queue = t->t_cpu->cpu_disp;
	kpreempt_enable();
	/*
	 * Initialize thread state and the dispatcher lock pointer.
	 * Need to hold onto pidlock to block allthreads walkers until
	 * the state is set.
	 */
	switch (state) {
	case TS_RUN:
		curthread->t_oldspl = splhigh();	/* get dispatcher spl */
		THREAD_SET_STATE(t, TS_STOPPED, &transition_lock);
		CL_SETRUN(t);
		thread_unlock(t);
		break;

	case TS_ONPROC:
		THREAD_ONPROC(t, t->t_cpu);
		break;

	case TS_FREE:
		/*
		 * Free state will be used for intr threads.
		 * The interrupt routine must set the thread dispatcher
		 * lock pointer (t_lockp) if starting on a CPU
		 * other than the current one.
		 */
		THREAD_FREEINTR(t, CPU);
		break;

	case TS_STOPPED:
		THREAD_SET_STATE(t, TS_STOPPED, &stop_lock);
		break;

	default:			/* TS_SLEEP, TS_ZOMB or TS_TRANS */
		cmn_err(CE_PANIC, "thread_create: invalid state %d", state);
	}
	mutex_exit(&pidlock);
	return (t);
}
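/*
 * Illustrative usage sketch (not from the original file): a typical kernel
 * caller lets thread_create() allocate the stack and starts the thread
 * runnable; "my_daemon" is a hypothetical worker that must end with
 * thread_exit():
 *
 *	static void
 *	my_daemon(void)
 *	{
 *		... do work ...
 *		thread_exit();
 *	}
 *
 *	kthread_t *t = thread_create(NULL, 0, my_daemon, NULL, 0, &p0,
 *	    TS_RUN, minclsyspri);
 */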
/*
 * Move thread to project0 and take care of project reference counters.
 */
void
thread_rele(kthread_t *t)
{
	kproject_t *kpj;

	thread_lock(t);

	ASSERT(t == curthread || t->t_state == TS_FREE || t->t_procp == &p0);
	kpj = ttoproj(t);
	t->t_proj = proj0p;

	thread_unlock(t);

	if (kpj != proj0p) {
		project_rele(kpj);
		(void) project_hold(proj0p);
	}
}
void
thread_exit(void)
{
	kthread_t *t = curthread;

	if ((t->t_proc_flag & TP_ZTHREAD) != 0)
		cmn_err(CE_PANIC, "thread_exit: zthread_exit() not called");

	tsd_exit();		/* Clean up this thread's TSD */

	kcpc_passivate();	/* clean up performance counter state */

	/*
	 * No kernel thread should have called poll() without arranging
	 * for pollcleanup() to be called here.
	 */
	ASSERT(t->t_pollstate == NULL);
	ASSERT(t->t_schedctl == NULL);
	if (t->t_door)
		door_slam();	/* in case thread did an upcall */

	/*
	 * remove thread from the all threads list so that
	 * death-row can use the same pointers.
	 */
	mutex_enter(&pidlock);
	t->t_next->t_prev = t->t_prev;
	t->t_prev->t_next = t->t_next;
	ASSERT(allthreads != t);	/* t0 never exits */
	cv_broadcast(&t->t_joincv);	/* wake up anyone in thread_join */
	mutex_exit(&pidlock);

	if (t->t_ctx != NULL)
		exitctx(t);
	if (t->t_procp->p_pctx != NULL)
		exitpctx(t->t_procp);

	if (kmem_stackinfo != 0) {
		stkinfo_end(t);
	}

	t->t_state = TS_ZOMB;	/* set zombie thread */

	swtch_from_zombie();	/* give up the CPU */
	/* NOTREACHED */
}
/*
 * Check to see if the specified thread is active (defined as being on
 * the thread list).  This is certainly a slow way to do this; if there's
 * ever a reason to speed it up, we could maintain a hash table of active
 * threads indexed by their t_did.
 */
static kthread_t *
did_to_thread(kt_did_t tid)
{
	kthread_t *t;

	ASSERT(MUTEX_HELD(&pidlock));
	for (t = curthread->t_next; t != curthread; t = t->t_next) {
		if (t->t_did == tid)
			break;
	}
	if (t->t_did == tid)
		return (t);
	else
		return (NULL);
}
/*
 * Wait for specified thread to exit.  Returns immediately if the thread
 * could not be found, meaning that it has either already exited or never
 * existed.
 */
void
thread_join(kt_did_t tid)
{
	kthread_t *t;

	ASSERT(tid != curthread->t_did);
	ASSERT(tid != t0.t_did);

	mutex_enter(&pidlock);
	/*
	 * Make sure we check that the thread is on the thread list
	 * before blocking on it; otherwise we could end up blocking on
	 * a cv that's already been freed.  In other words, don't cache
	 * the thread pointer across calls to cv_wait.
	 *
	 * The choice of loop invariant means that whenever a thread
	 * is taken off the allthreads list, a cv_broadcast must be
	 * performed on that thread's t_joincv to wake up any waiters.
	 * The broadcast doesn't have to happen right away, but it
	 * shouldn't be postponed indefinitely (e.g., by doing it in
	 * thread_free which may only be executed when the deathrow
	 * queue is processed).
	 */
	while (t = did_to_thread(tid))
		cv_wait(&t->t_joincv, &pidlock);
	mutex_exit(&pidlock);
}
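/*
 * Illustrative pairing (not from the original file): because the kthread_t
 * may be recycled once the thread exits, callers capture t_did at creation
 * time and join on the ID rather than the pointer.  "my_daemon" is a
 * hypothetical worker function:
 *
 *	kthread_t *t = thread_create(NULL, 0, my_daemon, NULL, 0, &p0,
 *	    TS_RUN, minclsyspri);
 *	kt_did_t did = t->t_did;
 *	...
 *	thread_join(did);
 *
 * thread_join() returns once the thread is off the allthreads list.
 */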
void
thread_free_prevent(kthread_t *t)
{
	kmutex_t *lp;

	lp = &thread_free_lock[THREAD_FREE_HASH(t)].tf_lock;
	mutex_enter(lp);
}

void
thread_free_allow(kthread_t *t)
{
	kmutex_t *lp;

	lp = &thread_free_lock[THREAD_FREE_HASH(t)].tf_lock;
	mutex_exit(lp);
}

static void
thread_free_barrier(kthread_t *t)
{
	kmutex_t *lp;

	lp = &thread_free_lock[THREAD_FREE_HASH(t)].tf_lock;
	mutex_enter(lp);
	mutex_exit(lp);
}
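/*
 * Illustrative sketch (not from the original file): code that examines
 * another thread's kthread_t (e.g. tick accounting) brackets the access so
 * the reaper cannot free the structure out from under it:
 *
 *	thread_free_prevent(t);
 *	... safely dereference *t ...
 *	thread_free_allow(t);
 *
 * thread_free() calls thread_free_barrier(t), which acquires and drops the
 * same hashed lock, waiting out any such reader before the memory is freed.
 */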
void
thread_free(kthread_t *t)
{
	boolean_t allocstk = (t->t_flag & T_TALLOCSTK);
	klwp_t *lwp = t->t_lwp;
	caddr_t swap = t->t_swap;

	ASSERT(t != &t0 && t->t_state == TS_FREE);
	ASSERT(t->t_door == NULL);
	ASSERT(t->t_schedctl == NULL);
	ASSERT(t->t_pollstate == NULL);

	if (t->t_cred != NULL) {
		crfree(t->t_cred);
		t->t_cred = NULL;
	}
	if (t->t_pdmsg) {
		kmem_free(t->t_pdmsg, strlen(t->t_pdmsg) + 1);
		t->t_pdmsg = NULL;
	}
	if (audit_active)
		audit_thread_free(t);
	if (t->t_cldata) {
		CL_EXITCLASS(t->t_cid, (caddr_t *)t->t_cldata);
	}
	if (t->t_rprof != NULL) {
		kmem_free(t->t_rprof, sizeof (*t->t_rprof));
		t->t_rprof = NULL;
	}
	t->t_lockp = NULL;	/* nothing should try to lock this thread now */
	if (lwp)
		lwp_freeregs(lwp, 0);
	if (t->t_ctx)
		freectx(t, 0);
	lock_clear(&t->t_lock);

	if (t->t_ts->ts_waiters > 0)
		panic("thread_free: turnstile still active");

	kmem_cache_free(turnstile_cache, t->t_ts);

	free_afd(&t->t_activefd);

	/*
	 * Barrier for the tick accounting code.  The tick accounting code
	 * holds this lock to keep the thread from going away while it's
	 * looking at it.
	 */
	thread_free_barrier(t);

	ASSERT(ttoproj(t) == proj0p);
	project_rele(ttoproj(t));

	lgrp_affinity_free(&t->t_lgrp_affinity);

	mutex_enter(&pidlock);
	nthread--;
	mutex_exit(&pidlock);

	if (t->t_name != NULL) {
		kmem_free(t->t_name, THREAD_NAME_MAX);
		t->t_name = NULL;
	}

	/*
	 * Free thread, lwp and stack.  This needs to be done carefully, since
	 * if T_TALLOCSTK is set, the thread is part of the stack.
	 */
	t->t_lwp = NULL;
	t->t_swap = NULL;

	if (swap) {
		segkp_release(segkp, swap);
	}
	if (lwp) {
		kmem_cache_free(lwp_cache, lwp);
	}
	if (!allocstk) {
		kmem_cache_free(thread_cache, t);
	}
}
/*
 * Removes threads associated with the given zone from a deathrow queue.
 * tp is a pointer to the head of the deathrow queue, and countp is a
 * pointer to the current deathrow count.  Returns a linked list of
 * threads removed from the list.
 */
static kthread_t *
thread_zone_cleanup(kthread_t **tp, int *countp, zoneid_t zoneid)
{
	kthread_t *tmp, *list = NULL;
	cred_t *cr;

	ASSERT(MUTEX_HELD(&reaplock));
	while (*tp != NULL) {
		if ((cr = (*tp)->t_cred) != NULL && crgetzoneid(cr) == zoneid) {
			tmp = *tp;
			*tp = tmp->t_forw;
			tmp->t_forw = list;
			list = tmp;
			(*countp)--;
		} else {
			tp = &(*tp)->t_forw;
		}
	}
	return (list);
}
static void
thread_reap_list(kthread_t *t)
{
	kthread_t *next;

	while (t != NULL) {
		next = t->t_forw;
		thread_free(t);
		t = next;
	}
}
/*ARGSUSED*/
static void
thread_zone_destroy(zoneid_t zoneid, void *unused)
{
	kthread_t *t, *l;

	mutex_enter(&reaplock);
	/*
	 * Pull threads and lwps associated with zone off deathrow lists.
	 */
	t = thread_zone_cleanup(&thread_deathrow, &thread_reapcnt, zoneid);
	l = thread_zone_cleanup(&lwp_deathrow, &lwp_reapcnt, zoneid);
	mutex_exit(&reaplock);

	/*
	 * Guard against race condition in mutex_owner_running:
	 *	thread=owner(mutex)
	 *	<interrupt>
	 *				thread exits mutex
	 *				thread exits
	 *				thread reaped
	 *				thread struct freed
	 * cpu = thread->t_cpu <- BAD POINTER DEREFERENCE.
	 * A cross call to all cpus will cause the interrupt handler
	 * to reset the PC if it is in mutex_owner_running, refreshing
	 * stale thread pointers.
	 */
	mutex_sync();   /* sync with mutex code */

	/*
	 * Reap threads
	 */
	thread_reap_list(t);

	/*
	 * Reap lwps
	 */
	thread_reap_list(l);
}
/*
 * cleanup zombie threads that are on deathrow.
 */
void
thread_reaper()
{
	kthread_t *t, *l;
	callb_cpr_t cprinfo;

	/*
	 * Register callback to clean up threads when zone is destroyed.
	 */
	zone_key_create(&zone_thread_key, NULL, NULL, thread_zone_destroy);

	CALLB_CPR_INIT(&cprinfo, &reaplock, callb_generic_cpr, "t_reaper");
	for (;;) {
		mutex_enter(&reaplock);
		while (thread_deathrow == NULL && lwp_deathrow == NULL) {
			CALLB_CPR_SAFE_BEGIN(&cprinfo);
			cv_wait(&reaper_cv, &reaplock);
			CALLB_CPR_SAFE_END(&cprinfo, &reaplock);
		}

		/*
		 * mutex_sync() needs to be called when reaping, but
		 * not too often.  We limit the reaping rate to once
		 * per second.  Reaplimit is the max rate at which threads
		 * can be freed.  It does not impact thread
		 * destruction/creation.
		 */
		t = thread_deathrow;
		l = lwp_deathrow;
		thread_deathrow = NULL;
		lwp_deathrow = NULL;
		thread_reapcnt = 0;
		lwp_reapcnt = 0;
		mutex_exit(&reaplock);

		/*
		 * Guard against race condition in mutex_owner_running:
		 *	thread=owner(mutex)
		 *	<interrupt>
		 *				thread exits mutex
		 *				thread exits
		 *				thread reaped
		 *				thread struct freed
		 * cpu = thread->t_cpu <- BAD POINTER DEREFERENCE.
		 * A cross call to all cpus will cause the interrupt handler
		 * to reset the PC if it is in mutex_owner_running, refreshing
		 * stale thread pointers.
		 */
		mutex_sync();   /* sync with mutex code */

		/*
		 * Reap threads
		 */
		thread_reap_list(t);

		/*
		 * Reap lwps
		 */
		thread_reap_list(l);

		delay(hz);
	}
}
/*
 * This is called by lwp_create(), etc., to put a lwp_deathrow thread onto
 * thread_deathrow.  The thread's state has already been changed to TS_FREE
 * to indicate that it is reapable.  The caller already holds the reaplock,
 * and the thread was already removed from the lwp_deathrow list.
 */
void
reapq_move_lq_to_tq(kthread_t *t)
{
	ASSERT(t->t_state == TS_FREE);
	ASSERT(MUTEX_HELD(&reaplock));
	t->t_forw = thread_deathrow;
	thread_deathrow = t;
	thread_reapcnt++;
	if (lwp_reapcnt + thread_reapcnt > reaplimit)
		cv_signal(&reaper_cv);	/* wake the reaper */
}
/*
 * This is called by resume() to put a zombie thread onto deathrow.
 * The thread's state is changed to TS_FREE to indicate that it is reapable.
 * This is called from the idle thread so it must not block - just spin.
 */
void
reapq_add(kthread_t *t)
{
	mutex_enter(&reaplock);

	/*
	 * lwp_deathrow contains threads with lwp linkage and
	 * swappable thread stacks which have the default stacksize.
	 * These threads' lwps and stacks may be reused by lwp_create().
	 *
	 * Anything else goes on thread_deathrow(), where it will eventually
	 * be thread_free()d.
	 */
	if (t->t_flag & T_LWPREUSE) {
		ASSERT(ttolwp(t) != NULL);
		t->t_forw = lwp_deathrow;
		lwp_deathrow = t;
		lwp_reapcnt++;
	} else {
		t->t_forw = thread_deathrow;
		thread_deathrow = t;
		thread_reapcnt++;
	}
	if (lwp_reapcnt + thread_reapcnt > reaplimit)
		cv_signal(&reaper_cv);	/* wake the reaper */
	t->t_state = TS_FREE;
	lock_clear(&t->t_lock);

	/*
	 * Before we return, we need to grab and drop the thread lock for
	 * the dead thread.  At this point, the current thread is the idle
	 * thread, and the dead thread's CPU lock points to the current
	 * CPU -- and we must grab and drop the lock to synchronize with
	 * a racing thread walking a blocking chain that the zombie thread
	 * was recently in.  By this point, that blocking chain is (by
	 * definition) stale: the dead thread is not holding any locks, and
	 * is therefore not in any blocking chains -- but if we do not regrab
	 * our lock before freeing the dead thread's data structures, the
	 * thread walking the (stale) blocking chain will die on memory
	 * corruption when it attempts to drop the dead thread's lock.  We
	 * only need do this once because there is no way for the dead thread
	 * to ever again be on a blocking chain: once we have grabbed and
	 * dropped the thread lock, we are guaranteed that anyone that could
	 * have seen this thread in a blocking chain can no longer see it.
	 */
	thread_lock(t);
	thread_unlock(t);

	mutex_exit(&reaplock);
}
/*
 * Install thread context ops for the current thread.
 */
void
installctx(
	kthread_t *t,
	void	*arg,
	void	(*save)(void *),
	void	(*restore)(void *),
	void	(*fork)(void *, void *),
	void	(*lwp_create)(void *, void *),
	void	(*exit)(void *),
	void	(*free)(void *, int))
{
	struct ctxop *ctx;

	ctx = kmem_alloc(sizeof (struct ctxop), KM_SLEEP);
	ctx->save_op = save;
	ctx->restore_op = restore;
	ctx->fork_op = fork;
	ctx->lwp_create_op = lwp_create;
	ctx->exit_op = exit;
	ctx->free_op = free;
	ctx->arg = arg;
	ctx->next = t->t_ctx;
	t->t_ctx = ctx;
}
/*
 * Remove the thread context ops from a thread.
 */
int
removectx(
	kthread_t *t,
	void	*arg,
	void	(*save)(void *),
	void	(*restore)(void *),
	void	(*fork)(void *, void *),
	void	(*lwp_create)(void *, void *),
	void	(*exit)(void *),
	void	(*free)(void *, int))
{
	struct ctxop *ctx, *prev_ctx;

	/*
	 * The incoming kthread_t (which is the thread for which the
	 * context ops will be removed) should be one of the following:
	 *
	 * a) the current thread,
	 *
	 * b) a thread of a process that's being forked (SIDL),
	 *
	 * c) a thread that belongs to the same process as the current
	 *    thread and for which the current thread is the agent thread,
	 *
	 * d) a thread that is TS_STOPPED which is indicative of it
	 *    being (if curthread is not an agent) a thread being created
	 *    as part of an lwp creation.
	 */
	ASSERT(t == curthread || ttoproc(t)->p_stat == SIDL ||
	    ttoproc(t)->p_agenttp == curthread || t->t_state == TS_STOPPED);

	/*
	 * Serialize modifications to t->t_ctx to prevent the agent thread
	 * and the target thread from racing with each other during lwp exit.
	 */
	mutex_enter(&t->t_ctx_lock);
	prev_ctx = NULL;
	for (ctx = t->t_ctx; ctx != NULL; ctx = ctx->next) {
		if (ctx->save_op == save && ctx->restore_op == restore &&
		    ctx->fork_op == fork && ctx->lwp_create_op == lwp_create &&
		    ctx->exit_op == exit && ctx->free_op == free &&
		    ctx->arg == arg) {
			if (prev_ctx)
				prev_ctx->next = ctx->next;
			else
				t->t_ctx = ctx->next;
			mutex_exit(&t->t_ctx_lock);
			if (ctx->free_op != NULL)
				(ctx->free_op)(ctx->arg, 0);
			kmem_free(ctx, sizeof (struct ctxop));
			return (1);
		}
		prev_ctx = ctx;
	}
	mutex_exit(&t->t_ctx_lock);

	return (0);
}
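/*
 * Illustrative pairing (not from the original file): a subsystem that needs
 * per-thread hardware state registers its handlers once, and removes them
 * with the exact same tuple, since removectx() matches on every field.
 * my_state and the my_* handlers are hypothetical names:
 *
 *	installctx(curthread, my_state, my_save, my_restore,
 *	    NULL, NULL, NULL, my_free);
 *	...
 *	(void) removectx(curthread, my_state, my_save, my_restore,
 *	    NULL, NULL, NULL, my_free);
 */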
void
savectx(kthread_t *t)
{
	struct ctxop *ctx;

	ASSERT(t == curthread);
	for (ctx = t->t_ctx; ctx != 0; ctx = ctx->next)
		if (ctx->save_op != NULL)
			(ctx->save_op)(ctx->arg);
}
void
restorectx(kthread_t *t)
{
	struct ctxop *ctx;

	ASSERT(t == curthread);
	for (ctx = t->t_ctx; ctx != 0; ctx = ctx->next)
		if (ctx->restore_op != NULL)
			(ctx->restore_op)(ctx->arg);
}
void
forkctx(kthread_t *t, kthread_t *ct)
{
	struct ctxop *ctx;

	for (ctx = t->t_ctx; ctx != NULL; ctx = ctx->next)
		if (ctx->fork_op != NULL)
			(ctx->fork_op)(t, ct);
}
/*
 * Note that this operator is only invoked via the _lwp_create
 * system call.  The system may have other reasons to create lwps
 * e.g. the agent lwp or the doors unreferenced lwp.
 */
void
lwp_createctx(kthread_t *t, kthread_t *ct)
{
	struct ctxop *ctx;

	for (ctx = t->t_ctx; ctx != NULL; ctx = ctx->next)
		if (ctx->lwp_create_op != NULL)
			(ctx->lwp_create_op)(t, ct);
}
/*
 * exitctx is called from thread_exit() and lwp_exit() to perform any actions
 * needed when the thread/LWP leaves the processor for the last time. This
 * routine is not intended to deal with freeing memory; freectx() is used for
 * that purpose during thread_free(). This routine is provided to allow for
 * clean-up that can't wait until thread_free().
 */
void
exitctx(kthread_t *t)
{
	struct ctxop *ctx;

	for (ctx = t->t_ctx; ctx != NULL; ctx = ctx->next)
		if (ctx->exit_op != NULL)
			(ctx->exit_op)(t);
}
/*
 * freectx is called from thread_free() and exec() to get
 * rid of old thread context ops.
 */
void
freectx(kthread_t *t, int isexec)
{
	struct ctxop *ctx;

	while ((ctx = t->t_ctx) != NULL) {
		t->t_ctx = ctx->next;
		if (ctx->free_op != NULL)
			(ctx->free_op)(ctx->arg, isexec);
		kmem_free(ctx, sizeof (struct ctxop));
	}
}
/*
 * freectx_ctx is called from lwp_create() when an lwp is reused from
 * lwp_deathrow and its thread structure is added to thread_deathrow.
 * The thread structure to which this ctx was attached may already have
 * been freed by the thread reaper, so free_op implementations shouldn't
 * rely on that thread structure still being around.
 */
void
freectx_ctx(struct ctxop *ctx)
{
	struct ctxop *nctx;

	ASSERT(ctx != NULL);

	do {
		nctx = ctx->next;
		if (ctx->free_op != NULL)
			(ctx->free_op)(ctx->arg, 0);
		kmem_free(ctx, sizeof (struct ctxop));
	} while ((ctx = nctx) != NULL);
}
/*
 * Set the thread running; arrange for it to be swapped in if necessary.
 */
void
setrun_locked(kthread_t *t)
{
	ASSERT(THREAD_LOCK_HELD(t));
	if (t->t_state == TS_SLEEP) {
		/*
		 * Take off sleep queue.
		 */
		SOBJ_UNSLEEP(t->t_sobj_ops, t);
	} else if (t->t_state & (TS_RUN | TS_ONPROC)) {
		/*
		 * Already on dispatcher queue.
		 */
		return;
	} else if (t->t_state == TS_WAIT) {
		waitq_setrun(t);
	} else if (t->t_state == TS_STOPPED) {
		/*
		 * All of the sending of SIGCONT (TC_XSTART) and /proc
		 * (TC_PSTART) and lwp_continue() (TC_CSTART) must have
		 * requested that the thread be run.
		 * Just calling setrun() is not sufficient to set a stopped
		 * thread running.  TP_TXSTART is always set if the thread
		 * is not stopped by a jobcontrol stop signal.
		 * TP_TPSTART is always set if /proc is not controlling it.
		 * TP_TCSTART is always set if lwp_suspend() didn't stop it.
		 * The thread won't be stopped unless one of these
		 * three mechanisms did it.
		 *
		 * These flags must be set before calling setrun_locked(t).
		 * They can't be passed as arguments because the streams
		 * code calls setrun() indirectly and the mechanism for
		 * doing so admits only one argument.  Note that the
		 * thread must be locked in order to change t_schedflags.
		 */
		if ((t->t_schedflag & TS_ALLSTART) != TS_ALLSTART)
			return;
		/*
		 * Process is no longer stopped (a thread is running).
		 */
		t->t_whystop = 0;
		t->t_whatstop = 0;
		/*
		 * Strictly speaking, we do not have to clear these
		 * flags here; they are cleared on entry to stop().
		 * However, they are confusing when doing kernel
		 * debugging or when they are revealed by ps(1).
		 */
		t->t_schedflag &= ~TS_ALLSTART;
		THREAD_TRANSITION(t);	/* drop stopped-thread lock */
		ASSERT(t->t_lockp == &transition_lock);
		ASSERT(t->t_wchan0 == NULL && t->t_wchan == NULL);
		/*
		 * Let the class put the process on the dispatcher queue.
		 */
		CL_SETRUN(t);
	}
}
void
setrun(kthread_t *t)
{
	thread_lock(t);
	setrun_locked(t);
	thread_unlock(t);
}
/*
 * Unpin an interrupted thread.
 *	When an interrupt occurs, the interrupt is handled on the stack
 *	of an interrupt thread, taken from a pool linked to the CPU structure.
 *
 *	When swtch() is switching away from an interrupt thread because it
 *	blocked or was preempted, this routine is called to complete the
 *	saving of the interrupted thread state, and returns the interrupted
 *	thread pointer so it may be resumed.
 *
 *	Called by swtch() only at high spl.
 */
kthread_t *
thread_unpin()
{
	kthread_t	*t = curthread;	/* current thread */
	kthread_t	*itp;		/* interrupted thread */
	int		i;		/* interrupt level */
	extern int	intr_passivate();

	ASSERT(t->t_intr != NULL);

	itp = t->t_intr;		/* interrupted thread */
	t->t_intr = NULL;		/* clear interrupt ptr */

	/*
	 * Get state from interrupt thread for the one
	 * it interrupted.
	 */
	i = intr_passivate(t, itp);

	TRACE_5(TR_FAC_INTR, TR_INTR_PASSIVATE,
	    "intr_passivate:level %d curthread %p (%T) ithread %p (%T)",
	    i, t, t, itp, itp);

	/*
	 * Dissociate the current thread from the interrupted thread's LWP.
	 */
	t->t_lwp = NULL;

	/*
	 * Interrupt handlers above the level that spinlocks block must
	 * not block.
	 */
	if (i < 0 || i > LOCK_LEVEL)
		cmn_err(CE_PANIC, "thread_unpin: ipl out of range %x", i);

	/*
	 * Compute the CPU's base interrupt level based on the active
	 * interrupts.
	 */
	ASSERT(CPU->cpu_intr_actv & (1 << i));
	set_base_spl();

	return (itp);
}
/*
 * Create and initialize an interrupt thread.
 *	Called at spl7() or better.
 */
void
thread_create_intr(struct cpu *cp)
{
	kthread_t *tp;

	tp = thread_create(NULL, 0,
	    (void (*)())thread_create_intr, NULL, 0, &p0, TS_ONPROC, 0);

	/*
	 * Set the thread in the TS_FREE state.  The state will change
	 * to TS_ONPROC only while the interrupt is active.  Think of these
	 * as being on a private free list for the CPU.  Being TS_FREE keeps
	 * inactive interrupt threads out of debugger thread lists.
	 *
	 * We cannot call thread_create with TS_FREE because of the current
	 * checks there for ONPROC.  Fix this when thread_create takes flags.
	 */
	THREAD_FREEINTR(tp, cp);

	/*
	 * Nobody should ever reference the credentials of an interrupt
	 * thread so make it NULL to catch any such references.
	 */
	tp->t_cred = NULL;
	tp->t_flag |= T_INTR_THREAD;
	tp->t_bound_cpu = cp;
	tp->t_disp_queue = cp->cpu_disp;
	tp->t_affinitycnt = 1;

	/*
	 * Don't make a user-requested binding on this thread so that
	 * the processor can be offlined.
	 */
	tp->t_bind_cpu = PBIND_NONE;	/* no USER-requested binding */
	tp->t_bind_pset = PS_NONE;

#if defined(__i386) || defined(__amd64)
	tp->t_stk -= STACK_ALIGN;
	*(tp->t_stk) = 0;		/* terminate intr thread stack */
#endif

	/*
	 * Link onto CPU's interrupt pool.
	 */
	tp->t_link = cp->cpu_intr_thread;
	cp->cpu_intr_thread = tp;
}
/*
 * TSD -- THREAD SPECIFIC DATA
 */
static kmutex_t		tsd_mutex;	 /* linked list spin lock */
static uint_t		tsd_nkeys;	 /* size of destructor array */
/* per-key destructor funcs */
static void		(**tsd_destructor)(void *);
/* list of tsd_thread's */
static struct tsd_thread	*tsd_list;
/*
 * Default destructor
 *	Needed because NULL destructor means that the key is unused
 */
/* ARGSUSED */
void
tsd_defaultdestructor(void *value)
{}
/*
 * Create a key (index into per thread array)
 *	Locks out tsd_create, tsd_destroy, and tsd_exit
 *	May allocate memory with lock held
 */
void
tsd_create(uint_t *keyp, void (*destructor)(void *))
{
	int	i;
	uint_t	nkeys;

	/*
	 * if key is allocated, do nothing
	 */
	mutex_enter(&tsd_mutex);
	if (*keyp) {
		mutex_exit(&tsd_mutex);
		return;
	}

	/*
	 * find an unused key
	 */
	if (destructor == NULL)
		destructor = tsd_defaultdestructor;

	for (i = 0; i < tsd_nkeys; ++i)
		if (tsd_destructor[i] == NULL)
			break;

	/*
	 * if no unused keys, increase the size of the destructor array
	 */
	if (i == tsd_nkeys) {
		if ((nkeys = (tsd_nkeys << 1)) == 0)
			nkeys = 1;
		tsd_destructor =
		    (void (**)(void *))tsd_realloc((void *)tsd_destructor,
		    (size_t)(tsd_nkeys * sizeof (void (*)(void *))),
		    (size_t)(nkeys * sizeof (void (*)(void *))));
		tsd_nkeys = nkeys;
	}

	/*
	 * allocate the next available unused key
	 */
	tsd_destructor[i] = destructor;
	*keyp = i + 1;
	mutex_exit(&tsd_mutex);
}
/*
 * Destroy a key -- this is for unloadable modules
 *
 * Assumes that the caller is preventing tsd_set and tsd_get
 * Locks out tsd_create, tsd_destroy, and tsd_exit
 * May free memory with lock held
 */
void
tsd_destroy(uint_t *keyp)
{
	uint_t key;
	struct tsd_thread *tsd;

	/*
	 * protect the key namespace and our destructor lists
	 */
	mutex_enter(&tsd_mutex);
	key = *keyp;
	*keyp = 0;

	ASSERT(key <= tsd_nkeys);

	/*
	 * if the key is valid
	 */
	if (key != 0) {
		uint_t k = key - 1;
		/*
		 * for every thread with TSD, call key's destructor
		 */
		for (tsd = tsd_list; tsd; tsd = tsd->ts_next) {
			/*
			 * no TSD for key in this thread
			 */
			if (key > tsd->ts_nkeys)
				continue;
			/*
			 * call destructor for key
			 */
			if (tsd->ts_value[k] && tsd_destructor[k])
				(*tsd_destructor[k])(tsd->ts_value[k]);
			/*
			 * reset value for key
			 */
			tsd->ts_value[k] = NULL;
		}
		/*
		 * actually free the key (NULL destructor == unused)
		 */
		tsd_destructor[k] = NULL;
	}

	mutex_exit(&tsd_mutex);
}
/*
 * Quickly return the per thread value that was stored with the specified key
 * Assumes the caller is protecting key from tsd_create and tsd_destroy
 */
void *
tsd_get(uint_t key)
{
	return (tsd_agent_get(curthread, key));
}

/*
 * Set a per thread value indexed with the specified key
 */
int
tsd_set(uint_t key, void *value)
{
	return (tsd_agent_set(curthread, key, value));
}
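/*
 * Illustrative usage sketch (hypothetical module code, not from the original
 * file): a key is created once, after which each thread manages its own slot
 * and the destructor reclaims the value at thread exit.  my_key, my_dtor,
 * my_state_t and state are hypothetical names:
 *
 *	static uint_t my_key;
 *
 *	static void
 *	my_dtor(void *val)
 *	{
 *		kmem_free(val, sizeof (my_state_t));
 *	}
 *
 *	tsd_create(&my_key, my_dtor);
 *	(void) tsd_set(my_key, state);
 *	state = tsd_get(my_key);
 *	tsd_destroy(&my_key);
 *
 * tsd_destroy() is typically called at module unload.
 */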
/*
 * Like tsd_get(), except that the agent lwp can get the tsd of
 * another thread in the same process (the agent thread only runs when the
 * process is completely stopped by /proc), or syslwp is creating a new lwp.
 */
void *
tsd_agent_get(kthread_t *t, uint_t key)
{
	struct tsd_thread *tsd = t->t_tsd;

	ASSERT(t == curthread ||
	    ttoproc(t)->p_agenttp == curthread || t->t_state == TS_STOPPED);

	if (key && tsd != NULL && key <= tsd->ts_nkeys)
		return (tsd->ts_value[key - 1]);
	return (NULL);
}
/*
 * Like tsd_set(), except that the agent lwp can set the tsd of
 * another thread in the same process, or syslwp can set the tsd
 * of a thread it's in the middle of creating.
 *
 * Assumes the caller is protecting key from tsd_create and tsd_destroy
 * May lock out tsd_destroy (and tsd_create), may allocate memory with
 * lock held
 */
int
tsd_agent_set(kthread_t *t, uint_t key, void *value)
{
	struct tsd_thread *tsd = t->t_tsd;

	ASSERT(t == curthread ||
	    ttoproc(t)->p_agenttp == curthread || t->t_state == TS_STOPPED);

	if (key == 0)
		return (EINVAL);
	if (tsd == NULL)
		tsd = t->t_tsd = kmem_zalloc(sizeof (*tsd), KM_SLEEP);
	if (key <= tsd->ts_nkeys) {
		tsd->ts_value[key - 1] = value;
		return (0);
	}

	ASSERT(key <= tsd_nkeys);

	/*
	 * lock out tsd_destroy()
	 */
	mutex_enter(&tsd_mutex);
	if (tsd->ts_nkeys == 0) {
		/*
		 * Link onto list of threads with TSD
		 */
		if ((tsd->ts_next = tsd_list) != NULL)
			tsd_list->ts_prev = tsd;
		tsd_list = tsd;
	}

	/*
	 * Allocate thread local storage and set the value for key
	 */
	tsd->ts_value = tsd_realloc(tsd->ts_value,
	    tsd->ts_nkeys * sizeof (void *),
	    key * sizeof (void *));
	tsd->ts_nkeys = key;
	tsd->ts_value[key - 1] = value;
	mutex_exit(&tsd_mutex);

	return (0);
}
/*
 * Return the per thread value that was stored with the specified key
 *	If necessary, create the key and the value
 *	Assumes the caller is protecting *keyp from tsd_destroy
 */
void *
tsd_getcreate(uint_t *keyp, void (*destroy)(void *), void *(*allocate)(void))
{
	void *value;
	uint_t key = *keyp;
	struct tsd_thread *tsd = curthread->t_tsd;

	if (tsd == NULL)
		tsd = curthread->t_tsd = kmem_zalloc(sizeof (*tsd), KM_SLEEP);
	if (key && key <= tsd->ts_nkeys && (value = tsd->ts_value[key - 1]))
		return (value);
	if (key == 0)
		tsd_create(keyp, destroy);
	(void) tsd_set(*keyp, value = (*allocate)());

	return (value);
}
/*
 * Called from thread_exit() to run the destructor function for each tsd
 *	Locks out tsd_create and tsd_destroy
 *	Assumes that the destructor *DOES NOT* use tsd
 */
void
tsd_exit(void)
{
	int i;
	struct tsd_thread *tsd = curthread->t_tsd;

	if (tsd == NULL)
		return;

	if (tsd->ts_nkeys == 0) {
		kmem_free(tsd, sizeof (*tsd));
		curthread->t_tsd = NULL;
		return;
	}

	/*
	 * lock out tsd_create and tsd_destroy, call
	 * the destructor, and mark the value as destroyed.
	 */
	mutex_enter(&tsd_mutex);

	for (i = 0; i < tsd->ts_nkeys; i++) {
		if (tsd->ts_value[i] && tsd_destructor[i])
			(*tsd_destructor[i])(tsd->ts_value[i]);
		tsd->ts_value[i] = NULL;
	}

	/*
	 * remove from linked list of threads with TSD
	 */
	if (tsd->ts_next)
		tsd->ts_next->ts_prev = tsd->ts_prev;
	if (tsd->ts_prev)
		tsd->ts_prev->ts_next = tsd->ts_next;
	if (tsd_list == tsd)
		tsd_list = tsd->ts_next;

	mutex_exit(&tsd_mutex);

	/*
	 * free up the TSD
	 */
	kmem_free(tsd->ts_value, tsd->ts_nkeys * sizeof (void *));
	kmem_free(tsd, sizeof (struct tsd_thread));
	curthread->t_tsd = NULL;
}
/*
 * realloc
 */
static void *
tsd_realloc(void *old, size_t osize, size_t nsize)
{
	void *new;

	new = kmem_zalloc(nsize, KM_SLEEP);
	if (old) {
		bcopy(old, new, osize);
		kmem_free(old, osize);
	}
	return (new);
}
/*
 * Return non-zero if an interrupt is being serviced.
 */
int
servicing_interrupt()
{
	int onintr = 0;

	/* Are we an interrupt thread */
	if (curthread->t_flag & T_INTR_THREAD)
		return (1);

	/* Are we servicing a high level interrupt? */
	if (CPU_ON_INTR(CPU)) {
		onintr = 1;
	} else {
		kpreempt_disable();
		onintr = CPU_ON_INTR(CPU);
		kpreempt_enable();
	}
	return (onintr);
}
/*
 * Change the dispatch priority of a thread in the system.
 * Used when raising or lowering a thread's priority.
 * (E.g., priority inheritance)
 *
 * Since threads are queued according to their priority, we
 * must check the thread's state to determine whether it
 * is on a queue somewhere.  If it is, we've got to:
 *
 *	o Dequeue the thread.
 *	o Change its effective priority.
 *	o Enqueue the thread.
 *
 * Assumptions: The thread whose priority we wish to change
 * must be locked before we call thread_change_(e)pri().
 * The thread_change(e)pri() function doesn't drop the thread
 * lock--that must be done by its caller.
 */
void
thread_change_epri(kthread_t *t, pri_t disp_pri)
{
	uint_t	state;

	ASSERT(THREAD_LOCK_HELD(t));

	/*
	 * If the inherited priority hasn't actually changed,
	 * just return.
	 */
	if (t->t_epri == disp_pri)
		return;

	state = t->t_state;

	/*
	 * If it's not on a queue, change the priority with impunity.
	 */
	if ((state & (TS_SLEEP | TS_RUN | TS_WAIT)) == 0) {
		t->t_epri = disp_pri;
		if (state == TS_ONPROC) {
			cpu_t *cp = t->t_disp_queue->disp_cpu;

			if (t == cp->cpu_dispthread)
				cp->cpu_dispatch_pri = DISP_PRIO(t);
		}
	} else if (state == TS_SLEEP) {
		/*
		 * Take the thread out of its sleep queue.
		 * Change the inherited priority.
		 * Re-enqueue the thread.
		 * Each synchronization object exports a function
		 * to do this in an appropriate manner.
		 */
		SOBJ_CHANGE_EPRI(t->t_sobj_ops, t, disp_pri);
	} else if (state == TS_WAIT) {
		/*
		 * Re-enqueue a thread on the wait queue if its
		 * effective priority needs to change.
		 */
		if (disp_pri != t->t_epri)
			waitq_change_pri(t, disp_pri);
	} else {
		/*
		 * The thread is on a run queue.
		 * Note: setbackdq() may not put the thread
		 * back on the same run queue where it originally
		 * resided.
		 */
		(void) dispdeq(t);
		t->t_epri = disp_pri;
		setbackdq(t);
	}
	schedctl_set_cidpri(t);
}
/*
 * Function: Change the t_pri field of a thread.
 * Side Effects: Adjust the thread ordering on a run queue
 *		 or sleep queue, if necessary.
 * Returns: 1 if the thread was on a run queue, else 0.
 */
int
thread_change_pri(kthread_t *t, pri_t disp_pri, int front)
{
	uint_t	state;
	int	on_rq = 0;

	ASSERT(THREAD_LOCK_HELD(t));

	state = t->t_state;
	THREAD_WILLCHANGE_PRI(t, disp_pri);

	/*
	 * If it's not on a queue, change the priority with impunity.
	 */
	if ((state & (TS_SLEEP | TS_RUN | TS_WAIT)) == 0) {
		t->t_pri = disp_pri;

		if (state == TS_ONPROC) {
			cpu_t *cp = t->t_disp_queue->disp_cpu;

			if (t == cp->cpu_dispthread)
				cp->cpu_dispatch_pri = DISP_PRIO(t);
		}
	} else if (state == TS_SLEEP) {
		/*
		 * If the priority has changed, take the thread out of
		 * its sleep queue and change the priority.
		 * Re-enqueue the thread.
		 * Each synchronization object exports a function
		 * to do this in an appropriate manner.
		 */
		if (disp_pri != t->t_pri)
			SOBJ_CHANGE_PRI(t->t_sobj_ops, t, disp_pri);
	} else if (state == TS_WAIT) {
		/*
		 * Re-enqueue a thread on the wait queue if its
		 * priority needs to change.
		 */
		if (disp_pri != t->t_pri)
			waitq_change_pri(t, disp_pri);
	} else {
		/*
		 * The thread is on a run queue.
		 * Note: setbackdq() may not put the thread
		 * back on the same run queue where it originally
		 * resided.
		 *
		 * We still requeue the thread even if the priority
		 * is unchanged to preserve round-robin (and other)
		 * effects between threads of the same priority.
		 */
		on_rq = dispdeq(t);
		ASSERT(on_rq);
		t->t_pri = disp_pri;
		if (front) {
			setfrontdq(t);
		} else {
			setbackdq(t);
		}
	}
	schedctl_set_cidpri(t);
	return (on_rq);
}
/*
 * Tunable kmem_stackinfo is set, fill the kernel thread stack with a
 * specific pattern.
 */
static void
stkinfo_begin(kthread_t *t)
{
	caddr_t	start;	/* stack start */
	caddr_t	end;	/* stack end */
	uint64_t *ptr;	/* pattern pointer */

	/*
	 * Stack grows up or down, see thread_create(),
	 * compute stack memory area start and end (start < end).
	 */
	if (t->t_stk > t->t_stkbase) {
		/* stack grows down */
		start = t->t_stkbase;
		end = t->t_stk;
	} else {
		/* stack grows up */
		start = t->t_stk;
		end = t->t_stkbase;
	}

	/*
	 * Stackinfo pattern size is 8 bytes.  Ensure proper 8 byte
	 * alignment for start and end in stack area boundaries
	 * (protection against corrupt t_stkbase/t_stk data).
	 */
	if ((((uintptr_t)start) & 0x7) != 0) {
		start = (caddr_t)((((uintptr_t)start) & (~0x7)) + 8);
	}
	end = (caddr_t)(((uintptr_t)end) & (~0x7));

	if ((end <= start) || (end - start) > (1024 * 1024)) {
		/* negative or stack size > 1 meg, assume bogus */
		return;
	}

	/* fill stack area with a pattern (instead of zeros) */
	ptr = (uint64_t *)((void *)start);
	while (ptr < (uint64_t *)((void *)end)) {
		*ptr++ = KMEM_STKINFO_PATTERN;
	}
}
/*
 * Tunable kmem_stackinfo is set, create the stackinfo log if it doesn't
 * already exist, compute the percentage of kernel stack really used, and
 * record it in the log if it's among the highest percentages seen.
 */
static void
stkinfo_end(kthread_t *t)
{
	caddr_t	start;	/* stack start */
	caddr_t	end;	/* stack end */
	uint64_t *ptr;	/* pattern pointer */
	size_t stksz;	/* stack size */
	size_t percent = 0;
	uint_t index = 0;
	uint_t i;
	size_t smallest = 0;
	static size_t smallest_percent = (size_t)-1;
	static uint_t full = 0;

	/* create the stackinfo log, if doesn't already exist */
	mutex_enter(&kmem_stkinfo_lock);
	if (kmem_stkinfo_log == NULL) {
		kmem_stkinfo_log = (kmem_stkinfo_t *)
		    kmem_zalloc(KMEM_STKINFO_LOG_SIZE *
		    (sizeof (kmem_stkinfo_t)), KM_NOSLEEP);
		if (kmem_stkinfo_log == NULL) {
			mutex_exit(&kmem_stkinfo_lock);
			return;
		}
	}
	mutex_exit(&kmem_stkinfo_lock);

	/*
	 * Stack grows up or down, see thread_create(),
	 * compute stack memory area start and end (start < end).
	 */
	if (t->t_stk > t->t_stkbase) {
		/* stack grows down */
		start = t->t_stkbase;
		end = t->t_stk;
	} else {
		/* stack grows up */
		start = t->t_stk;
		end = t->t_stkbase;
	}

	/* stack size as found in kthread_t */
	stksz = end - start;

	/*
	 * Stackinfo pattern size is 8 bytes.  Ensure proper 8 byte
	 * alignment for start and end in stack area boundaries
	 * (protection against corrupt t_stkbase/t_stk data).
	 */
	if ((((uintptr_t)start) & 0x7) != 0) {
		start = (caddr_t)((((uintptr_t)start) & (~0x7)) + 8);
	}
	end = (caddr_t)(((uintptr_t)end) & (~0x7));

	if ((end <= start) || (end - start) > (1024 * 1024)) {
		/* negative or stack size > 1 meg, assume bogus */
		return;
	}

	/* search until no pattern in the stack */
	if (t->t_stk > t->t_stkbase) {
		/* stack grows down */
#if defined(__i386) || defined(__amd64)
		/*
		 * 6 longs are pushed on stack, see thread_load(). Skip
		 * them, so if kthread has never run, percent is zero.
		 * 8 byte alignment is preserved for a 32 bit kernel,
		 * 6 x 4 = 24, 24 is a multiple of 8.
		 */
		end -= (6 * sizeof (long));
#endif
		ptr = (uint64_t *)((void *)start);
		while (ptr < (uint64_t *)((void *)end)) {
			if (*ptr != KMEM_STKINFO_PATTERN) {
				percent = stkinfo_percent(end,
				    start, (caddr_t)ptr);
				break;
			}
			ptr++;
		}
	} else {
		/* stack grows up */
		ptr = (uint64_t *)((void *)end);
		ptr--;
		while (ptr >= (uint64_t *)((void *)start)) {
			if (*ptr != KMEM_STKINFO_PATTERN) {
				percent = stkinfo_percent(start,
				    end, (caddr_t)ptr);
				break;
			}
			ptr--;
		}
	}

	DTRACE_PROBE3(stack__usage, kthread_t *, t,
	    size_t, stksz, size_t, percent);

	if (percent == 0) {
		return;
	}

	mutex_enter(&kmem_stkinfo_lock);
	if (full == KMEM_STKINFO_LOG_SIZE && percent < smallest_percent) {
		/*
		 * The log is full and already contains the highest values
		 */
		mutex_exit(&kmem_stkinfo_lock);
		return;
	}

	/* keep a log of the highest used stack */
	for (i = 0; i < KMEM_STKINFO_LOG_SIZE; i++) {
		if (kmem_stkinfo_log[i].percent == 0) {
			index = i;
			full++;
			break;
		}
		if (smallest == 0) {
			smallest = kmem_stkinfo_log[i].percent;
			index = i;
			continue;
		}
		if (kmem_stkinfo_log[i].percent < smallest) {
			smallest = kmem_stkinfo_log[i].percent;
			index = i;
		}
	}

	if (percent >= kmem_stkinfo_log[index].percent) {
		kmem_stkinfo_log[index].kthread = (caddr_t)t;
		kmem_stkinfo_log[index].t_startpc = (caddr_t)t->t_startpc;
		kmem_stkinfo_log[index].start = start;
		kmem_stkinfo_log[index].stksz = stksz;
		kmem_stkinfo_log[index].percent = percent;
		kmem_stkinfo_log[index].t_tid = t->t_tid;
		kmem_stkinfo_log[index].cmd[0] = '\0';
		if (t->t_tid != 0) {
			stksz = strlen((t->t_procp)->p_user.u_comm);
			if (stksz >= KMEM_STKINFO_STR_SIZE) {
				stksz = KMEM_STKINFO_STR_SIZE - 1;
				kmem_stkinfo_log[index].cmd[stksz] = '\0';
			} else {
				stksz += 1;
			}
			(void) memcpy(kmem_stkinfo_log[index].cmd,
			    (t->t_procp)->p_user.u_comm, stksz);
		}
		if (percent < smallest_percent) {
			smallest_percent = percent;
		}
	}
	mutex_exit(&kmem_stkinfo_lock);
}
/*
 * Tunable kmem_stackinfo is set, compute stack utilization percentage.
 */
static size_t
stkinfo_percent(caddr_t t_stk, caddr_t t_stkbase, caddr_t sp)
{
	size_t percent;
	size_t s;

	if (t_stk > t_stkbase) {
		/* stack grows down */
		if (sp > t_stk) {
			return (0);
		}
		if (sp < t_stkbase) {
			return (100);
		}
		percent = t_stk - sp + 1;
		s = t_stk - t_stkbase + 1;
	} else {
		/* stack grows up */
		if (sp < t_stk) {
			return (0);
		}
		if (sp > t_stkbase) {
			return (100);
		}
		percent = sp - t_stk + 1;
		s = t_stkbase - t_stk + 1;
	}
	percent = ((100 * percent) / s) + 1;
	if (percent > 100) {
		percent = 100;
	}
	return (percent);
}
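/*
 * Worked example (illustrative): for a downward-growing stack with
 * t_stk - t_stkbase + 1 = 0x4000 bytes, and the deepest non-pattern word at
 * sp such that t_stk - sp + 1 = 0x1000, the function returns
 * ((100 * 0x1000) / 0x4000) + 1 = 26, i.e. roughly a quarter of the stack
 * was ever used.
 */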
/*
 * NOTE: This will silently truncate a name > THREAD_NAME_MAX - 1 characters
 * long. It is expected that callers (acting on behalf of userland clients)
 * will perform any required checks to return the correct error semantics.
 * It is also expected that callers on behalf of userland clients have done
 * any necessary permission checks.
 */
int
thread_setname(kthread_t *t, const char *name)
{
	char *buf = NULL;

	/*
	 * We optimistically assume that a thread's name will only be set
	 * once and so allocate memory in preparation of setting t_name.
	 * If it turns out a name has already been set, we just discard (free)
	 * the buffer we just allocated and reuse the current buffer
	 * (as all should be THREAD_NAME_MAX large).
	 *
	 * Such an arrangement means over the lifetime of a kthread_t, t_name
	 * is either NULL or has one value (the address of the buffer holding
	 * the current thread name).  The assumption is that most kthread_t
	 * instances will not have a name assigned, so dynamically allocating
	 * the memory should minimize the footprint of this feature, but by
	 * having the buffer persist for the life of the thread, it simplifies
	 * usage in highly constrained situations (e.g. dtrace).
	 */
	if (name != NULL && name[0] != '\0') {
		for (size_t i = 0; name[i] != '\0'; i++) {
			if (!isprint(name[i]))
				return (EINVAL);
		}

		buf = kmem_zalloc(THREAD_NAME_MAX, KM_SLEEP);
		(void) strlcpy(buf, name, THREAD_NAME_MAX);
	}

	mutex_enter(&ttoproc(t)->p_lock);
	if (t->t_name == NULL) {
		t->t_name = buf;
	} else {
		if (buf != NULL) {
			(void) strlcpy(t->t_name, name, THREAD_NAME_MAX);
			kmem_free(buf, THREAD_NAME_MAX);
		} else {
			bzero(t->t_name, THREAD_NAME_MAX);
		}
	}
	mutex_exit(&ttoproc(t)->p_lock);
	return (0);
}
int
thread_vsetname(kthread_t *t, const char *fmt, ...)
{
	char name[THREAD_NAME_MAX];
	va_list va;
	int rc;

	va_start(va, fmt);
	rc = vsnprintf(name, sizeof (name), fmt, va);
	va_end(va);

	if (rc < 0)
		return (EINVAL);

	if (rc >= sizeof (name))
		return (ENAMETOOLONG);

	return (thread_setname(t, name));
}
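/*
 * Illustrative usage sketch (not from the original file): naming a kernel
 * thread either directly or via a format string.  "my_worker" and instance
 * are hypothetical:
 *
 *	(void) thread_setname(t, "my_worker");
 *	(void) thread_vsetname(t, "my_worker_%d", instance);
 */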