/*
 * Copyright (c) 2003-2011 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Each cpu in a system has its own self-contained light weight kernel
 * thread scheduler, which means that generally speaking we only need
 * to use a critical section to avoid problems.  Foreign thread
 * scheduling is queued via (async) IPIs.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/rtprio.h>
#include <sys/kinfo.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <sys/kthread.h>
#include <machine/cpu.h>
#include <sys/spinlock.h>
#include <sys/ktr.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>
#include <sys/mplock2.h>

#include <sys/dsched.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>

#include <machine/stdarg.h>
#include <machine/smp.h>
#include <machine/clock.h>

#ifdef _KERNEL_VIRTUAL
#include <pthread.h>
#endif

#if !defined(KTR_CTXSW)
#define KTR_CTXSW	KTR_ALL
#endif
KTR_INFO_MASTER(ctxsw);
KTR_INFO(KTR_CTXSW, ctxsw, sw, 0, "#cpu[%d].td = %p", int cpu, struct thread *td);
KTR_INFO(KTR_CTXSW, ctxsw, pre, 1, "#cpu[%d].td = %p", int cpu, struct thread *td);
KTR_INFO(KTR_CTXSW, ctxsw, newtd, 2, "#threads[%p].name = %s", struct thread *td, char *comm);
KTR_INFO(KTR_CTXSW, ctxsw, deadtd, 3, "#threads[%p].name = <dead>", struct thread *td);
static MALLOC_DEFINE(M_THREAD, "thread", "lwkt threads");

static int panic_on_cscount = 0;
static int64_t switch_count = 0;
static int64_t preempt_hit = 0;
static int64_t preempt_miss = 0;
static int64_t preempt_weird = 0;
static int lwkt_use_spin_port;
static struct objcache *thread_cache;
int cpu_mwait_spin = 0;

static void lwkt_schedule_remote(void *arg, int arg2, struct intrframe *frame);
static void lwkt_setcpu_remote(void *arg);
/*
 * We can make all thread ports use the spin backend instead of the thread
 * backend.  This should only be set to debug the spin backend.
 */
TUNABLE_INT("lwkt.use_spin_port", &lwkt_use_spin_port);

SYSCTL_INT(_lwkt, OID_AUTO, panic_on_cscount, CTLFLAG_RW, &panic_on_cscount, 0,
    "Panic if attempting to switch lwkt's while mastering cpusync");
SYSCTL_QUAD(_lwkt, OID_AUTO, switch_count, CTLFLAG_RW, &switch_count, 0,
    "Number of switched threads");
SYSCTL_QUAD(_lwkt, OID_AUTO, preempt_hit, CTLFLAG_RW, &preempt_hit, 0,
    "Successful preemption events");
SYSCTL_QUAD(_lwkt, OID_AUTO, preempt_miss, CTLFLAG_RW, &preempt_miss, 0,
    "Failed preemption events");
SYSCTL_QUAD(_lwkt, OID_AUTO, preempt_weird, CTLFLAG_RW, &preempt_weird, 0,
    "Number of preempted threads.");
static int fairq_enable = 0;
SYSCTL_INT(_lwkt, OID_AUTO, fairq_enable, CTLFLAG_RW,
    &fairq_enable, 0, "Turn on fairq priority accumulators");
static int fairq_bypass = -1;
SYSCTL_INT(_lwkt, OID_AUTO, fairq_bypass, CTLFLAG_RW,
    &fairq_bypass, 0, "Allow fairq to bypass td on token failure");
extern int lwkt_sched_debug;
int lwkt_sched_debug = 0;
SYSCTL_INT(_lwkt, OID_AUTO, sched_debug, CTLFLAG_RW,
    &lwkt_sched_debug, 0, "Scheduler debug");
static u_int lwkt_spin_loops = 10;
SYSCTL_UINT(_lwkt, OID_AUTO, spin_loops, CTLFLAG_RW,
    &lwkt_spin_loops, 0, "Scheduler spin loops until sorted decon");
static int preempt_enable = 1;
SYSCTL_INT(_lwkt, OID_AUTO, preempt_enable, CTLFLAG_RW,
    &preempt_enable, 0, "Enable preemption");
static int lwkt_cache_threads = 0;
SYSCTL_INT(_lwkt, OID_AUTO, cache_threads, CTLFLAG_RD,
    &lwkt_cache_threads, 0, "thread+kstack cache");
/*
 * These helper procedures handle the runq, they can only be called from
 * within a critical section.
 *
 * WARNING! Prior to SMP being brought up it is possible to enqueue and
 * dequeue threads belonging to other cpus, so be sure to use td->td_gd
 * instead of 'mycpu' when referencing the globaldata structure.  Once
 * SMP live enqueuing and dequeueing only occurs on the current cpu.
 */
static __inline void
_lwkt_dequeue(thread_t td)
{
    if (td->td_flags & TDF_RUNQ) {
        struct globaldata *gd = td->td_gd;

        td->td_flags &= ~TDF_RUNQ;
        TAILQ_REMOVE(&gd->gd_tdrunq, td, td_threadq);
        --gd->gd_tdrunqcount;
        if (TAILQ_FIRST(&gd->gd_tdrunq) == NULL)
            atomic_clear_int(&gd->gd_reqflags, RQF_RUNNING);
    }
}
/*
 * There are a limited number of lwkt threads runnable since user
 * processes only schedule one at a time per cpu.  However, there can
 * be many user processes in kernel mode exiting from a tsleep() which
 * become runnable.
 *
 * NOTE: lwkt_schedulerclock() will force a round-robin based on td_pri and
 *	 will ignore user priority.  This is to ensure that user threads in
 *	 kernel mode get cpu at some point regardless of what the user
 *	 scheduler thinks.
 */
static __inline void
_lwkt_enqueue(thread_t td)
{
    thread_t xtd;

    if ((td->td_flags & (TDF_RUNQ|TDF_MIGRATING|TDF_BLOCKQ)) == 0) {
        struct globaldata *gd = td->td_gd;

        td->td_flags |= TDF_RUNQ;
        xtd = TAILQ_FIRST(&gd->gd_tdrunq);
        if (xtd == NULL) {
            TAILQ_INSERT_TAIL(&gd->gd_tdrunq, td, td_threadq);
            atomic_set_int(&gd->gd_reqflags, RQF_RUNNING);
        } else {
            /*
             * NOTE: td_upri - higher numbers are more desirable, same sense
             *	     as td_pri (typically reversed from lwp_upri).
             *
             *	     In the equal priority case we want the best selection
             *	     at the beginning so the less desirable selections know
             *	     that they have to setrunqueue/go-to-another-cpu, even
             *	     though it means switching back to the 'best' selection.
             *	     This also avoids degenerate situations when many threads
             *	     are runnable or waking up at the same time.
             *
             *	     If upri matches exactly place at end/round-robin.
             */
            while (xtd &&
                   (xtd->td_pri >= td->td_pri ||
                    (xtd->td_pri == td->td_pri &&
                     xtd->td_upri >= td->td_upri))) {
                xtd = TAILQ_NEXT(xtd, td_threadq);
            }
            if (xtd)
                TAILQ_INSERT_BEFORE(xtd, td, td_threadq);
            else
                TAILQ_INSERT_TAIL(&gd->gd_tdrunq, td, td_threadq);
        }
        ++gd->gd_tdrunqcount;

        /*
         * Request a LWKT reschedule if we are now at the head of the queue.
         */
        if (TAILQ_FIRST(&gd->gd_tdrunq) == td)
            need_lwkt_resched();
    }
}
static boolean_t
_lwkt_thread_ctor(void *obj, void *privdata, int ocflags)
{
    struct thread *td = (struct thread *)obj;

    td->td_kstack = NULL;
    td->td_kstack_size = 0;
    td->td_flags = TDF_ALLOCATED_THREAD;
    return (1);
}
static void
_lwkt_thread_dtor(void *obj, void *privdata)
{
    struct thread *td = (struct thread *)obj;

    KASSERT(td->td_flags & TDF_ALLOCATED_THREAD,
        ("_lwkt_thread_dtor: not allocated from objcache"));
    KASSERT((td->td_flags & TDF_ALLOCATED_STACK) && td->td_kstack &&
        td->td_kstack_size > 0,
        ("_lwkt_thread_dtor: corrupted stack"));
    kmem_free(&kernel_map, (vm_offset_t)td->td_kstack, td->td_kstack_size);
    td->td_kstack = NULL;
}
/*
 * Initialize the lwkt s/system.
 *
 * Nominally cache up to 32 thread + kstack structures.  Cache more on
 * systems with a lot of cpu cores.
 */
static void
lwkt_init(void)
{
    TUNABLE_INT("lwkt.cache_threads", &lwkt_cache_threads);
    if (lwkt_cache_threads == 0) {
        lwkt_cache_threads = ncpus * 4;
        if (lwkt_cache_threads < 32)
            lwkt_cache_threads = 32;
    }
    thread_cache = objcache_create_mbacked(
                        M_THREAD, sizeof(struct thread),
                        0, lwkt_cache_threads,
                        _lwkt_thread_ctor, _lwkt_thread_dtor, NULL);
}
SYSINIT(lwkt_init, SI_BOOT2_LWKT_INIT, SI_ORDER_FIRST, lwkt_init, NULL);
/*
 * Schedule a thread to run.  As the current thread we can always safely
 * schedule ourselves, and a shortcut procedure is provided for that
 * case.
 *
 * (non-blocking, self contained on a per cpu basis)
 */
void
lwkt_schedule_self(thread_t td)
{
    KKASSERT((td->td_flags & TDF_MIGRATING) == 0);
    crit_enter_quick(td);
    KASSERT(td != &td->td_gd->gd_idlethread,
        ("lwkt_schedule_self(): scheduling gd_idlethread is illegal!"));
    KKASSERT(td->td_lwp == NULL ||
        (td->td_lwp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
    _lwkt_enqueue(td);
    crit_exit_quick(td);
}
/*
 * Deschedule a thread.
 *
 * (non-blocking, self contained on a per cpu basis)
 */
void
lwkt_deschedule_self(thread_t td)
{
    crit_enter_quick(td);
    _lwkt_dequeue(td);
    crit_exit_quick(td);
}

/*
 * LWKTs operate on a per-cpu basis
 *
 * WARNING!  Called from early boot, 'mycpu' may not work yet.
 */
void
lwkt_gdinit(struct globaldata *gd)
{
    TAILQ_INIT(&gd->gd_tdrunq);
    TAILQ_INIT(&gd->gd_tdallq);
}
/*
 * Create a new thread.  The thread must be associated with a process context
 * or LWKT start address before it can be scheduled.  If the target cpu is
 * -1 the thread will be created on the current cpu.
 *
 * If you intend to create a thread without a process context this function
 * does everything except load the startup and switcher function.
 */
thread_t
lwkt_alloc_thread(struct thread *td, int stksize, int cpu, int flags)
{
    static int cpu_rotator;
    globaldata_t gd = mycpu;
    void *stack;

    /*
     * If static thread storage is not supplied allocate a thread.  Reuse
     * a cached free thread if possible.  gd_freetd is used to keep an exiting
     * thread intact through the exit.
     */
    if (td == NULL) {
        crit_enter_gd(gd);
        if ((td = gd->gd_freetd) != NULL) {
            KKASSERT((td->td_flags & (TDF_RUNNING|TDF_PREEMPT_LOCK|
                                      TDF_RUNQ)) == 0);
            gd->gd_freetd = NULL;
        } else {
            td = objcache_get(thread_cache, M_WAITOK);
            KKASSERT((td->td_flags & (TDF_RUNNING|TDF_PREEMPT_LOCK|
                                      TDF_RUNQ)) == 0);
        }
        crit_exit_gd(gd);
        KASSERT((td->td_flags &
                 (TDF_ALLOCATED_THREAD|TDF_RUNNING|TDF_PREEMPT_LOCK)) ==
                TDF_ALLOCATED_THREAD,
                ("lwkt_alloc_thread: corrupted td flags 0x%X", td->td_flags));
        flags |= td->td_flags & (TDF_ALLOCATED_THREAD|TDF_ALLOCATED_STACK);
    }

    /*
     * Try to reuse cached stack.
     */
    if ((stack = td->td_kstack) != NULL && td->td_kstack_size != stksize) {
        if (flags & TDF_ALLOCATED_STACK) {
            kmem_free(&kernel_map, (vm_offset_t)stack, td->td_kstack_size);
            stack = NULL;
        }
    }
    if (stack == NULL) {
        if (cpu < 0) {
            stack = (void *)kmem_alloc_stack(&kernel_map, stksize, 0);
        } else {
            stack = (void *)kmem_alloc_stack(&kernel_map, stksize,
                                             KM_CPU(cpu));
        }
        flags |= TDF_ALLOCATED_STACK;
    }
    if (cpu < 0) {
        cpu = ++cpu_rotator;
        cpu_ccfence();
        cpu = (uint32_t)cpu % (uint32_t)ncpus;
    }
    lwkt_init_thread(td, stack, stksize, flags, globaldata_find(cpu));
    return (td);
}
/*
 * Initialize a preexisting thread structure.  This function is used by
 * lwkt_alloc_thread() and also used to initialize the per-cpu idlethread.
 *
 * All threads start out in a critical section at a priority of
 * TDPRI_KERN_DAEMON.  Higher level code will modify the priority as
 * appropriate.  This function may send an IPI message when the
 * requested cpu is not the current cpu and consequently gd_tdallq may
 * not be initialized synchronously from the point of view of the originating
 * cpu.
 *
 * NOTE! we have to be careful in regards to creating threads for other cpus
 * if SMP has not yet been activated.
 */
static void
lwkt_init_thread_remote(void *arg)
{
    thread_t td = arg;

    /*
     * Protected by critical section held by IPI dispatch
     */
    TAILQ_INSERT_TAIL(&td->td_gd->gd_tdallq, td, td_allq);
}

/*
 * lwkt core thread structural initialization.
 *
 * NOTE: All threads are initialized as mpsafe threads.
 */
void
lwkt_init_thread(thread_t td, void *stack, int stksize, int flags,
                 struct globaldata *gd)
{
    globaldata_t mygd = mycpu;

    bzero(td, sizeof(struct thread));
    td->td_kstack = stack;
    td->td_kstack_size = stksize;
    td->td_flags = flags;
    td->td_type = TD_TYPE_GENERIC;
    td->td_gd = gd;
    td->td_pri = TDPRI_KERN_DAEMON;
    td->td_critcount = 1;
    td->td_toks_have = NULL;
    td->td_toks_stop = &td->td_toks_base;
    if (lwkt_use_spin_port || (flags & TDF_FORCE_SPINPORT)) {
        lwkt_initport_spin(&td->td_msgport, td,
                           (flags & TDF_FIXEDCPU) ? TRUE : FALSE);
    } else {
        lwkt_initport_thread(&td->td_msgport, td);
    }
    pmap_init_thread(td);
    /*
     * Normally initializing a thread for a remote cpu requires sending an
     * IPI.  However, the idlethread is setup before the other cpus are
     * activated so we have to treat it as a special case.  XXX manipulation
     * of gd_tdallq requires the BGL.
     */
    if (gd == mygd || td == &gd->gd_idlethread) {
        crit_enter_gd(mygd);
        TAILQ_INSERT_TAIL(&gd->gd_tdallq, td, td_allq);
        crit_exit_gd(mygd);
    } else {
        crit_enter_gd(mygd);
        lwkt_send_ipiq(gd, lwkt_init_thread_remote, td);
        crit_exit_gd(mygd);
    }
    dsched_enter_thread(td);
}
void
lwkt_set_comm(thread_t td, const char *ctl, ...)
{
    __va_list va;

    __va_start(va, ctl);
    kvsnprintf(td->td_comm, sizeof(td->td_comm), ctl, va);
    __va_end(va);
    KTR_LOG(ctxsw_newtd, td, td->td_comm);
}
/*
 * Prevent the thread from getting destroyed.  Note that unlike PHOLD/PRELE
 * this does not prevent the thread from migrating to another cpu so the
 * gd_tdallq state is not protected by this.
 */
void
lwkt_hold(thread_t td)
{
    atomic_add_int(&td->td_refs, 1);
}

void
lwkt_rele(thread_t td)
{
    KKASSERT(td->td_refs > 0);
    atomic_add_int(&td->td_refs, -1);
}
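/*
 * Illustrative sketch, not part of the original source: the usual
 * lwkt_hold()/lwkt_rele() pairing.  Code that stashes a thread pointer
 * and may reference it after the thread could otherwise be reaped takes
 * a ref first and drops it when done.  The helper name is hypothetical.
 */
#if 0
static void
example_poke_thread(thread_t td)
{
    lwkt_hold(td);                      /* keep td from being destroyed */
    kprintf("thread %s is still referenced\n", td->td_comm);
    lwkt_rele(td);                      /* drop our reference */
}
#endif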
void
lwkt_free_thread(thread_t td)
{
    KKASSERT(td->td_refs == 0);
    KKASSERT((td->td_flags & (TDF_RUNNING | TDF_PREEMPT_LOCK |
                              TDF_RUNQ | TDF_TSLEEPQ)) == 0);
    if (td->td_flags & TDF_ALLOCATED_THREAD) {
        objcache_put(thread_cache, td);
    } else if (td->td_flags & TDF_ALLOCATED_STACK) {
        /* client-allocated struct with internally allocated stack */
        KASSERT(td->td_kstack && td->td_kstack_size > 0,
            ("lwkt_free_thread: corrupted stack"));
        kmem_free(&kernel_map, (vm_offset_t)td->td_kstack, td->td_kstack_size);
        td->td_kstack = NULL;
        td->td_kstack_size = 0;
    }

    KTR_LOG(ctxsw_deadtd, td);
}
/*
 * Switch to the next runnable lwkt.  If no LWKTs are runnable then
 * switch to the idlethread.  Switching must occur within a critical
 * section to avoid races with the scheduling queue.
 *
 * We always have full control over our cpu's run queue.  Other cpus
 * that wish to manipulate our queue must use the cpu_*msg() calls to
 * talk to our cpu, so a critical section is all that is needed and
 * the result is very, very fast thread switching.
 *
 * The LWKT scheduler uses a fixed priority model and round-robins at
 * each priority level.  User process scheduling is a totally
 * different beast and LWKT priorities should not be confused with
 * user process priorities.
 *
 * PREEMPTION NOTE: Preemption occurs via lwkt_preempt().  lwkt_switch()
 * is not called by the current thread in the preemption case, only when
 * the preempting thread blocks (in order to return to the original thread).
 *
 * SPECIAL NOTE ON SWITCH ATOMICY: Certain operations such as thread
 * migration and tsleep deschedule the current lwkt thread and call
 * lwkt_switch().  In particular, the target cpu of the migration fully
 * expects the thread to become non-runnable and can deadlock against
 * cpusync operations if we run any IPIs prior to switching the thread out.
 *
 * WE MUST BE VERY CAREFUL NOT TO RUN SPLZ DIRECTLY OR INDIRECTLY IF
 * THE CURRENT THREAD HAS BEEN DESCHEDULED!
 */
void
lwkt_switch(void)
{
    globaldata_t gd = mycpu;
    thread_t td = gd->gd_curthread;
    thread_t ntd;
    int upri;
#ifdef LOOPMASK
    uint64_t tsc_base = rdtsc();
#endif

    KKASSERT(gd->gd_processing_ipiq == 0);
    KKASSERT(td->td_flags & TDF_RUNNING);

    /*
     * Switching from within a 'fast' (non thread switched) interrupt or IPI
     * is illegal.  However, we may have to do it anyway if we hit a fatal
     * kernel trap or we have paniced.
     *
     * If this case occurs save and restore the interrupt nesting level.
     */
    if (gd->gd_intr_nesting_level) {
        int savegdnest;
        int savegdtrap;

        if (gd->gd_trap_nesting_level == 0 && panic_cpu_gd != mycpu) {
            panic("lwkt_switch: Attempt to switch from a "
                  "fast interrupt, ipi, or hard code section, "
                  "td %p\n", td);
        } else {
            savegdnest = gd->gd_intr_nesting_level;
            savegdtrap = gd->gd_trap_nesting_level;
            gd->gd_intr_nesting_level = 0;
            gd->gd_trap_nesting_level = 0;
            if ((td->td_flags & TDF_PANICWARN) == 0) {
                td->td_flags |= TDF_PANICWARN;
                kprintf("Warning: thread switch from interrupt, IPI, "
                        "or hard code section.\n"
                        "thread %p (%s)\n", td, td->td_comm);
            }
            lwkt_switch();      /* recursive switch */
            gd->gd_intr_nesting_level = savegdnest;
            gd->gd_trap_nesting_level = savegdtrap;
            return;
        }
    }

    /*
     * Release our current user process designation if we are blocking
     * or if a user reschedule was requested.
     *
     * NOTE: This function is NOT called if we are switching into or
     *	     returning from a preemption.
     *
     * NOTE: Releasing our current user process designation may cause
     *	     it to be assigned to another thread, which in turn will
     *	     cause us to block in the usched acquire code when we attempt
     *	     to return to userland.
     *
     * NOTE: On SMP systems this can be very nasty when heavy token
     *	     contention is present so we want to be careful not to
     *	     release the designation gratuitously.
     */
    if (td->td_release &&
        (user_resched_wanted() || (td->td_flags & TDF_RUNQ) == 0)) {
        td->td_release(td);
    }

    /*
     * Release all tokens.  Once we do this we must remain in the critical
     * section and cannot run IPIs or other interrupts until we switch away
     * because they may implode if they try to get a token using our thread
     * context.
     */
    crit_enter_gd(gd);
    if (TD_TOKS_HELD(td))
        lwkt_relalltokens(td);

    /*
     * We had better not be holding any spin locks, but don't get into an
     * endless panic loop.
     */
    KASSERT(gd->gd_spinlocks == 0 || panicstr != NULL,
            ("lwkt_switch: still holding %d exclusive spinlocks!",
             gd->gd_spinlocks));

    if (td->td_cscount) {
        kprintf("Diagnostic: attempt to switch while mastering cpusync: %p\n",
                td);
        if (panic_on_cscount)
            panic("switching while mastering cpusync");
    }

    /*
     * If we had preempted another thread on this cpu, resume the preempted
     * thread.  This occurs transparently, whether the preempted thread
     * was scheduled or not (it may have been preempted after descheduling
     * itself).
     *
     * We have to setup the MP lock for the original thread after backing
     * out the adjustment that was made to curthread when the original
     * was preempted.
     */
    if ((ntd = td->td_preempted) != NULL) {
        KKASSERT(ntd->td_flags & TDF_PREEMPT_LOCK);
        ntd->td_flags |= TDF_PREEMPT_DONE;
        ntd->td_contended = 0;          /* reset contended */

        /*
         * The interrupt may have woken a thread up, we need to properly
         * set the reschedule flag if the originally interrupted thread is
         * at a lower priority.
         *
         * The interrupt may not have descheduled.
         */
        if (TAILQ_FIRST(&gd->gd_tdrunq) != ntd)
            need_lwkt_resched();
        goto havethread_preempted;
    }

    /*
     * Figure out switch target.  If we cannot switch to our desired target
     * look for a thread that we can switch to.
     *
     * NOTE! The limited spin loop and related parameters are extremely
     *	     important for system performance, particularly for pipes and
     *	     concurrent conflicting VM faults.
     */
    clear_lwkt_resched();
    ntd = TAILQ_FIRST(&gd->gd_tdrunq);

    if (ntd) {
        do {
            if (TD_TOKS_NOT_HELD(ntd) ||
                lwkt_getalltokens(ntd, (ntd->td_contended > lwkt_spin_loops)))
            {
                goto havethread;
            }
            ++gd->gd_cnt.v_lock_colls;
            ++ntd->td_contended;        /* overflow ok */
#ifdef LOOPMASK
            if (tsc_frequency && rdtsc() - tsc_base > tsc_frequency) {
                kprintf("lwkt_switch: excessive contended %d "
                        "thread %p\n", ntd->td_contended, ntd);
                tsc_base = rdtsc();
            }
#endif
        } while (ntd->td_contended < (lwkt_spin_loops >> 1));
        upri = ntd->td_upri;

        /*
         * Bleh, the thread we wanted to switch to has a contended token.
         * See if we can switch to another thread.
         *
         * We generally don't want to do this because it represents a
         * priority inversion.  Do not allow the case if the thread
         * is returning to userland (not a kernel thread) AND the thread
         * has a lower upri.
         */
        while ((ntd = TAILQ_NEXT(ntd, td_threadq)) != NULL) {
            if (ntd->td_pri < TDPRI_KERN_LPSCHED && upri > ntd->td_upri)
                break;
            upri = ntd->td_upri;

            if (TD_TOKS_NOT_HELD(ntd) ||
                lwkt_getalltokens(ntd, (ntd->td_contended > lwkt_spin_loops))) {
                goto havethread;
            }
            ++ntd->td_contended;        /* overflow ok */
            ++gd->gd_cnt.v_lock_colls;
        }

        /*
         * Fall through, switch to idle thread to get us out of the current
         * context.  Since we were contended, prevent HLT by flagging a
         * false RQF_IDLECHECK_WK_MASK.
         */
        wakeup_mycpu(&gd->gd_idlethread);
    }

    /*
     * We either contended on ntd or the runq is empty.  We must switch
     * through the idle thread to get out of the current context.
     */
    ntd = &gd->gd_idlethread;
    if (gd->gd_trap_nesting_level == 0 && panicstr == NULL)
        ASSERT_NO_TOKENS_HELD(ntd);
    cpu_time.cp_msg[0] = 0;
    goto haveidle;

havethread:
    /*
     * Clear gd_idle_repeat when doing a normal switch to a non-idle
     * thread.
     */
    ntd->td_wmesg = NULL;
    ntd->td_contended = 0;      /* reset once scheduled */
    ++gd->gd_cnt.v_swtch;
    gd->gd_idle_repeat = 0;

havethread_preempted:
    /*
     * If the new target does not need the MP lock and we are holding it,
     * release the MP lock.  If the new target requires the MP lock we have
     * already acquired it for the target.
     */
haveidle:
    KASSERT(ntd->td_critcount,
            ("priority problem in lwkt_switch %d %d",
             td->td_critcount, ntd->td_critcount));

    if (td != ntd) {
        /*
         * Execute the actual thread switch operation.  This function
         * returns to the current thread and returns the previous thread
         * (which may be different from the thread we switched to).
         *
         * We are responsible for marking ntd as TDF_RUNNING.
         */
        ++switch_count;
        KKASSERT((ntd->td_flags & TDF_RUNNING) == 0);
        KTR_LOG(ctxsw_sw, gd->gd_cpuid, ntd);
        ntd->td_flags |= TDF_RUNNING;
        lwkt_switch_return(td->td_switch(ntd));
        /* ntd invalid, td_switch() can return a different thread_t */
    }

    /*
     * catch-all.  XXX is this strictly needed?
     */
    splz_check();

    /* NOTE: current cpu may have changed after switch */
    crit_exit_quick(td);
}
/*
 * Called by assembly in the td_switch (thread restore path) for thread
 * bootstrap cases which do not 'return' to lwkt_switch().
 */
void
lwkt_switch_return(thread_t otd)
{
    globaldata_t rgd;
#ifdef LOOPMASK
    uint64_t tsc_base = rdtsc();
#endif
    int exiting;

    exiting = otd->td_flags & TDF_EXITING;
    cpu_ccfence();

    /*
     * Check if otd was migrating.  Now that we are on ntd we can finish
     * up the migration.  This is a bit messy but it is the only place
     * where td is known to be fully descheduled.
     *
     * We can only activate the migration if otd was migrating but not
     * held on the cpu due to a preemption chain.  We still have to
     * clear TDF_RUNNING on the old thread either way.
     *
     * We are responsible for clearing the previously running thread's
     * TDF_RUNNING.
     */
    if ((rgd = otd->td_migrate_gd) != NULL &&
        (otd->td_flags & TDF_PREEMPT_LOCK) == 0) {
        KKASSERT((otd->td_flags & (TDF_MIGRATING | TDF_RUNNING)) ==
                 (TDF_MIGRATING | TDF_RUNNING));
        otd->td_migrate_gd = NULL;
        otd->td_flags &= ~TDF_RUNNING;
        lwkt_send_ipiq(rgd, lwkt_setcpu_remote, otd);
    } else {
        otd->td_flags &= ~TDF_RUNNING;
    }

    /*
     * Final exit validations (see lwp_wait()).  Note that otd becomes
     * invalid the *instant* we set TDF_MP_EXITSIG.
     *
     * Use the EXITING status loaded from before we clear TDF_RUNNING,
     * because if it is not set otd becomes invalid the instant we clear
     * TDF_RUNNING on it (otherwise, if the system is fast enough, we
     * might 'steal' TDF_EXITING from another switch-return!).
     */
    while (exiting) {
        u_int mpflags;

        mpflags = otd->td_mpflags;
        cpu_ccfence();

        if (mpflags & TDF_MP_EXITWAIT) {
            if (atomic_cmpset_int(&otd->td_mpflags, mpflags,
                                  mpflags | TDF_MP_EXITSIG)) {
                wakeup(otd);
                break;
            }
        } else {
            if (atomic_cmpset_int(&otd->td_mpflags, mpflags,
                                  mpflags | TDF_MP_EXITSIG)) {
                break;
            }
        }
#ifdef LOOPMASK
        if (tsc_frequency && rdtsc() - tsc_base > tsc_frequency) {
            kprintf("lwkt_switch_return: excessive TDF_EXITING "
                    "thread %p\n", otd);
            tsc_base = rdtsc();
        }
#endif
    }
}
/*
 * Request that the target thread preempt the current thread.  Preemption
 * can only occur if our only critical section is the one that we were called
 * with, the relative priority of the target thread is higher, and the target
 * thread holds no tokens.  This also only works if we are not holding any
 * spinlocks (obviously).
 *
 * THE CALLER OF LWKT_PREEMPT() MUST BE IN A CRITICAL SECTION.  Typically
 * this is called via lwkt_schedule() through the td_preemptable callback.
 * critcount is the managed critical priority that we should ignore in order
 * to determine whether preemption is possible (aka usually just the crit
 * priority of lwkt_schedule() itself).
 *
 * Preemption is typically limited to interrupt threads.
 *
 * Operation works in a fairly straight-forward manner.  The normal
 * scheduling code is bypassed and we switch directly to the target
 * thread.  When the target thread attempts to block or switch away
 * code at the base of lwkt_switch() will switch directly back to our
 * thread.  Our thread is able to retain whatever tokens it holds and
 * if the target needs one of them the target will switch back to us
 * and reschedule itself normally.
 */
void
lwkt_preempt(thread_t ntd, int critcount)
{
    struct globaldata *gd = mycpu;
    thread_t xtd;
    thread_t td;
    int save_gd_intr_nesting_level;

    /*
     * The caller has put us in a critical section.  We can only preempt
     * if the caller of the caller was not in a critical section (basically
     * a local interrupt), as determined by the 'critcount' parameter.  We
     * also can't preempt if the caller is holding any spinlocks (even if
     * he isn't in a critical section).  This also handles the tokens test.
     *
     * YYY The target thread must be in a critical section (else it must
     * inherit our critical section?  I dunno yet).
     */
    KASSERT(ntd->td_critcount, ("BADCRIT0 %d", ntd->td_pri));

    td = gd->gd_curthread;
    if (preempt_enable == 0) {
        ++preempt_miss;
        return;
    }
    if (ntd->td_pri <= td->td_pri) {
        ++preempt_miss;
        return;
    }
    if (td->td_critcount > critcount) {
        ++preempt_miss;
        return;
    }
    if (td->td_cscount) {
        ++preempt_weird;
        return;
    }
    if (ntd->td_gd != gd) {
        ++preempt_weird;
        return;
    }

    /*
     * We don't have to check spinlocks here as they will also bump
     * td_critcount.
     *
     * Do not try to preempt if the target thread is holding any tokens.
     * We could try to acquire the tokens but this case is so rare there
     * is no need to support it.
     */
    KKASSERT(gd->gd_spinlocks == 0);

    if (TD_TOKS_HELD(ntd)) {
        ++preempt_miss;
        return;
    }
    if (td == ntd || ((td->td_flags | ntd->td_flags) & TDF_PREEMPT_LOCK)) {
        ++preempt_weird;
        return;
    }
    if (ntd->td_preempted) {
        ++preempt_hit;
        return;
    }
    KKASSERT(gd->gd_processing_ipiq == 0);

    /*
     * Since we are able to preempt the current thread, there is no need to
     * call need_lwkt_resched().
     *
     * We must temporarily clear gd_intr_nesting_level around the switch
     * since switchouts from the target thread are allowed (they will just
     * return to our thread), and since the target thread has its own stack.
     *
     * A preemption must switch back to the original thread, assert the
     * case.
     */
    ++preempt_hit;
    ntd->td_preempted = td;
    td->td_flags |= TDF_PREEMPT_LOCK;
    KTR_LOG(ctxsw_pre, gd->gd_cpuid, ntd);
    save_gd_intr_nesting_level = gd->gd_intr_nesting_level;
    gd->gd_intr_nesting_level = 0;

    KKASSERT((ntd->td_flags & TDF_RUNNING) == 0);
    ntd->td_flags |= TDF_RUNNING;
    xtd = td->td_switch(ntd);
    KKASSERT(xtd == ntd);
    lwkt_switch_return(xtd);
    gd->gd_intr_nesting_level = save_gd_intr_nesting_level;

    KKASSERT(ntd->td_preempted && (td->td_flags & TDF_PREEMPT_DONE));
    ntd->td_preempted = NULL;
    td->td_flags &= ~(TDF_PREEMPT_LOCK|TDF_PREEMPT_DONE);
}
/*
 * Conditionally call splz() if gd_reqflags indicates work is pending.
 * This will work inside a critical section but not inside a hard code
 * section.
 *
 * (self contained on a per cpu basis)
 */
void
splz_check(void)
{
    globaldata_t gd = mycpu;
    thread_t td = gd->gd_curthread;

    if ((gd->gd_reqflags & RQF_IDLECHECK_MASK) &&
        gd->gd_intr_nesting_level == 0 &&
        td->td_nest_count < 2)
    {
        splz();
    }
}
/*
 * This version is integrated into crit_exit, reqflags has already
 * been tested but td_critcount has not.
 *
 * We only want to execute the splz() on the 1->0 transition of
 * critcount and not in a hard code section or if too deeply nested.
 *
 * NOTE: gd->gd_spinlocks is implied to be 0 when td_critcount is 0.
 */
void
lwkt_maybe_splz(thread_t td)
{
    globaldata_t gd = td->td_gd;

    if (td->td_critcount == 0 &&
        gd->gd_intr_nesting_level == 0 &&
        td->td_nest_count < 2)
    {
        splz();
    }
}
/*
 * Drivers which set up processing co-threads can call this function to
 * run the co-thread at a higher priority and to allow it to preempt
 * normal threads.
 */
void
lwkt_set_interrupt_support_thread(void)
{
    thread_t td = curthread;

    lwkt_setpri_self(TDPRI_INT_SUPPORT);
    td->td_flags |= TDF_INTTHREAD;
    td->td_preemptable = lwkt_preempt;
}
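/*
 * Illustrative sketch, not part of the original source: a driver
 * co-thread would typically mark itself as an interrupt support thread
 * at the top of its thread function so it can preempt normal threads.
 * The function name and wakeup channel below are hypothetical.
 */
#if 0
static void
example_cothread(void *arg)
{
    lwkt_set_interrupt_support_thread();
    for (;;) {
        /* ... process work queued by the driver's interrupt handler ... */
        tsleep(arg, 0, "cowait", 0);
    }
}
#endif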
/*
 * This function is used to negotiate a passive release of the current
 * process/lwp designation with the user scheduler, allowing the user
 * scheduler to schedule another user thread.  The related kernel thread
 * (curthread) continues running in the released state.
 */
void
lwkt_passive_release(struct thread *td)
{
    struct lwp *lp = td->td_lwp;

    td->td_release = NULL;
    lwkt_setpri_self(TDPRI_KERN_USER);

    lp->lwp_proc->p_usched->release_curproc(lp);
}
/*
 * This implements a LWKT yield, allowing a kernel thread to yield to other
 * kernel threads at the same or higher priority.  This function can be
 * called in a tight loop and will typically only yield once per tick.
 *
 * Most kernel threads run at the same priority in order to allow equal
 * sharing.
 *
 * (self contained on a per cpu basis)
 */
void
lwkt_yield(void)
{
    globaldata_t gd = mycpu;
    thread_t td = gd->gd_curthread;

    /*
     * Should never be called with spinlocks held but there is a path
     * via ACPI where it might happen.
     */
    if (gd->gd_spinlocks)
        return;

    /*
     * Safe to call splz if we are not too-heavily nested.
     */
    if ((gd->gd_reqflags & RQF_IDLECHECK_MASK) && td->td_nest_count < 2)
        splz();

    /*
     * Caller allows switching
     */
    if (lwkt_resched_wanted()) {
        lwkt_schedule_self(curthread);
        lwkt_switch();
    }
}
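/*
 * Illustrative sketch, not part of the original source: a cpu-bound
 * kernel loop calling lwkt_yield() on each iteration.  The yield is
 * cheap when no reschedule is pending, so it is safe in a tight loop.
 */
#if 0
static void
example_scan_loop(int count)
{
    int i;

    for (i = 0; i < count; ++i) {
        /* ... do one unit of cpu-bound work ... */
        lwkt_yield();
    }
}
#endif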
/*
 * The quick version processes pending interrupts and higher-priority
 * LWKT threads but will not round-robin same-priority LWKT threads.
 *
 * When called while attempting to return to userland the only same-pri
 * threads are the ones which have already tried to become the current
 * user process.
 */
void
lwkt_yield_quick(void)
{
    globaldata_t gd = mycpu;
    thread_t td = gd->gd_curthread;

    if ((gd->gd_reqflags & RQF_IDLECHECK_MASK) && td->td_nest_count < 2)
        splz();
    if (lwkt_resched_wanted()) {
        if (TAILQ_FIRST(&gd->gd_tdrunq) == td) {
            clear_lwkt_resched();
        } else {
            lwkt_schedule_self(curthread);
            lwkt_switch();
        }
    }
}
/*
 * This yield is designed for kernel threads with a user context.
 *
 * The kernel acting on behalf of the user is potentially cpu-bound,
 * this function will efficiently allow other threads to run and also
 * switch to other processes by releasing.
 *
 * The lwkt_user_yield() function is designed to have very low overhead
 * if no yield is determined to be needed.
 */
void
lwkt_user_yield(void)
{
    globaldata_t gd = mycpu;
    thread_t td = gd->gd_curthread;
    struct lwp *lp = td->td_lwp;

    /*
     * Should never be called with spinlocks held but there is a path
     * via ACPI where it might happen.
     */
    if (gd->gd_spinlocks)
        return;

    /*
     * Always run any pending interrupts in case we are in a critical
     * section.
     */
    if ((gd->gd_reqflags & RQF_IDLECHECK_MASK) && td->td_nest_count < 2)
        splz();

    /*
     * Switch (which forces a release) if another kernel thread needs
     * the cpu, if userland wants us to resched, or if our kernel
     * quantum has run out.
     */
    if (lwkt_resched_wanted() ||
        user_resched_wanted())
    {
        lwkt_switch();
    }

    /*
     * Reacquire the current process if we are released.
     *
     * XXX not implemented atm.  The kernel may be holding locks and such,
     *     so we want the thread to continue to receive cpu.
     */
    if (td->td_release == NULL && lp) {
        lp->lwp_proc->p_usched->acquire_curproc(lp);
        td->td_release = lwkt_passive_release;
        lwkt_setpri_self(TDPRI_USER_NORM);
    }
}
/*
 * Generic schedule.  Possibly schedule threads belonging to other cpus and
 * deal with threads that might be blocked on a wait queue.
 *
 * We have a little helper inline function which does additional work after
 * the thread has been enqueued, including dealing with preemption and
 * setting need_lwkt_resched() (which prevents the kernel from returning
 * to userland until it has processed higher priority threads).
 *
 * It is possible for this routine to be called after a failed _enqueue
 * (due to the target thread migrating, sleeping, or otherwise blocked).
 * We have to check that the thread is actually on the run queue!
 */
static __inline void
_lwkt_schedule_post(globaldata_t gd, thread_t ntd, int ccount)
{
    if (ntd->td_flags & TDF_RUNQ) {
        if (ntd->td_preemptable) {
            ntd->td_preemptable(ntd, ccount);   /* YYY +token */
        }
    }
}

static __inline void
_lwkt_schedule(thread_t td)
{
    globaldata_t mygd = mycpu;

    KASSERT(td != &td->td_gd->gd_idlethread,
        ("lwkt_schedule(): scheduling gd_idlethread is illegal!"));
    KKASSERT((td->td_flags & TDF_MIGRATING) == 0);
    crit_enter_gd(mygd);
    KKASSERT(td->td_lwp == NULL ||
        (td->td_lwp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);

    if (td == mygd->gd_curthread) {
        _lwkt_enqueue(td);
    } else {
        /*
         * If we own the thread, there is no race (since we are in a
         * critical section).  If we do not own the thread there might
         * be a race but the target cpu will deal with it.
         */
        if (td->td_gd == mygd) {
            _lwkt_enqueue(td);
            _lwkt_schedule_post(mygd, td, 1);
        } else {
            lwkt_send_ipiq3(td->td_gd, lwkt_schedule_remote, td, 0);
        }
    }
    crit_exit_gd(mygd);
}

void
lwkt_schedule(thread_t td)
{
    _lwkt_schedule(td);
}

void
lwkt_schedule_noresched(thread_t td)    /* XXX not impl */
{
    _lwkt_schedule(td);
}
/*
 * When scheduled remotely if frame != NULL the IPIQ is being
 * run via doreti or an interrupt then preemption can be allowed.
 *
 * To allow preemption we have to drop the critical section so only
 * one is present in _lwkt_schedule_post.
 */
static void
lwkt_schedule_remote(void *arg, int arg2, struct intrframe *frame)
{
    thread_t td = curthread;
    thread_t ntd = arg;

    if (frame && ntd->td_preemptable) {
        crit_exit_noyield(td);
        _lwkt_schedule(ntd);
        crit_enter_quick(td);
    } else {
        _lwkt_schedule(ntd);
    }
}
/*
 * Thread migration using a 'Pull' method.  The thread may or may not be
 * the current thread.  It MUST be descheduled and in a stable state.
 * lwkt_giveaway() must be called on the cpu owning the thread.
 *
 * At any point after lwkt_giveaway() is called, the target cpu may
 * 'pull' the thread by calling lwkt_acquire().
 *
 * We have to make sure the thread is not sitting on a per-cpu tsleep
 * queue or it will blow up when it moves to another cpu.
 *
 * MPSAFE - must be called under very specific conditions.
 */
void
lwkt_giveaway(thread_t td)
{
    globaldata_t gd = mycpu;

    crit_enter_gd(gd);
    if (td->td_flags & TDF_TSLEEPQ)
        tsleep_remove(td);
    KKASSERT(td->td_gd == gd);
    TAILQ_REMOVE(&gd->gd_tdallq, td, td_allq);
    td->td_flags |= TDF_MIGRATING;
    crit_exit_gd(gd);
}
void
lwkt_acquire(thread_t td)
{
    globaldata_t gd;
    globaldata_t mygd;

    KKASSERT(td->td_flags & TDF_MIGRATING);
    gd = td->td_gd;
    mygd = mycpu;
    if (gd != mycpu) {
#ifdef LOOPMASK
        uint64_t tsc_base = rdtsc();
#endif
        cpu_lfence();
        KKASSERT((td->td_flags & TDF_RUNQ) == 0);
        crit_enter_gd(mygd);
        DEBUG_PUSH_INFO("lwkt_acquire");
        while (td->td_flags & (TDF_RUNNING|TDF_PREEMPT_LOCK)) {
            lwkt_process_ipiq();
            cpu_lfence();
#ifdef _KERNEL_VIRTUAL
            pthread_yield();
#endif
#ifdef LOOPMASK
            if (tsc_frequency && rdtsc() - tsc_base > tsc_frequency) {
                kprintf("lwkt_acquire: stuck td %p td->td_flags %08x\n",
                        td, td->td_flags);
                tsc_base = rdtsc();
            }
#endif
        }
        DEBUG_POP_INFO();
        td->td_gd = mygd;
        TAILQ_INSERT_TAIL(&mygd->gd_tdallq, td, td_allq);
        td->td_flags &= ~TDF_MIGRATING;
        crit_exit_gd(mygd);
    } else {
        crit_enter_gd(mygd);
        TAILQ_INSERT_TAIL(&mygd->gd_tdallq, td, td_allq);
        td->td_flags &= ~TDF_MIGRATING;
        crit_exit_gd(mygd);
    }
}
/*
 * Generic deschedule.  Descheduling threads other then your own should be
 * done only in carefully controlled circumstances.  Descheduling is
 * asynchronous.
 *
 * This function may block if the cpu has run out of messages.
 */
void
lwkt_deschedule(thread_t td)
{
    crit_enter();
    if (td == curthread) {
        _lwkt_dequeue(td);
    } else {
        if (td->td_gd == mycpu) {
            _lwkt_dequeue(td);
        } else {
            lwkt_send_ipiq(td->td_gd, (ipifunc1_t)lwkt_deschedule, td);
        }
    }
    crit_exit();
}
/*
 * Set the target thread's priority.  This routine does not automatically
 * switch to a higher priority thread, LWKT threads are not designed for
 * continuous priority changes.  Yield if you want to switch.
 */
void
lwkt_setpri(thread_t td, int pri)
{
    if (td->td_pri != pri) {
        KKASSERT(pri >= 0);
        crit_enter();
        if (td->td_flags & TDF_RUNQ) {
            KKASSERT(td->td_gd == mycpu);
            _lwkt_dequeue(td);
            td->td_pri = pri;
            _lwkt_enqueue(td);
        } else {
            td->td_pri = pri;
        }
        crit_exit();
    }
}
/*
 * Set the initial priority for a thread prior to it being scheduled for
 * the first time.  The thread MUST NOT be scheduled before or during
 * this call.  The thread may be assigned to a cpu other then the current
 * cpu.
 *
 * Typically used after a thread has been created with TDF_STOPPREQ,
 * and before the thread is initially scheduled.
 */
void
lwkt_setpri_initial(thread_t td, int pri)
{
    KKASSERT(pri >= 0);
    KKASSERT((td->td_flags & TDF_RUNQ) == 0);
    td->td_pri = pri;
}
void
lwkt_setpri_self(int pri)
{
    thread_t td = curthread;

    KKASSERT(pri >= 0 && pri <= TDPRI_MAX);
    crit_enter();
    if (td->td_flags & TDF_RUNQ) {
        _lwkt_dequeue(td);
        td->td_pri = pri;
        _lwkt_enqueue(td);
    } else {
        td->td_pri = pri;
    }
    crit_exit();
}
/*
 * hz tick scheduler clock for LWKT threads
 */
void
lwkt_schedulerclock(thread_t td)
{
    globaldata_t gd = td->td_gd;
    thread_t xtd;

    if (TAILQ_FIRST(&gd->gd_tdrunq) == td) {
        /*
         * If the current thread is at the head of the runq shift it to the
         * end of any equal-priority threads and request a LWKT reschedule
         * if it moved.
         *
         * Ignore upri in this situation.  There will only be one user thread
         * in user mode, all others will be user threads running in kernel
         * mode and we have to make sure they get some cpu.
         */
        xtd = TAILQ_NEXT(td, td_threadq);
        if (xtd && xtd->td_pri == td->td_pri) {
            TAILQ_REMOVE(&gd->gd_tdrunq, td, td_threadq);
            while (xtd && xtd->td_pri == td->td_pri)
                xtd = TAILQ_NEXT(xtd, td_threadq);
            if (xtd)
                TAILQ_INSERT_BEFORE(xtd, td, td_threadq);
            else
                TAILQ_INSERT_TAIL(&gd->gd_tdrunq, td, td_threadq);
            need_lwkt_resched();
        }
    } else if (TAILQ_FIRST(&gd->gd_tdrunq)) {
        /*
         * If we scheduled a thread other than the one at the head of the
         * queue always request a reschedule every tick.
         */
        need_lwkt_resched();
    }
}
/*
 * Migrate the current thread to the specified cpu.
 *
 * This is accomplished by descheduling ourselves from the current cpu
 * and setting td_migrate_gd.  The lwkt_switch() code will detect that the
 * 'old' thread wants to migrate after it has been completely switched out
 * and will complete the migration.
 *
 * TDF_MIGRATING prevents scheduling races while the thread is being migrated.
 *
 * We must be sure to release our current process designation (if a user
 * process) before clearing out any tsleepq we are on because the release
 * code may re-add us.
 *
 * We must be sure to remove ourselves from the current cpu's tsleepq
 * before potentially moving to another queue.  The thread can be on
 * a tsleepq due to a left-over tsleep_interlock().
 */
void
lwkt_setcpu_self(globaldata_t rgd)
{
    thread_t td = curthread;

    if (td->td_gd != rgd) {
        crit_enter_quick(td);

        if (td->td_release)
            td->td_release(td);
        if (td->td_flags & TDF_TSLEEPQ)
            tsleep_remove(td);

        /*
         * Set TDF_MIGRATING to prevent a spurious reschedule while we are
         * trying to deschedule ourselves and switch away, then deschedule
         * ourself, remove us from tdallq, and set td_migrate_gd.  Finally,
         * call lwkt_switch() to complete the operation.
         */
        td->td_flags |= TDF_MIGRATING;
        lwkt_deschedule_self(td);
        TAILQ_REMOVE(&td->td_gd->gd_tdallq, td, td_allq);
        td->td_migrate_gd = rgd;
        lwkt_switch();

        /*
         * We are now on the target cpu
         */
        KKASSERT(rgd == mycpu);
        TAILQ_INSERT_TAIL(&rgd->gd_tdallq, td, td_allq);
        crit_exit_quick(td);
    }
}
void
lwkt_migratecpu(int cpuid)
{
    globaldata_t rgd;

    rgd = globaldata_find(cpuid);
    lwkt_setcpu_self(rgd);
}
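/*
 * Illustrative sketch, not part of the original source: migrating the
 * current kernel thread onto a particular cpu, e.g. to pin per-cpu work.
 */
#if 0
static void
example_pin_to_cpu0(void)
{
    lwkt_migratecpu(0);                 /* returns running on cpu 0 */
    KKASSERT(mycpu->gd_cpuid == 0);
}
#endif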
/*
 * Remote IPI for cpu migration (called while in a critical section so we
 * do not have to enter another one).
 *
 * The thread (td) has already been completely descheduled from the
 * originating cpu and we can simply assert the case.  The thread is
 * assigned to the new cpu and enqueued.
 *
 * The thread will re-add itself to tdallq when it resumes execution.
 */
static void
lwkt_setcpu_remote(void *arg)
{
    thread_t td = arg;
    globaldata_t gd = mycpu;

    KKASSERT((td->td_flags & (TDF_RUNNING|TDF_PREEMPT_LOCK)) == 0);
    td->td_gd = gd;
    td->td_flags &= ~TDF_MIGRATING;
    KKASSERT(td->td_migrate_gd == NULL);
    KKASSERT(td->td_lwp == NULL ||
        (td->td_lwp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
    _lwkt_enqueue(td);
}
struct lwp *
lwkt_preempted_proc(void)
{
    thread_t td = curthread;

    while (td->td_preempted)
        td = td->td_preempted;
    return (td->td_lwp);
}
/*
 * Create a kernel process/thread/whatever.  It shares its address space
 * with proc0 - ie: kernel only.
 *
 * If the cpu is not specified one will be selected.  In the future
 * specifying a cpu of -1 will enable kernel thread migration between
 * cpus.
 */
int
lwkt_create(void (*func)(void *), void *arg, struct thread **tdp,
            thread_t template, int tdflags, int cpu, const char *fmt, ...)
{
    thread_t td;
    __va_list ap;

    td = lwkt_alloc_thread(template, LWKT_THREAD_STACK, cpu,
                           tdflags);
    if (tdp)
        *tdp = td;
    cpu_set_thread_handler(td, lwkt_exit, func, arg);

    /*
     * Set up arg0 for 'ps' etc
     */
    __va_start(ap, fmt);
    kvsnprintf(td->td_comm, sizeof(td->td_comm), fmt, ap);
    __va_end(ap);

    /*
     * Schedule the thread to run
     */
    if (td->td_flags & TDF_NOSTART)
        td->td_flags &= ~TDF_NOSTART;
    else
        lwkt_schedule(td);
    return 0;
}
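/*
 * Illustrative sketch, not part of the original source: creating and
 * starting a simple kernel thread with lwkt_create().  The thread
 * function and example_softc structure are hypothetical; passing a cpu
 * of -1 lets the scheduler pick the cpu.
 */
#if 0
struct example_softc {
    struct thread   *td;
    int             running;
};

static void
example_thread(void *arg)
{
    struct example_softc *sc = arg;

    while (sc->running) {
        /* ... service the device ... */
        tsleep(sc, 0, "exwait", hz);
    }
    lwkt_exit();
}

static void
example_start(struct example_softc *sc)
{
    sc->running = 1;
    lwkt_create(example_thread, sc, &sc->td, NULL, 0, -1,
                "example_thread");
}
#endif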
/*
 * Destroy an LWKT thread.   Warning!  This function is not called when
 * a process exits, cpu_proc_exit() directly calls cpu_thread_exit() and
 * uses a different reaping mechanism.
 */
void
lwkt_exit(void)
{
    thread_t td = curthread;
    thread_t std;
    globaldata_t gd;

    /*
     * Do any cleanup that might block here
     */
    if (td->td_flags & TDF_VERBOSE)
        kprintf("kthread %p %s has exited\n", td, td->td_comm);
    dsched_exit_thread(td);

    /*
     * Get us into a critical section to interlock gd_freetd and loop
     * until we can get it freed.
     *
     * We have to cache the current td in gd_freetd because objcache_put()ing
     * it would rip it out from under us while our thread is still active.
     *
     * We are the current thread so of course our own TDF_RUNNING bit will
     * be set, so unlike the lwp reap code we don't wait for it to clear.
     */
    gd = mycpu;
    crit_enter_quick(td);
    while (td->td_refs)
        tsleep(td, 0, "tdreap", 1);
    if ((std = gd->gd_freetd) != NULL) {
        KKASSERT((std->td_flags & (TDF_RUNNING|TDF_PREEMPT_LOCK)) == 0);
        gd->gd_freetd = NULL;
        objcache_put(thread_cache, std);
    }

    /*
     * Remove thread resources from kernel lists and deschedule us for
     * the last time.  We cannot block after this point or we may end
     * up with a stale td on the tsleepq.
     *
     * None of this may block, the critical section is the only thing
     * protecting tdallq and the only thing preventing new lwkt_hold()
     * thread refs now.
     */
    if (td->td_flags & TDF_TSLEEPQ)
        tsleep_remove(td);
    lwkt_deschedule_self(td);
    lwkt_remove_tdallq(td);
    KKASSERT(td->td_refs == 0);

    /*
     * Final cleanup
     */
    KKASSERT(gd->gd_freetd == NULL);
    if (td->td_flags & TDF_ALLOCATED_THREAD)
        gd->gd_freetd = td;
    cpu_thread_exit();
}
void
lwkt_remove_tdallq(thread_t td)
{
    KKASSERT(td->td_gd == mycpu);
    TAILQ_REMOVE(&td->td_gd->gd_tdallq, td, td_allq);
}
/*
 * Code reduction and branch prediction improvements.  Call/return
 * overhead on modern cpus often degenerates into 0 cycles due to
 * the cpu's branch prediction hardware and return pc cache.  We
 * can take advantage of this by not inlining medium-complexity
 * functions and we can also reduce the branch prediction impact
 * by collapsing perfectly predictable branches into a single
 * procedure instead of duplicating it.
 *
 * Is any of this noticeable?  Probably not, so I'll take the
 * smaller code size.
 */
void
crit_exit_wrapper(__DEBUG_CRIT_ARG__)
{
    _crit_exit(mycpu __DEBUG_CRIT_PASS_ARG__);
}

void
crit_panic(void)
{
    thread_t td = curthread;
    int lcrit = td->td_critcount;

    td->td_critcount = 0;
    panic("td_critcount is/would-go negative! %p %d", td, lcrit);
}
/*
 * Called from debugger/panic on cpus which have been stopped.  We must still
 * process the IPIQ while stopped.
 *
 * If we are dumping also try to process any pending interrupts.  This may
 * or may not work depending on the state of the cpu at the point it was
 * stopped.
 */
void
lwkt_smp_stopped(void)
{
    globaldata_t gd = mycpu;

    if (dumping) {
        lwkt_process_ipiq();
        --gd->gd_intr_nesting_level;
        splz();
        ++gd->gd_intr_nesting_level;
    } else {
        lwkt_process_ipiq();
    }
}