/*
 * Copyright (c) 2003,2004,2008 The DragonFly Project.  All rights reserved.
 * Copyright (c) 2008 Jordan Gordeev.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/i386/i386/swtch.s,v 1.89.2.10 2003/01/23 03:36:24 ps Exp $
 */
#include <sys/rtprio.h>

#include <machine/asmacros.h>
#include <machine/segments.h>

#include <machine/pmap.h>

#include <machine_base/apic/apicreg.h>

#include <machine/lock.h>
#define MPLOCKED	lock ;
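
/*
 * Note: with the definition above, "MPLOCKED andq %rsi,mem" assembles as
 * "lock; andq %rsi,mem", i.e. the lock prefix makes the read-modify-write
 * of the pmap's PM_ACTIVE mask atomic with respect to other cpus (see the
 * pm_active updates in cpu_heavy_switch/cpu_heavy_restore below).
 */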

/*
 * This feature allows the preempting (interrupt) kernel thread to borrow
 * %cr3 from the user process it interrupts, allowing us to do-away with
 * two %cr3 stores, two atomic ops (pm_active is not modified), and pmap
 * lock tests (not needed since pm_active is not modified).
 *
 * Unfortunately, I couldn't really measure any result so for now the
 * optimization is disabled.
 */
#undef PREEMPT_OPTIMIZE

/*
 * This optimization attempted to avoid a %cr3 store and atomic op, and
 * it might have been useful on older cpus but newer cpus (and more
 * importantly multi-core cpus) generally do not switch between LWPs on
 * the same cpu.  Multiple user threads are more likely to be distributed
 * across multiple cpus.  In cpu-bound situations the scheduler will already
 * be in batch-mode (meaning relatively few context-switches/sec), and
 * otherwise the lwp(s) are likely to be blocked waiting for events.
 *
 * On the flip side, the conditionals this option uses measurably reduce
 * performance (just slightly, honestly).  So this option is disabled.
 */
#undef LWP_SWITCH_OPTIMIZE

/*
 * Global Declarations
 */
	.data

	.globl	lwkt_switch_return

#if defined(SWTCH_OPTIM_STATS)
	.globl	swtch_optim_stats, tlb_flush_count
swtch_optim_stats:	.long	0		/* number of _swtch_optims */
tlb_flush_count:	.long	0
#endif

	.text
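
/*
 * Note: PCPU(member), from <machine/asmacros.h>, expands to a %gs-relative
 * reference into this cpu's globaldata area (roughly %gs:gd_member), so
 * e.g. PCPU(curthread) reads the per-cpu gd_curthread field without any
 * locking.
 */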

/*
 * cpu_heavy_switch(struct thread *next_thread)
 *
 *	Switch from the current thread to a new thread.  This entry
 *	is normally called via the thread->td_switch function, and will
 *	only be called when the current thread is a heavy weight process.
 *
 *	Some instructions have been reordered to reduce pipeline stalls.
 *
 *	YYY disable interrupts once giant is removed.
 */
ENTRY(cpu_heavy_switch)
	/*
	 * Save RIP, RSP and callee-saved registers (RBX, RBP, R12-R15).
	 */
	movq	PCPU(curthread),%rcx
	/* On top of the stack is the return address. */
	movq	(%rsp),%rax			/* (reorder optimization) */
	movq	TD_PCB(%rcx),%rdx		/* RDX = PCB */
	movq	%rax,PCB_RIP(%rdx)		/* return PC may be modified */
	movq	%rbx,PCB_RBX(%rdx)
	movq	%rsp,PCB_RSP(%rdx)
	movq	%rbp,PCB_RBP(%rdx)
	movq	%r12,PCB_R12(%rdx)
	movq	%r13,PCB_R13(%rdx)
	movq	%r14,PCB_R14(%rdx)
	movq	%r15,PCB_R15(%rdx)

	/*
	 * Clear the cpu bit in the pmap active mask.  The restore
	 * function will set the bit in the pmap active mask.
	 *
	 * If we are switching away due to a preempt, TD_PREEMPTED(%rdi)
	 * will be non-NULL.  In this situation we do want to avoid extra
	 * atomic ops and %cr3 reloads (see top of file for reasoning).
	 *
	 * NOTE: Do not try to optimize avoiding the %cr3 reload or pm_active
	 *	 adjustment.  This mattered on uni-processor systems but in
	 *	 multi-core systems we are highly unlikely to be switching
	 *	 to another thread belonging to the same process on this cpu.
	 *
	 *	 (more likely the target thread is still sleeping, or if cpu-
	 *	 bound the scheduler is in batch mode and the switch rate is
	 *	 low).
	 */
	movq	%rcx,%rbx			/* RBX = oldthread */
#ifdef PREEMPT_OPTIMIZE
	/*
	 * If we are being preempted the target thread borrows our %cr3
	 * and we leave our pmap bits intact for the duration.
	 */
	movq	TD_PREEMPTED(%rdi),%r13
	testq	%r13,%r13
	jne	2f
#endif
	movq	TD_LWP(%rcx),%rcx		/* RCX = oldlwp */
	movq	LWP_VMSPACE(%rcx), %rcx		/* RCX = oldvmspace */
#ifdef LWP_SWITCH_OPTIMIZE
	movq	TD_LWP(%rdi),%r13		/* R13 = newlwp */
	testq	%r13,%r13			/* might not be a heavy */
	jz	1f
	cmpq	LWP_VMSPACE(%r13),%rcx		/* same vmspace? */
	je	2f
1:
#endif
	movq	PCPU(cpumask_simple),%rsi
	movq	PCPU(cpumask_offset),%r12
	xorq	$-1,%rsi
	MPLOCKED andq	%rsi, VM_PMAP+PM_ACTIVE(%rcx, %r12, 1)
2:
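	/*
	 * (The sequence above treats the pmap's PM_ACTIVE field as an array
	 * of 64-bit words: cpumask_offset appears to be this cpu's byte
	 * offset into that array and cpumask_simple the bit for this cpu
	 * within the selected word, so complementing the mask and
	 * lock-and'ing it clears our bit.  cpu_heavy_restore does the
	 * matching lock-or to set it again.)
	 */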

	/*
	 * Push the LWKT switch restore function, which resumes a heavy
	 * weight process.  Note that the LWKT switcher is based on
	 * TD_SP, while the heavy weight process switcher is based on
	 * PCB_RSP.  TD_SP is usually two ints pushed relative to
	 * PCB_RSP.  We push the flags for later restore by cpu_heavy_restore.
	 */
	pushfq
	cli
	movq	$cpu_heavy_restore, %rax
	pushq	%rax
	movq	%rsp,TD_SP(%rbx)
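	/*
	 * At this point TD_SP(oldthread) points at the two quadwords pushed
	 * above: the address of cpu_heavy_restore on top and the saved
	 * rflags beneath it, so a later "movq TD_SP(%rax),%rsp; ret" in the
	 * LWKT switcher resumes this thread directly in cpu_heavy_restore.
	 */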

	/*
	 * Save debug regs if necessary
	 */
	movq	PCB_FLAGS(%rdx),%rax
	andq	$PCB_DBREGS,%rax
	jz	1f				/* no, skip over */
	movq	%dr7,%rax			/* yes, do the save */
	movq	%rax,PCB_DR7(%rdx)
	/* JG correct value? */
	andq	$0x0000fc00, %rax		/* disable all watchpoints */
	movq	%rax,%dr7
	movq	%dr6,%rax
	movq	%rax,PCB_DR6(%rdx)
	movq	%dr3,%rax
	movq	%rax,PCB_DR3(%rdx)
	movq	%dr2,%rax
	movq	%rax,PCB_DR2(%rdx)
	movq	%dr1,%rax
	movq	%rax,PCB_DR1(%rdx)
	movq	%dr0,%rax
	movq	%rax,PCB_DR0(%rdx)
1:

	/*
	 * Save the FP state if we have used the FP.  Note that calling
	 * npxsave will NULL out PCPU(npxthread).
	 */
	cmpq	%rbx,PCPU(npxthread)
	jne	1f
	movq	%rdi,%r12		/* save %rdi.  %r12 is callee-saved */
	movq	TD_SAVEFPU(%rbx),%rdi
	call	npxsave			/* do it in a big C function */
	movq	%r12,%rdi		/* restore %rdi */
1:
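	/*
	 * (%rdi is staged through %r12 because npxsave is a C function: in
	 * the x86_64 SysV ABI %rdi is both the first argument register and
	 * caller-saved, while %r12 is callee-saved and survives the call.)
	 */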

	/*
	 * Switch to the next thread, which was passed as an argument
	 * to cpu_heavy_switch().  The argument is in %rdi.
	 * Set the current thread, load the stack pointer,
	 * and 'ret' into the switch-restore function.
	 *
	 * The switch restore function expects the new thread to be in %rax
	 * and the old one to be in %rbx.
	 *
	 * There is a one-instruction window where curthread is the new
	 * thread but %rsp still points to the old thread's stack, but
	 * we are protected by a critical section so it is ok.
	 */
	movq	%rdi,%rax		/* RAX = newtd, RBX = oldtd */
	movq	%rax,PCPU(curthread)
	movq	TD_SP(%rax),%rsp
	ret
END(cpu_heavy_switch)

/*
 * cpu_exit_switch(struct thread *next)
 *
 *	The switch function is changed to this when a thread is going away
 *	for good.  We have to ensure that the MMU state is not cached, and
 *	we don't bother saving the existing thread state before switching.
 *
 *	At this point we are in a critical section and this cpu owns the
 *	thread's token, which serves as an interlock until the switchout is
 *	complete.
 */
ENTRY(cpu_exit_switch)

#ifdef PREEMPT_OPTIMIZE
	/*
	 * If we were preempting we are switching back to the original thread.
	 * In this situation we already have the original thread's %cr3 and
	 * should not replace it!
	 */
	testl	$TDF_PREEMPT_DONE, TD_FLAGS(%rdi)
	jne	1f
#endif

	/*
	 * Get us out of the vmspace
	 */
	movq	KPML4phys,%rcx
	movq	%cr3,%rax
	cmpq	%rcx,%rax
	je	1f
	movq	%rcx,%cr3
1:
	movq	PCPU(curthread),%rbx

	/*
	 * If this is a process/lwp, deactivate the pmap after we've
	 * switched it out.
	 */
	movq	TD_LWP(%rbx),%rcx
	testq	%rcx,%rcx
	jz	2f
	movq	LWP_VMSPACE(%rcx), %rcx		/* RCX = vmspace */

	movq	PCPU(cpumask_simple),%rax
	movq	PCPU(cpumask_offset),%r12
	xorq	$-1,%rax
	MPLOCKED andq	%rax, VM_PMAP+PM_ACTIVE(%rcx, %r12, 1)
2:

	/*
	 * Switch to the next thread.  RET into the restore function, which
	 * expects the new thread in RAX and the old in RBX.
	 *
	 * There is a one-instruction window where curthread is the new
	 * thread but %rsp still points to the old thread's stack, but
	 * we are protected by a critical section so it is ok.
	 */
	movq	%rdi,%rax
	movq	%rax,PCPU(curthread)
	movq	TD_SP(%rax),%rsp
	ret
END(cpu_exit_switch)

/*
 * cpu_heavy_restore()	(current thread in %rax on entry, old thread in %rbx)
 *
 *	We immediately move %rax to %r12.  %rbx is retained throughout, and
 *	we nominally use %r14 for TD_PCB(%r12) until near the end where we
 *	switch to %rdx for that.
 *
 *	Restore the thread after an LWKT switch.  This entry is normally
 *	called via the LWKT switch restore function, which was pulled
 *	off the thread stack and jumped to.
 *
 *	This entry is only called if the thread was previously saved
 *	using cpu_heavy_switch() (the heavy weight process thread switcher),
 *	or when a new process is initially scheduled.
 *
 *	NOTE: The lwp may be in any state, not necessarily LSRUN, because
 *	a preemption switch may interrupt the process and then return via
 *	cpu_heavy_restore.
 *
 *	YYY theoretically we do not have to restore everything here, a lot
 *	of this junk can wait until we return to usermode.  But for now
 *	we restore everything.
 *
 *	YYY the PCB crap is really crap, it makes startup a bitch because
 *	we can't switch away.
 *
 *	YYY note: spl check is done in mi_switch when it splx()'s.
 */
ENTRY(cpu_heavy_restore)
	movq	%rax,%r12			/* R12 = newtd */
	movq	TD_PCB(%rax),%r14		/* R14 = PCB */
	movq	%r14, PCPU(trampoline)+TR_PCB_RSP
	movq	PCB_FLAGS(%r14), %rcx
	movq	%rcx, PCPU(trampoline)+TR_PCB_FLAGS
	movq	PCB_CR3_ISO(%r14), %rcx
	movq	%rcx, PCPU(trampoline)+TR_PCB_CR3_ISO
	movq	PCB_CR3(%r14), %rcx
	movq	%rcx, PCPU(trampoline)+TR_PCB_CR3
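	/*
	 * The per-cpu trampoline area receives copies of the new thread's
	 * PCB address and %cr3 values so the usermode entry/exit trampoline
	 * can load them without touching the PCB itself; TR_PCB_CR3_ISO
	 * appears to be the isolated (user) page-table root used when
	 * kernel page-table isolation is enabled.
	 */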

#if defined(SWTCH_OPTIM_STATS)
	incl	_swtch_optim_stats
#endif
#ifdef PREEMPT_OPTIMIZE
	/*
	 * If restoring our thread after a preemption has returned to
	 * us, our %cr3 and pmap were borrowed and are being returned to
	 * us and no further action on those items need be taken.
	 */
	testl	$TDF_PREEMPT_DONE, TD_FLAGS(%r12)
	jne	4f
#endif

	/*
	 * Tell the pmap that our cpu is using the VMSPACE now.  We cannot
	 * safely test/reload %cr3 until after we have set the bit in the
	 * pmap.
	 *
	 * We must do an interlocked test of the CPULOCK_EXCL at the same
	 * time.  If found to be set we will have to wait for it to clear
	 * and then do a forced reload of %cr3 (even if the value matches).
	 *
	 * XXX When switching between two LWPs sharing the same vmspace
	 *     the cpu_heavy_switch() code currently avoids clearing the
	 *     cpu bit in PM_ACTIVE.  So if the bit is already set we can
	 *     avoid checking for the interlock via CPULOCK_EXCL.  We currently
	 *     do not perform this optimization.
	 */
	movq	TD_LWP(%r12),%rcx
	movq	LWP_VMSPACE(%rcx),%rcx		/* RCX = vmspace */

	movq	PCPU(cpumask_simple),%rsi
	movq	PCPU(cpumask_offset),%r13
	MPLOCKED orq	%rsi, VM_PMAP+PM_ACTIVE(%rcx, %r13, 1)

	movl	VM_PMAP+PM_ACTIVE_LOCK(%rcx),%esi
	testl	$CPULOCK_EXCL,%esi
	jz	1f

	movq	%rcx,%rdi		/* (found to be set) */
	call	pmap_interlock_wait	/* pmap_interlock_wait(%rdi:vm) */

	/*
	 * Need unconditional load cr3
	 */
	movq	PCB_CR3(%r14),%rcx	/* RCX = desired CR3 */
	jmp	2f			/* unconditional reload */
1:
	/*
	 * Restore the MMU address space.  If it is the same as the last
	 * thread we don't have to invalidate the tlb (i.e. reload cr3).
	 *
	 * XXX Temporary kludge, do NOT do this optimization!  The problem
	 *     is that the pm_active bit for the cpu had dropped for a small
	 *     period of time, just a few cycles, but even one cycle is long
	 *     enough for some other cpu doing a pmap invalidation to not see
	 *     our cpu.
	 *
	 *     When that happens, and we don't invltlb (by loading %cr3), we
	 *     wind up with a stale TLB.
	 */
	movq	%cr3,%rsi			/* RSI = current CR3 */
	movq	PCB_CR3(%r14),%rcx		/* RCX = desired CR3 */
	cmpq	%rsi,%rcx
	/*je	4f*/
2:
#if defined(SWTCH_OPTIM_STATS)
	decl	_swtch_optim_stats
	incl	_tlb_flush_count
#endif
	movq	%rcx,%cr3
4:

	/*
	 * NOTE: %rbx is the previous thread and %r12 is the new thread.
	 *	 %rbx is retained throughout so we can return it.
	 *
	 *	 lwkt_switch[_return] is responsible for handling TDF_RUNNING.
	 */

	/*
	 * Deal with the PCB extension, restore the private tss
	 */
	movq	PCB_EXT(%r14),%rdi	/* check for a PCB extension */
	movq	$1,%rcx			/* maybe mark use of a private tss */

	/*
	 * Going back to the common_tss.  (this was already executed at
	 * the top).
	 *
	 * Set the top of the supervisor stack for the new thread
	 * in gd_thread_pcb so the trampoline code can load it into %rsp.
	 */
	movq	%r14, PCPU(trampoline)+TR_PCB_RSP
	movq	PCB_FLAGS(%r14), %rcx
	movq	%rcx, PCPU(trampoline)+TR_PCB_FLAGS
	movq	PCB_CR3_ISO(%r14), %rcx
	movq	%rcx, PCPU(trampoline)+TR_PCB_CR3_ISO
	movq	PCB_CR3(%r14), %rcx
	movq	%rcx, PCPU(trampoline)+TR_PCB_CR3

	cmpl	$0,PCPU(private_tss)		/* don't have to reload if */
	je	3f				/* already using the common TSS */

	subq	%rcx,%rcx			/* unmark use of private tss */

	/*
	 * Get the address of the common TSS descriptor for the ltr.
	 * There is no way to get the address of a segment-accessed variable
	 * so we store a self-referential pointer at the base of the per-cpu
	 * data area and add the appropriate offset.
	 */
	movq	$gd_common_tssd, %rdi
	/* JG name for "%gs:0"? */
	addq	%gs:0, %rdi

	/*
	 * Move the correct TSS descriptor into the GDT slot, then reload
	 * ltr.
	 */
	movl	%ecx,PCPU(private_tss)		/* mark/unmark private tss */
	movq	PCPU(tss_gdt), %rbx		/* entry in GDT */
	movq	0(%rdi), %rax
	movq	%rax, 0(%rbx)
	movl	$GPROC0_SEL*8, %esi		/* GSEL(entry, SEL_KPL) */
	ltr	%si
3:
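	/*
	 * (GPROC0_SEL*8 forms the TSS selector loaded by ltr: a selector is
	 * the descriptor-table index scaled by 8 with the TI/RPL bits in
	 * the low three bits, so index*8 is GSEL(index, SEL_KPL).)
	 */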

	/*
	 * Restore the user %gs and %fs
	 */
	movq	PCB_FSBASE(%r14),%r9
	cmpq	PCPU(user_fs),%r9
	je	4f
	movq	%r9,PCPU(user_fs)
	movl	$MSR_FSBASE,%ecx
	movl	PCB_FSBASE(%r14),%eax
	movl	PCB_FSBASE+4(%r14),%edx
	wrmsr
4:
	movq	PCB_GSBASE(%r14),%r9
	cmpq	PCPU(user_gs),%r9
	je	5f
	movq	%r9,PCPU(user_gs)
	movl	$MSR_KGSBASE,%ecx	/* later swapgs moves it to GSBASE */
	movl	PCB_GSBASE(%r14),%eax
	movl	PCB_GSBASE+4(%r14),%edx
	wrmsr
5:
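	/*
	 * (wrmsr writes %edx:%eax to the MSR selected by %ecx, which is why
	 * the 64-bit FS/GS base is loaded as two 32-bit halves above.
	 * MSR_KGSBASE is the "kernel GS base" MSR; the swapgs executed on
	 * the way back to usermode exchanges it into the active GS base.)
	 */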

	/*
	 * Actively restore FP state
	 */
	movq	PCPU(npxthread),%r13
	testq	%r13,%r13
	jnz	1f
	movl	TD_FLAGS(%r12),%r13d
	andq	$TDF_USINGFP,%r13
	jz	1f
	movq	%r12,%rdi		/* npxdna_quick(newtd) */
	call	npxdna_quick
1:

	/*
	 * Restore general registers.  %rbx is restored later.
	 *
	 * Switch our PCB register from %r14 to %rdx so we can restore
	 * %r14.
	 */
	movq	%r14,%rdx
	movq	PCB_RSP(%rdx), %rsp
	movq	PCB_RBP(%rdx), %rbp
	movq	PCB_R12(%rdx), %r12
	movq	PCB_R13(%rdx), %r13
	movq	PCB_R14(%rdx), %r14
	movq	PCB_R15(%rdx), %r15
	movq	PCB_RIP(%rdx), %rax
	movq	%rax, (%rsp)

	/*
	 * Restore the user LDT if we have one
	 */
#if 0	/* JG */
	cmpl	$0, PCB_USERLDT(%edx)
	movl	_default_ldt,%eax
	cmpl	PCPU(currentldt),%eax
	movl	%eax,PCPU(currentldt)
#endif

	/*
	 * Restore the user TLS if we have one
	 */

	/*
	 * Restore the DEBUG register state if necessary.
	 */
	movq	PCB_FLAGS(%rdx),%rax
	andq	$PCB_DBREGS,%rax
	jz	1f				/* no, skip over */
	movq	PCB_DR6(%rdx),%rax		/* yes, do the restore */
	movq	%rax,%dr6
	movq	PCB_DR3(%rdx),%rax
	movq	%rax,%dr3
	movq	PCB_DR2(%rdx),%rax
	movq	%rax,%dr2
	movq	PCB_DR1(%rdx),%rax
	movq	%rax,%dr1
	movq	PCB_DR0(%rdx),%rax
	movq	%rax,%dr0
	movq	%dr7,%rax			/* load dr7 so as not to disturb */
	/* JG correct value? */
	andq	$0x0000fc00,%rax		/* reserved bits */
	/* JG we've got more registers on x86_64 */
	movq	PCB_DR7(%rdx),%rcx
	/* JG correct value? */
	andq	$~0x0000fc00,%rcx
	orq	%rcx,%rax
	movq	%rax,%dr7

	/*
	 * Clear the QUICKRET flag when restoring a user process context
	 * so we don't try to do a quick syscall return.
	 */
1:
	andl	$~RQF_QUICKRET,PCPU(reqflags)
	movq	%rbx,%rax			/* return previous thread */
	movq	PCB_RBX(%rdx),%rbx
	ret
END(cpu_heavy_restore)

/*
 * savectx(struct pcb *pcb)
 *
 * Update pcb, saving current processor state.
 */
ENTRY(savectx)
	/* JG use %rdi instead of %rcx everywhere? */
	movq	%rdi,%rcx

	/* caller's return address - child won't execute this routine */
	movq	(%rsp),%rax
	movq	%rax,PCB_RIP(%rcx)

	movq	%cr3,%rax
	movq	%rax,PCB_CR3(%rcx)

	movq	%rbx,PCB_RBX(%rcx)
	movq	%rsp,PCB_RSP(%rcx)
	movq	%rbp,PCB_RBP(%rcx)
	movq	%r12,PCB_R12(%rcx)
	movq	%r13,PCB_R13(%rcx)
	movq	%r14,PCB_R14(%rcx)
	movq	%r15,PCB_R15(%rcx)

	/*
	 * If npxthread == NULL, then the npx h/w state is irrelevant and the
	 * state had better already be in the pcb.  This is true for forks
	 * but not for dumps (the old book-keeping with FP flags in the pcb
	 * always lost for dumps because the dump pcb has 0 flags).
	 *
	 * If npxthread != NULL, then we have to save the npx h/w state to
	 * npxthread's pcb and copy it to the requested pcb, or save to the
	 * requested pcb and reload.  Copying is easier because we would
	 * have to handle h/w bugs for reloading.  We used to lose the
	 * parent's npx state for forks by forgetting to reload.
	 */
	movq	PCPU(npxthread),%rax
	testq	%rax,%rax
	jz	1f

	pushq	%rcx			/* target pcb */
	movq	TD_SAVEFPU(%rax),%rax	/* originating savefpu area */
	pushq	%rax

	movq	%rax,%rdi
	call	npxsave

	popq	%rax
	popq	%rcx

	movq	$PCB_SAVEFPU_SIZE,%rdx
	leaq	PCB_SAVEFPU(%rcx),%rcx
	movq	%rcx,%rsi		/* copy to the requested pcb area */
	movq	%rax,%rdi		/* from the originating savefpu area */
	call	bcopy
1:
	ret
END(savectx)

/*
 * cpu_idle_restore()	(current thread in %rax on entry, old thread in %rbx)
 *
 *	Don't bother setting up any regs other than %rbp so backtraces
 *	don't die.  This restore function is used to bootstrap into the
 *	cpu_idle() LWKT only, after that cpu_lwkt_*() will be used for
 *	switching.
 *
 *	Clear TDF_RUNNING in old thread only after we've cleaned up %cr3.
 *	This only occurs during system boot so no special handling is
 *	required for migration.
 *
 *	If we are an AP we have to call ap_init() before jumping to
 *	cpu_idle().  ap_init() will synchronize with the BP and finish
 *	setting up various ncpu-dependent globaldata fields.  This may
 *	happen on UP as well as SMP if we happen to be simulating multiple
 *	cpus.
 */
ENTRY(cpu_idle_restore)
	movq	KPML4phys,%rcx
	xorq	%rbp,%rbp		/* dummy frame pointer */
	pushq	$0			/* dummy return pc */

	/* NOTE: idle thread can never preempt */
	movq	%rcx,%cr3
	cmpl	$0,PCPU(cpuid)
	je	1f
	andl	$~TDF_RUNNING,TD_FLAGS(%rbx)
	orl	$TDF_RUNNING,TD_FLAGS(%rax)	/* manual, no switch_return */
	call	ap_init
	/*
	 * ap_init can decide to enable interrupts early, but otherwise, or if
	 * we are UP, do it here.
	 */
	sti
	jmp	cpu_idle
	/*
	 * cpu 0's idle thread entry for the first time must use normal
	 * lwkt_switch_return() semantics or a pending cpu migration on
	 * thread0 will deadlock.
	 */
1:
	movq	%rbx,%rdi
	call	lwkt_switch_return
	sti
	jmp	cpu_idle
END(cpu_idle_restore)

/*
 * cpu_kthread_restore() (current thread is %rax on entry, previous is %rbx)
 *			 (one-time execution)
 *
 *	Don't bother setting up any regs other than %rbp so backtraces
 *	don't die.  This restore function is used to bootstrap into an
 *	LWKT based kernel thread only.  cpu_lwkt_switch() will be used
 *	after this.
 *
 *	Because this switch target does not 'return' to lwkt_switch()
 *	we have to call lwkt_switch_return(otd) to clean up otd.
 *
 *	Since all of our context is on the stack we are reentrant and
 *	we can release our critical section and enable interrupts early.
 */
ENTRY(cpu_kthread_restore)
	sti
	movq	KPML4phys,%rcx
	movq	TD_PCB(%rax),%r13
	xorq	%rbp,%rbp

#ifdef PREEMPT_OPTIMIZE
	/*
	 * If we are preempting someone we borrow their %cr3, do not overwrite
	 * it!
	 */
	movq	TD_PREEMPTED(%rax),%r14
	testq	%r14,%r14
	jne	1f
#endif
	movq	%rcx,%cr3
1:

	/*
	 * rax and rbx come from the switchout code.  Call
	 * lwkt_switch_return(otd).
	 *
	 * NOTE: unlike i386, the %rsi and %rdi are not call-saved regs.
	 */
	pushq	%rax
	movq	%rbx,%rdi
	call	lwkt_switch_return
	popq	%rax
	decl	TD_CRITCOUNT(%rax)
	movq	PCB_R12(%r13),%rdi	/* argument to RBX function */
	movq	PCB_RBX(%r13),%rax	/* thread function */
	/* note: top of stack return address inherited by function */
	jmp	*%rax
END(cpu_kthread_restore)

/*
 * cpu_lwkt_switch(struct thread *)
 *
 *	Standard LWKT switching function.  Only non-scratch registers are
 *	saved and we don't bother with the MMU state or anything else.
 *
 *	This function is always called while in a critical section.
 *
 *	There is a one-instruction window where curthread is the new
 *	thread but %rsp still points to the old thread's stack, but
 *	we are protected by a critical section so it is ok.
 */
ENTRY(cpu_lwkt_switch)
	pushq	%rbp	/* JG note: GDB hacked to locate ebp rel to td_sp */
	pushq	%rbx
	movq	PCPU(curthread),%rbx	/* becomes old thread in restore */
	pushq	%r12
	pushq	%r13
	pushq	%r14
	pushq	%r15
	pushfq
	cli

	/*
	 * Save the FP state if we have used the FP.  Note that calling
	 * npxsave will NULL out PCPU(npxthread).
	 *
	 * We have to deal with the FP state for LWKT threads in case they
	 * happen to get preempted or block while doing an optimized
	 * bzero/bcopy/memcpy.
	 */
	cmpq	%rbx,PCPU(npxthread)
	jne	1f
	movq	%rdi,%r12		/* save %rdi.  %r12 is callee-saved */
	movq	TD_SAVEFPU(%rbx),%rdi
	call	npxsave			/* do it in a big C function */
	movq	%r12,%rdi		/* restore %rdi */
1:

	movq	%rdi,%rax		/* switch to this thread */
	pushq	$cpu_lwkt_restore
	movq	%rsp,TD_SP(%rbx)

	/*
	 * %rax contains new thread, %rbx contains old thread.
	 */
	movq	%rax,PCPU(curthread)
	movq	TD_SP(%rax),%rsp
	ret
END(cpu_lwkt_switch)

/*
 * cpu_lwkt_restore()	(current thread in %rax on entry)
 *
 *	Standard LWKT restore function.  This function is always called
 *	while in a critical section.
 *
 *	WARNING! Due to preemption the restore function can be used to 'return'
 *		 to the original thread.  Interrupt disablement must be
 *		 protected through the switch so we cannot run splz here.
 */
ENTRY(cpu_lwkt_restore)
#ifdef PREEMPT_OPTIMIZE
	/*
	 * If we are preempting someone we borrow their %cr3 and pmap
	 */
	movq	TD_PREEMPTED(%rax),%r14	/* kernel thread preempting? */
	testq	%r14,%r14
	jne	1f			/* yes, borrow %cr3 from old thread */
#endif
	/*
	 * Don't reload %cr3 if it hasn't changed.  Since this is a LWKT
	 * thread (a kernel thread), and the kernel_pmap always permanently
	 * sets all pm_active bits, we don't have the same problem with it
	 * that we do with process pmaps.
	 */
	movq	KPML4phys,%rcx
	movq	%cr3,%rdx
	cmpq	%rcx,%rdx
	je	1f
	movq	%rcx,%cr3
1:
	/*
	 * NOTE: %rbx is the previous thread and %rax is the new thread.
	 *	 %rbx is retained throughout so we can return it.
	 *
	 *	 lwkt_switch[_return] is responsible for handling TDF_RUNNING.
	 */
	movq	%rbx,%rax			/* return previous thread */
	popfq
	popq	%r15
	popq	%r14
	popq	%r13
	popq	%r12
	popq	%rbx
	popq	%rbp
	ret
END(cpu_lwkt_restore)