/*
 * Copyright (c) 2003,2004,2008 The DragonFly Project.  All rights reserved.
 * Copyright (c) 2008 Jordan Gordeev.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/i386/i386/swtch.s,v 1.89.2.10 2003/01/23 03:36:24 ps Exp $
 */
//#include "use_npx.h"

#include <sys/rtprio.h>

#include <machine/asmacros.h>
#include <machine/segments.h>
#include <machine/pmap.h>
#include <machine_base/apic/apicreg.h>
#include <machine/lock.h>

#define	MPLOCKED	lock ;

	.globl	lwkt_switch_return

#if defined(SWTCH_OPTIM_STATS)
	.globl	swtch_optim_stats, tlb_flush_count
swtch_optim_stats:	.long	0		/* number of _swtch_optims */
tlb_flush_count:	.long	0
#endif
/*
 * cpu_heavy_switch(struct thread *next_thread)
 *
 *	Switch from the current thread to a new thread.  This entry
 *	is normally called via the thread->td_switch function, and will
 *	only be called when the current thread is a heavy weight process.
 *
 *	Some instructions have been reordered to reduce pipeline stalls.
 *
 *	YYY disable interrupts once giant is removed.
 */
ENTRY(cpu_heavy_switch)
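	/*
	 * Illustrative C-like sketch of the save half of the switch (names
	 * are approximate, drawn from the PCB offsets used below, not an
	 * exact C API):
	 *
	 *	oldtd = curthread;
	 *	pcb = oldtd->td_pcb;
	 *	pcb->pcb_rip = *(uintptr_t *)rsp;	// return address
	 *	pcb->pcb_rsp = rsp;
	 *	pcb->pcb_rbx, pcb_rbp, pcb_r12..r15 = callee-saved registers;
	 *	// cpu_heavy_restore() later reloads exactly these fields.
	 */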
	/*
	 * Save RIP, RSP and callee-saved registers (RBX, RBP, R12-R15).
	 */
	movq	PCPU(curthread),%rcx
	/* On top of the stack is the return address. */
	movq	(%rsp),%rax			/* (reorder optimization) */
	movq	TD_PCB(%rcx),%rdx		/* RDX = PCB */
	movq	%rax,PCB_RIP(%rdx)		/* return PC may be modified */
	movq	%rbx,PCB_RBX(%rdx)
	movq	%rsp,PCB_RSP(%rdx)
	movq	%rbp,PCB_RBP(%rdx)
	movq	%r12,PCB_R12(%rdx)
	movq	%r13,PCB_R13(%rdx)
	movq	%r14,PCB_R14(%rdx)
	movq	%r15,PCB_R15(%rdx)
	/*
	 * Clear the cpu bit in the pmap active mask.  The restore
	 * function will set the bit in the pmap active mask.
	 *
	 * Special case: when switching between threads sharing the
	 * same vmspace if we avoid clearing the bit we do not have
	 * to reload %cr3 (if we clear the bit we could race page
	 * table ops done by other threads and would have to reload
	 * %cr3, because those ops will not know to IPI us).
	 */
	movq	%rcx,%rbx			/* RBX = oldthread */
	movq	TD_LWP(%rcx),%rcx		/* RCX = oldlwp */
	movq	TD_LWP(%rdi),%r13		/* R13 = newlwp */
	movq	LWP_VMSPACE(%rcx),%rcx		/* RCX = oldvmspace */
	testq	%r13,%r13			/* might not be a heavy */
	jz	1f				/* not a heavy, clear the bit */
	cmpq	LWP_VMSPACE(%r13),%rcx		/* same vmspace? */
	je	2f				/* yes, leave the bit set */
1:
#if CPUMASK_ELEMENTS != 4
#error "assembly incompatible with cpumask_t"
#endif
	movq	PCPU(other_cpus)+0,%rax
	MPLOCKED andq	%rax, VM_PMAP+PM_ACTIVE+0(%rcx)
	movq	PCPU(other_cpus)+8,%rax
	MPLOCKED andq	%rax, VM_PMAP+PM_ACTIVE+8(%rcx)
	movq	PCPU(other_cpus)+16,%rax
	MPLOCKED andq	%rax, VM_PMAP+PM_ACTIVE+16(%rcx)
	movq	PCPU(other_cpus)+24,%rax
	MPLOCKED andq	%rax, VM_PMAP+PM_ACTIVE+24(%rcx)
2:
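	/*
	 * The MPLOCKED andq sequence above is, in effect (illustrative
	 * sketch only; helper names are approximate):
	 *
	 *	for (i = 0; i < CPUMASK_ELEMENTS; ++i)
	 *		atomic_and_64(&oldvm->vm_pmap.pm_active.ary[i],
	 *			      mycpu->gd_other_cpus.ary[i]);
	 *
	 * gd_other_cpus excludes this cpu's bit, so the locked and clears
	 * only our bit from the pmap's active mask.
	 */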
	/*
	 * Push the LWKT switch restore function, which resumes a heavy
	 * weight process.  Note that the LWKT switcher is based on
	 * TD_SP, while the heavy weight process switcher is based on
	 * PCB_RSP.  TD_SP is usually two ints pushed relative to
	 * PCB_RSP.  We push the flags for later restore by cpu_heavy_restore.
	 */
	pushfq
	movq	$cpu_heavy_restore, %rax
	pushq	%rax
	movq	%rsp,TD_SP(%rbx)
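	/*
	 * After the two pushes above the old thread's stack looks like
	 * this (top of stack first):
	 *
	 *	TD_SP  ->	&cpu_heavy_restore
	 *			saved rflags
	 *	PCB_RSP ->	original return address
	 *
	 * so an LWKT-style "load TD_SP; ret" lands in cpu_heavy_restore.
	 * That is what the comment means by TD_SP sitting a couple of
	 * pushes below PCB_RSP.
	 */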
	/*
	 * Save debug regs if necessary
	 */
	movq	PCB_FLAGS(%rdx),%rax
	andq	$PCB_DBREGS,%rax
	jz	1f				/* no, skip over */
	movq	%dr7,%rax			/* yes, do the save */
	movq	%rax,PCB_DR7(%rdx)
	/* JG correct value? */
	andq	$0x0000fc00, %rax		/* disable all watchpoints */
	movq	%rax,%dr7
	movq	%dr6,%rax
	movq	%rax,PCB_DR6(%rdx)
	movq	%dr3,%rax
	movq	%rax,PCB_DR3(%rdx)
	movq	%dr2,%rax
	movq	%rax,PCB_DR2(%rdx)
	movq	%dr1,%rax
	movq	%rax,PCB_DR1(%rdx)
	movq	%dr0,%rax
	movq	%rax,PCB_DR0(%rdx)
1:
	/*
	 * Save the FP state if we have used the FP.  Note that calling
	 * npxsave will NULL out PCPU(npxthread).
	 */
	cmpq	%rbx,PCPU(npxthread)
	jne	1f
	movq	%rdi,%r12		/* save %rdi.  %r12 is callee-saved */
	movq	TD_SAVEFPU(%rbx),%rdi
	call	npxsave			/* do it in a big C function */
	movq	%r12,%rdi		/* restore %rdi */
1:
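	/*
	 * Equivalent logic, as an illustrative C-like sketch (names
	 * approximate):
	 *
	 *	if (PCPU(npxthread) == oldtd)
	 *		npxsave(oldtd->td_savefpu);	// also clears npxthread
	 *
	 * Only the thread that currently owns the FP hardware needs its
	 * FP context written back before we switch away.
	 */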
	/*
	 * Switch to the next thread, which was passed as an argument
	 * to cpu_heavy_switch().  The argument is in %rdi.
	 * Set the current thread, load the stack pointer,
	 * and 'ret' into the switch-restore function.
	 *
	 * The switch restore function expects the new thread to be in %rax
	 * and the old one to be in %rbx.
	 *
	 * There is a one-instruction window where curthread is the new
	 * thread but %rsp still points to the old thread's stack, but
	 * we are protected by a critical section so it is ok.
	 */
	movq	%rdi,%rax		/* RAX = newtd, RBX = oldtd */
	movq	%rax,PCPU(curthread)
	movq	TD_SP(%rax),%rsp
	ret
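	/*
	 * In other words (illustrative sketch):
	 *
	 *	curthread = newtd;
	 *	rsp = newtd->td_sp;
	 *	ret;	// pops whatever restore function the new thread
	 *		// pushed when it last switched out; for a heavy
	 *		// weight thread that is cpu_heavy_restore.
	 */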
/*
 * cpu_exit_switch(struct thread *next)
 *
 *	The switch function is changed to this when a thread is going away
 *	for good.  We have to ensure that the MMU state is not cached, and
 *	we don't bother saving the existing thread state before switching.
 *
 *	At this point we are in a critical section and this cpu owns the
 *	thread's token, which serves as an interlock until the switchout is
 *	complete.
 */
ENTRY(cpu_exit_switch)
	/*
	 * Get us out of the vmspace
	 */
	movq	KPML4phys,%rcx
	movq	%cr3,%rax
	cmpq	%rcx,%rax
	je	1f
	/* JG no increment of statistics counters? see cpu_heavy_restore */
	movq	%rcx,%cr3
1:
	movq	PCPU(curthread),%rbx
	/*
	 * If this is a process/lwp, deactivate the pmap after we've
	 * switched it out.
	 */
	movq	TD_LWP(%rbx),%rcx
	testq	%rcx,%rcx
	jz	2f
	movq	LWP_VMSPACE(%rcx),%rcx		/* RCX = vmspace */
	movq	PCPU(other_cpus)+0,%rax
	MPLOCKED andq	%rax, VM_PMAP+PM_ACTIVE+0(%rcx)
	movq	PCPU(other_cpus)+8,%rax
	MPLOCKED andq	%rax, VM_PMAP+PM_ACTIVE+8(%rcx)
	movq	PCPU(other_cpus)+16,%rax
	MPLOCKED andq	%rax, VM_PMAP+PM_ACTIVE+16(%rcx)
	movq	PCPU(other_cpus)+24,%rax
	MPLOCKED andq	%rax, VM_PMAP+PM_ACTIVE+24(%rcx)
2:
	/*
	 * Switch to the next thread.  RET into the restore function, which
	 * expects the new thread in RAX and the old in RBX.
	 *
	 * There is a one-instruction window where curthread is the new
	 * thread but %rsp still points to the old thread's stack, but
	 * we are protected by a critical section so it is ok.
	 */
	movq	%rdi,%rax		/* RAX = newtd (passed in %rdi) */
	movq	%rax,PCPU(curthread)
	movq	TD_SP(%rax),%rsp
	ret
/*
 * cpu_heavy_restore()	(current thread in %rax on entry, old thread in %rbx)
 *
 *	Restore the thread after an LWKT switch.  This entry is normally
 *	called via the LWKT switch restore function, which was pulled
 *	off the thread stack and jumped to.
 *
 *	This entry is only called if the thread was previously saved
 *	using cpu_heavy_switch() (the heavy weight process thread switcher),
 *	or when a new process is initially scheduled.
 *
 *	NOTE: The lwp may be in any state, not necessarily LSRUN, because
 *	a preemption switch may interrupt the process and then return via
 *	cpu_heavy_restore.
 *
 *	YYY theoretically we do not have to restore everything here, a lot
 *	of this junk can wait until we return to usermode.  But for now
 *	we restore everything.
 *
 *	YYY the PCB crap is really crap, it makes startup a bitch because
 *	we can't switch away.
 *
 *	YYY note: spl check is done in mi_switch when it splx()'s.
 */
ENTRY(cpu_heavy_restore)
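	/*
	 * Overview of the restore path below (illustrative summary; the
	 * details follow in the code):
	 *
	 *	1. Point TSS_RSP0 at the new thread's PCB.
	 *	2. Set our cpu bit in the new vmspace's PM_ACTIVE mask and
	 *	   honor the CPULOCK_EXCL interlock if a pmap op is in flight.
	 *	3. Reload %cr3 only if it actually changes (a TLB flush is
	 *	   expensive), or unconditionally after an interlock wait.
	 *	4. Restore TSS/GDT state, the user FS/GS base MSRs, the debug
	 *	   registers, and the callee-saved registers from the PCB.
	 *	5. ret to PCB_RIP with the previous thread returned in %rax.
	 */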
	movq	TD_PCB(%rax),%rdx		/* RDX = PCB */
	movq	%rdx, PCPU(common_tss) + TSS_RSP0
	popfq					/* flags pushed by cpu_heavy_switch */

#if defined(SWTCH_OPTIM_STATS)
	incl	_swtch_optim_stats
#endif
	/*
	 * Tell the pmap that our cpu is using the VMSPACE now.  We cannot
	 * safely test/reload %cr3 until after we have set the bit in the
	 * pmap.
	 *
	 * We must do an interlocked test of the CPULOCK_EXCL at the same
	 * time.  If found to be set we will have to wait for it to clear
	 * and then do a forced reload of %cr3 (even if the value matches).
	 *
	 * XXX When switching between two LWPs sharing the same vmspace
	 *     the cpu_heavy_switch() code currently avoids clearing the
	 *     cpu bit in PM_ACTIVE.  So if the bit is already set we can
	 *     avoid checking for the interlock via CPULOCK_EXCL.  We currently
	 *     do not perform this optimization.
	 */
	movq	TD_LWP(%rax),%rcx
	movq	LWP_VMSPACE(%rcx),%rcx		/* RCX = vmspace */

#if CPUMASK_ELEMENTS != 4
#error "assembly incompatible with cpumask_t"
#endif
	movq	PCPU(cpumask)+0,%rsi		/* new contents */
	MPLOCKED orq	%rsi, VM_PMAP+PM_ACTIVE+0(%rcx)
	movq	PCPU(cpumask)+8,%rsi
	MPLOCKED orq	%rsi, VM_PMAP+PM_ACTIVE+8(%rcx)
	movq	PCPU(cpumask)+16,%rsi
	MPLOCKED orq	%rsi, VM_PMAP+PM_ACTIVE+16(%rcx)
	movq	PCPU(cpumask)+24,%rsi
	MPLOCKED orq	%rsi, VM_PMAP+PM_ACTIVE+24(%rcx)
	movl	VM_PMAP+PM_ACTIVE_LOCK(%rcx),%esi
	testl	$CPULOCK_EXCL,%esi
	jz	1f

	movq	%rax,%r12		/* save newthread ptr */
	movq	%rcx,%rdi		/* (found to be set) */
	call	pmap_interlock_wait	/* pmap_interlock_wait(%rdi:vm) */
	movq	%r12,%rax		/* restore newthread ptr */

	/*
	 * Need unconditional load cr3
	 */
	movq	TD_PCB(%rax),%rdx	/* RDX = PCB */
	movq	PCB_CR3(%rdx),%rcx	/* RCX = desired CR3 */
	jmp	2f			/* unconditional reload */
1:
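	/*
	 * Interlock logic above, as an illustrative C-like sketch (names
	 * approximate):
	 *
	 *	if (vm->vm_pmap.pm_active_lock & CPULOCK_EXCL) {
	 *		pmap_interlock_wait(vm);	// a pmap op is in flight
	 *		force_cr3_reload = 1;		// even if %cr3 matches
	 *	}
	 */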
	/*
	 * Restore the MMU address space.  If it is the same as the last
	 * thread we don't have to invalidate the tlb (i.e. reload cr3).
	 * YYY which naturally also means that the PM_ACTIVE bit had better
	 * already have been set before we set it above, check? YYY
	 */
	movq	TD_PCB(%rax),%rdx		/* RDX = PCB */
	movq	%cr3,%rsi			/* RSI = current CR3 */
	movq	PCB_CR3(%rdx),%rcx		/* RCX = desired CR3 */
	cmpq	%rsi,%rcx
	je	4f
#if defined(SWTCH_OPTIM_STATS)
	decl	_swtch_optim_stats
	incl	_tlb_flush_count
#endif
2:
	movq	%rcx,%cr3
4:
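	/*
	 * i.e. (illustrative sketch):
	 *
	 *	if (force_cr3_reload || pcb->pcb_cr3 != rdcr3())
	 *		load_cr3(pcb->pcb_cr3);	// flushes non-global TLB entries
	 *	// otherwise the TLB still maps this vmspace and we skip
	 *	// the expensive flush.
	 */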
	/*
	 * NOTE: %rbx is the previous thread and %rax is the new thread.
	 *	 %rbx is retained throughout so we can return it.
	 *
	 *	 lwkt_switch[_return] is responsible for handling TDF_RUNNING.
	 */

	/*
	 * Deal with the PCB extension, restore the private tss
	 */
	movq	PCB_EXT(%rdx),%rdi	/* check for a PCB extension */
	movq	$1,%rcx			/* maybe mark use of a private tss */
	/*
	 * Going back to the common_tss.  We may need to update TSS_RSP0
	 * which sets the top of the supervisor stack when entering from
	 * usermode.  The PCB is at the top of the stack but we need another
	 * 16 bytes to take vm86 into account.
	 */
	movq	%rdx,%rcx
	/*leaq	-TF_SIZE(%rdx),%rcx*/
	movq	%rcx, PCPU(common_tss) + TSS_RSP0

	cmpl	$0,PCPU(private_tss)		/* don't have to reload if      */
	je	3f				/* already using the common TSS */

	subq	%rcx,%rcx			/* unmark use of private tss */
	/*
	 * Get the address of the common TSS descriptor for the ltr.
	 * There is no way to get the address of a segment-accessed variable
	 * so we store a self-referential pointer at the base of the per-cpu
	 * data area and add the appropriate offset.
	 */
	movq	$gd_common_tssd, %rdi
	addq	%gs:0, %rdi			/* JG name for "%gs:0"? */

	/*
	 * Move the correct TSS descriptor into the GDT slot, then reload
	 * ltr.
	 */
	movl	%ecx,PCPU(private_tss)		/* mark/unmark private tss */
	movq	PCPU(tss_gdt), %rbx		/* entry in GDT */
	movq	(%rdi), %rax
	movq	%rax, (%rbx)
	movq	8(%rdi), %rax
	movq	%rax, 8(%rbx)
	movl	$GPROC0_SEL*8, %esi		/* GSEL(entry, SEL_KPL) */
	ltr	%si
3:
	/*
	 * Restore the user %gs and %fs
	 */
	movq	PCB_FSBASE(%rdx),%r9
	cmpq	PCPU(user_fs),%r9
	je	4f
	movq	%rdx,%r10		/* save PCB ptr, wrmsr clobbers %edx */
	movq	%r9,PCPU(user_fs)
	movl	$MSR_FSBASE,%ecx
	movl	PCB_FSBASE(%r10),%eax
	movl	PCB_FSBASE+4(%r10),%edx
	wrmsr
	movq	%r10,%rdx		/* restore PCB ptr */
4:
	movq	PCB_GSBASE(%rdx),%r9
	cmpq	PCPU(user_gs),%r9
	je	5f
	movq	%rdx,%r10
	movq	%r9,PCPU(user_gs)
	movl	$MSR_KGSBASE,%ecx	/* later swapgs moves it to GSBASE */
	movl	PCB_GSBASE(%r10),%eax
	movl	PCB_GSBASE+4(%r10),%edx
	wrmsr
	movq	%r10,%rdx
5:
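	/*
	 * Equivalent logic (illustrative sketch; wrmsr takes the MSR index
	 * in %ecx and the value in %edx:%eax, which is why the PCB pointer
	 * is parked in %r10 around it):
	 *
	 *	if (pcb->pcb_fsbase != PCPU(user_fs)) {
	 *		PCPU(user_fs) = pcb->pcb_fsbase;
	 *		wrmsr(MSR_FSBASE, pcb->pcb_fsbase);
	 *	}
	 *	if (pcb->pcb_gsbase != PCPU(user_gs)) {
	 *		PCPU(user_gs) = pcb->pcb_gsbase;
	 *		wrmsr(MSR_KGSBASE, pcb->pcb_gsbase);	// swapgs later
	 *	}
	 */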
	/*
	 * Restore general registers.  %rbx is restored later.
	 */
	movq	PCB_RSP(%rdx), %rsp
	movq	PCB_RBP(%rdx), %rbp
	movq	PCB_R12(%rdx), %r12
	movq	PCB_R13(%rdx), %r13
	movq	PCB_R14(%rdx), %r14
	movq	PCB_R15(%rdx), %r15
	movq	PCB_RIP(%rdx), %rax
	movq	%rax, (%rsp)			/* restore return address */
#if 0	/* JG */
	/*
	 * Restore the user LDT if we have one
	 */
	cmpl	$0, PCB_USERLDT(%edx)
	movl	_default_ldt,%eax
	cmpl	PCPU(currentldt),%eax
	movl	%eax,PCPU(currentldt)

	/*
	 * Restore the user TLS if we have one
	 */
#endif
	/*
	 * Restore the DEBUG register state if necessary.
	 */
	movq	PCB_FLAGS(%rdx),%rax
	andq	$PCB_DBREGS,%rax
	jz	1f				/* no, skip over */
	movq	PCB_DR6(%rdx),%rax		/* yes, do the restore */
	movq	%rax,%dr6
	movq	PCB_DR3(%rdx),%rax
	movq	%rax,%dr3
	movq	PCB_DR2(%rdx),%rax
	movq	%rax,%dr2
	movq	PCB_DR1(%rdx),%rax
	movq	%rax,%dr1
	movq	PCB_DR0(%rdx),%rax
	movq	%rax,%dr0
	movq	%dr7,%rax			/* load dr7 so as not to disturb */
	/* JG correct value? */
	andq	$0x0000fc00,%rax		/* reserved bits */
	/* JG we've got more registers on x86_64 */
	movq	PCB_DR7(%rdx),%rcx
	/* JG correct value? */
	andq	$~0x0000fc00,%rcx
	orq	%rcx,%rax
	movq	%rax,%dr7
1:
	/*
	 * Clear the QUICKRET flag when restoring a user process context
	 * so we don't try to do a quick syscall return.
	 */
	andl	$~RQF_QUICKRET,PCPU(reqflags)
	movq	%rbx,%rax			/* return previous thread */
	movq	PCB_RBX(%rdx),%rbx
	ret
/*
 * savectx(struct pcb *pcb)
 *
 * Update pcb, saving current processor state.
 */
ENTRY(savectx)
	/* Fetch PCB. */
	/* JG use %rdi instead of %rcx everywhere? */
	movq	%rdi,%rcx

	/* Caller's return address - child won't execute this routine. */
	movq	(%rsp),%rax
	movq	%rax,PCB_RIP(%rcx)

	movq	%cr3,%rax
	movq	%rax,PCB_CR3(%rcx)

	movq	%rbx,PCB_RBX(%rcx)
	movq	%rsp,PCB_RSP(%rcx)
	movq	%rbp,PCB_RBP(%rcx)
	movq	%r12,PCB_R12(%rcx)
	movq	%r13,PCB_R13(%rcx)
	movq	%r14,PCB_R14(%rcx)
	movq	%r15,PCB_R15(%rcx)
	/*
	 * If npxthread == NULL, then the npx h/w state is irrelevant and the
	 * state had better already be in the pcb.  This is true for forks
	 * but not for dumps (the old book-keeping with FP flags in the pcb
	 * always lost for dumps because the dump pcb has 0 flags).
	 *
	 * If npxthread != NULL, then we have to save the npx h/w state to
	 * npxthread's pcb and copy it to the requested pcb, or save to the
	 * requested pcb and reload.  Copying is easier because we would
	 * have to handle h/w bugs for reloading.  We used to lose the
	 * parent's npx state for forks by forgetting to reload.
	 */
	movq	PCPU(npxthread),%rax
	testq	%rax,%rax
	jz	1f

	pushq	%rcx				/* target pcb */
	movq	TD_SAVEFPU(%rax),%rax		/* originating savefpu area */
	pushq	%rax

	movq	%rax,%rdi
	call	npxsave

	popq	%rax				/* npxthread's savefpu area */
	popq	%rcx				/* target pcb */

	movq	$PCB_SAVEFPU_SIZE,%rdx		/* length */
	leaq	PCB_SAVEFPU(%rcx),%rcx		/* copy destination */
	movq	%rcx,%rsi
	movq	%rax,%rdi
	call	bcopy				/* bcopy(src, dst, len) */
1:
	ret
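	/*
	 * i.e. (illustrative sketch, following the comment above):
	 *
	 *	if (PCPU(npxthread) != NULL) {
	 *		npxsave(npxthread->td_savefpu);
	 *		bcopy(npxthread->td_savefpu, &pcb->pcb_savefpu,
	 *		      PCB_SAVEFPU_SIZE);
	 *	}
	 */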
/*
 * cpu_idle_restore()	(current thread in %rax on entry) (one-time execution)
 *
 *	Don't bother setting up any regs other than %rbp so backtraces
 *	don't die.  This restore function is used to bootstrap into the
 *	cpu_idle() LWKT only, after that cpu_lwkt_*() will be used for
 *	switching.
 *
 *	Clear TDF_RUNNING in old thread only after we've cleaned up %cr3.
 *	This only occurs during system boot so no special handling is
 *	required for migration.
 *
 *	If we are an AP we have to call ap_init() before jumping to
 *	cpu_idle().  ap_init() will synchronize with the BP and finish
 *	setting up various ncpu-dependent globaldata fields.  This may
 *	happen on UP as well as SMP if we happen to be simulating multiple
 *	cpus.
 */
ENTRY(cpu_idle_restore)
	movq	KPML4phys,%rcx
	xorq	%rbp,%rbp
	movq	%rcx,%cr3
	cmpl	$0,PCPU(cpuid)
	je	1f
	andl	$~TDF_RUNNING,TD_FLAGS(%rbx)
	orl	$TDF_RUNNING,TD_FLAGS(%rax)	/* manual, no switch_return */
	call	ap_init
	/*
	 * ap_init can decide to enable interrupts early, but otherwise, or if
	 * we are UP, do it here.
	 */
	sti
	jmp	cpu_idle

	/*
	 * cpu 0's idle thread entry for the first time must use normal
	 * lwkt_switch_return() semantics or a pending cpu migration on
	 * thread0 will deadlock.
	 */
1:
	pushq	%rax
	movq	%rbx,%rdi
	call	lwkt_switch_return
	popq	%rax
	jmp	cpu_idle
/*
 * cpu_kthread_restore() (current thread is %rax on entry, previous is %rbx)
 *			 (one-time execution)
 *
 *	Don't bother setting up any regs other than %rbp so backtraces
 *	don't die.  This restore function is used to bootstrap into an
 *	LWKT based kernel thread only.  cpu_lwkt_switch() will be used
 *	after this.
 *
 *	Because this switch target does not 'return' to lwkt_switch()
 *	we have to call lwkt_switch_return(otd) to clean up otd.
 *
 *	Since all of our context is on the stack we are reentrant and
 *	we can release our critical section and enable interrupts early.
 */
ENTRY(cpu_kthread_restore)
	sti
	movq	KPML4phys,%rcx
	movq	TD_PCB(%rax),%r13
	xorq	%rbp,%rbp
	movq	%rcx,%cr3

	/*
	 * rax and rbx come from the switchout code.  Call
	 * lwkt_switch_return(otd).
	 *
	 * NOTE: unlike i386, %rsi and %rdi are not call-saved regs.
	 */
	pushq	%rax
	movq	%rbx,%rdi
	call	lwkt_switch_return
	popq	%rax
	decl	TD_CRITCOUNT(%rax)
	movq	PCB_R12(%r13),%rdi	/* argument to RBX function */
	movq	PCB_RBX(%r13),%rax	/* thread function */
	/* note: top of stack return address inherited by function */
	jmp	*%rax
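	/*
	 * Net effect of the bootstrap above (illustrative sketch):
	 *
	 *	lwkt_switch_return(otd);	// finish switching out otd
	 *	--curthread->td_critcount;	// leave the switch critical section
	 *	func = pcb->pcb_rbx;		// thread function
	 *	arg  = pcb->pcb_r12;		// its argument
	 *	func(arg);			// return address already on the stack
	 */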
/*
 * cpu_lwkt_switch(struct thread *)
 *
 *	Standard LWKT switching function.  Only non-scratch registers are
 *	saved and we don't bother with the MMU state or anything else.
 *
 *	This function is always called while in a critical section.
 *
 *	There is a one-instruction window where curthread is the new
 *	thread but %rsp still points to the old thread's stack, but
 *	we are protected by a critical section so it is ok.
 */
ENTRY(cpu_lwkt_switch)
	pushq	%rbp	/* JG note: GDB hacked to locate ebp rel to td_sp */
	pushq	%rbx
	movq	PCPU(curthread),%rbx	/* becomes old thread in restore */
	pushq	%r12
	pushq	%r13
	pushq	%r14
	pushq	%r15
	pushfq
	/*
	 * Save the FP state if we have used the FP.  Note that calling
	 * npxsave will NULL out PCPU(npxthread).
	 *
	 * We have to deal with the FP state for LWKT threads in case they
	 * happen to get preempted or block while doing an optimized
	 * bzero/bcopy/memcpy.
	 */
	cmpq	%rbx,PCPU(npxthread)
	jne	1f
	movq	%rdi,%r12		/* save %rdi.  %r12 is callee-saved */
	movq	TD_SAVEFPU(%rbx),%rdi
	call	npxsave			/* do it in a big C function */
	movq	%r12,%rdi		/* restore %rdi */
1:

	movq	%rdi,%rax		/* switch to this thread */
	pushq	$cpu_lwkt_restore
	movq	%rsp,TD_SP(%rbx)
	/*
	 * %rax contains new thread, %rbx contains old thread.
	 */
	movq	%rax,PCPU(curthread)
	movq	TD_SP(%rax),%rsp
	ret
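/*
 * An LWKT switch is therefore just (illustrative sketch):
 *
 *	push callee-saved regs and rflags on the old stack;
 *	push &cpu_lwkt_restore;
 *	oldtd->td_sp = rsp;
 *	curthread = newtd;
 *	rsp = newtd->td_sp;
 *	ret;		// resumes whatever restore function newtd pushed
 *
 * No PCB or MMU state is touched, which is what makes it cheap compared
 * to cpu_heavy_switch().
 */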
/*
 * cpu_lwkt_restore()	(current thread in %rax on entry)
 *
 *	Standard LWKT restore function.  This function is always called
 *	while in a critical section.
 *
 *	Warning: due to preemption the restore function can be used to
 *	'return' to the original thread.  Interrupt disablement must be
 *	protected through the switch so we cannot run splz here.
 *
 *	YYY we theoretically do not need to load KPML4phys into cr3, but if
 *	so we need a way to detect when the PTD we are using is being
 *	deleted due to a process exiting.
 */
ENTRY(cpu_lwkt_restore)
	movq	KPML4phys,%rcx	/* YYY borrow but beware desched/cpuchg/exit */
	movq	%cr3,%rdx
	cmpq	%rcx,%rdx
	je	1f
	movq	%rcx,%cr3
1:
	/*
	 * Safety, clear RSP0 in the tss so it isn't pointing at the
	 * previous thread's kstack (if a heavy weight user thread).
	 * RSP0 should only be used in ring 3 transitions and kernel
	 * threads run in ring 0 so there should be none.
	 */
	xorq	%rdx,%rdx
	movq	%rdx, PCPU(common_tss) + TSS_RSP0
	/*
	 * NOTE: %rbx is the previous thread and %rax is the new thread.
	 *	 %rbx is retained throughout so we can return it.
	 *
	 *	 lwkt_switch[_return] is responsible for handling TDF_RUNNING.
	 */