more header changes for amd64 port; the pc64 building infrastructure
[dragonfly/port-amd64.git] / sys/platform/pc64/amd64/swtch.s
/*
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/i386/i386/swtch.s,v 1.89.2.10 2003/01/23 03:36:24 ps Exp $
 * $DragonFly: src/sys/platform/pc64/amd64/swtch.s,v 1.1 2007/09/23 04:29:31 yanyh Exp $
 */
#include "use_npx.h"

#include <sys/rtprio.h>

#include <machine/asmacros.h>
#include <machine/segments.h>

#include <machine/pmap.h>
#include <machine/lock.h>

#include "assym.s"
#if defined(SMP)
#define	MPLOCKED	lock ;
#else
#define	MPLOCKED
#endif
	.data

	.globl	panic

#if defined(SWTCH_OPTIM_STATS)
	.globl	swtch_optim_stats, tlb_flush_count
swtch_optim_stats:	.long	0		/* number of _swtch_optims */
tlb_flush_count:	.long	0
#endif
	.text

/*
 * cpu_heavy_switch(next_thread)
 *
 *	Switch from the current thread to a new thread.  This entry
 *	is normally called via the thread->td_switch function, and will
 *	only be called when the current thread is a heavy weight process.
 *
 *	Some instructions have been reordered to reduce pipeline stalls.
 *
 *	YYY disable interrupts once giant is removed.
 */
ENTRY(cpu_heavy_switch)
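	/*
	 * The original body is not reproduced in this snapshot; what
	 * follows is an illustrative sketch only, using the i386 register
	 * conventions the comments above use.  TD_SP comes from assym.s
	 * and PCPU() from asmacros.h.  The real routine additionally saves
	 * the npx/FP state and the PCB (%cr3 etc.), omitted here.
	 */
	movl	4(%esp),%eax		/* %eax = next thread (argument) */
	movl	PCPU(curthread),%ecx	/* %ecx = old (current) thread */
	pushl	%ebx			/* save callee-saved registers */
	pushl	%esi
	pushl	%edi
	pushl	%ebp
	pushfl				/* save eflags (interrupt state) */
	pushl	$cpu_heavy_restore	/* resume vector for the old thread */
	movl	%esp,TD_SP(%ecx)	/* park the old thread's stack */
	movl	%eax,PCPU(curthread)	/* new thread becomes curthread */
	movl	TD_SP(%eax),%esp	/* adopt the new thread's stack... */
	ret				/* ...entering via its restore function */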
/*
 * cpu_exit_switch()
 *
 *	The switch function is changed to this when a thread is going away
 *	for good.  We have to ensure that the MMU state is not cached, and
 *	we don't bother saving the existing thread state before switching.
 *
 *	At this point we are in a critical section and this cpu owns the
 *	thread's token, which serves as an interlock until the switchout is
 *	complete.
 */
ENTRY(cpu_exit_switch)
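	/*
	 * Illustrative sketch (body elided in this snapshot).  IdlePTD,
	 * TD_FLAGS and TDF_RUNNING come from the machine headers/assym.s.
	 * Loading the kernel page table first guarantees no MMU state
	 * from the dying thread remains cached.
	 */
	movl	IdlePTD,%ecx		/* switch to the kernel page table */
	movl	%ecx,%cr3		/* so the old pmap can be reclaimed */
	movl	PCPU(curthread),%ebx	/* old thread: no state worth saving */
	movl	4(%esp),%eax		/* %eax = next thread (argument) */
	andl	$~TDF_RUNNING,TD_FLAGS(%ebx) /* old thread is gone for good */
	movl	%eax,PCPU(curthread)
	movl	TD_SP(%eax),%esp	/* jump into the next thread */
	ret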
/*
 * cpu_heavy_restore()	(current thread in %eax on entry)
 *
 *	Restore the thread after an LWKT switch.  This entry is normally
 *	called via the LWKT switch restore function, which was pulled
 *	off the thread stack and jumped to.
 *
 *	This entry is only called if the thread was previously saved
 *	using cpu_heavy_switch() (the heavy weight process thread switcher),
 *	or when a new process is initially scheduled.  The first thing we
 *	do is clear the TDF_RUNNING bit in the old thread and set it in the
 *	new thread.
 *
 *	NOTE: The lwp may be in any state, not necessarily LSRUN, because
 *	a preemption switch may interrupt the process and then return via
 *	cpu_heavy_restore.
 *
 *	YYY theoretically we do not have to restore everything here, a lot
 *	of this junk can wait until we return to usermode.  But for now
 *	we restore everything.
 *
 *	YYY the PCB crap is really crap, it makes startup a bitch because
 *	we can't switch away.
 *
 *	YYY note: spl check is done in mi_switch when it splx()'s.
 */
ENTRY(cpu_heavy_restore)
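	/*
	 * Illustrative sketch, matching the cpu_heavy_switch() sketch
	 * above.  The real body (TSS, %gs, LDT, debug-register and npx
	 * handling, plus clearing TDF_RUNNING in the *old* thread) is
	 * elided in this snapshot.  %eax carries the incoming thread.
	 */
	movl	TD_PCB(%eax),%edx	/* %edx = incoming thread's PCB */
	movl	PCB_CR3(%edx),%ecx	/* install its address space; the */
	movl	%ecx,%cr3		/* real code skips redundant loads */
	orl	$TDF_RUNNING,TD_FLAGS(%eax) /* mark the new thread running */
	popfl				/* eflags saved at switch time */
	popl	%ebp			/* callee-saved registers, in the */
	popl	%edi			/* reverse of the order they were */
	popl	%esi			/* pushed by cpu_heavy_switch() */
	popl	%ebx
	ret				/* resume in lwkt_switch() */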
/*
 * savectx(pcb)
 *
 *	Update pcb, saving current processor state.
 */
ENTRY(savectx)
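	/*
	 * Illustrative sketch (body elided): snapshot the callee-saved
	 * register state into the supplied pcb.  The PCB_* offsets come
	 * from assym.s; the real routine also captures %cr3 and the
	 * npx/FP state.
	 */
	movl	4(%esp),%ecx		/* %ecx = pcb to fill in */
	movl	(%esp),%eax
	movl	%eax,PCB_EIP(%ecx)	/* resume point: our own return PC */
	movl	%ebx,PCB_EBX(%ecx)
	movl	%esp,PCB_ESP(%ecx)
	movl	%ebp,PCB_EBP(%ecx)
	movl	%esi,PCB_ESI(%ecx)
	movl	%edi,PCB_EDI(%ecx)
	ret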
/*
 * cpu_idle_restore()	(current thread in %eax on entry) (one-time execution)
 *
 *	Don't bother setting up any regs other than %ebp so backtraces
 *	don't die.  This restore function is used to bootstrap into the
 *	cpu_idle() LWKT only, after that cpu_lwkt_*() will be used for
 *	switching.
 *
 *	Clear TDF_RUNNING in the old thread only after we've cleaned up %cr3.
 *
 *	If we are an AP we have to call ap_init() before jumping to
 *	cpu_idle().  ap_init() will synchronize with the BP and finish
 *	setting up various ncpu-dependent globaldata fields.  This may
 *	happen on UP as well as SMP if we happen to be simulating multiple
 *	cpus.
 */
ENTRY(cpu_idle_restore)
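	/*
	 * Illustrative sketch (body elided).  Per the comment above: give
	 * backtraces a terminating frame, clean up %cr3, let an AP run
	 * ap_init(), then fall into cpu_idle().  Clearing TDF_RUNNING in
	 * the old thread is omitted here since the old-thread register
	 * convention is not shown in this snapshot.
	 */
	movl	IdlePTD,%ecx
	movl	$0,%ebp			/* flat frame pointer */
	pushl	$0			/* terminate the frame chain */
	movl	%ecx,%cr3		/* idle threads use the kernel pmap */
	orl	$TDF_RUNNING,TD_FLAGS(%eax)
#ifdef SMP
	cmpl	$0,PCPU(cpuid)		/* the BP is cpu 0; APs must */
	je	1f			/* synchronize via ap_init() */
	call	ap_init
1:
#endif
	sti
	jmp	cpu_idle		/* never returns */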
/*
 * cpu_kthread_restore()	(current thread in %eax on entry) (one-time execution)
 *
 *	Don't bother setting up any regs other than %ebp so backtraces
 *	don't die.  This restore function is used to bootstrap into an
 *	LWKT based kernel thread only.  cpu_lwkt_switch() will be used
 *	after this.
 *
 *	Since all of our context is on the stack we are reentrant and
 *	we can release our critical section and enable interrupts early.
 */
ENTRY(cpu_kthread_restore)
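	/*
	 * Illustrative sketch (body elided).  Assumes the era's
	 * td_pri-based critical sections and that the thread's entry
	 * function and argument were parked in its pcb at creation time;
	 * the PCB_ESI/PCB_EBX register choice below is purely
	 * illustrative.  Interrupts can be enabled early because all of
	 * our context lives on our own stack.
	 */
	sti				/* reentrant: enable interrupts early */
	movl	TD_PCB(%eax),%edx
	movl	$0,%ebp			/* flat frame so backtraces stop here */
	subl	$TDPRI_CRIT,TD_PRI(%eax) /* leave the critical section */
	pushl	PCB_EBX(%edx)		/* argument for the entry function */
	pushl	$lwkt_exit		/* returning from it exits the thread */
	jmp	*PCB_ESI(%edx)		/* enter the kthread's function */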
/*
 * cpu_lwkt_switch()
 *
 *	Standard LWKT switching function.  Only non-scratch registers are
 *	saved and we don't bother with the MMU state or anything else.
 *
 *	This function is always called while in a critical section.
 *
 *	There is a one-instruction window where curthread is the new
 *	thread but %esp still points to the old thread's stack; we are
 *	protected by a critical section so this is ok.
 *
 *	YYY BGL, SPL
 */
ENTRY(cpu_lwkt_switch)
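	/*
	 * Illustrative sketch (body elided).  Same shape as the
	 * cpu_heavy_switch() sketch, minus any MMU/FP work: save only the
	 * non-scratch registers, park %esp, and adopt the next thread.
	 */
	movl	4(%esp),%eax		/* %eax = next thread (argument) */
	movl	PCPU(curthread),%ecx	/* %ecx = old (current) thread */
	pushl	%ebx			/* non-scratch registers only */
	pushl	%esi
	pushl	%edi
	pushl	%ebp
	pushfl
	pushl	$cpu_lwkt_restore	/* resume vector */
	movl	%esp,TD_SP(%ecx)
	movl	%eax,PCPU(curthread)	/* the one-instruction window noted */
	movl	TD_SP(%eax),%esp	/* above sits between these two */
	ret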
/*
 * cpu_lwkt_restore()	(current thread in %eax on entry)
 *
 *	Standard LWKT restore function.  This function is always called
 *	while in a critical section.
 *
 *	Warning: due to preemption the restore function can be used to
 *	'return' to the original thread.  Interrupt disablement must be
 *	protected through the switch so we cannot run splz here.
 */
ENTRY(cpu_lwkt_restore)
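	/*
	 * Illustrative sketch (body elided): simply unwind what the
	 * cpu_lwkt_switch() sketch pushed.  No splz() here, per the
	 * warning above; popfl alone carries the interrupt state across.
	 */
	popfl
	popl	%ebp
	popl	%edi
	popl	%esi
	popl	%ebx
	ret				/* back into lwkt_switch() */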
/*
 * bootstrap_idle()
 *
 *	Make the AP enter the idle loop.
 */
ENTRY(bootstrap_idle)
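	/*
	 * Illustrative sketch (body elided).  Assumes the AP's curthread
	 * was set to its pre-built idle thread during startup; adopting
	 * that thread's stack and returning through it runs its restore
	 * function (cpu_idle_restore), which never returns.
	 */
	movl	PCPU(curthread),%eax	/* the AP's idle thread */
	movl	TD_SP(%eax),%esp	/* adopt its saved stack... */
	ret				/* ...and enter cpu_idle_restore() */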