/*
 * $FreeBSD: src/sys/i386/i386/mplock.s,v 1.29.2.2 2000/05/16 06:58:06 dillon Exp $
 * $DragonFly: src/sys/platform/pc64/amd64/mplock.s,v 1.1 2007/09/23 04:29:31 yanyh Exp $
 * $DragonFly: src/sys/platform/pc64/amd64/mplock.s,v 1.1 2007/09/23 04:29:31 yanyh Exp $
 *
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * DragonFly MPLOCK operation
 *
 * Each thread has an MP lock count, td_mpcount, and there is a shared
 * global called mp_lock.  mp_lock is the physical MP lock and contains
 * either -1 or the cpuid of the cpu owning the lock.  The count is *NOT*
 * integrated into mp_lock but instead resides in each thread's td_mpcount.
 * (A C-level sketch of this arrangement follows this header comment.)
 *
 * When obtaining or releasing the MP lock the td_mpcount is PREDISPOSED
 * to the desired count *PRIOR* to operating on the mp_lock itself.  MP
 * lock operations can occur outside a critical section with interrupts
 * enabled, with the proviso (which the routines below handle) that an
 * interrupt may come along and preempt us, racing our cmpxchgl instruction
 * to perform the operation we have requested by pre-disposing td_mpcount.
 *
 * Additionally, the LWKT threading system manages the MP lock and
 * lwkt_switch(), in particular, may be called after pre-disposing td_mpcount
 * to handle 'blocking' on the MP lock.
 *
 * Recoded from the FreeBSD original:
 * ----------------------------------------------------------------------------
 * "THE BEER-WARE LICENSE" (Revision 42):
 * <phk@FreeBSD.org> wrote this file.  As long as you retain this notice you
 * can do whatever you want with this stuff. If we meet some day, and you think
 * this stuff is worth it, you can buy me a beer in return.  Poul-Henning Kamp
 * ----------------------------------------------------------------------------
 */
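
/*
 * For orientation only: a rough C-level sketch of the state described in
 * the header above.  The declarations are illustrative (the real
 * definitions live in the machine and LWKT headers), and the helper names
 * used in later sketches are ours, not a kernel API.
 *
 *      volatile int    mp_lock;        // -1 == free, else owning cpuid
 *      // per-thread:  td_mpcount      // hold count, kept outside mp_lock
 *
 * The rule restated: td_mpcount is always set to its post-operation value
 * first, and only then is mp_lock itself manipulated with a locked cmpxchg.
 */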

#include <machine/asmacros.h>
#if 0
#include <machine_base/apic/apicreg.h>
#endif

#include "assym.s"

/*
 * YYY Debugging only.  Define this to be paranoid about invalidating the
 * TLB when we get giant.
 */
#undef PARANOID_INVLTLB

        .data
        ALIGN_DATA
#ifdef SMP
        .globl  mp_lock
mp_lock:
        .long   -1                      /* initialized to not held */
#endif

        .text
        SUPERALIGN_TEXT

/*
 * Note on cmpxchgl... exchanges ecx with mem if mem matches eax.
 * Z=1 (jz) on success.  A lock prefix is required for MP.
 * (The instruction's semantics are modelled in C below.)
 */
NON_GPROF_ENTRY(cpu_get_initial_mplock)
        movl    PCPU(curthread),%ecx
        movl    $1,TD_MPCOUNT(%ecx)     /* curthread has mpcount of 1 */
        movl    $0,mp_lock              /* owned by cpu 0 */
        NON_GPROF_RET
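
/*
 * Illustrative model of the locked cmpxchgl used throughout this file.
 * This is a sketch in C, not real kernel code: the hardware performs the
 * whole operation atomically under the 'lock' prefix, and on failure the
 * old memory value is also loaded back into %eax.
 *
 *      static int                              // 1 on success (ZF set)
 *      cmpxchg_int(volatile int *mem, int expect, int newval)
 *      {
 *              if (*mem == expect) {           // done atomically by hardware
 *                      *mem = newval;
 *                      return (1);
 *              }
 *              return (0);
 *      }
 */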

/*
 * cpu_try_mplock() returns non-zero on success, 0 on failure.  It
 * only adjusts mp_lock, it does not touch td_mpcount.  Callers
 * should always increment td_mpcount *before* trying to acquire
 * the actual lock, predisposing td_mpcount to the desired state of
 * the lock.
 *
 * NOTE!  Only call cpu_try_mplock() inside a critical section.  If
 *        you don't, an interrupt can come along and get and release
 *        the lock before our cmpxchgl instruction, causing us to fail
 *        but resulting in the lock being held by our cpu.  (See the
 *        usage sketch after this routine.)
 */
NON_GPROF_ENTRY(cpu_try_mplock)
        movl    PCPU(cpuid),%ecx
        movl    $-1,%eax
        lock cmpxchgl %ecx,mp_lock      /* ecx<->mem if eax matches */
        jnz     1f
#ifdef PARANOID_INVLTLB
        movl    %cr3,%eax; movl %eax,%cr3 /* YYY check and remove */
#endif
        movl    $1,%eax
        NON_GPROF_RET
1:
        subl    %eax,%eax
        NON_GPROF_RET
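
/*
 * Usage sketch for cpu_try_mplock(), in rough C.  Illustrative only:
 * crit_enter()/crit_exit() are the real critical-section primitives, but
 * 'td' and the error handling are simplified.  Because interrupt
 * processing is deferred inside the critical section, a plain decrement
 * is enough to undo the predisposition on failure.
 *
 *      crit_enter();
 *      ++td->td_mpcount;               // predispose before touching mp_lock
 *      if (cpu_try_mplock() == 0)
 *              --td->td_mpcount;       // failed, back the count out
 *      crit_exit();
 */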

/*
 * get_mplock() Obtains the MP lock and may switch away if it cannot
 * get it.  This routine may be called WITHOUT a critical section
 * and with cpu interrupts enabled.
 *
 * To handle races in a sane fashion we predispose TD_MPCOUNT,
 * which prevents us from losing the lock in a race if we already
 * have it or happen to get it.  It also means that we might get
 * the lock in an interrupt race before we have a chance to execute
 * our cmpxchgl instruction, so we have to handle that case.
 * Fortunately simply calling lwkt_switch() handles the situation
 * for us and also 'blocks' us until the MP lock can be obtained.
 * (A C-level sketch of this path follows the routine.)
 */
NON_GPROF_ENTRY(get_mplock)
        movl    PCPU(cpuid),%ecx
        movl    PCPU(curthread),%edx
        incl    TD_MPCOUNT(%edx)        /* predispose */
        cmpl    %ecx,mp_lock
        jne     1f
        NON_GPROF_RET                   /* success! */

        /*
         * We don't already own the mp_lock, use cmpxchgl to try to get
         * it.
         */
1:
        movl    $-1,%eax
        lock cmpxchgl %ecx,mp_lock
        jnz     2f
        NON_GPROF_RET                   /* success */

        /*
         * Failure, but we could end up owning mp_lock anyway due to
         * an interrupt race.  lwkt_switch() will clean up the mess
         * and 'block' until the mp_lock is obtained.
         *
         * Create a stack frame for the call so KTR logs the stack
         * backtrace properly.
         */
2:
        pushl   %ebp
        movl    %esp,%ebp
        call    lwkt_mp_lock_contested
        popl    %ebp
#ifdef INVARIANTS
        movl    PCPU(cpuid),%eax        /* failure */
        cmpl    %eax,mp_lock
        jne     4f
#endif
        NON_GPROF_RET
#ifdef INVARIANTS
4:
        cmpl    $0,panicstr             /* don't double panic */
        je      badmp_get2
        NON_GPROF_RET
#endif
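
/*
 * The acquire path above, in rough C (illustrative only; cmpxchg_int()
 * is the hypothetical helper modelled earlier in this file, and 'gd'/'td'
 * stand for the per-cpu globaldata and the current thread):
 *
 *      ++td->td_mpcount;                       // predispose
 *      if (mp_lock != gd->gd_cpuid &&          // fast path: already ours?
 *          cmpxchg_int(&mp_lock, -1, gd->gd_cpuid) == 0)
 *              lwkt_mp_lock_contested();       // cleans up, 'blocks' until owned
 */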

/*
 * try_mplock() attempts to obtain the MP lock.  1 is returned on
 * success, 0 on failure.  We do not have to be in a critical section
 * and interrupts are almost certainly enabled.
 *
 * We must pre-dispose TD_MPCOUNT in order to deal with races in
 * a reasonable way.
 */
NON_GPROF_ENTRY(try_mplock)
        movl    PCPU(cpuid),%ecx
        movl    PCPU(curthread),%edx
        incl    TD_MPCOUNT(%edx)        /* pre-dispose for race */
        cmpl    %ecx,mp_lock
        je      1f                      /* trivial success */
        movl    $-1,%eax
        lock cmpxchgl %ecx,mp_lock
        jnz     2f
        /*
         * Success
         */
#ifdef PARANOID_INVLTLB
        movl    %cr3,%eax; movl %eax,%cr3 /* YYY check and remove */
#endif
1:
        movl    $1,%eax                 /* success (cmpxchgl good!) */
        NON_GPROF_RET

        /*
         * The cmpxchgl failed but we might have raced.  Undo the mess by
         * pre-disposing TD_MPCOUNT and then checking.  If TD_MPCOUNT is
         * still non-zero we don't care what state the lock is in (since
         * we obviously didn't own it above), just return failure even if
         * we won the lock in an interrupt race.  If TD_MPCOUNT is zero
         * make sure we don't own the lock in case we did win it in a race.
         */
2:
        decl    TD_MPCOUNT(%edx)
        cmpl    $0,TD_MPCOUNT(%edx)
        jne     3f
        movl    PCPU(cpuid),%eax
        movl    $-1,%ecx
        lock cmpxchgl %ecx,mp_lock
3:
        subl    %eax,%eax
        NON_GPROF_RET
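
/*
 * The failure/undo path above, in rough C (illustrative only, using the
 * hypothetical cmpxchg_int() helper sketched earlier):
 *
 *      if (--td->td_mpcount != 0)
 *              return (0);     // count still non-zero, lock state irrelevant
 *      // count hit zero: release the lock if an interrupt race won it for us
 *      cmpxchg_int(&mp_lock, gd->gd_cpuid, -1);
 *      return (0);
 */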

/*
 * rel_mplock() releases a previously obtained MP lock.
 *
 * In order to release the MP lock we pre-dispose TD_MPCOUNT for
 * the release and basically repeat the release portion of try_mplock
 * above.
 */
NON_GPROF_ENTRY(rel_mplock)
        movl    PCPU(curthread),%edx
        movl    TD_MPCOUNT(%edx),%eax
#ifdef INVARIANTS
        cmpl    $0,%eax
        je      badmp_rel
#endif
        subl    $1,%eax
        movl    %eax,TD_MPCOUNT(%edx)
        cmpl    $0,%eax
        jne     3f
        movl    PCPU(cpuid),%eax
        movl    $-1,%ecx
        lock cmpxchgl %ecx,mp_lock
3:
        NON_GPROF_RET
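
/*
 * Release, in rough C (illustrative only): the owner field is only given
 * back once the per-thread hold count drops to zero.
 *
 *      if (--td->td_mpcount == 0)
 *              cmpxchg_int(&mp_lock, gd->gd_cpuid, -1);   // -1 == free
 */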

#ifdef INVARIANTS

badmp_get:
        pushl   $bmpsw1
        call    panic
badmp_get2:
        pushl   $bmpsw1a
        call    panic
badmp_rel:
        pushl   $bmpsw2
        call    panic

        .data

bmpsw1:
        .asciz  "try/get_mplock(): already have lock! %d %p"

bmpsw1a:
        .asciz  "try/get_mplock(): failed on count or switch %d %p"

bmpsw2:
        .asciz  "rel_mplock(): mpcount already 0 @ %p %p %p %p %p %p %p %p!"

#endif