/*
 * $FreeBSD: src/sys/i386/i386/mplock.s,v 1.29.2.2 2000/05/16 06:58:06 dillon Exp $
 * $DragonFly: src/lib/libcaps/i386/mplock.S,v 1.2 2003/12/07 04:21:54 dillon Exp $
 *
 * Copyright (c) 2003 Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * DragonFly MPLOCK operation
 *
 * Each thread has an MP lock count, td_mpcount, and there is a shared
 * global called mp_lock.  mp_lock is the physical MP lock and contains either
 * -1 or the cpuid of the cpu owning the lock.  The count is *NOT* integrated
 * into mp_lock but instead resides in each thread's td_mpcount.
 *
 * When obtaining or releasing the MP lock the td_mpcount is PREDISPOSED
 * to the desired count *PRIOR* to operating on the mp_lock itself.  MP
 * lock operations can occur outside a critical section with interrupts
 * enabled, with the proviso (which the routines below handle) that an
 * interrupt may come along and preempt us, racing our cmpxchgl instruction
 * to perform the operation we have requested by predisposing td_mpcount.
 *
 * Additionally, the LWKT threading system manages the MP lock and
 * lwkt_switch(), in particular, may be called after predisposing td_mpcount
 * to handle 'blocking' on the MP lock.  (An illustrative C sketch of this
 * protocol follows this header comment.)
 *
 * Recoded from the FreeBSD original:
 * ----------------------------------------------------------------------------
 * "THE BEER-WARE LICENSE" (Revision 42):
 * <phk@FreeBSD.org> wrote this file.  As long as you retain this notice you
 * can do whatever you want with this stuff.  If we meet some day, and you think
 * this stuff is worth it, you can buy me a beer in return.  Poul-Henning Kamp
 * ----------------------------------------------------------------------------
 */
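/*
 * Illustrative C-style model of the protocol described above.  This is a
 * sketch for the reader only; nothing in this comment is assembled, and
 * mp_op_sketch() is a made-up name used purely for illustration.
 *
 *	#include <stdatomic.h>
 *
 *	extern _Atomic int mp_lock;		// -1 == not held, else owning cpuid
 *
 *	struct thread {
 *		int	td_mpcount;		// this thread's MP lock count
 *	};
 *
 *	void
 *	mp_op_sketch(struct thread *td, int delta)	// delta = +1 get, -1 rel
 *	{
 *		td->td_mpcount += delta;	// (1) predispose the count first
 *		// (2) only now cmpxchg on mp_lock itself.  An interrupt that
 *		//     preempts us here sees a count that already reflects our
 *		//     intent and may finish (or undo) the mp_lock side for us,
 *		//     which the routines below are written to tolerate.
 *	}
 */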
	.data
	.globl	mp_lock
mp_lock:
	.long	-1			/* initialized to not held */

	.text
/*
 * Note on cmpxchgl... if the memory operand matches %eax, %ecx is stored
 * to it and Z=1 (jz on success); otherwise the memory operand is loaded
 * into %eax and Z=0.  A lock prefix is required for MP.  (A C analogue
 * follows the routine below.)
 */
NON_GPROF_ENTRY(cpu_get_initial_mplock)
	movl	PCPU(curthread),%ecx
	movl	$1,TD_MPCOUNT(%ecx)	/* curthread has mpcount of 1 */
	movl	$0,mp_lock		/* owned by cpu 0 */
	NON_GPROF_RET
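/*
 * For reference, a C11 analogue of the `lock cmpxchgl %ecx,mem` pattern used
 * throughout this file.  A sketch only, not part of the build;
 * cmpxchg_sketch() is a made-up helper name.
 *
 *	#include <stdatomic.h>
 *	#include <stdbool.h>
 *
 *	// 'expected' plays the role of %eax and 'desired' the role of %ecx.
 *	// The boolean result corresponds to the Z flag tested with jz/jnz.
 *	// On failure 'expected' is updated to the current value of *mem,
 *	// just as the instruction loads the memory operand back into %eax.
 *	static bool
 *	cmpxchg_sketch(_Atomic int *mem, int *expected, int desired)
 *	{
 *		return atomic_compare_exchange_strong(mem, expected, desired);
 *	}
 */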
/*
 * cpu_try_mplock() returns non-zero on success, 0 on failure.  It
 * only adjusts mp_lock, it does not touch td_mpcount.  Callers
 * should always increment td_mpcount *before* trying to acquire
 * the actual lock, predisposing td_mpcount to the desired state of
 * the lock.
 *
 * NOTE! Only call cpu_try_mplock() inside a critical section.  If
 * you don't, an interrupt can come along and get and release
 * the lock before our cmpxchgl instruction, causing us to fail
 * but resulting in the lock being held by our cpu.  (A sketch of the
 * calling convention follows the routine below.)
 */
NON_GPROF_ENTRY(cpu_try_mplock)
	movl	PCPU(cpuid),%ecx	/* value we want mp_lock to hold */
	movl	$-1,%eax		/* expect the lock to be free */
	lock cmpxchgl %ecx,mp_lock	/* ecx<->mem if eax matches */
#ifdef PARANOID_INVLTLB
	movl	%cr3,%eax; movl %eax,%cr3 /* YYY check and remove */
#endif
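/*
 * A C sketch of the calling convention described above.  The
 * crit_enter()/crit_exit() pair and example_try_get() are illustrative
 * assumptions, not declarations exported by this file; mp_lock and
 * struct thread are as in the sketch near the top of the file.
 *
 *	extern int cpu_try_mplock(void);	// non-zero on success
 *
 *	int
 *	example_try_get(struct thread *td)
 *	{
 *		crit_enter();			// interrupts must not race us here
 *		++td->td_mpcount;		// predispose *before* the attempt
 *		if (td->td_mpcount == 1 && cpu_try_mplock() == 0) {
 *			--td->td_mpcount;	// back out: we neither held nor won it
 *			crit_exit();
 *			return (0);
 *		}
 *		crit_exit();
 *		return (1);			// lock held, count already accounts for it
 *	}
 */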
/*
 * get_mplock()	Obtains the MP lock and may switch away if it cannot
 *		get it.  This routine may be called WITHOUT a critical section
 *		and with cpu interrupts enabled.
 *
 *		To handle races in a sane fashion we predispose TD_MPCOUNT,
 *		which prevents us from losing the lock in a race if we already
 *		have it or happen to get it.  It also means that we might get
 *		the lock in an interrupt race before we have a chance to execute
 *		our cmpxchgl instruction, so we have to handle that case.
 *		Fortunately simply calling lwkt_switch() handles the situation
 *		for us and also 'blocks' us until the MP lock can be obtained.
 */
NON_GPROF_ENTRY(get_mplock)
	movl	PCPU(cpuid),%ecx
	movl	PCPU(curthread),%edx
	incl	TD_MPCOUNT(%edx)	/* predispose */
/*
 * We don't already own the mp_lock, use cmpxchgl to try to get
 * it.
 */
	movl	$-1,%eax		/* expect the lock to be free */
	lock cmpxchgl %ecx,mp_lock
#ifdef PARANOID_INVLTLB
	movl	%cr3,%eax; movl %eax,%cr3 /* YYY check and remove */
#endif
/*
 * Failure, but we could end up owning mp_lock anyway due to
 * an interrupt race.  lwkt_switch() will clean up the mess
 * and 'block' until the mp_lock is obtained.
 */
	movl	PCPU(cpuid),%eax	/* failure */
	cmpl	$0,panicstr		/* don't double panic */
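/*
 * A C-style rendering of the get_mplock() flow described above; a sketch
 * only, using mp_lock and struct thread as declared in the sketch near the
 * top of the file.  The my_cpuid parameter stands in for PCPU(cpuid).
 *
 *	extern void lwkt_switch(void);
 *
 *	void
 *	get_mplock_sketch(struct thread *td, int my_cpuid)
 *	{
 *		++td->td_mpcount;			// predispose
 *		if (atomic_load(&mp_lock) == my_cpuid)
 *			return;				// already ours (or won in a race)
 *		int unheld = -1;
 *		if (atomic_compare_exchange_strong(&mp_lock, &unheld, my_cpuid))
 *			return;				// acquired outright
 *		lwkt_switch();				// 'blocks' until the lock is ours
 *	}
 */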
/*
 * try_mplock() attempts to obtain the MP lock.  1 is returned on
 * success, 0 on failure.  We do not have to be in a critical section
 * and interrupts are almost certainly enabled.
 *
 * We must pre-dispose TD_MPCOUNT in order to deal with races in
 * a sane fashion.
 */
NON_GPROF_ENTRY(try_mplock)
	movl	PCPU(cpuid),%ecx
	movl	PCPU(curthread),%edx
	incl	TD_MPCOUNT(%edx)	/* pre-dispose for race */
	cmpl	%ecx,mp_lock		/* do we already own it? */
	je	1f			/* trivial success */
	movl	$-1,%eax		/* expect the lock to be free */
	lock cmpxchgl %ecx,mp_lock
	jnz	2f			/* failed, handle the race below */
#ifdef PARANOID_INVLTLB
	movl	%cr3,%eax; movl %eax,%cr3 /* YYY check and remove */
#endif
1:
	movl	$1,%eax			/* success (cmpxchgl good!) */
	NON_GPROF_RET
/*
 * The cmpxchgl failed but we might have raced.  Undo the mess by
 * pre-disposing TD_MPCOUNT and then checking.  If TD_MPCOUNT is
 * still non-zero we don't care what state the lock is in (since
 * we obviously didn't own it above), just return failure even if
 * we won the lock in an interrupt race.  If TD_MPCOUNT is zero
 * make sure we don't own the lock in case we did win it in a race.
 */
2:
	decl	TD_MPCOUNT(%edx)
	cmpl	$0,TD_MPCOUNT(%edx)
	jne	3f			/* count still non-zero, just fail */
	movl	PCPU(cpuid),%eax
	movl	$-1,%ecx		/* release the lock if we own it */
	lock cmpxchgl %ecx,mp_lock
3:
	subl	%eax,%eax		/* return failure */
	NON_GPROF_RET
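/*
 * C-style rendering of the logic described in the comments above; a sketch
 * only, and the details of the assembly may differ.  Declarations are as in
 * the sketch near the top of the file; my_cpuid stands in for PCPU(cpuid).
 *
 *	int
 *	try_mplock_sketch(struct thread *td, int my_cpuid)
 *	{
 *		++td->td_mpcount;			// pre-dispose for the race
 *		if (atomic_load(&mp_lock) == my_cpuid)
 *			return (1);			// trivial success
 *		int unheld = -1;
 *		if (atomic_compare_exchange_strong(&mp_lock, &unheld, my_cpuid))
 *			return (1);			// success
 *		--td->td_mpcount;			// undo the predisposition
 *		if (td->td_mpcount == 0) {
 *			// do not keep a lock won in an interrupt race
 *			int me = my_cpuid;
 *			atomic_compare_exchange_strong(&mp_lock, &me, -1);
 *		}
 *		return (0);				// failure either way
 *	}
 */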
/*
 * rel_mplock() releases a previously obtained MP lock.
 *
 * In order to release the MP lock we pre-dispose TD_MPCOUNT for
 * the release and basically repeat the release portion of try_mplock().
 */
NON_GPROF_ENTRY(rel_mplock)
	movl	PCPU(curthread),%edx
	movl	TD_MPCOUNT(%edx),%eax
	subl	$1,%eax			/* predispose the count for the release */
	movl	%eax,TD_MPCOUNT(%edx)
	jnz	1f			/* still held recursively, all done */
	movl	PCPU(cpuid),%eax	/* final release: free the real lock */
	movl	$-1,%ecx
	lock cmpxchgl %ecx,mp_lock
1:
	NON_GPROF_RET
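/*
 * C-style sketch of the release path described above (declarations as in
 * the sketch near the top of the file; my_cpuid stands in for PCPU(cpuid)):
 *
 *	void
 *	rel_mplock_sketch(struct thread *td, int my_cpuid)
 *	{
 *		// (the real routine panics if td_mpcount is already 0)
 *		if (--td->td_mpcount == 0) {	// predispose the count first,
 *			int me = my_cpuid;	// then free the physical lock
 *			atomic_compare_exchange_strong(&mp_lock, &me, -1);
 *		}
 *	}
 */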
	.asciz	"try/get_mplock(): already have lock! %d %p"
	.asciz	"try/get_mplock(): failed on count or switch %d %p"
	.asciz	"rel_mplock(): mpcount already 0 @ %p %p %p %p %p %p %p %p!"