/*
 * $FreeBSD: src/sys/i386/i386/mplock.s,v 1.29.2.2 2000/05/16 06:58:06 dillon Exp $
 * $DragonFly: src/sys/platform/vkernel/i386/mplock.s,v 1.2 2007/07/01 02:51:43 dillon Exp $
 *
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * DragonFly MPLOCK operation
 *
 * Each thread has an MP lock count, td_mpcount, and there is a shared
 * global called mp_lock.  mp_lock is the physical MP lock and contains either
 * -1 or the cpuid of the cpu owning the lock.  The count is *NOT* integrated
 * into mp_lock but instead resides in each thread's td_mpcount.
 *
 * When obtaining or releasing the MP lock the td_mpcount is PREDISPOSED
 * to the desired count *PRIOR* to operating on the mp_lock itself.  MP
 * lock operations can occur outside a critical section with interrupts
 * enabled, with the proviso (which the routines below handle) that an
 * interrupt may come along and preempt us, racing our cmpxchgl instruction
 * to perform the operation we have requested by pre-disposing td_mpcount.
 *
 * Additionally, the LWKT threading system manages the MP lock and
 * lwkt_switch(), in particular, may be called after pre-disposing td_mpcount
 * to handle 'blocking' on the MP lock.
 *
 * Recoded from the FreeBSD original:
 * ----------------------------------------------------------------------------
 * "THE BEER-WARE LICENSE" (Revision 42):
 * <phk@FreeBSD.org> wrote this file.  As long as you retain this notice you
 * can do whatever you want with this stuff. If we meet some day, and you think
 * this stuff is worth it, you can buy me a beer in return.   Poul-Henning Kamp
 * ----------------------------------------------------------------------------
 */
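
/*
 * In rough C terms the state manipulated by the routines below looks
 * like the following sketch (approximate only; the field names stand in
 * for the real thread/globaldata layout generated into assym.s):
 *
 *	int mp_lock;			owning cpuid, or -1 if unheld
 *	struct thread {
 *		int td_mpcount;		per-thread nesting count
 *		...
 *	};
 *
 * A cpu owns the lock when mp_lock equals its cpuid; the nesting depth
 * lives entirely in the current thread's td_mpcount.
 */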
#include <machine/asmacros.h>
#if 0
#include <machine_base/apic/apicreg.h>
#endif

#include "assym.s"
/*
 * YYY Debugging only.  Define this to be paranoid about invalidating the
 * TLB when we get giant.
 */
#undef PARANOID_INVLTLB
	.data
	ALIGN_DATA
#ifdef SMP
	.globl	mp_lock
mp_lock:
	.long	-1			/* initialized to not held */
#endif

	.text
	SUPERALIGN_TEXT
/*
 * Note on cmpxchgl... exchanges ecx with mem if mem matches eax.
 * Z=1 (jz) on success.  A lock prefix is required for MP.
 */
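
/*
 * Approximately, in C-like pseudocode (a sketch of the instruction's
 * effect, not code that is assembled):
 *
 *	if (mp_lock == %eax) {
 *		mp_lock = %ecx;		ZF = 1   (jz/je taken)
 *	} else {
 *		%eax = mp_lock;		ZF = 0   (jnz/jne taken)
 *	}
 *
 * The compare-and-exchange is atomic when the lock prefix is used.
 */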
NON_GPROF_ENTRY(cpu_get_initial_mplock)
	movl	PCPU(curthread),%ecx
	movl	$1,TD_MPCOUNT(%ecx)	/* curthread has mpcount of 1 */
	movl	$0,mp_lock		/* owned by cpu 0 */
	NON_GPROF_RET
/*
 * cpu_try_mplock() returns non-zero on success, 0 on failure.  It
 * only adjusts mp_lock, it does not touch td_mpcount.  Callers
 * should always increment td_mpcount *before* trying to acquire
 * the actual lock, predisposing td_mpcount to the desired state of
 * the lock.
 *
 * NOTE!  Only call cpu_try_mplock() inside a critical section.  If
 * you don't, an interrupt can come along and get and release
 * the lock before our cmpxchgl instruction, causing us to fail
 * but resulting in the lock being held by our cpu.
 */
NON_GPROF_ENTRY(cpu_try_mplock)
	movl	PCPU(cpuid),%ecx
	movl	$-1,%eax
	lock cmpxchgl %ecx,mp_lock	/* ecx<->mem if eax matches */
	jnz	1f
#ifdef PARANOID_INVLTLB
	movl	%cr3,%eax; movl %eax,%cr3 /* YYY check and remove */
#endif
	movl	$1,%eax
	NON_GPROF_RET
1:
	subl	%eax,%eax
	NON_GPROF_RET
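
/*
 * Rough C equivalent of cpu_try_mplock(), as a sketch only; the
 * atomic_cmpset_int() call stands in for the lock cmpxchgl above and
 * mycpu->gd_cpuid for PCPU(cpuid):
 *
 *	int
 *	cpu_try_mplock(void)
 *	{
 *		return (atomic_cmpset_int(&mp_lock, -1, mycpu->gd_cpuid));
 *	}
 */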
/*
 * get_mplock() Obtains the MP lock and may switch away if it cannot
 * get it.  This routine may be called WITHOUT a critical section
 * and with cpu interrupts enabled.
 *
 * To handle races in a sane fashion we predispose TD_MPCOUNT,
 * which prevents us from losing the lock in a race if we already
 * have it or happen to get it.  It also means that we might get
 * the lock in an interrupt race before we have a chance to execute
 * our cmpxchgl instruction, so we have to handle that case.
 * Fortunately simply calling lwkt_switch() handles the situation
 * for us and also 'blocks' us until the MP lock can be obtained.
 */
NON_GPROF_ENTRY(get_mplock)
	movl	PCPU(cpuid),%ecx
	movl	PCPU(curthread),%edx
	incl	TD_MPCOUNT(%edx)	/* predispose */
	cmpl	%ecx,mp_lock
	jne	1f
	NON_GPROF_RET			/* success! */
/*
 * We don't already own the mp_lock, use cmpxchgl to try to get
 * it.
 */
1:
	movl	$-1,%eax
	lock cmpxchgl %ecx,mp_lock
	jnz	2f
	NON_GPROF_RET			/* success */
/*
 * Failure, but we could end up owning mp_lock anyway due to
 * an interrupt race.  lwkt_switch() will clean up the mess
 * and 'block' until the mp_lock is obtained.
 *
 * Create a stack frame for the call so KTR logs the stack
 * backtrace properly.
 */
2:
	pushl	%ebp
	movl	%esp,%ebp
	call	lwkt_mp_lock_contested
	popl	%ebp
#ifdef INVARIANTS
	movl	PCPU(cpuid),%eax	/* failure */
	cmpl	%eax,mp_lock
	jne	4f
#endif
	NON_GPROF_RET
#ifdef INVARIANTS
4:
	cmpl	$0,panicstr		/* don't double panic */
	je	badmp_get2
	NON_GPROF_RET
#endif
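
/*
 * Rough C equivalent of get_mplock(), as a sketch only (INVARIANTS
 * checks omitted; atomic_cmpset_int() stands in for the lock cmpxchgl):
 *
 *	void
 *	get_mplock(void)
 *	{
 *		thread_t td = curthread;
 *
 *		++td->td_mpcount;	(predispose)
 *		if (mp_lock != mycpu->gd_cpuid &&
 *		    atomic_cmpset_int(&mp_lock, -1, mycpu->gd_cpuid) == 0)
 *			lwkt_mp_lock_contested();	(may block/switch)
 *	}
 */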
/*
 * try_mplock() attempts to obtain the MP lock.  1 is returned on
 * success, 0 on failure.  We do not have to be in a critical section
 * and interrupts are almost certainly enabled.
 *
 * We must pre-dispose TD_MPCOUNT in order to deal with races in
 * a reasonable way.
 */
NON_GPROF_ENTRY(try_mplock)
	movl	PCPU(cpuid),%ecx
	movl	PCPU(curthread),%edx
	incl	TD_MPCOUNT(%edx)	/* pre-dispose for race */
	cmpl	%ecx,mp_lock
	je	1f			/* trivial success */
	movl	$-1,%eax
	lock cmpxchgl %ecx,mp_lock
	jnz	2f
/*
 * Success
 */
#ifdef PARANOID_INVLTLB
	movl	%cr3,%eax; movl %eax,%cr3 /* YYY check and remove */
#endif
1:
	movl	$1,%eax			/* success (cmpxchgl good!) */
	NON_GPROF_RET
/*
 * The cmpxchgl failed but we might have raced.  Undo the mess by
 * pre-disposing TD_MPCOUNT and then checking.  If TD_MPCOUNT is
 * still non-zero we don't care what state the lock is in (since
 * we obviously didn't own it above), just return failure even if
 * we won the lock in an interrupt race.  If TD_MPCOUNT is zero
 * make sure we don't own the lock in case we did win it in a race.
 */
2:
	decl	TD_MPCOUNT(%edx)
	cmpl	$0,TD_MPCOUNT(%edx)
	jne	3f
	movl	PCPU(cpuid),%eax
	movl	$-1,%ecx
	lock cmpxchgl %ecx,mp_lock
3:
	subl	%eax,%eax
	NON_GPROF_RET
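
/*
 * Rough C equivalent of try_mplock(), as a sketch only; it mirrors the
 * predispose / undo-on-failure sequence above (atomic_cmpset_int()
 * stands in for the lock cmpxchgl):
 *
 *	int
 *	try_mplock(void)
 *	{
 *		thread_t td = curthread;
 *		int cpuid = mycpu->gd_cpuid;
 *
 *		++td->td_mpcount;	(predispose)
 *		if (mp_lock == cpuid || atomic_cmpset_int(&mp_lock, -1, cpuid))
 *			return(1);
 *		(undo the predisposition; release the lock only if an
 *		 interrupt race handed it to us while the count is zero)
 *		if (--td->td_mpcount == 0)
 *			atomic_cmpset_int(&mp_lock, cpuid, -1);
 *		return(0);
 *	}
 */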
/*
 * rel_mplock() releases a previously obtained MP lock.
 *
 * In order to release the MP lock we pre-dispose TD_MPCOUNT for
 * the release and basically repeat the release portion of try_mplock
 * above.
 */
NON_GPROF_ENTRY(rel_mplock)
	movl	PCPU(curthread),%edx
	movl	TD_MPCOUNT(%edx),%eax
#ifdef INVARIANTS
	cmpl	$0,%eax
	je	badmp_rel
#endif
	subl	$1,%eax
	movl	%eax,TD_MPCOUNT(%edx)
	cmpl	$0,%eax
	jne	3f
	movl	PCPU(cpuid),%eax
	movl	$-1,%ecx
	lock cmpxchgl %ecx,mp_lock
3:
	NON_GPROF_RET
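
/*
 * Rough C equivalent of rel_mplock(), as a sketch only (INVARIANTS
 * mpcount check omitted; atomic_cmpset_int() stands in for the lock
 * cmpxchgl):
 *
 *	void
 *	rel_mplock(void)
 *	{
 *		thread_t td = curthread;
 *
 *		if (--td->td_mpcount == 0)
 *			atomic_cmpset_int(&mp_lock, mycpu->gd_cpuid, -1);
 *	}
 */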
#ifdef INVARIANTS

badmp_get:
	pushl	$bmpsw1
	call	panic
badmp_get2:
	pushl	$bmpsw1a
	call	panic
badmp_rel:
	pushl	$bmpsw2
	call	panic

	.data

bmpsw1:
	.asciz	"try/get_mplock(): already have lock! %d %p"

bmpsw1a:
	.asciz	"try/get_mplock(): failed on count or switch %d %p"

bmpsw2:
	.asciz	"rel_mplock(): mpcount already 0 @ %p %p %p %p %p %p %p %p!"

#endif