kernel - pmap->pm_spin now uses a shared spinlock
[dragonfly.git] / sys / sys / spinlock2.h
blob bcad3cd882981ae570c2d8ad06a633e7d84477f7
/*
 * Copyright (c) 2005 Jeffrey M. Hsu.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#ifndef _SYS_SPINLOCK2_H_
#define _SYS_SPINLOCK2_H_

#ifndef _KERNEL

#error "This file should not be included by userland programs."

#else

#ifndef _SYS_SYSTM_H_
#include <sys/systm.h>
#endif
#ifndef _SYS_THREAD2_H_
#include <sys/thread2.h>
#endif
#ifndef _SYS_GLOBALDATA_H_
#include <sys/globaldata.h>
#endif
#include <machine/atomic.h>
#include <machine/cpufunc.h>
extern struct spinlock pmap_spin;

int spin_trylock_contested(struct spinlock *spin);
void _spin_lock_contested(struct spinlock *spin, const char *ident, int count);
void _spin_lock_shared_contested(struct spinlock *spin, const char *ident);
#define spin_lock(spin)			_spin_lock(spin, __func__)
#define spin_lock_quick(gd, spin)	_spin_lock_quick(gd, spin, __func__)
#define spin_lock_shared(spin)		_spin_lock_shared(spin, __func__)
#define spin_lock_shared_quick(gd, spin) _spin_lock_shared_quick(gd, spin, __func__)
/*
 * Attempt to obtain an exclusive spinlock.  Returns FALSE on failure,
 * TRUE on success.
 */
static __inline boolean_t
spin_trylock(struct spinlock *spin)
{
	globaldata_t gd = mycpu;

	crit_enter_raw(gd->gd_curthread);
	++gd->gd_spinlocks;
	cpu_ccfence();
	if (atomic_cmpset_int(&spin->counta, 0, 1) == 0)
		return (spin_trylock_contested(spin));
#ifdef DEBUG_LOCKS
	int i;
	for (i = 0; i < SPINLOCK_DEBUG_ARRAY_SIZE; i++) {
		if (gd->gd_curthread->td_spinlock_stack_id[i] == 0) {
			gd->gd_curthread->td_spinlock_stack_id[i] = 1;
			gd->gd_curthread->td_spinlock_stack[i] = spin;
			gd->gd_curthread->td_spinlock_caller_pc[i] =
				__builtin_return_address(0);
			break;
		}
	}
#endif
	return (TRUE);
}
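
/*
 * Usage sketch (illustrative addition, not part of the original header):
 * a caller that cannot afford to spin may use spin_trylock() and fall
 * back to other work on contention.  'example_spin' and
 * example_try_update() are hypothetical names; the lock is assumed to
 * have been spin_init()ed elsewhere.
 */
#if 0
static struct spinlock example_spin;

static void
example_try_update(int *counter)
{
	if (spin_trylock(&example_spin)) {
		++*counter;			/* short critical section */
		spin_unlock(&example_spin);
	} else {
		/* contended: defer the update instead of spinning */
	}
}
#endif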
/*
 * Return TRUE if the spinlock is held (we can't tell by whom, though)
 */
static __inline int
spin_held(struct spinlock *spin)
{
	return((spin->counta & ~SPINLOCK_SHARED) != 0);
}
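
/*
 * Illustrative use of spin_held() (not part of the original header): since
 * the holder cannot be identified, it is mainly useful for assertions.
 * example_assert_locked() is a hypothetical name.
 */
#if 0
static void
example_assert_locked(struct spinlock *spin)
{
	KKASSERT(spin_held(spin));
}
#endif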
/*
 * Obtain an exclusive spinlock and return.  It is possible for the
 * SPINLOCK_SHARED bit to already be set, in which case the contested
 * code is called to fix it up.
 */
static __inline void
_spin_lock_quick(globaldata_t gd, struct spinlock *spin, const char *ident)
{
	int count;

	crit_enter_raw(gd->gd_curthread);
	++gd->gd_spinlocks;
	cpu_ccfence();

	count = atomic_fetchadd_int(&spin->counta, 1);
	if (__predict_false(count != 0)) {
		_spin_lock_contested(spin, ident, count);
	}
#ifdef DEBUG_LOCKS
	int i;
	for (i = 0; i < SPINLOCK_DEBUG_ARRAY_SIZE; i++) {
		if (gd->gd_curthread->td_spinlock_stack_id[i] == 0) {
			gd->gd_curthread->td_spinlock_stack_id[i] = 1;
			gd->gd_curthread->td_spinlock_stack[i] = spin;
			gd->gd_curthread->td_spinlock_caller_pc[i] =
				__builtin_return_address(0);
			break;
		}
	}
#endif
}

static __inline void
_spin_lock(struct spinlock *spin, const char *ident)
{
	_spin_lock_quick(mycpu, spin, ident);
}
/*
 * Release an exclusive spinlock.  We can just do this passively, only
 * ensuring that our spinlock count is left intact until the mutex is
 * cleared.
 */
static __inline void
spin_unlock_quick(globaldata_t gd, struct spinlock *spin)
{
#ifdef DEBUG_LOCKS
	int i;
	for (i = 0; i < SPINLOCK_DEBUG_ARRAY_SIZE; i++) {
		if ((gd->gd_curthread->td_spinlock_stack_id[i] == 1) &&
		    (gd->gd_curthread->td_spinlock_stack[i] == spin)) {
			gd->gd_curthread->td_spinlock_stack_id[i] = 0;
			gd->gd_curthread->td_spinlock_stack[i] = NULL;
			gd->gd_curthread->td_spinlock_caller_pc[i] = NULL;
			break;
		}
	}
#endif
	/*
	 * Don't use a locked instruction here.  To reduce latency we avoid
	 * reading spin->counta prior to writing to it.
	 */
#ifdef DEBUG_LOCKS
	KKASSERT(spin->counta != 0);
#endif
	cpu_sfence();
	atomic_add_int(&spin->counta, -1);
	cpu_sfence();
#ifdef DEBUG_LOCKS
	KKASSERT(gd->gd_spinlocks > 0);
#endif
	cpu_ccfence();
	--gd->gd_spinlocks;
	crit_exit_raw(gd->gd_curthread);
}
static __inline void
spin_unlock(struct spinlock *spin)
{
	spin_unlock_quick(mycpu, spin);
}
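
/*
 * Usage sketch (illustrative addition, not part of the original header):
 * the normal exclusive pattern brackets a short, non-blocking critical
 * section with spin_lock()/spin_unlock().  The spinlock holds the caller
 * inside a critical section, so the protected code must not block.
 * 'struct example_stats' and example_stats_bump() are hypothetical.
 */
#if 0
struct example_stats {
	struct spinlock	es_spin;
	long		es_count;
};

static void
example_stats_bump(struct example_stats *st)
{
	spin_lock(&st->es_spin);
	++st->es_count;			/* short critical section */
	spin_unlock(&st->es_spin);
}
#endif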
/*
 * Shared spinlock.  Acquire a count, if SPINLOCK_SHARED is not already
 * set then try a trivial conversion and drop into the contested code if
 * the trivial conversion fails.  The SHARED bit is 'cached' when lock
 * counts go to 0 so the critical path is typically just the fetchadd.
 *
 * WARNING! Due to the way exclusive conflict resolution works, we cannot
 *	    just unconditionally set the SHARED bit on previous-count == 0.
 *	    Doing so will interfere with the exclusive contended code.
 */
static __inline void
_spin_lock_shared_quick(globaldata_t gd, struct spinlock *spin,
			const char *ident)
{
	int counta;

	crit_enter_raw(gd->gd_curthread);
	++gd->gd_spinlocks;
	cpu_ccfence();

	counta = atomic_fetchadd_int(&spin->counta, 1);
	if (__predict_false((counta & SPINLOCK_SHARED) == 0)) {
		if (counta != 0 ||
		    !atomic_cmpset_int(&spin->counta, 1, SPINLOCK_SHARED | 1)) {
			_spin_lock_shared_contested(spin, ident);
		}
	}
#ifdef DEBUG_LOCKS
	int i;
	for (i = 0; i < SPINLOCK_DEBUG_ARRAY_SIZE; i++) {
		if (gd->gd_curthread->td_spinlock_stack_id[i] == 0) {
			gd->gd_curthread->td_spinlock_stack_id[i] = 1;
			gd->gd_curthread->td_spinlock_stack[i] = spin;
			gd->gd_curthread->td_spinlock_caller_pc[i] =
				__builtin_return_address(0);
			break;
		}
	}
#endif
}
/*
 * Unlock a shared lock.  For convenience we allow the last transition
 * to be to (SPINLOCK_SHARED|0), leaving the SPINLOCK_SHARED bit set
 * with a count of 0, which will optimize the next shared lock obtained.
 *
 * WARNING! In order to implement shared and exclusive spinlocks, an
 *	    exclusive request will convert a multiply-held shared lock
 *	    to exclusive and wait for shared holders to unlock.  So keep
 *	    in mind that as of now the spinlock could actually be in an
 *	    exclusive state.
 */
static __inline void
spin_unlock_shared_quick(globaldata_t gd, struct spinlock *spin)
{
#ifdef DEBUG_LOCKS
	int i;
	for (i = 0; i < SPINLOCK_DEBUG_ARRAY_SIZE; i++) {
		if ((gd->gd_curthread->td_spinlock_stack_id[i] == 1) &&
		    (gd->gd_curthread->td_spinlock_stack[i] == spin)) {
			gd->gd_curthread->td_spinlock_stack_id[i] = 0;
			gd->gd_curthread->td_spinlock_stack[i] = NULL;
			gd->gd_curthread->td_spinlock_caller_pc[i] = NULL;
			break;
		}
	}
#endif
#ifdef DEBUG_LOCKS
	KKASSERT(spin->counta != 0);
#endif
	cpu_sfence();
	atomic_add_int(&spin->counta, -1);

#ifdef DEBUG_LOCKS
	KKASSERT(gd->gd_spinlocks > 0);
#endif
	cpu_ccfence();
	--gd->gd_spinlocks;
	crit_exit_raw(gd->gd_curthread);
}
static __inline void
_spin_lock_shared(struct spinlock *spin, const char *ident)
{
	_spin_lock_shared_quick(mycpu, spin, ident);
}

static __inline void
spin_unlock_shared(struct spinlock *spin)
{
	spin_unlock_shared_quick(mycpu, spin);
}
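
/*
 * Usage sketch (illustrative addition, not part of the original header):
 * readers take the shared form so they can run concurrently; writers use
 * the exclusive form shown earlier.  'struct example_stats' is the
 * hypothetical structure from the previous sketch.
 */
#if 0
static long
example_stats_read(struct example_stats *st)
{
	long v;

	spin_lock_shared(&st->es_spin);		/* concurrent readers ok */
	v = st->es_count;
	spin_unlock_shared(&st->es_spin);

	return (v);
}
#endif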
/*
 * Attempt to upgrade a shared spinlock to exclusive.  Return non-zero
 * on success, 0 on failure.
 */
static __inline int
spin_lock_upgrade_try(struct spinlock *spin)
{
	if (atomic_cmpset_int(&spin->counta, SPINLOCK_SHARED|1, 1))
		return 1;
	else
		return 0;
}
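
/*
 * Usage sketch (illustrative addition, not part of the original header):
 * the upgrade can only succeed while the caller is the sole shared holder.
 * On success the lock is exclusive and is released with spin_unlock();
 * on failure the shared lock is still held and the caller must back out
 * and reacquire exclusively.  example_stats_reset() and
 * 'struct example_stats' are hypothetical.
 */
#if 0
static void
example_stats_reset(struct example_stats *st)
{
	spin_lock_shared(&st->es_spin);
	if (spin_lock_upgrade_try(&st->es_spin)) {
		st->es_count = 0;		/* now held exclusively */
		spin_unlock(&st->es_spin);
	} else {
		spin_unlock_shared(&st->es_spin);
		spin_lock(&st->es_spin);
		st->es_count = 0;
		spin_unlock(&st->es_spin);
	}
}
#endif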
static __inline void
spin_init(struct spinlock *spin, const char *descr __unused)
{
	spin->counta = 0;
	spin->countb = 0;
#if 0
	spin->descr  = descr;
#endif
}

static __inline void
spin_uninit(struct spinlock *spin)
{
	/* unused */
}
#endif	/* _KERNEL */

#endif	/* _SYS_SPINLOCK2_H_ */