kernel - Rename spinlock counter trick API
sys/sys/spinlock2.h (dragonfly.git)
/*
 * Copyright (c) 2005 Jeffrey M. Hsu.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#ifndef _SYS_SPINLOCK2_H_
#define _SYS_SPINLOCK2_H_

#ifndef _KERNEL
#error "This file should not be included by userland programs."
#endif

#ifndef _SYS_SYSTM_H_
#include <sys/systm.h>
#endif
#ifndef _SYS_THREAD2_H_
#include <sys/thread2.h>
#endif
#ifndef _SYS_GLOBALDATA_H_
#include <sys/globaldata.h>
#endif
#include <machine/atomic.h>
#include <machine/cpufunc.h>
extern struct spinlock pmap_spin;

int	spin_trylock_contested(struct spinlock *spin);
void	_spin_lock_contested(struct spinlock *spin, const char *ident, int count);
void	_spin_lock_shared_contested(struct spinlock *spin, const char *ident);

#define spin_lock(spin)			_spin_lock(spin, __func__)
#define spin_lock_quick(spin)		_spin_lock_quick(spin, __func__)
#define spin_lock_shared(spin)		_spin_lock_shared(spin, __func__)
#define spin_lock_shared_quick(spin)	_spin_lock_shared_quick(spin, __func__)
/*
 * Attempt to obtain an exclusive spinlock.  Returns FALSE on failure,
 * TRUE on success.
 */
static __inline boolean_t
spin_trylock(struct spinlock *spin)
{
	globaldata_t gd = mycpu;

	crit_enter_raw(gd->gd_curthread);
	++gd->gd_spinlocks;
	cpu_ccfence();
	if (atomic_cmpset_int(&spin->lock, 0, 1) == 0)
		return (spin_trylock_contested(spin));
#ifdef DEBUG_LOCKS
	int i;
	for (i = 0; i < SPINLOCK_DEBUG_ARRAY_SIZE; i++) {
		if (gd->gd_curthread->td_spinlock_stack_id[i] == 0) {
			gd->gd_curthread->td_spinlock_stack_id[i] = 1;
			gd->gd_curthread->td_spinlock_stack[i] = spin;
			gd->gd_curthread->td_spinlock_caller_pc[i] =
				__builtin_return_address(0);
			break;
		}
	}
#endif
	return (TRUE);
}
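
/*
 * Illustrative usage sketch for spin_trylock() (hypothetical names, not
 * part of this header's API).  On failure the caller simply skips the
 * update instead of spinning:
 *
 *	struct my_softc {
 *		struct spinlock	sc_spin;
 *		int		sc_count;
 *	};
 *
 *	static int
 *	my_try_bump(struct my_softc *sc)
 *	{
 *		if (spin_trylock(&sc->sc_spin) == FALSE)
 *			return (0);
 *		++sc->sc_count;
 *		spin_unlock(&sc->sc_spin);
 *		return (1);
 *	}
 */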
/*
 * Return TRUE if the spinlock is held (we can't tell by whom, though).
 */
static __inline int
spin_held(struct spinlock *spin)
{
	return((spin->lock & ~SPINLOCK_SHARED) != 0);
}
/*
 * Obtain an exclusive spinlock and return.  It is possible for the
 * SPINLOCK_SHARED bit to already be set, in which case the contested
 * code is called to fix it up.
 */
static __inline void
_spin_lock_quick(globaldata_t gd, struct spinlock *spin, const char *ident)
{
	int count;

	crit_enter_raw(gd->gd_curthread);
	++gd->gd_spinlocks;
	cpu_ccfence();

	count = atomic_fetchadd_int(&spin->lock, 1);
	if (__predict_false(count != 0)) {
		_spin_lock_contested(spin, ident, count);
	}
#ifdef DEBUG_LOCKS
	int i;
	for (i = 0; i < SPINLOCK_DEBUG_ARRAY_SIZE; i++) {
		if (gd->gd_curthread->td_spinlock_stack_id[i] == 0) {
			gd->gd_curthread->td_spinlock_stack_id[i] = 1;
			gd->gd_curthread->td_spinlock_stack[i] = spin;
			gd->gd_curthread->td_spinlock_caller_pc[i] =
				__builtin_return_address(0);
			break;
		}
	}
#endif
}
static __inline void
_spin_lock(struct spinlock *spin, const char *ident)
{
	_spin_lock_quick(mycpu, spin, ident);
}
/*
 * Release an exclusive spinlock.  We can just do this passively, only
 * ensuring that our spinlock count is left intact until the mutex is
 * cleared.
 *
 * NOTE: Actually works for shared OR exclusive spinlocks.  spin_unlock_any()
 *	 assumes this too.
 */
static __inline void
spin_unlock_quick(globaldata_t gd, struct spinlock *spin)
{
#ifdef DEBUG_LOCKS
	int i;
	for (i = 0; i < SPINLOCK_DEBUG_ARRAY_SIZE; i++) {
		if ((gd->gd_curthread->td_spinlock_stack_id[i] == 1) &&
		    (gd->gd_curthread->td_spinlock_stack[i] == spin)) {
			gd->gd_curthread->td_spinlock_stack_id[i] = 0;
			gd->gd_curthread->td_spinlock_stack[i] = NULL;
			gd->gd_curthread->td_spinlock_caller_pc[i] = NULL;
			break;
		}
	}
#endif
	/*
	 * Don't use a locked instruction here.  To reduce latency we avoid
	 * reading spin->lock prior to writing to it.
	 */
#ifdef DEBUG_LOCKS
	KKASSERT(spin->lock != 0);
#endif
	cpu_sfence();
	atomic_add_int(&spin->lock, -1);
	cpu_sfence();
#ifdef DEBUG_LOCKS
	KKASSERT(gd->gd_spinlocks > 0);
#endif
	cpu_ccfence();
	--gd->gd_spinlocks;
	crit_exit_quick(gd->gd_curthread);
}
static __inline void
spin_unlock(struct spinlock *spin)
{
	spin_unlock_quick(mycpu, spin);
}

static __inline void
spin_unlock_any(struct spinlock *spin)
{
	spin_unlock_quick(mycpu, spin);
}
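
/*
 * Illustrative usage sketch for the exclusive API (hypothetical names,
 * not part of this header; spin_init() is defined further below):
 *
 *	struct my_softc {
 *		struct spinlock	sc_spin;
 *		int		sc_count;
 *	};
 *
 *	static void
 *	my_init(struct my_softc *sc)
 *	{
 *		spin_init(&sc->sc_spin, "mysoftc");
 *	}
 *
 *	static void
 *	my_bump(struct my_softc *sc)
 *	{
 *		spin_lock(&sc->sc_spin);
 *		++sc->sc_count;
 *		spin_unlock(&sc->sc_spin);
 *	}
 */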
/*
 * Shared spinlock.  Acquire a count, and if SPINLOCK_SHARED is not already
 * set then try a trivial conversion, dropping into the contested code if
 * the trivial conversion fails.  The SHARED bit is 'cached' when lock
 * counts go to 0 so the critical path is typically just the fetchadd.
 *
 * WARNING! Due to the way exclusive conflict resolution works, we cannot
 *	    just unconditionally set the SHARED bit on previous-count == 0.
 *	    Doing so will interfere with the exclusive contended code.
 */
static __inline void
_spin_lock_shared_quick(globaldata_t gd, struct spinlock *spin,
			const char *ident)
{
	int lock;

	crit_enter_raw(gd->gd_curthread);
	++gd->gd_spinlocks;
	cpu_ccfence();

	lock = atomic_fetchadd_int(&spin->lock, 1);
	if (__predict_false((lock & SPINLOCK_SHARED) == 0)) {
		if (lock != 0 ||
		    !atomic_cmpset_int(&spin->lock, 1, SPINLOCK_SHARED | 1)) {
			_spin_lock_shared_contested(spin, ident);
		}
	}
#ifdef DEBUG_LOCKS
	int i;
	for (i = 0; i < SPINLOCK_DEBUG_ARRAY_SIZE; i++) {
		if (gd->gd_curthread->td_spinlock_stack_id[i] == 0) {
			gd->gd_curthread->td_spinlock_stack_id[i] = 1;
			gd->gd_curthread->td_spinlock_stack[i] = spin;
			gd->gd_curthread->td_spinlock_caller_pc[i] =
				__builtin_return_address(0);
			break;
		}
	}
#endif
}
/*
 * Unlock a shared lock.  For convenience we allow the last transition
 * to be to (SPINLOCK_SHARED|0), leaving the SPINLOCK_SHARED bit set
 * with a count of 0, which will optimize the next shared lock obtained.
 *
 * WARNING! In order to implement shared and exclusive spinlocks, an
 *	    exclusive request will convert a multiply-held shared lock
 *	    to exclusive and wait for shared holders to unlock.  So keep
 *	    in mind that as of now the spinlock could actually be in an
 *	    exclusive state.
 */
static __inline void
spin_unlock_shared_quick(globaldata_t gd, struct spinlock *spin)
{
#ifdef DEBUG_LOCKS
	int i;
	for (i = 0; i < SPINLOCK_DEBUG_ARRAY_SIZE; i++) {
		if ((gd->gd_curthread->td_spinlock_stack_id[i] == 1) &&
		    (gd->gd_curthread->td_spinlock_stack[i] == spin)) {
			gd->gd_curthread->td_spinlock_stack_id[i] = 0;
			gd->gd_curthread->td_spinlock_stack[i] = NULL;
			gd->gd_curthread->td_spinlock_caller_pc[i] = NULL;
			break;
		}
	}
#endif
#ifdef DEBUG_LOCKS
	KKASSERT(spin->lock != 0);
#endif
	cpu_sfence();
	atomic_add_int(&spin->lock, -1);
#ifdef DEBUG_LOCKS
	KKASSERT(gd->gd_spinlocks > 0);
#endif
	cpu_ccfence();
	--gd->gd_spinlocks;
	crit_exit_quick(gd->gd_curthread);
}
static __inline void
_spin_lock_shared(struct spinlock *spin, const char *ident)
{
	_spin_lock_shared_quick(mycpu, spin, ident);
}

static __inline void
spin_unlock_shared(struct spinlock *spin)
{
	spin_unlock_shared_quick(mycpu, spin);
}
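
/*
 * Illustrative usage sketch for the shared API (hypothetical names, not
 * part of this header).  Readers take the lock shared, the writer takes
 * it exclusively:
 *
 *	static int
 *	my_read(struct my_softc *sc)
 *	{
 *		int v;
 *
 *		spin_lock_shared(&sc->sc_spin);
 *		v = sc->sc_count;
 *		spin_unlock_shared(&sc->sc_spin);
 *		return (v);
 *	}
 *
 *	static void
 *	my_write(struct my_softc *sc, int v)
 *	{
 *		spin_lock(&sc->sc_spin);
 *		sc->sc_count = v;
 *		spin_unlock(&sc->sc_spin);
 *	}
 */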
/*
 * Attempt to upgrade a shared spinlock to exclusive.  Return non-zero
 * on success, 0 on failure.
 */
static __inline int
spin_lock_upgrade_try(struct spinlock *spin)
{
	if (atomic_cmpset_int(&spin->lock, SPINLOCK_SHARED|1, 1))
		return 1;
	else
		return 0;
}
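
/*
 * Illustrative usage sketch (hypothetical names, not part of this header).
 * The upgrade only succeeds when the caller is the sole shared holder; on
 * failure the caller still holds its shared lock and can fall back to a
 * full relock:
 *
 *	spin_lock_shared(&sc->sc_spin);
 *	if (spin_lock_upgrade_try(&sc->sc_spin)) {
 *		sc->sc_count = 0;
 *		spin_unlock(&sc->sc_spin);
 *	} else {
 *		spin_unlock_shared(&sc->sc_spin);
 *		spin_lock(&sc->sc_spin);
 *		sc->sc_count = 0;
 *		spin_unlock(&sc->sc_spin);
 *	}
 */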
static __inline void
spin_init(struct spinlock *spin, const char *descr __unused)
{
	spin->lock = 0;
	spin->update = 0;
#if 0
	spin->descr = descr;
#endif
}

static __inline void
spin_uninit(struct spinlock *spin)
{
	/* unused */
}
/*
 * SMP friendly update counter support.  Allows protected structures to
 * be accessed and retried without dirtying the cache line.  Retries if
 * modified, gains a shared spin-lock if a modification is underway.
 *
 * The returned value from spin_access_start() must be passed into
 * spin_access_end().
 */
static __inline int
spin_access_start(struct spinlock *spin)
{
	int v;

	v = *(volatile int *)&spin->update;
	cpu_lfence();
	if (__predict_false(v & 1))
		spin_lock_shared(spin);
	return v;
}
static __inline int
spin_access_end(struct spinlock *spin, int v)
{
	if (__predict_false(v & 1)) {
		spin_unlock_shared(spin);
		return 0;
	}
	cpu_lfence();
	return(*(volatile int *)&spin->update != v);
}
static __inline void
spin_lock_update(struct spinlock *spin)
{
	spin_lock(spin);
	atomic_add_int_nonlocked(&spin->update, 1);
	cpu_sfence();
	KKASSERT_UNSPIN((spin->update & 1), spin);
}

static __inline void
spin_unlock_update(struct spinlock *spin)
{
	cpu_sfence();
	atomic_add_int_nonlocked(&spin->update, 1);
	KKASSERT_UNSPIN(((spin->update & 1) == 0), spin);
	spin_unlock(spin);
}
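
/*
 * Illustrative usage sketch (hypothetical names, not part of this header).
 * Readers retry on an update-counter mismatch without dirtying the lock's
 * cache line; the writer brackets its modification with spin_lock_update()
 * and spin_unlock_update():
 *
 *	static int
 *	my_read_count(struct my_softc *sc)
 *	{
 *		int v;
 *		int count;
 *
 *		do {
 *			v = spin_access_start(&sc->sc_spin);
 *			count = sc->sc_count;
 *		} while (spin_access_end(&sc->sc_spin, v));
 *		return (count);
 *	}
 *
 *	static void
 *	my_write_count(struct my_softc *sc, int count)
 *	{
 *		spin_lock_update(&sc->sc_spin);
 *		sc->sc_count = count;
 *		spin_unlock_update(&sc->sc_spin);
 *	}
 */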
/*
 * API that doesn't integrate the acquisition of the spin-lock.
 */
static __inline int
spin_access_start_only(struct spinlock *spin)
{
	int v;

	v = *(volatile int *)&spin->update;
	cpu_lfence();

	return v;
}
static __inline int
spin_access_check_inprog(int v)
{
	return (v & 1);
}
static __inline int
spin_access_end_only(struct spinlock *spin, int v)
{
	cpu_lfence();
	return(*(volatile int *)&spin->update != v);
}
static __inline void
spin_lock_update_only(struct spinlock *spin)
{
	atomic_add_int_nonlocked(&spin->update, 1);
	cpu_sfence();
	KKASSERT(spin->update & 1);
}

static __inline void
spin_unlock_update_only(struct spinlock *spin)
{
	cpu_sfence();
	atomic_add_int_nonlocked(&spin->update, 1);
	KKASSERT((spin->update & 1) == 0);
}
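
/*
 * Illustrative usage sketch for the '_only' variants (hypothetical names,
 * not part of this header).  The writer is assumed to already hold a
 * caller-managed lock on the structure and only bumps the update counter;
 * readers that observe an update in progress fall back to that lock, shown
 * here as the embedded spinlock taken shared purely as an example:
 *
 *	static void
 *	my_write_locked(struct my_softc *sc, int count)
 *	{
 *		spin_lock(&sc->sc_spin);
 *		spin_lock_update_only(&sc->sc_spin);
 *		sc->sc_count = count;
 *		spin_unlock_update_only(&sc->sc_spin);
 *		spin_unlock(&sc->sc_spin);
 *	}
 *
 *	static int
 *	my_read_count_only(struct my_softc *sc)
 *	{
 *		int v;
 *		int count;
 *
 *		do {
 *			v = spin_access_start_only(&sc->sc_spin);
 *			if (spin_access_check_inprog(v)) {
 *				spin_lock_shared(&sc->sc_spin);
 *				count = sc->sc_count;
 *				spin_unlock_shared(&sc->sc_spin);
 *				break;
 *			}
 *			count = sc->sc_count;
 *		} while (spin_access_end_only(&sc->sc_spin, v));
 *		return (count);
 *	}
 */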
#endif	/* _SYS_SPINLOCK2_H_ */