/*
 * Copyright (c) 2005 Jeffrey M. Hsu.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _SYS_SPINLOCK2_H_
#define _SYS_SPINLOCK2_H_

#ifndef _KERNEL

#error "This file should not be included by userland programs."

#else

#ifndef _SYS_SYSTM_H_
#include <sys/systm.h>
#endif
#ifndef _SYS_THREAD2_H_
#include <sys/thread2.h>
#endif
#ifndef _SYS_GLOBALDATA_H_
#include <sys/globaldata.h>
#endif
#include <machine/atomic.h>
#include <machine/cpufunc.h>

extern struct spinlock pmap_spin;

int spin_trylock_contested(struct spinlock *spin);
void _spin_lock_contested(struct spinlock *spin, const char *ident, int count);
void _spin_lock_shared_contested(struct spinlock *spin, const char *ident,
			int count);
void _spin_pool_lock(void *chan, const char *ident);
void _spin_pool_unlock(void *chan);

#define spin_lock(spin)			_spin_lock(spin, __func__)
#define spin_lock_quick(gd, spin)	_spin_lock_quick(gd, spin, __func__)
#define spin_lock_shared(spin)		_spin_lock_shared(spin, __func__)
#define spin_lock_shared_quick(gd, spin) _spin_lock_shared_quick(gd, spin, __func__)
#define spin_pool_lock(chan)		_spin_pool_lock(chan, __func__)
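
/*
 * Illustrative note (not part of the original header): the wrapper macros
 * pass the calling function's name as the lock's "ident", so a hypothetical
 * call such as
 *
 *	spin_lock(&foo_spin);		(written inside foo_doit())
 *
 * expands to _spin_lock(&foo_spin, "foo_doit"), giving the contested path a
 * human-readable identifier.  foo_spin and foo_doit() are assumptions for
 * this sketch, not names from the source.
 */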

/*
 * Attempt to obtain an exclusive spinlock.  Returns FALSE on failure,
 * TRUE on success.
 */
static __inline boolean_t
spin_trylock(struct spinlock *spin)
{
	globaldata_t gd = mycpu;

	++gd->gd_curthread->td_critcount;
	cpu_ccfence();
	++gd->gd_spinlocks;
	if (atomic_cmpset_int(&spin->counta, 0, 1) == 0)
		return (spin_trylock_contested(spin));
#ifdef DEBUG_LOCKS
	int i;
	for (i = 0; i < SPINLOCK_DEBUG_ARRAY_SIZE; i++) {
		if (gd->gd_curthread->td_spinlock_stack_id[i] == 0) {
			gd->gd_curthread->td_spinlock_stack_id[i] = 1;
			gd->gd_curthread->td_spinlock_stack[i] = spin;
			gd->gd_curthread->td_spinlock_caller_pc[i] =
				__builtin_return_address(0);
			break;
		}
	}
#endif
	return (TRUE);
}
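
/*
 * Illustrative sketch (not part of the original header): spin_trylock()
 * either acquires the exclusive lock and returns TRUE, or returns FALSE,
 * so the caller unlocks only on success.  foo_spin is a hypothetical
 * spinlock owned by the caller.
 *
 *	if (spin_trylock(&foo_spin)) {
 *		... short, non-blocking critical section ...
 *		spin_unlock(&foo_spin);
 *	} else {
 *		... contested; retry or take a fallback path ...
 *	}
 */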

/*
 * Return TRUE if the spinlock is held (we can't tell by whom, though)
 */
static __inline int
spin_held(struct spinlock *spin)
{
	return((spin->counta & ~SPINLOCK_SHARED) != 0);
}

/*
 * Obtain an exclusive spinlock and return.  It is possible for the
 * SPINLOCK_SHARED bit to already be set, in which case the contested
 * code is called to fix it up.
 */
static __inline void
_spin_lock_quick(globaldata_t gd, struct spinlock *spin, const char *ident)
{
	int count;

	++gd->gd_curthread->td_critcount;
	cpu_ccfence();
	++gd->gd_spinlocks;
	if ((count = atomic_fetchadd_int(&spin->counta, 1)) != 0)
		_spin_lock_contested(spin, ident, count + 1);
#ifdef DEBUG_LOCKS
	int i;
	for (i = 0; i < SPINLOCK_DEBUG_ARRAY_SIZE; i++) {
		if (gd->gd_curthread->td_spinlock_stack_id[i] == 0) {
			gd->gd_curthread->td_spinlock_stack_id[i] = 1;
			gd->gd_curthread->td_spinlock_stack[i] = spin;
			gd->gd_curthread->td_spinlock_caller_pc[i] =
				__builtin_return_address(0);
			break;
		}
	}
#endif
}

static __inline void
_spin_lock(struct spinlock *spin, const char *ident)
{
	_spin_lock_quick(mycpu, spin, ident);
}

/*
 * Release an exclusive spinlock.  We can just do this passively, only
 * ensuring that our spinlock count is left intact until the mutex is
 * cleared.
 */
static __inline void
spin_unlock_quick(globaldata_t gd, struct spinlock *spin)
{
#ifdef DEBUG_LOCKS
	int i;
	for (i = 0; i < SPINLOCK_DEBUG_ARRAY_SIZE; i++) {
		if ((gd->gd_curthread->td_spinlock_stack_id[i] == 1) &&
		    (gd->gd_curthread->td_spinlock_stack[i] == spin)) {
			gd->gd_curthread->td_spinlock_stack_id[i] = 0;
			gd->gd_curthread->td_spinlock_stack[i] = NULL;
			gd->gd_curthread->td_spinlock_caller_pc[i] = NULL;
			break;
		}
	}
#endif
	/*
	 * Don't use a locked instruction here.  To reduce latency we avoid
	 * reading spin->counta prior to writing to it.
	 */
#ifdef DEBUG_LOCKS
	KKASSERT(spin->counta != 0);
#endif
	cpu_sfence();
	atomic_add_int(&spin->counta, -1);
	cpu_sfence();
#ifdef DEBUG_LOCKS
	KKASSERT(gd->gd_spinlocks > 0);
#endif
	--gd->gd_spinlocks;
	cpu_ccfence();
	--gd->gd_curthread->td_critcount;
}

static __inline void
spin_unlock(struct spinlock *spin)
{
	spin_unlock_quick(mycpu, spin);
}
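
/*
 * Illustrative sketch (not part of the original header): exclusive
 * acquisition and release always come in pairs, and because the holder
 * runs with td_critcount elevated, the protected region must stay short
 * and must not block or sleep.  foo_spin is a hypothetical lock.
 *
 *	spin_lock(&foo_spin);
 *	... modify the data protected by foo_spin ...
 *	spin_unlock(&foo_spin);
 */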

/*
 * Shared spinlock.  Acquire a count, if SPINLOCK_SHARED is not already
 * set then set it.  The bit will already be set in the unmixed critical
 * path.
 */
static __inline void
_spin_lock_shared_quick(globaldata_t gd, struct spinlock *spin,
			const char *ident)
{
	int counta;

	++gd->gd_curthread->td_critcount;
	cpu_ccfence();
	++gd->gd_spinlocks;
	counta = atomic_fetchadd_int(&spin->counta, 1);
	if (counta == 0) {
		atomic_set_int(&spin->counta, SPINLOCK_SHARED);
	} else if ((counta & SPINLOCK_SHARED) == 0) {
		atomic_add_int(&spin->counta, -1);
		_spin_lock_shared_contested(spin, ident, counta);
	}
#ifdef DEBUG_LOCKS
	int i;
	for (i = 0; i < SPINLOCK_DEBUG_ARRAY_SIZE; i++) {
		if (gd->gd_curthread->td_spinlock_stack_id[i] == 0) {
			gd->gd_curthread->td_spinlock_stack_id[i] = 1;
			gd->gd_curthread->td_spinlock_stack[i] = spin;
			gd->gd_curthread->td_spinlock_caller_pc[i] =
				__builtin_return_address(0);
			break;
		}
	}
#endif
}

/*
 * Unlock a shared lock.  For convenience we allow the last transition
 * to be to (SPINLOCK_SHARED|0), leaving the SPINLOCK_SHARED bit set
 * with a count of 0 which will optimize the next shared lock obtained.
 */
static __inline void
spin_unlock_shared_quick(globaldata_t gd, struct spinlock *spin)
{
#ifdef DEBUG_LOCKS
	int i;
	for (i = 0; i < SPINLOCK_DEBUG_ARRAY_SIZE; i++) {
		if ((gd->gd_curthread->td_spinlock_stack_id[i] == 1) &&
		    (gd->gd_curthread->td_spinlock_stack[i] == spin)) {
			gd->gd_curthread->td_spinlock_stack_id[i] = 0;
			gd->gd_curthread->td_spinlock_stack[i] = NULL;
			gd->gd_curthread->td_spinlock_caller_pc[i] = NULL;
			break;
		}
	}
#endif
#ifdef DEBUG_LOCKS
	KKASSERT(spin->counta != 0);
#endif
	cpu_sfence();
	atomic_add_int(&spin->counta, -1);

#ifdef DEBUG_LOCKS
	KKASSERT(gd->gd_spinlocks > 0);
#endif
	--gd->gd_spinlocks;
	cpu_ccfence();
	--gd->gd_curthread->td_critcount;
}

static __inline void
_spin_lock_shared(struct spinlock *spin, const char *ident)
{
	_spin_lock_shared_quick(mycpu, spin, ident);
}

static __inline void
spin_unlock_shared(struct spinlock *spin)
{
	spin_unlock_shared_quick(mycpu, spin);
}
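
/*
 * Illustrative sketch (not part of the original header): shared
 * acquisitions can be held by several readers at once, while an exclusive
 * holder excludes everyone; pair the shared lock with the shared unlock.
 * foo_spin is a hypothetical lock.
 *
 *	spin_lock_shared(&foo_spin);
 *	... read-only access to the data protected by foo_spin ...
 *	spin_unlock_shared(&foo_spin);
 */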

static __inline void
spin_pool_unlock(void *chan)
{
	_spin_pool_unlock(chan);
}
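
/*
 * Illustrative sketch (not part of the original header), assuming the
 * usual pool-lock semantics where the channel address selects one of a
 * fixed set of shared spinlocks, so no per-object spin_init() is needed.
 * "obj" is a hypothetical pointer used only as the channel.
 *
 *	spin_pool_lock(obj);
 *	... short critical section keyed on obj ...
 *	spin_pool_unlock(obj);
 */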

static __inline void
spin_init(struct spinlock *spin, const char *descr)
{
	spin->counta = 0;
	spin->countb = 0;
	spin->descr = descr;
}
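
/*
 * Illustrative sketch (not part of the original header): a spinlock
 * embedded in a caller-owned structure is initialized once, with a
 * human-readable description, before its first use.  foo_softc, sc and
 * "foosc" are hypothetical names.
 *
 *	struct foo_softc {
 *		struct spinlock	sc_spin;
 *	} sc;
 *
 *	spin_init(&sc.sc_spin, "foosc");
 */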

static __inline void
spin_uninit(struct spinlock *spin)
{
	/* unused */
}

#endif	/* _KERNEL */
#endif	/* _SYS_SPINLOCK2_H_ */