/*
 * Copyright (c) 2005 Jeffrey M. Hsu.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _SYS_SPINLOCK2_H_
#define _SYS_SPINLOCK2_H_

#ifndef _KERNEL

#error "This file should not be included by userland programs."

#else

#ifndef _SYS_SYSTM_H_
#include <sys/systm.h>
#endif
#ifndef _SYS_THREAD2_H_
#include <sys/thread2.h>
#endif
#ifndef _SYS_GLOBALDATA_H_
#include <sys/globaldata.h>
#endif
#include <machine/atomic.h>
#include <machine/cpufunc.h>

extern struct spinlock pmap_spin;

int spin_trylock_contested(struct spinlock *spin);
void _spin_lock_contested(struct spinlock *spin, const char *ident, int count);
void _spin_lock_shared_contested(struct spinlock *spin, const char *ident,
			int count);
void _spin_pool_lock(void *chan, const char *ident);
void _spin_pool_unlock(void *chan);

#define spin_lock(spin)			_spin_lock(spin, __func__)
#define spin_lock_quick(spin)		_spin_lock_quick(spin, __func__)
#define spin_lock_shared(spin)		_spin_lock_shared(spin, __func__)
#define spin_lock_shared_quick(spin)	_spin_lock_shared_quick(spin, __func__)
#define spin_pool_lock(chan)		_spin_pool_lock(chan, __func__)

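/*
 * Usage sketch (illustrative only, not part of the original header):
 * callers bracket a short critical section with spin_lock() and
 * spin_unlock().  The structure and field names below (struct mydata,
 * md_spin, md_count) are hypothetical.
 *
 *	struct mydata {
 *		struct spinlock	md_spin;
 *		int		md_count;
 *	};
 *
 *	static void
 *	mydata_bump(struct mydata *md)
 *	{
 *		spin_lock(&md->md_spin);
 *		++md->md_count;
 *		spin_unlock(&md->md_spin);
 *	}
 */
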
/*
 * Attempt to obtain an exclusive spinlock.  Returns FALSE on failure,
 * TRUE on success.
 */
static __inline boolean_t
spin_trylock(struct spinlock *spin)
{
	globaldata_t gd = mycpu;

	++gd->gd_curthread->td_critcount;
	cpu_ccfence();
	++gd->gd_spinlocks;
	if (atomic_cmpset_int(&spin->counta, 0, 1) == 0)
		return (spin_trylock_contested(spin));
#ifdef DEBUG_LOCKS
	int i;
	for (i = 0; i < SPINLOCK_DEBUG_ARRAY_SIZE; i++) {
		if (gd->gd_curthread->td_spinlock_stack_id[i] == 0) {
			gd->gd_curthread->td_spinlock_stack_id[i] = 1;
			gd->gd_curthread->td_spinlock_stack[i] = spin;
			gd->gd_curthread->td_spinlock_caller_pc[i] =
				__builtin_return_address(0);
			break;
		}
	}
#endif
	return (TRUE);
}

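/*
 * Usage sketch (illustrative, not part of the original header): since
 * spin_trylock() returns FALSE instead of spinning, a caller that must
 * not busy-wait can fall back to other work.  md is the same
 * hypothetical structure used in the sketch above.
 *
 *	if (spin_trylock(&md->md_spin)) {
 *		++md->md_count;
 *		spin_unlock(&md->md_spin);
 *	} else {
 *		...defer the update and retry later...
 *	}
 */
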
/*
 * Return TRUE if the spinlock is held (we can't tell by whom, though)
 */
static __inline int
spin_held(struct spinlock *spin)
{
	return((spin->counta & ~SPINLOCK_SHARED) != 0);
}

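/*
 * Usage sketch (illustrative, not part of the original header):
 * spin_held() is mainly useful in assertions, e.g.
 *
 *	KKASSERT(spin_held(&md->md_spin));
 */
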
/*
 * Obtain an exclusive spinlock and return.  It is possible for the
 * SPINLOCK_SHARED bit to already be set, in which case the contested
 * code is called to fix it up.
 */
static __inline void
_spin_lock_quick(globaldata_t gd, struct spinlock *spin, const char *ident)
{
	int count;

	++gd->gd_curthread->td_critcount;
	cpu_ccfence();
	++gd->gd_spinlocks;
	if ((count = atomic_fetchadd_int(&spin->counta, 1)) != 0)
		_spin_lock_contested(spin, ident, count + 1);
#ifdef DEBUG_LOCKS
	int i;
	for (i = 0; i < SPINLOCK_DEBUG_ARRAY_SIZE; i++) {
		if (gd->gd_curthread->td_spinlock_stack_id[i] == 0) {
			gd->gd_curthread->td_spinlock_stack_id[i] = 1;
			gd->gd_curthread->td_spinlock_stack[i] = spin;
			gd->gd_curthread->td_spinlock_caller_pc[i] =
				__builtin_return_address(0);
			break;
		}
	}
#endif
}

static __inline void
_spin_lock(struct spinlock *spin, const char *ident)
{
	_spin_lock_quick(mycpu, spin, ident);
}

/*
 * Release an exclusive spinlock.  We can just do this passively, only
 * ensuring that our spinlock count is left intact until the mutex is
 * cleared.
 */
static __inline void
spin_unlock_quick(globaldata_t gd, struct spinlock *spin)
{
#ifdef DEBUG_LOCKS
	int i;
	for (i = 0; i < SPINLOCK_DEBUG_ARRAY_SIZE; i++) {
		if ((gd->gd_curthread->td_spinlock_stack_id[i] == 1) &&
		    (gd->gd_curthread->td_spinlock_stack[i] == spin)) {
			gd->gd_curthread->td_spinlock_stack_id[i] = 0;
			gd->gd_curthread->td_spinlock_stack[i] = NULL;
			gd->gd_curthread->td_spinlock_caller_pc[i] = NULL;
			break;
		}
	}
#endif
	/*
	 * Don't use a locked instruction here.  To reduce latency we avoid
	 * reading spin->counta prior to writing to it.
	 */
#ifdef DEBUG_LOCKS
	KKASSERT(spin->counta != 0);
#endif
	cpu_sfence();
	atomic_add_int(&spin->counta, -1);
	cpu_sfence();
#ifdef DEBUG_LOCKS
	KKASSERT(gd->gd_spinlocks > 0);
#endif
	--gd->gd_spinlocks;
	cpu_ccfence();
	--gd->gd_curthread->td_critcount;
}

static __inline void
spin_unlock(struct spinlock *spin)
{
	spin_unlock_quick(mycpu, spin);
}

/*
 * Shared spinlock.  Acquire a count, if SPINLOCK_SHARED is not already
 * set then set it.  The bit will already be set in the unmixed critical
 * path.
 */
static __inline void
_spin_lock_shared_quick(globaldata_t gd, struct spinlock *spin,
			const char *ident)
{
	int counta;

	++gd->gd_curthread->td_critcount;
	cpu_ccfence();
	++gd->gd_spinlocks;
	counta = atomic_fetchadd_int(&spin->counta, 1);
	if (counta == 0) {
		atomic_set_int(&spin->counta, SPINLOCK_SHARED);
	} else if ((counta & SPINLOCK_SHARED) == 0) {
		atomic_add_int(&spin->counta, -1);
		_spin_lock_shared_contested(spin, ident, counta);
	}
#ifdef DEBUG_LOCKS
	int i;
	for (i = 0; i < SPINLOCK_DEBUG_ARRAY_SIZE; i++) {
		if (gd->gd_curthread->td_spinlock_stack_id[i] == 0) {
			gd->gd_curthread->td_spinlock_stack_id[i] = 1;
			gd->gd_curthread->td_spinlock_stack[i] = spin;
			gd->gd_curthread->td_spinlock_caller_pc[i] =
				__builtin_return_address(0);
			break;
		}
	}
#endif
}

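/*
 * Illustrative summary (not part of the original header): counta encodes
 * both the SPINLOCK_SHARED bit and a hold count, so the states handled by
 * the inline paths above are roughly:
 *
 *	counta == 0			lock is free
 *	counta == 1			held exclusively
 *	counta == (SPINLOCK_SHARED | n)	held shared by n holders
 *	counta == (SPINLOCK_SHARED | 0)	free, primed for the next shared
 *					acquisition
 */
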
/*
 * Unlock a shared lock.  For convenience we allow the last transition
 * to be to (SPINLOCK_SHARED|0), leaving the SPINLOCK_SHARED bit set
 * with a count of 0, which optimizes the next shared lock obtained.
 */
static __inline void
spin_unlock_shared_quick(globaldata_t gd, struct spinlock *spin)
{
#ifdef DEBUG_LOCKS
	int i;
	for (i = 0; i < SPINLOCK_DEBUG_ARRAY_SIZE; i++) {
		if ((gd->gd_curthread->td_spinlock_stack_id[i] == 1) &&
		    (gd->gd_curthread->td_spinlock_stack[i] == spin)) {
			gd->gd_curthread->td_spinlock_stack_id[i] = 0;
			gd->gd_curthread->td_spinlock_stack[i] = NULL;
			gd->gd_curthread->td_spinlock_caller_pc[i] = NULL;
			break;
		}
	}
#endif
#ifdef DEBUG_LOCKS
	KKASSERT(spin->counta != 0);
#endif
	cpu_sfence();
	atomic_add_int(&spin->counta, -1);
	cpu_sfence();
#ifdef DEBUG_LOCKS
	KKASSERT(gd->gd_spinlocks > 0);
#endif
	--gd->gd_spinlocks;
	cpu_ccfence();
	--gd->gd_curthread->td_critcount;
}

static __inline void
_spin_lock_shared(struct spinlock *spin, const char *ident)
{
	_spin_lock_shared_quick(mycpu, spin, ident);
}

static __inline void
spin_unlock_shared(struct spinlock *spin)
{
	spin_unlock_shared_quick(mycpu, spin);
}

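/*
 * Usage sketch (illustrative, not part of the original header): readers
 * take the shared form while writers take the exclusive form; names are
 * hypothetical.
 *
 *	spin_lock_shared(&md->md_spin);
 *	value = md->md_count;		...read-only access...
 *	spin_unlock_shared(&md->md_spin);
 */
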
static __inline void
spin_pool_unlock(void *chan)
{
	_spin_pool_unlock(chan);
}

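/*
 * Usage sketch (illustrative, not part of the original header): pool
 * spinlocks are keyed by an arbitrary address, so a structure without
 * an embedded spinlock can still be locked consistently.
 *
 *	spin_pool_lock(obj);
 *	...modify *obj...
 *	spin_pool_unlock(obj);
 */
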
static __inline void
spin_init(struct spinlock *spin, const char *descr)
{
	spin->counta = 0;
	spin->countb = 0;
	spin->descr = descr;
}

static __inline void
spin_uninit(struct spinlock *spin)
{
	/* unused */
}

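/*
 * Usage sketch (illustrative, not part of the original header): a
 * spinlock must be initialized before first use, typically with a
 * human-readable description for debugging.
 *
 *	spin_init(&md->md_spin, "mydata");
 *	...
 *	spin_uninit(&md->md_spin);
 */
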
#endif	/* _KERNEL */
#endif	/* _SYS_SPINLOCK2_H_ */