/*
 * Copyright (c) 2005 Jeffrey M. Hsu.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu and Matthew Dillon.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * The implementation is designed to avoid looping when compatible operations
 * are executed.
 *
 * To acquire a spinlock we first increment counta.  Then we check if counta
 * meets our requirements.  For an exclusive spinlock it must be 1, for a
 * shared spinlock it must either be 1 or the SPINLOCK_SHARED bit must be set.
 *
 * Shared spinlock failure case: Decrement the count, loop until we can
 * transition from 0 to SPINLOCK_SHARED|1, or until we find SPINLOCK_SHARED
 * is set and increment the count.
 *
 * Exclusive spinlock failure case: While maintaining the count, clear the
 * SPINLOCK_SHARED flag unconditionally.  Then use an atomic add to transfer
 * the count from the low bits to the high bits of counta.  Then loop until
 * all low bits are 0.  Once the low bits drop to 0 we can transfer the
 * count back with an atomic_cmpset_int(), atomically, and return.
 */

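/*
 * Editor's sketch (an addition, not from the original source): assuming
 * the conventional DragonFly definitions SPINLOCK_SHARED == 0x80000000
 * and SPINLOCK_EXCLWAIT == 0x00010000, counta decomposes into a shared
 * flag, an exclusive-waiter count, and a holder count:
 *
 *	bit 31       bits 30..16         bits 15..0
 *	[ SHARED ] [ EXCLWAIT count ] [ holder count ]
 *
 * The helpers below only illustrate that layout; they are not used.
 */
#if 0
static __inline uint32_t
spin_sketch_holders(uint32_t counta)
{
	return (counta & (SPINLOCK_EXCLWAIT - 1));	/* low 16 bits */
}

static __inline uint32_t
spin_sketch_exclwaiters(uint32_t counta)
{
	return ((counta & ~SPINLOCK_SHARED) / SPINLOCK_EXCLWAIT);
}
#endif
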
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/ktr.h>
#include <machine/atomic.h>
#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/specialreg.h>
#include <machine/clock.h>
#include <sys/spinlock.h>
#include <sys/spinlock2.h>

struct spinlock pmap_spin = SPINLOCK_INITIALIZER(pmap_spin);

struct indefinite_info {
	sysclock_t	base;
	int		secs;
};

#if !defined(KTR_SPIN_CONTENTION)
#define KTR_SPIN_CONTENTION	KTR_ALL
#endif
#define SPIN_STRING	"spin=%p type=%c"
#define SPIN_ARG_SIZE	(sizeof(void *) + sizeof(int))

KTR_INFO_MASTER(spin);
KTR_INFO(KTR_SPIN_CONTENTION, spin, beg, 0, SPIN_STRING, SPIN_ARG_SIZE);
KTR_INFO(KTR_SPIN_CONTENTION, spin, end, 1, SPIN_STRING, SPIN_ARG_SIZE);

#define logspin(name, spin, type)			\
	KTR_LOG(spin_ ## name, spin, type)

#ifdef INVARIANTS
static int spin_lock_test_mode;
#endif

static int64_t spinlocks_contested1;
SYSCTL_QUAD(_debug, OID_AUTO, spinlocks_contested1, CTLFLAG_RD,
    &spinlocks_contested1, 0,
    "Spinlock contention count due to collisions with exclusive lock holders");

static int64_t spinlocks_contested2;
SYSCTL_QUAD(_debug, OID_AUTO, spinlocks_contested2, CTLFLAG_RD,
    &spinlocks_contested2, 0,
    "Serious spinlock contention count");

#ifdef DEBUG_LOCKS_LATENCY

static long spinlocks_add_latency;
SYSCTL_LONG(_debug, OID_AUTO, spinlocks_add_latency, CTLFLAG_RW,
    &spinlocks_add_latency, 0,
    "Add spinlock latency");

#endif

/*
 * We need a fairly large pool to avoid contention on large SMP systems,
 * particularly multi-chip systems.
 */
/*#define SPINLOCK_NUM_POOL	8101*/
#define SPINLOCK_NUM_POOL	8192
#define SPINLOCK_NUM_POOL_MASK	(SPINLOCK_NUM_POOL - 1)

static __cachealign struct {
	struct spinlock	spin;
	char		filler[32 - sizeof(struct spinlock)];
} pool_spinlocks[SPINLOCK_NUM_POOL];

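/*
 * Editor's note (an addition): the filler pads each entry to 32 bytes
 * so that neighboring pool locks do not share a cache line sector and
 * reintroduce the contention the pool is meant to spread out.  A
 * compile-time check of that invariant could look like this sketch:
 */
#if 0
CTASSERT(sizeof(pool_spinlocks[0]) == 32);
#endif
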
static int spin_indefinite_check(struct spinlock *spin,
			struct indefinite_info *info);

/*
 * We contested due to another exclusive lock holder.  We lose.
 *
 * We have to unwind the attempt and may acquire the spinlock
 * anyway while doing so.  countb was incremented on our behalf.
 */
int
spin_trylock_contested(struct spinlock *spin)
{
	globaldata_t gd = mycpu;

	/*++spinlocks_contested1;*/
	/*atomic_add_int(&spin->counta, -1);*/
	--gd->gd_curthread->td_critcount;
	return (FALSE);
}

/*
 * The spin_lock() inline was unable to acquire the lock.
 *
 * atomic_swap_int() is the absolute fastest spinlock instruction, at
 * least on multi-socket systems.  All instructions seem to be about
 * the same on single-socket multi-core systems.  However, atomic_swap_int()
 * does not result in an even distribution of successful acquisitions.
 *
 * UNFORTUNATELY we cannot really use atomic_swap_int() when also implementing
 * shared spin locks, so as we do a better job removing contention we've
 * moved to atomic_cmpset_int() to be able to handle multiple states.
 *
 * Another problem we have is that (at least on the 48-core opteron we test
 * with) having all 48 cores contesting the same spin lock reduces
 * performance to around 600,000 ops/sec, versus millions when fewer cores
 * are going after the same lock.
 *
 * Backoff algorithms can create even worse starvation problems, and don't
 * really improve performance when a lot of cores are contending.
 *
 * Our solution is to allow the data cache to lazy-update by reading it
 * non-atomically and only attempting to acquire the lock if the lazy read
 * looks good.  This effectively limits cache bus bandwidth.  A cpu_pause()
 * (for intel/amd anyhow) is not strictly needed as cache bus resource use
 * is governed by the lazy update.
 *
 * WARNING!!!!  Performance matters here, by a huge margin.
 *
 *	48-core test with pre-read / -j 48 no-modules kernel compile
 *	with fanned-out inactive and active queues came in at 55 seconds.
 *
 *	48-core test with pre-read / -j 48 no-modules kernel compile
 *	came in at 75 seconds.  Without pre-read it came in at 170 seconds.
 *
 *	4-core test with pre-read / -j 48 no-modules kernel compile
 *	came in at 83 seconds.  Without pre-read it came in at 83 seconds
 *	as well (no difference).
 */
void
spin_lock_contested(struct spinlock *spin)
{
	struct indefinite_info info = { 0, 0 };
	int i;

	/*
	 * Force any existing shared locks to exclusive so no new shared
	 * locks can occur.  Transfer our count to the high bits, then
	 * loop until we can acquire the low counter (== 1).
	 */
	atomic_clear_int(&spin->counta, SPINLOCK_SHARED);
	atomic_add_int(&spin->counta, SPINLOCK_EXCLWAIT - 1);

#ifdef DEBUG_LOCKS_LATENCY
	long j;
	for (j = spinlocks_add_latency; j > 0; --j)
		cpu_ccfence();
#endif
#if defined(INVARIANTS)
	if (spin_lock_test_mode > 10 &&
	    spin->countb > spin_lock_test_mode &&
	    (spin_lock_test_mode & 0xFF) == mycpu->gd_cpuid) {
		spin->countb = 0;
		print_backtrace(-1);
	}
#endif
	i = 0;

	/*logspin(beg, spin, 'w');*/
	for (;;) {
		/*
		 * If the low bits are zero, try to acquire the exclusive lock
		 * by transferring our high bit counter to the low bits.
		 *
		 * NOTE: Reading spin->counta prior to the swap is extremely
		 *	 important on multi-chip/many-core boxes.  On 48-core
		 *	 this one change improves fully concurrent all-cores
		 *	 compiles by 100% or better.
		 *
		 *	 I can't emphasize enough how important the pre-read
		 *	 is in preventing hw cache bus armageddon on
		 *	 multi-chip systems.  And on single-chip/multi-core
		 *	 systems it just doesn't hurt.
		 */
		uint32_t ovalue = spin->counta;
		cpu_ccfence();
		if ((ovalue & (SPINLOCK_EXCLWAIT - 1)) == 0 &&
		    atomic_cmpset_int(&spin->counta, ovalue,
				      (ovalue - SPINLOCK_EXCLWAIT) | 1)) {
			break;
		}
		if ((++i & 0x7F) == 0x7F) {
#if defined(INVARIANTS)
			++spin->countb;
#endif
			if (spin_indefinite_check(spin, &info))
				break;
		}
	}
	/*logspin(end, spin, 'w');*/
}

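/*
 * Editor's sketch (an addition, not from the original source): the
 * pre-read in the loop above is the same idea as the classic
 * test-and-test-and-set lock, shown generically below.  Only the inner
 * atomic op generates coherency traffic; the plain read spins in the
 * local cache until the releasing cpu invalidates the line.
 */
#if 0
static void
ttas_lock_sketch(volatile u_int *lk)
{
	for (;;) {
		while (*lk != 0)		/* test: local cache only */
			cpu_pause();
		if (atomic_cmpset_int(lk, 0, 1))
			return;			/* test-and-set: one bus op */
	}
}
#endif
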
/*
 * The spin_lock_shared() inline was unable to acquire the lock.
 */
void
spin_lock_shared_contested(struct spinlock *spin)
{
	struct indefinite_info info = { 0, 0 };
	int i;

	atomic_add_int(&spin->counta, -1);
#ifdef DEBUG_LOCKS_LATENCY
	long j;
	for (j = spinlocks_add_latency; j > 0; --j)
		cpu_ccfence();
#endif
#if defined(INVARIANTS)
	if (spin_lock_test_mode > 10 &&
	    spin->countb > spin_lock_test_mode &&
	    (spin_lock_test_mode & 0xFF) == mycpu->gd_cpuid) {
		spin->countb = 0;
		print_backtrace(-1);
	}
#endif
	i = 0;

	/*logspin(beg, spin, 'w');*/
	for (;;) {
		/*
		 * NOTE: Reading spin->counta prior to the swap is extremely
		 *	 important on multi-chip/many-core boxes.  On 48-core
		 *	 this one change improves fully concurrent all-cores
		 *	 compiles by 100% or better.
		 *
		 *	 I can't emphasize enough how important the pre-read
		 *	 is in preventing hw cache bus armageddon on
		 *	 multi-chip systems.  And on single-chip/multi-core
		 *	 systems it just doesn't hurt.
		 */
		uint32_t ovalue = spin->counta;
		cpu_ccfence();
		if (ovalue == 0) {
			if (atomic_cmpset_int(&spin->counta, 0,
					      SPINLOCK_SHARED | 1))
				break;
		} else if (ovalue & SPINLOCK_SHARED) {
			if (atomic_cmpset_int(&spin->counta, ovalue,
					      ovalue + 1))
				break;
		}
		if ((++i & 0x7F) == 0x7F) {
#if defined(INVARIANTS)
			++spin->countb;
#endif
			if (spin_indefinite_check(spin, &info))
				break;
		}
	}
	/*logspin(end, spin, 'w');*/
}

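/*
 * Editor's summary (an addition, derived from the loop above): the
 * shared path only issues an atomic op for two snapshot states:
 *
 *	snapshot (ovalue)		attempted transition
 *	-----------------		------------------------------
 *	0				0 -> SPINLOCK_SHARED | 1
 *	SPINLOCK_SHARED set		ovalue -> ovalue + 1
 *
 * Any other snapshot (an exclusive holder, or exclusive waiters with
 * the shared bit cleared) is simply re-read until it changes.
 */
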
/*
 * Pool functions (SHARED SPINLOCKS NOT SUPPORTED)
 */
static __inline int
_spin_pool_hash(void *ptr)
{
	int i;

	i = ((int)(uintptr_t)ptr >> 5) ^ ((int)(uintptr_t)ptr >> 12);
	i &= SPINLOCK_NUM_POOL_MASK;
	return (i);
}

void
_spin_pool_lock(void *chan)
{
	struct spinlock *sp;

	sp = &pool_spinlocks[_spin_pool_hash(chan)].spin;
	spin_lock(sp);
}

void
_spin_pool_unlock(void *chan)
{
	struct spinlock *sp;

	sp = &pool_spinlocks[_spin_pool_hash(chan)].spin;
	spin_unlock(sp);
}

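/*
 * Editor's usage sketch (an addition; the structure and function below
 * are hypothetical): pool spinlocks key off an arbitrary address, so a
 * subsystem can serialize on any stable pointer without embedding a
 * lock.  Lock and unlock must use the same channel address.
 */
#if 0
struct chan_state {
	int	pending;
};

static void
chan_clear_sketch(struct chan_state *cs)
{
	_spin_pool_lock(cs);		/* lock chosen by hashing cs */
	cs->pending = 0;
	_spin_pool_unlock(cs);		/* same chan -> same pool lock */
}
#endif
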
/*
 * Invoked periodically from the contested loops.  Complains once a
 * second while the wait continues and panics after 60 seconds.
 * Returns TRUE to tell the caller to give up (test mode only).
 */
static int
spin_indefinite_check(struct spinlock *spin, struct indefinite_info *info)
{
	sysclock_t count;

	cpu_spinlock_contested();

	count = sys_cputimer->count();
	if (info->secs == 0) {
		info->base = count;
		++info->secs;
	} else if (count - info->base > sys_cputimer->freq) {
		kprintf("spin_lock: %p, indefinite wait (%d secs)!\n",
			spin, info->secs);
		info->base = count;
		++info->secs;
#if defined(INVARIANTS)
		if (spin_lock_test_mode) {
			print_backtrace(-1);
			return (TRUE);
		}
#endif
#if defined(INVARIANTS)
		if (info->secs == 11)
			print_backtrace(-1);
#endif
		if (info->secs == 60)
			panic("spin_lock: %p, indefinite wait!", spin);
	}
	return (FALSE);
}

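/*
 * Editor's note (an addition): sys_cputimer->freq is the timer tick
 * rate in Hz, so "count - info->base > sys_cputimer->freq" means "more
 * than one second has elapsed"; assuming sysclock_t is unsigned, the
 * delta is well defined even when the counter wraps.
 */
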
/*
 * If INVARIANTS is enabled various spinlock timing tests can be run
 * by setting debug.spin_lock_test:
 *
 *	1	Test the indefinite wait code
 *	2	Time the best-case exclusive lock overhead (spin_test_count)
 *	3	Time the best-case shared lock overhead (spin_test_count)
 */

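/*
 * Editor's usage sketch (an addition): on an INVARIANTS kernel, e.g.
 *
 *	sysctl debug.spin_test_count=1000000
 *	sysctl debug.spin_lock_test=2
 *
 * times the best-case exclusive lock/unlock pair.
 */
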
#ifdef INVARIANTS

static int spin_test_count = 10000000;
SYSCTL_INT(_debug, OID_AUTO, spin_test_count, CTLFLAG_RW, &spin_test_count, 0,
    "Number of iterations to use for spinlock wait code test");

static int
sysctl_spin_lock_test(SYSCTL_HANDLER_ARGS)
{
	struct spinlock spin;
	int error;
	int value = 0;
	int i;

	if ((error = priv_check(curthread, PRIV_ROOT)) != 0)
		return (error);
	if ((error = SYSCTL_IN(req, &value, sizeof(value))) != 0)
		return (error);

	/*
	 * Indefinite wait test
	 */
	if (value == 1) {
		spin_init(&spin);
		spin_lock(&spin);	/* force an indefinite wait */
		spin_lock_test_mode = 1;
		spin_lock_contested(&spin);
		spin_unlock(&spin);	/* Clean up the spinlock count */
		spin_unlock(&spin);
		spin_lock_test_mode = 0;
	}

	/*
	 * Time best-case exclusive spinlocks
	 */
	if (value == 2) {
		globaldata_t gd = mycpu;

		spin_init(&spin);
		for (i = spin_test_count; i > 0; --i) {
			spin_lock_quick(gd, &spin);
			spin_unlock_quick(gd, &spin);
		}
	}

	return (0);
}

SYSCTL_PROC(_debug, OID_AUTO, spin_lock_test, CTLFLAG_RW|CTLTYPE_INT,
    0, 0, sysctl_spin_lock_test, "I", "Test spinlock wait code");

#endif	/* INVARIANTS */