kernel - Spiff up locks a bit
sys/kern/kern_spinlock.c
/*
 * Copyright (c) 2005 Jeffrey M. Hsu.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu. and Matthew Dillon
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * The implementation is designed to avoid looping when compatible operations
 * are executed.
 *
 * To acquire a spinlock we first increment counta.  Then we check if counta
 * meets our requirements.  For an exclusive spinlock it must be 1, for a
 * shared spinlock it must either be 1 or the SPINLOCK_SHARED bit must be set.
 *
 * Shared spinlock failure case: Decrement the count, loop until we can
 * transition from 0 to SPINLOCK_SHARED|1, or until we find SPINLOCK_SHARED
 * is set and increment the count.
 *
 * Exclusive spinlock failure case: While maintaining the count, clear the
 * SPINLOCK_SHARED flag unconditionally.  Then use an atomic add to transfer
 * the count from the low bits to the high bits of counta.  Then loop until
 * all low bits are 0.  Once the low bits drop to 0 we can transfer the
 * count back with an atomic_cmpset_int(), atomically, and return.
 */
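/*
 * Schematic layout of spin->counta (the exact bit assignments live in
 * sys/spinlock.h; the summary below is descriptive, not authoritative):
 *
 *	low bits		count of current owners (1 == exclusively
 *				owned; N with SPINLOCK_SHARED set == N
 *				shared owners)
 *	SPINLOCK_SHARED		flag marking the lock as shared-owned
 *	SPINLOCK_EXCLWAIT	high-bits counter of waiting exclusive
 *				requesters
 */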
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#ifdef INVARIANTS
#include <sys/proc.h>
#endif
#include <sys/priv.h>
#include <machine/atomic.h>
#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/specialreg.h>
#include <machine/clock.h>
#include <sys/spinlock.h>
#include <sys/spinlock2.h>
#include <sys/ktr.h>

#ifdef _KERNEL_VIRTUAL
#include <pthread.h>
#endif
struct spinlock pmap_spin = SPINLOCK_INITIALIZER(pmap_spin, "pmap_spin");

struct indefinite_info {
	sysclock_t	base;
	int		secs;
	const char	*ident;
};
/*
 * Kernel Trace
 */
#if !defined(KTR_SPIN_CONTENTION)
#define KTR_SPIN_CONTENTION	KTR_ALL
#endif
#define SPIN_STRING	"spin=%p type=%c"
#define SPIN_ARG_SIZE	(sizeof(void *) + sizeof(int))

KTR_INFO_MASTER(spin);
#if 0
KTR_INFO(KTR_SPIN_CONTENTION, spin, beg, 0, SPIN_STRING, SPIN_ARG_SIZE);
KTR_INFO(KTR_SPIN_CONTENTION, spin, end, 1, SPIN_STRING, SPIN_ARG_SIZE);
#endif

#define logspin(name, spin, type)			\
	KTR_LOG(spin_ ## name, spin, type)
#ifdef INVARIANTS
static int spin_lock_test_mode;
#endif

#ifdef DEBUG_LOCKS_LATENCY

static long spinlocks_add_latency;
SYSCTL_LONG(_debug, OID_AUTO, spinlocks_add_latency, CTLFLAG_RW,
    &spinlocks_add_latency, 0,
    "Add spinlock latency");

#endif
/*
 * We need a fairly large pool to avoid contention on large SMP systems,
 * particularly multi-chip systems.
 */
/*#define SPINLOCK_NUM_POOL	8101*/
#define SPINLOCK_NUM_POOL	8192
#define SPINLOCK_NUM_POOL_MASK	(SPINLOCK_NUM_POOL - 1)
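/*
 * NOTE: SPINLOCK_NUM_POOL must be a power of 2 so that masking with
 *	 SPINLOCK_NUM_POOL_MASK is a valid hash reduction; a prime-sized
 *	 pool such as the commented-out 8101 would require a modulo instead.
 */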
static __cachealign struct {
	struct spinlock	spin;
	char		filler[32 - sizeof(struct spinlock)];
} pool_spinlocks[SPINLOCK_NUM_POOL];
static int spin_indefinite_check(struct spinlock *spin,
			struct indefinite_info *info);
/*
 * We contested due to another exclusive lock holder.  We lose.
 *
 * We have to unwind the attempt and may acquire the spinlock
 * anyway while doing so.
 */
int
spin_trylock_contested(struct spinlock *spin)
{
	globaldata_t gd = mycpu;

	/*
	 * Handle degenerate case, else fail.
	 */
	if (atomic_cmpset_int(&spin->counta, SPINLOCK_SHARED|0, 1))
		return TRUE;
	/*atomic_add_int(&spin->counta, -1);*/
	--gd->gd_spinlocks;
	--gd->gd_curthread->td_critcount;
	return (FALSE);
}
/*
 * The spin_lock() inline was unable to acquire the lock and calls this
 * function with spin->counta already incremented.
 *
 * atomic_swap_int() is the absolute fastest spinlock instruction, at
 * least on multi-socket systems.  All instructions seem to be about
 * the same on single-socket multi-core systems.  However, atomic_swap_int()
 * does not result in an even distribution of successful acquisitions.
 *
 * UNFORTUNATELY we cannot really use atomic_swap_int() when also implementing
 * shared spin locks, so as we do a better job removing contention we've
 * moved to atomic_cmpset_int() to be able to handle multiple states.
 *
 * Another problem we have is that (at least on the 48-core opteron we test
 * with) having all 48 cores contesting the same spin lock reduces
 * performance to around 600,000 ops/sec, versus millions when fewer cores
 * are going after the same lock.
 *
 * Backoff algorithms can create even worse starvation problems, and don't
 * really improve performance when a lot of cores are contending.
 *
 * Our solution is to allow the data cache to lazy-update by reading it
 * non-atomically and only attempting to acquire the lock if the lazy read
 * looks good.  This effectively limits cache bus bandwidth.  A cpu_pause()
 * (for intel/amd anyhow) is not strictly needed as cache bus resource use
 * is governed by the lazy update.
 *
 * WARNING!!!!  Performance matters here, by a huge margin.
 *
 *	48-core test with pre-read / -j 48 no-modules kernel compile
 *	with fanned-out inactive and active queues came in at 55 seconds.
 *
 *	48-core test with pre-read / -j 48 no-modules kernel compile
 *	came in at 75 seconds.  Without pre-read it came in at 170 seconds.
 *
 *	4-core test with pre-read / -j 48 no-modules kernel compile
 *	came in at 83 seconds.  Without pre-read it came in at 83 seconds
 *	as well (no difference).
 */
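/*
 * Illustrative walk-through of the exclusive contested path below (constants
 * are symbolic; the real values come from sys/spinlock.h):
 *
 *	- The inline already added 1 to counta, so adding
 *	  (SPINLOCK_EXCLWAIT - 1) converts our low-bit count into one
 *	  EXCLWAIT reservation in the high bits.
 *	- The loop then pre-reads counta non-atomically; only when the low
 *	  bits read as 0 does it attempt atomic_cmpset_int(&spin->counta,
 *	  ovalue, (ovalue - SPINLOCK_EXCLWAIT) | 1), which atomically
 *	  returns our high-bit reservation and takes low-bit ownership.
 */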
void
_spin_lock_contested(struct spinlock *spin, const char *ident, int value)
{
	struct indefinite_info info = { 0, 0, ident };
	int i;

	/*
	 * Handle degenerate case.
	 */
	if (value == SPINLOCK_SHARED) {
		if (atomic_cmpset_int(&spin->counta, SPINLOCK_SHARED|0, 1))
			return;
	}

	/*
	 * Transfer our count to the high bits, then loop until we can
	 * acquire the low counter (== 1).  No new shared lock can be
	 * acquired while we hold the EXCLWAIT bits.
	 *
	 * Force any existing shared locks to exclusive.  The shared unlock
	 * understands that this may occur.
	 */
	atomic_add_int(&spin->counta, SPINLOCK_EXCLWAIT - 1);
	if (value & SPINLOCK_SHARED)
		atomic_clear_int(&spin->counta, SPINLOCK_SHARED);

#ifdef DEBUG_LOCKS_LATENCY
	long j;
	for (j = spinlocks_add_latency; j > 0; --j)
		cpu_ccfence();
#endif

	i = 0;
	/*logspin(beg, spin, 'w');*/
	for (;;) {
		/*
		 * If the low bits are zero, try to acquire the exclusive lock
		 * by transferring our high bit counter to the low bits.
		 *
		 * NOTE: Reading spin->counta prior to the swap is extremely
		 *	 important on multi-chip/many-core boxes.  On 48-core
		 *	 this one change improves fully concurrent all-cores
		 *	 compiles by 100% or better.
		 *
		 *	 I can't emphasize enough how important the pre-read
		 *	 is in preventing hw cache bus armageddon on
		 *	 multi-chip systems.  And on single-chip/multi-core
		 *	 systems it just doesn't hurt.
		 */
		uint32_t ovalue = spin->counta;
		cpu_ccfence();
		if ((ovalue & (SPINLOCK_EXCLWAIT - 1)) == 0 &&
		    atomic_cmpset_int(&spin->counta, ovalue,
				      (ovalue - SPINLOCK_EXCLWAIT) | 1)) {
			break;
		}

		/*
		 * Every 128 failed passes record a collision and run the
		 * indefinite-wait check.
		 */
		if ((++i & 0x7F) == 0x7F) {
			mycpu->gd_cnt.v_lock_name[0] = 'X';
			strncpy(mycpu->gd_cnt.v_lock_name + 1,
				ident,
				sizeof(mycpu->gd_cnt.v_lock_name) - 2);
			++mycpu->gd_cnt.v_lock_colls;
			if (spin_indefinite_check(spin, &info))
				break;
		}
#ifdef _KERNEL_VIRTUAL
		pthread_yield();
#endif
	}
	/*logspin(end, spin, 'w');*/
}
/*
 * Shared spinlock attempt was contested.
 *
 * The caller has not modified counta.
 */
void
_spin_lock_shared_contested(struct spinlock *spin, const char *ident, int value)
{
	struct indefinite_info info = { 0, 0, ident };
	int i;

#ifdef DEBUG_LOCKS_LATENCY
	long j;
	for (j = spinlocks_add_latency; j > 0; --j)
		cpu_ccfence();
#endif

	i = 0;
	/*logspin(beg, spin, 'w');*/
	for (;;) {
		/*
		 * Loop until we can acquire the shared spinlock.  Note that
		 * the low bits can be zero while the high EXCLWAIT bits are
		 * non-zero.  In this situation exclusive requesters have
		 * priority (otherwise shared users on multiple cpus can hog
		 * the spinlock).
		 *
		 * NOTE: Reading spin->counta prior to the swap is extremely
		 *	 important on multi-chip/many-core boxes.  On 48-core
		 *	 this one change improves fully concurrent all-cores
		 *	 compiles by 100% or better.
		 *
		 *	 I can't emphasize enough how important the pre-read
		 *	 is in preventing hw cache bus armageddon on
		 *	 multi-chip systems.  And on single-chip/multi-core
		 *	 systems it just doesn't hurt.
		 */
		uint32_t ovalue = spin->counta;

		cpu_ccfence();
		if (ovalue == 0) {
			if (atomic_cmpset_int(&spin->counta, 0,
					      SPINLOCK_SHARED | 1))
				break;
		} else if (ovalue & SPINLOCK_SHARED) {
			if (atomic_cmpset_int(&spin->counta, ovalue,
					      ovalue + 1))
				break;
		}
		if ((++i & 0x7F) == 0x7F) {
			mycpu->gd_cnt.v_lock_name[0] = 'S';
			strncpy(mycpu->gd_cnt.v_lock_name + 1,
				ident,
				sizeof(mycpu->gd_cnt.v_lock_name) - 2);
			++mycpu->gd_cnt.v_lock_colls;
			if (spin_indefinite_check(spin, &info))
				break;
		}
#ifdef _KERNEL_VIRTUAL
		pthread_yield();
#endif
	}
	/*logspin(end, spin, 'w');*/
}
/*
 * Pool functions (SHARED SPINLOCKS NOT SUPPORTED)
 */
static __inline int
_spin_pool_hash(void *ptr)
{
	int i;

	i = ((int)(uintptr_t)ptr >> 5) ^ ((int)(uintptr_t)ptr >> 12);
	i &= SPINLOCK_NUM_POOL_MASK;
	return (i);
}

void
_spin_pool_lock(void *chan, const char *ident)
{
	struct spinlock *sp;

	sp = &pool_spinlocks[_spin_pool_hash(chan)].spin;
	_spin_lock(sp, ident);
}

void
_spin_pool_unlock(void *chan)
{
	struct spinlock *sp;

	sp = &pool_spinlocks[_spin_pool_hash(chan)].spin;
	spin_unlock(sp);
}
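/*
 * Illustrative usage sketch (not part of this file): a caller passes an
 * arbitrary address as the channel and the hash above selects one of the
 * SPINLOCK_NUM_POOL pool locks, e.g.
 *
 *	_spin_pool_lock(obj, "objpool");
 *	... short critical section protecting *obj ...
 *	_spin_pool_unlock(obj);
 *
 * Unrelated objects may hash to the same pool slot, so hold times must be
 * kept short, and shared spinlocks are not supported on pool locks.
 */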
static
int
spin_indefinite_check(struct spinlock *spin, struct indefinite_info *info)
{
	sysclock_t count;

	cpu_spinlock_contested();

	count = sys_cputimer->count();
	if (info->secs == 0) {
		info->base = count;
		++info->secs;
	} else if (count - info->base > sys_cputimer->freq) {
		kprintf("spin_lock: %s(%p), indefinite wait (%d secs)!\n",
			info->ident, spin, info->secs);
		info->base = count;
		++info->secs;
		if (panicstr)
			return (TRUE);
#if defined(INVARIANTS)
		if (spin_lock_test_mode) {
			print_backtrace(-1);
			return (TRUE);
		}
#endif
#if defined(INVARIANTS)
		if (info->secs == 11)
			print_backtrace(-1);
#endif
		if (info->secs == 60)
			panic("spin_lock: %s(%p), indefinite wait!",
			      info->ident, spin);
	}
	return (FALSE);
}
/*
 * If INVARIANTS is enabled various spinlock timing tests can be run
 * by setting debug.spin_lock_test:
 *
 *	1	Test the indefinite wait code
 *	2	Time the best-case exclusive lock overhead (spin_test_count)
 *	3	Time the best-case shared lock overhead (spin_test_count)
 */
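/*
 * Illustrative invocation on an INVARIANTS kernel (the sysctl names match
 * the declarations below):
 *
 *	sysctl debug.spin_test_count=1000000
 *	sysctl debug.spin_lock_test=2
 */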
#ifdef INVARIANTS

static int spin_test_count = 10000000;
SYSCTL_INT(_debug, OID_AUTO, spin_test_count, CTLFLAG_RW, &spin_test_count, 0,
    "Number of iterations to use for spinlock wait code test");

static int
sysctl_spin_lock_test(SYSCTL_HANDLER_ARGS)
{
	struct spinlock spin;
	int error;
	int value = 0;
	int i;

	if ((error = priv_check(curthread, PRIV_ROOT)) != 0)
		return (error);
	if ((error = SYSCTL_IN(req, &value, sizeof(value))) != 0)
		return (error);

	/*
	 * Indefinite wait test
	 */
	if (value == 1) {
		spin_init(&spin, "sysctllock");
		spin_lock(&spin);	/* force an indefinite wait */
		spin_lock_test_mode = 1;
		spin_lock(&spin);
		spin_unlock(&spin);	/* Clean up the spinlock count */
		spin_unlock(&spin);
		spin_lock_test_mode = 0;
	}

	/*
	 * Time best-case exclusive spinlocks
	 */
	if (value == 2) {
		globaldata_t gd = mycpu;

		spin_init(&spin, "sysctllocktest");
		for (i = spin_test_count; i > 0; --i) {
			_spin_lock_quick(gd, &spin, "test");
			spin_unlock_quick(gd, &spin);
		}
	}

	return (0);
}

SYSCTL_PROC(_debug, KERN_PROC_ALL, spin_lock_test, CTLFLAG_RW|CTLTYPE_INT,
    0, 0, sysctl_spin_lock_test, "I", "Test spinlock wait code");

#endif	/* INVARIANTS */