/*
 * Copyright (c) 2005 Jeffrey M. Hsu.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu and Matthew Dillon.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/kern_spinlock.c,v 1.12 2008/05/04 04:48:47 sephe Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#ifdef INVARIANTS
#include <sys/proc.h>
#endif
#include <ddb/ddb.h>
#include <machine/atomic.h>
#include <machine/cpufunc.h>
#include <machine/specialreg.h>
#include <machine/clock.h>
#include <sys/spinlock.h>
#include <sys/spinlock2.h>
#include <sys/ktr.h>

#define BACKOFF_INITIAL	1
#define BACKOFF_LIMIT	256
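/*
 * The spin count used between lock attempts doubles from BACKOFF_INITIAL
 * up to the run-time limit (debug.spinlocks_bolim, which defaults to
 * BACKOFF_LIMIT).  Once the limit is hit, exponential_backoff() falls
 * through to its indefinite-wait path.
 */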
#ifdef SMP

/*
 * Kernel Trace
 */
#if !defined(KTR_SPIN_CONTENTION)
#define KTR_SPIN_CONTENTION	KTR_ALL
#endif
#define SPIN_STRING	"spin=%p type=%c"
#define SPIN_ARG_SIZE	(sizeof(void *) + sizeof(int))
KTR_INFO_MASTER(spin);
KTR_INFO(KTR_SPIN_CONTENTION, spin, beg, 0, SPIN_STRING, SPIN_ARG_SIZE);
KTR_INFO(KTR_SPIN_CONTENTION, spin, end, 1, SPIN_STRING, SPIN_ARG_SIZE);
KTR_INFO(KTR_SPIN_CONTENTION, spin, backoff, 2,
	 "spin=%p bo1=%d thr=%p bo=%d",
	 ((2 * sizeof(void *)) + (2 * sizeof(int))));
KTR_INFO(KTR_SPIN_CONTENTION, spin, bofail, 3, SPIN_STRING, SPIN_ARG_SIZE);

#define logspin(name, mtx, type) \
	KTR_LOG(spin_ ## name, mtx, type)

#define logspin_backoff(mtx, bo1, thr, bo) \
	KTR_LOG(spin_backoff, mtx, bo1, thr, bo)
#ifdef INVARIANTS
static int spin_lock_test_mode;
#endif

static int64_t spinlocks_contested1;
SYSCTL_QUAD(_debug, OID_AUTO, spinlocks_contested1, CTLFLAG_RD,
	    &spinlocks_contested1, 0, "");

static int64_t spinlocks_contested2;
SYSCTL_QUAD(_debug, OID_AUTO, spinlocks_contested2, CTLFLAG_RD,
	    &spinlocks_contested2, 0, "");

static int spinlocks_backoff_limit = BACKOFF_LIMIT;
SYSCTL_INT(_debug, OID_AUTO, spinlocks_bolim, CTLFLAG_RW,
	   &spinlocks_backoff_limit, 0, "");
struct exponential_backoff {
	int backoff;
	int nsec;
	struct spinlock *mtx;
	sysclock_t base;
};
static int exponential_backoff(struct exponential_backoff *bo);

static __inline
void
exponential_init(struct exponential_backoff *bo, struct spinlock *mtx)
{
	bo->backoff = BACKOFF_INITIAL;
	bo->nsec = 0;
	bo->mtx = mtx;
}
/*
 * We were either contested due to another exclusive lock holder,
 * or due to the presence of shared locks.  We have to undo the mess
 * we created by returning the shared locks.
 *
 * If there was another exclusive lock holder, only the exclusive bit
 * in value will be set and we don't have to do anything, since
 * restoration does not involve any work.
 *
 * Otherwise we successfully obtained the exclusive bit.  Attempt to
 * clear the shared bits.  If we are able to clear all the shared bits
 * we win.  Otherwise we lose and we have to restore the shared bits
 * we couldn't clear (and also clear our exclusive bit).
 */
int
spin_trylock_wr_contested(struct spinlock *mtx, int value)
{
	int bit;

	++spinlocks_contested1;
	if ((value & SPINLOCK_EXCLUSIVE) == 0) {
		while (value) {
			bit = bsfl(value);
			if (globaldata_find(bit)->gd_spinlock_rd == mtx) {
				/*
				 * That cpu is actively holding the shared
				 * lock, restore the shared bits (which also
				 * clears our exclusive bit) and fail.
				 */
				atomic_swap_int(&mtx->lock, value);
				return (FALSE);
			}
			/* Stale cached bit, safe to clear */
			value &= ~(1 << bit);
		}
		return (TRUE);
	}
	return (FALSE);
}
/*
 * We were either contested due to another exclusive lock holder,
 * or due to the presence of shared locks.
 *
 * NOTE: If value indicates an exclusively held mutex, no shared bits
 * would have been set and we can throw away value.
 */
void
spin_lock_wr_contested(struct spinlock *mtx, int value)
{
	struct exponential_backoff backoff;
	globaldata_t gd = mycpu;
	int bit;
	int mask;

	/*
	 * Wait until we can gain exclusive access vs another exclusive
	 * holder.
	 */
	exponential_init(&backoff, mtx);
	++spinlocks_contested1;
	logspin(beg, mtx, 'w');

	while (value & SPINLOCK_EXCLUSIVE) {
		value = atomic_swap_int(&mtx->lock, SPINLOCK_EXCLUSIVE);
		if (exponential_backoff(&backoff)) {
			value &= ~SPINLOCK_EXCLUSIVE;
			break;
		}
	}
	/*
	 * Kill the cached shared bit for our own cpu.  This is the most
	 * common case and there's no sense wasting cpu on it.  Since
	 * spinlocks aren't recursive, we can't own a shared ref on the
	 * spinlock while trying to get an exclusive one.
	 *
	 * If multiple bits are set do not stall on any single cpu.  Check
	 * all cpus that have the cache bit set, then loop and check again,
	 * until we've cleaned all the bits.
	 */
	value &= ~gd->gd_cpumask;
	while ((mask = value) != 0) {
		while (mask) {
			bit = bsfl(mask);
			if (globaldata_find(bit)->gd_spinlock_rd != mtx) {
				/* Stale cached bit, consider it cleaned */
				value &= ~(1 << bit);
			} else if (exponential_backoff(&backoff)) {
				value = 0;
				break;
			}
			mask &= ~(1 << bit);
		}
	}
	logspin(end, mtx, 'w');
}
/*
 * The cache bit wasn't set for our cpu.  Loop until we can set the bit.
 * As with the spin_lock_rd() inline we need a memory fence after setting
 * gd_spinlock_rd to interlock against exclusive spinlocks waiting for
 * that field to clear.
 */
void
spin_lock_rd_contested(struct spinlock *mtx)
{
	struct exponential_backoff backoff;
	globaldata_t gd = mycpu;
	int value = mtx->lock;

	/*
	 * Shortcut the op if we can just set the cache bit.  This case
	 * occurs when the last lock was an exclusive lock.
	 */
	while ((value & SPINLOCK_EXCLUSIVE) == 0) {
		if (atomic_cmpset_int(&mtx->lock, value, value|gd->gd_cpumask))
			return;
		value = mtx->lock;
	}

	exponential_init(&backoff, mtx);
	++spinlocks_contested1;
	logspin(beg, mtx, 'r');

	while ((value & gd->gd_cpumask) == 0) {
		if (value & SPINLOCK_EXCLUSIVE) {
			gd->gd_spinlock_rd = NULL;
			if (exponential_backoff(&backoff)) {
				gd->gd_spinlock_rd = mtx;
				break;
			}
			gd->gd_spinlock_rd = mtx;
			cpu_mfence();
		} else {
			if (atomic_cmpset_int(&mtx->lock, value,
					      value|gd->gd_cpumask))
				break;
		}
		value = mtx->lock;
	}
	logspin(end, mtx, 'r');
}
/*
 * Handle exponential backoff and indefinite waits.
 *
 * If the system is handling a panic we hand the spinlock over to the caller
 * after 1 second.  After 10 seconds we attempt to print a debugger
 * backtrace.  We also run pending interrupts in order to allow a console
 * break into DDB.
 */
static
int
exponential_backoff(struct exponential_backoff *bo)
{
	sysclock_t count;
	int backoff;
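	/*
	 * Randomize the spin count: XORing the TSC with the (shifted)
	 * thread pointer and reducing it modulo the current backoff window
	 * gives each spinning cpu a different delay, so contenders do not
	 * retry in lockstep.  Fall back to the deterministic window when
	 * no TSC is available.
	 */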
#ifdef _RDTSC_SUPPORTED_
	if (cpu_feature & CPUID_TSC) {
		backoff =
		    (((u_long)rdtsc() ^ (((u_long)curthread) >> 5)) %
		     bo->backoff) + BACKOFF_INITIAL;
	} else
#endif
		backoff = bo->backoff;
	logspin_backoff(bo->mtx, bo->backoff, curthread, backoff);
	/*
	 * Quick backoff
	 */
	for (; backoff; --backoff)
		cpu_nop();
	if (bo->backoff < spinlocks_backoff_limit) {
		bo->backoff <<= 1;
		return (FALSE);
	} else {
		bo->backoff = BACKOFF_INITIAL;
	}

	logspin(bofail, bo->mtx, 'u');
	/*
	 * Indefinite
	 */
	++spinlocks_contested2;
	cpu_spinlock_contested();
	if (bo->nsec == 0) {
		bo->base = sys_cputimer->count();
		bo->nsec = 1;
	}

	count = sys_cputimer->count();
	if (count - bo->base > sys_cputimer->freq) {
		kprintf("spin_lock: %p, indefinite wait!\n", bo->mtx);
		if (panicstr)
			return (TRUE);
#if defined(INVARIANTS) && defined(DDB)
		if (spin_lock_test_mode) {
			db_print_backtrace();
			return (TRUE);
		}
#endif
		++bo->nsec;
#if defined(INVARIANTS) && defined(DDB)
		if (bo->nsec == 11)
			db_print_backtrace();
#endif
		if (bo->nsec == 60)
			panic("spin_lock: %p, indefinite wait!\n", bo->mtx);
		splz();
		bo->base = count;
	}
	return (FALSE);
}
/*
 * If INVARIANTS is enabled various spinlock timing tests can be run
 * by setting debug.spin_lock_test:
 *
 *	1	Test the indefinite wait code
 *	2	Time the best-case exclusive lock overhead (spin_test_count)
 *	3	Time the best-case shared lock overhead (spin_test_count)
 */

#ifdef INVARIANTS
static int spin_test_count = 10000000;
SYSCTL_INT(_debug, OID_AUTO, spin_test_count, CTLFLAG_RW, &spin_test_count, 0, "");
static int
sysctl_spin_lock_test(SYSCTL_HANDLER_ARGS)
{
	struct spinlock mtx;
	int error;
	int value = 0;
	int i;

	if ((error = suser(curthread)) != 0)
		return (error);
	if ((error = SYSCTL_IN(req, &value, sizeof(value))) != 0)
		return (error);
	/*
	 * Indefinite wait test
	 */
	if (value == 1) {
		spin_init(&mtx);
		spin_lock_wr(&mtx);	/* force an indefinite wait */
		spin_lock_test_mode = 1;
		spin_lock_wr(&mtx);
		spin_unlock_wr(&mtx);	/* Clean up the spinlock count */
		spin_unlock_wr(&mtx);
		spin_lock_test_mode = 0;
	}
	/*
	 * Time best-case exclusive spinlocks
	 */
	if (value == 2) {
		globaldata_t gd = mycpu;

		spin_init(&mtx);
		for (i = spin_test_count; i > 0; --i) {
			spin_lock_wr_quick(gd, &mtx);
			spin_unlock_wr_quick(gd, &mtx);
		}
	}
	/*
	 * Time best-case shared spinlocks
	 */
	if (value == 3) {
		globaldata_t gd = mycpu;

		spin_init(&mtx);
		for (i = spin_test_count; i > 0; --i) {
			spin_lock_rd_quick(gd, &mtx);
			spin_unlock_rd_quick(gd, &mtx);
		}
	}
	return (0);
}
SYSCTL_PROC(_debug, KERN_PROC_ALL, spin_lock_test, CTLFLAG_RW|CTLTYPE_INT,
	    0, 0, sysctl_spin_lock_test, "I", "Test spinlock wait code");
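/*
 * Illustrative usage: on an INVARIANTS kernel the timing tests above can
 * be driven from userland, e.g.
 *
 *	sysctl debug.spin_test_count=1000000
 *	sysctl debug.spin_lock_test=2
 *
 * which runs spin_test_count exclusive lock/unlock pairs on the current
 * cpu.
 */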
#endif	/* INVARIANTS */
#endif	/* SMP */