/* sys/kern/kern_mutex.c */

/*
 * Copyright (c) 2009 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Implement fast persistent locks based on atomic_cmpset_int() with
 * semantics similar to lockmgr locks but faster and taking up much less
 * space.  Taken from HAMMER's lock implementation.
 *
 * These are meant to complement our LWKT tokens.  Tokens are only held
 * while the thread is running.  Mutexes can be held across blocking
 * conditions.
 *
 * Most of the support is in sys/mutex[2].h.  We mostly provide backoff
 * functions here.
 */
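
/*
 * Typical use (an illustrative sketch only; it assumes the inline
 * wrappers such as mtx_init(), mtx_lock_ex_quick() and mtx_unlock()
 * provided by sys/mutex2.h, and "example_mtx"/"exmtx" are hypothetical
 * names):
 *
 *      struct mtx example_mtx;
 *
 *      mtx_init(&example_mtx);
 *      mtx_lock_ex_quick(&example_mtx, "exmtx");
 *      ... code that may block while the mutex is held ...
 *      mtx_unlock(&example_mtx);
 */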

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/thread.h>

#include <machine/cpufunc.h>

#include <sys/thread2.h>
#include <sys/mutex2.h>
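
/*
 * Debugging counters.  mtx_contention_count tallies blocked or backed-off
 * lock attempts, mtx_collision_count tallies cmpset retries and polling
 * passes, and mtx_wakeup_count tallies wakeup()s issued to shared waiters.
 * They are exported read-write below via sysctl as
 * kern.mtx_contention_count, kern.mtx_collision_count and
 * kern.mtx_wakeup_count.
 */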
static __int64_t mtx_contention_count;
static __int64_t mtx_collision_count;
static __int64_t mtx_wakeup_count;

SYSCTL_QUAD(_kern, OID_AUTO, mtx_contention_count, CTLFLAG_RW,
            &mtx_contention_count, 0, "");
SYSCTL_QUAD(_kern, OID_AUTO, mtx_collision_count, CTLFLAG_RW,
            &mtx_collision_count, 0, "");
SYSCTL_QUAD(_kern, OID_AUTO, mtx_wakeup_count, CTLFLAG_RW,
            &mtx_wakeup_count, 0, "");

static void mtx_chain_link(mtx_t mtx);
static void mtx_delete_link(mtx_t mtx, mtx_link_t link);

/*
 * Exclusive-lock a mutex, block until acquired.  Recursion is allowed.
 *
 * Returns 0 on success, or the tsleep() return code on failure.
 * An error can only be returned if PCATCH is specified in the flags.
 */
static __inline int
__mtx_lock_ex(mtx_t mtx, mtx_link_t link, const char *ident, int flags, int to)
{
        u_int   lock;
        u_int   nlock;
        int     error;

        for (;;) {
                lock = mtx->mtx_lock;
                if (lock == 0) {
                        nlock = MTX_EXCLUSIVE | 1;
                        if (atomic_cmpset_int(&mtx->mtx_lock, 0, nlock)) {
                                mtx->mtx_owner = curthread;
                                error = 0;
                                break;
                        }
                } else if ((lock & MTX_EXCLUSIVE) &&
                           mtx->mtx_owner == curthread) {
                        KKASSERT((lock & MTX_MASK) != MTX_MASK);
                        nlock = lock + 1;
                        if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
                                error = 0;
                                break;
                        }
                } else {
                        /*
                         * Clearing MTX_EXLINK in lock causes us to loop until
                         * MTX_EXLINK is available.  However, to avoid
                         * unnecessary cpu cache traffic we poll instead.
                         *
                         * Setting MTX_EXLINK in nlock causes us to loop until
                         * we can acquire MTX_EXLINK.
                         *
                         * Also set MTX_EXWANTED coincident with EXLINK, if
                         * not already set.
                         */
                        thread_t td;

                        if (lock & MTX_EXLINK) {
                                cpu_pause();
                                ++mtx_collision_count;
                                continue;
                        }
                        td = curthread;
                        /*lock &= ~MTX_EXLINK;*/
                        nlock = lock | MTX_EXWANTED | MTX_EXLINK;
                        ++td->td_critcount;
                        if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
                                /*
                                 * Check for early abort
                                 */
                                if (link->state == MTX_LINK_ABORTED) {
                                        atomic_clear_int(&mtx->mtx_lock,
                                                         MTX_EXLINK);
                                        --td->td_critcount;
                                        error = ENOLCK;
                                        if (mtx->mtx_link == NULL) {
                                                atomic_clear_int(&mtx->mtx_lock,
                                                                 MTX_EXWANTED);
                                        }
                                        break;
                                }

                                /*
                                 * Success.  Link in our structure then
                                 * release EXLINK and sleep.
                                 */
                                link->owner = td;
                                link->state = MTX_LINK_LINKED;
                                if (mtx->mtx_link) {
                                        link->next = mtx->mtx_link;
                                        link->prev = link->next->prev;
                                        link->next->prev = link;
                                        link->prev->next = link;
                                } else {
                                        link->next = link;
                                        link->prev = link;
                                        mtx->mtx_link = link;
                                }
                                tsleep_interlock(link, 0);
                                atomic_clear_int(&mtx->mtx_lock, MTX_EXLINK);
                                --td->td_critcount;

                                error = tsleep(link, flags, ident, to);
                                ++mtx_contention_count;

                                /*
                                 * Normal unlink, we should own the exclusive
                                 * lock now.
                                 */
                                if (link->state == MTX_LINK_LINKED)
                                        mtx_delete_link(mtx, link);
                                if (link->state == MTX_LINK_ACQUIRED) {
                                        KKASSERT(mtx->mtx_owner == link->owner);
                                        error = 0;
                                        break;
                                }

                                /*
                                 * Aborted lock (mtx_abort_ex called).
                                 */
                                if (link->state == MTX_LINK_ABORTED) {
                                        error = ENOLCK;
                                        break;
                                }

                                /*
                                 * tsleep error, else retry.
                                 */
                                if (error)
                                        break;
                        } else {
                                --td->td_critcount;
                        }
                }
                ++mtx_collision_count;
        }
        return (error);
}
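
/*
 * Exclusive-lock entry points.  These wrap __mtx_lock_ex() with an
 * explicit, abortable link structure (see mtx_abort_ex_link()), with a
 * caller-supplied flags/timeout pair, or with neither.
 */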

int
_mtx_lock_ex_link(mtx_t mtx, mtx_link_t link,
                  const char *ident, int flags, int to)
{
        return(__mtx_lock_ex(mtx, link, ident, flags, to));
}

int
_mtx_lock_ex(mtx_t mtx, const char *ident, int flags, int to)
{
        struct mtx_link link;

        mtx_link_init(&link);
        return(__mtx_lock_ex(mtx, &link, ident, flags, to));
}

int
_mtx_lock_ex_quick(mtx_t mtx, const char *ident)
{
        struct mtx_link link;

        mtx_link_init(&link);
        return(__mtx_lock_ex(mtx, &link, ident, 0, 0));
}

/*
 * Share-lock a mutex, block until acquired.  Recursion is allowed.
 *
 * Returns 0 on success, or the tsleep() return code on failure.
 * An error can only be returned if PCATCH is specified in the flags.
 *
 * NOTE: Shared locks get a mass-wakeup so if the tsleep fails we
 *       do not have to chain the wakeup().
 */
static __inline int
__mtx_lock_sh(mtx_t mtx, const char *ident, int flags, int to)
{
        u_int   lock;
        u_int   nlock;
        int     error;

        for (;;) {
                lock = mtx->mtx_lock;
                if ((lock & MTX_EXCLUSIVE) == 0) {
                        KKASSERT((lock & MTX_MASK) != MTX_MASK);
                        nlock = lock + 1;
                        if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
                                error = 0;
                                break;
                        }
                } else {
                        nlock = lock | MTX_SHWANTED;
                        tsleep_interlock(mtx, 0);
                        if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
                                error = tsleep(mtx, flags, ident, to);
                                if (error)
                                        break;
                                ++mtx_contention_count;
                                /* retry */
                        } else {
                                tsleep_remove(curthread);
                        }
                }
                ++mtx_collision_count;
        }
        return (error);
}

int
_mtx_lock_sh(mtx_t mtx, const char *ident, int flags, int to)
{
        return (__mtx_lock_sh(mtx, ident, flags, to));
}

int
_mtx_lock_sh_quick(mtx_t mtx, const char *ident)
{
        return (__mtx_lock_sh(mtx, ident, 0, 0));
}
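
/*
 * Exclusive-lock a mutex, spinning (with a simple incremental backoff)
 * instead of blocking until the lock is acquired.  Recursion is allowed.
 */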
void
_mtx_spinlock_ex(mtx_t mtx)
{
        u_int   lock;
        u_int   nlock;
        int     bb = 1;
        int     bo;

        for (;;) {
                lock = mtx->mtx_lock;
                if (lock == 0) {
                        nlock = MTX_EXCLUSIVE | 1;
                        if (atomic_cmpset_int(&mtx->mtx_lock, 0, nlock)) {
                                mtx->mtx_owner = curthread;
                                break;
                        }
                } else if ((lock & MTX_EXCLUSIVE) &&
                           mtx->mtx_owner == curthread) {
                        KKASSERT((lock & MTX_MASK) != MTX_MASK);
                        nlock = lock + 1;
                        if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
                                break;
                } else {
                        /* MWAIT here */
                        if (bb < 1000)
                                ++bb;
                        cpu_pause();
                        for (bo = 0; bo < bb; ++bo)
                                ;
                        ++mtx_contention_count;
                }
                cpu_pause();
                ++mtx_collision_count;
        }
}
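
/*
 * Share-lock a mutex, spinning (with a simple incremental backoff)
 * instead of blocking until the lock is acquired.
 */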
void
_mtx_spinlock_sh(mtx_t mtx)
{
        u_int   lock;
        u_int   nlock;
        int     bb = 1;
        int     bo;

        for (;;) {
                lock = mtx->mtx_lock;
                if ((lock & MTX_EXCLUSIVE) == 0) {
                        KKASSERT((lock & MTX_MASK) != MTX_MASK);
                        nlock = lock + 1;
                        if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
                                break;
                } else {
                        /* MWAIT here */
                        if (bb < 1000)
                                ++bb;
                        cpu_pause();
                        for (bo = 0; bo < bb; ++bo)
                                ;
                        ++mtx_contention_count;
                }
                cpu_pause();
                ++mtx_collision_count;
        }
}
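
/*
 * Attempt to exclusively lock a mutex without blocking.  Returns 0 on
 * success or EAGAIN if the mutex is held shared or exclusively by another
 * thread.  Recursive acquisition by the current exclusive owner succeeds.
 */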
int
_mtx_lock_ex_try(mtx_t mtx)
{
        u_int   lock;
        u_int   nlock;
        int     error = 0;

        for (;;) {
                lock = mtx->mtx_lock;
                if (lock == 0) {
                        nlock = MTX_EXCLUSIVE | 1;
                        if (atomic_cmpset_int(&mtx->mtx_lock, 0, nlock)) {
                                mtx->mtx_owner = curthread;
                                break;
                        }
                } else if ((lock & MTX_EXCLUSIVE) &&
                           mtx->mtx_owner == curthread) {
                        KKASSERT((lock & MTX_MASK) != MTX_MASK);
                        nlock = lock + 1;
                        if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
                                break;
                } else {
                        error = EAGAIN;
                        break;
                }
                cpu_pause();
                ++mtx_collision_count;
        }
        return (error);
}
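
/*
 * Attempt to share-lock a mutex without blocking.  Returns 0 on success
 * or EAGAIN if the mutex is currently held exclusively.
 */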
int
_mtx_lock_sh_try(mtx_t mtx)
{
        u_int   lock;
        u_int   nlock;
        int     error = 0;

        for (;;) {
                lock = mtx->mtx_lock;
                if ((lock & MTX_EXCLUSIVE) == 0) {
                        KKASSERT((lock & MTX_MASK) != MTX_MASK);
                        nlock = lock + 1;
                        if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
                                break;
                } else {
                        error = EAGAIN;
                        break;
                }
                cpu_pause();
                ++mtx_collision_count;
        }
        return (error);
}

/*
 * If the lock is held exclusively it must be owned by the caller.  If the
 * lock is already a shared lock this operation is a NOP.  A panic will
 * occur if the lock is not held either shared or exclusive.
 *
 * The exclusive count is converted to a shared count.
 */
void
_mtx_downgrade(mtx_t mtx)
{
        u_int   lock;
        u_int   nlock;

        for (;;) {
                lock = mtx->mtx_lock;
                if ((lock & MTX_EXCLUSIVE) == 0) {
                        KKASSERT((lock & MTX_MASK) > 0);
                        break;
                }
                KKASSERT(mtx->mtx_owner == curthread);
                nlock = lock & ~(MTX_EXCLUSIVE | MTX_SHWANTED);
                if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
                        if (lock & MTX_SHWANTED) {
                                wakeup(mtx);
                                ++mtx_wakeup_count;
                        }
                        break;
                }
                cpu_pause();
                ++mtx_collision_count;
        }
}

/*
 * Upgrade a shared lock to an exclusive lock.  The upgrade will fail if
 * the shared lock has a count other than 1.  Optimize the most likely case
 * but note that a single cmpset can fail due to WANTED races.
 *
 * If the lock is held exclusively it must be owned by the caller and
 * this function will simply return without doing anything.  A panic will
 * occur if the lock is held exclusively by someone other than the caller.
 *
 * Returns 0 on success, EDEADLK on failure.
 */
int
_mtx_upgrade_try(mtx_t mtx)
{
        u_int   lock;
        u_int   nlock;
        int     error = 0;

        for (;;) {
                lock = mtx->mtx_lock;

                if ((lock & ~MTX_EXWANTED) == 1) {
                        nlock = lock | MTX_EXCLUSIVE;
                        if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
                                mtx->mtx_owner = curthread;
                                break;
                        }
                } else if (lock & MTX_EXCLUSIVE) {
                        KKASSERT(mtx->mtx_owner == curthread);
                        break;
                } else {
                        error = EDEADLK;
                        break;
                }
                cpu_pause();
                ++mtx_collision_count;
        }
        return (error);
}

/*
 * Unlock a lock.  The caller must hold the lock either shared or exclusive.
 *
 * Any release which makes the lock available when others want an exclusive
 * lock causes us to chain ownership to the next exclusive waiter instead
 * of fully releasing the lock.
 */
void
_mtx_unlock(mtx_t mtx)
{
        u_int   lock;
        u_int   nlock;

        for (;;) {
                lock = mtx->mtx_lock;
                nlock = lock & ~(MTX_SHWANTED | MTX_EXLINK);

                if (nlock == 1) {
                        /*
                         * Last release, shared lock, no exclusive waiters.
                         */
                        nlock = lock & MTX_EXLINK;
                        if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
                                break;
                } else if (nlock == (MTX_EXCLUSIVE | 1)) {
                        /*
                         * Last release, exclusive lock, no exclusive waiters.
                         * Wake up any shared waiters.
                         */
                        mtx->mtx_owner = NULL;
                        nlock = lock & MTX_EXLINK;
                        if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
                                if (lock & MTX_SHWANTED) {
                                        wakeup(mtx);
                                        ++mtx_wakeup_count;
                                }
                                break;
                        }
                } else if (nlock == (MTX_EXWANTED | 1)) {
                        /*
                         * Last release, shared lock, with exclusive
                         * waiters.
                         *
                         * Wait for EXLINK to clear, then acquire it.
                         * We could use the cmpset for this but polling
                         * is better on the cpu caches.
                         *
                         * Acquire an exclusive lock leaving the lockcount
                         * set to 1, and get EXLINK for access to mtx_link.
                         */
                        thread_t td;

                        if (lock & MTX_EXLINK) {
                                cpu_pause();
                                ++mtx_collision_count;
                                continue;
                        }
                        td = curthread;
                        /*lock &= ~MTX_EXLINK;*/
                        nlock |= MTX_EXLINK | MTX_EXCLUSIVE;
                        nlock |= (lock & MTX_SHWANTED);
                        ++td->td_critcount;
                        if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
                                mtx_chain_link(mtx);
                                --td->td_critcount;
                                break;
                        }
                        --td->td_critcount;
                } else if (nlock == (MTX_EXCLUSIVE | MTX_EXWANTED | 1)) {
                        /*
                         * Last release, exclusive lock, with exclusive
                         * waiters.
                         *
                         * Leave the exclusive lock intact and the lockcount
                         * set to 1, and get EXLINK for access to mtx_link.
                         */
                        thread_t td;

                        if (lock & MTX_EXLINK) {
                                cpu_pause();
                                ++mtx_collision_count;
                                continue;
                        }
                        td = curthread;
                        /*lock &= ~MTX_EXLINK;*/
                        nlock |= MTX_EXLINK;
                        nlock |= (lock & MTX_SHWANTED);
                        ++td->td_critcount;
                        if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
                                mtx_chain_link(mtx);
                                --td->td_critcount;
                                break;
                        }
                        --td->td_critcount;
                } else {
                        /*
                         * Not the last release (shared or exclusive)
                         */
                        nlock = lock - 1;
                        KKASSERT((nlock & MTX_MASK) != MTX_MASK);
                        if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
                                break;
                }
                cpu_pause();
                ++mtx_collision_count;
        }
}

/*
 * Chain the lock to the next waiting exclusive requester, or release it
 * if there is none.  Called with the lock held exclusively with a
 * single ref count, and also with MTX_EXLINK held.
 */
static void
mtx_chain_link(mtx_t mtx)
{
        mtx_link_t link;
        u_int   lock;
        u_int   nlock;
        u_int   clock;  /* bits we own and want to clear */

        /*
         * Chain the exclusive lock to the next link.  The caller cleared
         * SHWANTED so if there is no link we have to wake up any shared
         * waiters.
         */
        clock = MTX_EXLINK;
        if ((link = mtx->mtx_link) != NULL) {
                KKASSERT(link->state == MTX_LINK_LINKED);
                if (link->next == link) {
                        mtx->mtx_link = NULL;
                        clock |= MTX_EXWANTED;
                } else {
                        mtx->mtx_link = link->next;
                        link->next->prev = link->prev;
                        link->prev->next = link->next;
                }
                link->state = MTX_LINK_ACQUIRED;
                mtx->mtx_owner = link->owner;
        } else {
                /*
                 * Chain was empty, release the exclusive lock's last count
                 * as well as the bits shown.
                 */
                clock |= MTX_EXCLUSIVE | MTX_EXWANTED | MTX_SHWANTED | 1;
        }

        /*
         * We have to use cmpset here to deal with MTX_SHWANTED.  If
         * we just clear the bits we can miss a wakeup or, worse,
         * leave mtx_lock unlocked with MTX_SHWANTED still set.
         */
        for (;;) {
                lock = mtx->mtx_lock;
                nlock = lock & ~clock;

                if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
                        if (link) {
                                /*
                                 * Wakeup new exclusive holder.  Leave
                                 * SHWANTED intact.
                                 */
                                wakeup(link);
                        } else if (lock & MTX_SHWANTED) {
                                /*
                                 * Signal any shared waiters (and we also
                                 * clear SHWANTED).
                                 */
                                mtx->mtx_owner = NULL;
                                wakeup(mtx);
                                ++mtx_wakeup_count;
                        }
                        break;
                }
                cpu_pause();
                ++mtx_collision_count;
        }
}

/*
 * Delete a link structure after tsleep has failed.  This code is not
 * in the critical path as most exclusive waits are chained.
 */
static
void
mtx_delete_link(mtx_t mtx, mtx_link_t link)
{
        thread_t td = curthread;
        u_int   lock;
        u_int   nlock;

        /*
         * Acquire MTX_EXLINK.
         *
         * Do not use cmpxchg to wait for EXLINK to clear as this might
         * result in too much cpu cache traffic.
         */
        ++td->td_critcount;
        for (;;) {
                lock = mtx->mtx_lock;
                if (lock & MTX_EXLINK) {
                        cpu_pause();
                        ++mtx_collision_count;
                        continue;
                }
                /* lock &= ~MTX_EXLINK; */
                nlock = lock | MTX_EXLINK;
                if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
                        break;
                cpu_pause();
                ++mtx_collision_count;
        }

        /*
         * Delete the link and release EXLINK.
         */
        if (link->state == MTX_LINK_LINKED) {
                if (link->next == link) {
                        mtx->mtx_link = NULL;
                } else {
                        mtx->mtx_link = link->next;
                        link->next->prev = link->prev;
                        link->prev->next = link->next;
                }
                link->state = MTX_LINK_IDLE;
        }
        atomic_clear_int(&mtx->mtx_lock, MTX_EXLINK);
        --td->td_critcount;
}

/*
 * Abort a mutex locking operation, causing mtx_lock_ex_link() to
 * return ENOLCK.  This may be called at any time after the
 * mtx_link is initialized, including both before and after the call
 * to mtx_lock_ex_link().
 */
void
mtx_abort_ex_link(mtx_t mtx, mtx_link_t link)
{
        thread_t td = curthread;
        u_int   lock;
        u_int   nlock;

        /*
         * Acquire MTX_EXLINK
         */
        ++td->td_critcount;
        for (;;) {
                lock = mtx->mtx_lock;
                if (lock & MTX_EXLINK) {
                        cpu_pause();
                        ++mtx_collision_count;
                        continue;
                }
                /* lock &= ~MTX_EXLINK; */
                nlock = lock | MTX_EXLINK;
                if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
                        break;
                cpu_pause();
                ++mtx_collision_count;
        }

        /*
         * Do the abort
         */
        switch(link->state) {
        case MTX_LINK_IDLE:
                /*
                 * Link not started yet
                 */
                link->state = MTX_LINK_ABORTED;
                break;
        case MTX_LINK_LINKED:
                /*
                 * de-link, mark aborted, and wakeup the thread.
                 */
                if (link->next == link) {
                        mtx->mtx_link = NULL;
                } else {
                        mtx->mtx_link = link->next;
                        link->next->prev = link->prev;
                        link->prev->next = link->next;
                }
                link->state = MTX_LINK_ABORTED;
                wakeup(link);
                break;
        case MTX_LINK_ACQUIRED:
                /*
                 * Too late, the lock was acquired.  Let it complete.
                 */
                break;
        default:
                /*
                 * link already aborted, do nothing.
                 */
                break;
        }
        atomic_clear_int(&mtx->mtx_lock, MTX_EXLINK);
        --td->td_critcount;
}
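
/*
 * Sketch of the abortable exclusive-lock pattern served by
 * mtx_abort_ex_link() (illustrative only; the wrapper names
 * mtx_link_init(), mtx_lock_ex_link() and mtx_unlock() are assumed to
 * come from sys/mutex[2].h, and "mymtx"/"mylink" are hypothetical):
 *
 *      struct mtx_link mylink;
 *      int error;
 *
 *      mtx_link_init(&mylink);
 *      error = mtx_lock_ex_link(&mymtx, &mylink, "mymtx", 0, 0);
 *      if (error == 0) {
 *              ...
 *              mtx_unlock(&mymtx);
 *      }
 *
 * Another thread with access to &mymtx and &mylink may call
 * mtx_abort_ex_link(&mymtx, &mylink) at any point after mtx_link_init(),
 * causing the pending mtx_lock_ex_link() to return ENOLCK.
 */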