/*
 * Copyright (c) 2009 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Implement fast persistent locks based on atomic_cmpset_int() with
 * semantics similar to lockmgr locks but faster and taking up much less
 * space.  Taken from HAMMER's lock implementation.
 *
 * These are meant to complement our LWKT tokens.  Tokens are only held
 * while the thread is running.  Mutexes can be held across blocking
 * conditions.
 *
 * - Exclusive priority over shared to prevent SMP starvation.
 * - locks can be aborted (async callback, if any, will be made w/ENOLCK).
 * - locks can be asynchronous.
 * - synchronous fast path if no blocking occurs (async callback is not
 *   made in this case).
 *
 * Generally speaking any caller-supplied link state must be properly
 * initialized before use.
 *
 * Most of the support is in sys/mutex[2].h.  We mostly provide backoff
 * algorithms here.
 */
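
/*
 * Minimal usage sketch (illustrative only, not part of this file).  It uses
 * the entry points defined below; production code normally goes through the
 * inline wrappers in sys/mutex[2].h instead, and mtx_init() here is an
 * assumed initializer from those headers:
 *
 *	mtx_t m;
 *
 *	mtx_init(&m);
 *	if (_mtx_lock_ex_quick(&m) == 0) {
 *		// ... exclusive critical section ...
 *		_mtx_unlock(&m);
 *	}
 *	if (_mtx_lock_sh_quick(&m) == 0) {
 *		// ... shared (read-mostly) critical section ...
 *		_mtx_unlock(&m);
 *	}
 */
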
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/thread.h>

#include <machine/cpufunc.h>

#include <sys/thread2.h>
#include <sys/mutex2.h>

static int64_t mtx_contention_count;
static int64_t mtx_collision_count;
static int64_t mtx_wakeup_count;

SYSCTL_QUAD(_kern, OID_AUTO, mtx_contention_count, CTLFLAG_RW,
	    &mtx_contention_count, 0, "");
SYSCTL_QUAD(_kern, OID_AUTO, mtx_collision_count, CTLFLAG_RW,
	    &mtx_collision_count, 0, "");
SYSCTL_QUAD(_kern, OID_AUTO, mtx_wakeup_count, CTLFLAG_RW,
	    &mtx_wakeup_count, 0, "");

static int mtx_chain_link_ex(mtx_t *mtx, u_int olock);
static int mtx_chain_link_sh(mtx_t *mtx, u_int olock, int addcount);
static void mtx_delete_link(mtx_t *mtx, mtx_link_t *link);

/*
 * Exclusive-lock a mutex, block until acquired unless link is async.
 * Recursion is allowed.
 *
 * Returns 0 on success, the tsleep() return code on failure, EINPROGRESS
 * if async.  If immediately successful an async exclusive lock will return 0
 * and not issue the async callback or link the link structure.  The caller
 * must handle this case (typically this is an optimal code path).
 *
 * A tsleep() error can only be returned if PCATCH is specified in the flags.
 */
__mtx_lock_ex(mtx_t *mtx, mtx_link_t *link, int flags, int to)
        lock = mtx->mtx_lock;

        nlock = MTX_EXCLUSIVE | 1;
        if (atomic_cmpset_int(&mtx->mtx_lock, 0, nlock)) {
                mtx->mtx_owner = curthread;
                link->state = MTX_LINK_ACQUIRED;

        if ((lock & MTX_EXCLUSIVE) && mtx->mtx_owner == curthread) {
                KKASSERT((lock & MTX_MASK) != MTX_MASK);
                if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
                        link->state = MTX_LINK_ACQUIRED;

        /*
         * We need MTX_LINKSPIN to manipulate exlink or
         * shlink.
         *
         * We must set MTX_EXWANTED with MTX_LINKSPIN to indicate
         * pending exclusive requests.  It cannot be set as a separate
         * operation prior to acquiring MTX_LINKSPIN.
         *
         * To avoid unnecessary cpu cache traffic we poll
         * for collisions.  It is also possible that EXWANTED
         * state failing the above test was spurious, so all the
         * tests must be repeated if we cannot obtain LINKSPIN
         * with the prior state tests intact (i.e. don't reload
         * the (lock) variable here, for heaven's sake!).
         */
        if (lock & MTX_LINKSPIN) {
                ++mtx_collision_count;

        nlock = lock | MTX_EXWANTED | MTX_LINKSPIN;
        if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock) == 0) {

        /*
         * Check for early abort.
         */
        if (link->state == MTX_LINK_ABORTED) {
                if (mtx->mtx_exlink == NULL) {
                        atomic_clear_int(&mtx->mtx_lock,
                                         MTX_LINKSPIN | MTX_EXWANTED);
                } else {
                        atomic_clear_int(&mtx->mtx_lock,
                                         MTX_LINKSPIN);
                }
                link->state = MTX_LINK_IDLE;

        /*
         * Add our link to the exlink list and release LINKSPIN.
         */
        link->state = MTX_LINK_LINKED_EX;
        if (mtx->mtx_exlink) {
                link->next = mtx->mtx_exlink;
                link->prev = link->next->prev;
                link->next->prev = link;
                link->prev->next = link;
        } else {
                link->next = link;
                link->prev = link;
                mtx->mtx_exlink = link;
        }
        isasync = (link->callback != NULL);
        atomic_clear_int(&mtx->mtx_lock, MTX_LINKSPIN);

        /*
         * If asynchronous lock request return without
         * blocking, leave link structure linked.
         */

        error = mtx_wait_link(mtx, link, flags, to);

int
_mtx_lock_ex_link(mtx_t *mtx, mtx_link_t *link, int flags, int to)
{
        return(__mtx_lock_ex(mtx, link, flags, to));
}
int
_mtx_lock_ex(mtx_t *mtx, int flags, int to)
{
        mtx_link_t link;

        mtx_link_init(&link);
        return(__mtx_lock_ex(mtx, &link, flags, to));
}
int
_mtx_lock_ex_quick(mtx_t *mtx)
{
        mtx_link_t link;

        mtx_link_init(&link);
        return(__mtx_lock_ex(mtx, &link, 0, 0));
}

/*
 * Share-lock a mutex, block until acquired.  Recursion is allowed.
 *
 * Returns 0 on success, or the tsleep() return code on failure.
 * An error can only be returned if PCATCH is specified in the flags.
 *
 * NOTE: Shared locks get a mass-wakeup so if the tsleep fails we
 *	 do not have to chain the wakeup().
 */
__mtx_lock_sh(mtx_t *mtx, mtx_link_t *link, int flags, int to)
        lock = mtx->mtx_lock;

        if (atomic_cmpset_int(&mtx->mtx_lock, 0, nlock)) {
                link->state = MTX_LINK_ACQUIRED;

        if ((lock & (MTX_EXCLUSIVE | MTX_EXWANTED)) == 0) {
                KKASSERT((lock & MTX_MASK) != MTX_MASK);
                if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
                        link->state = MTX_LINK_ACQUIRED;

        /*
         * We need MTX_LINKSPIN to manipulate exlink or
         * shlink.
         *
         * We must set MTX_SHWANTED with MTX_LINKSPIN to indicate
         * pending shared requests.  It cannot be set as a separate
         * operation prior to acquiring MTX_LINKSPIN.
         *
         * To avoid unnecessary cpu cache traffic we poll
         * for collisions.  It is also possible that EXWANTED
         * state failing the above test was spurious, so all the
         * tests must be repeated if we cannot obtain LINKSPIN
         * with the prior state tests intact (i.e. don't reload
         * the (lock) variable here, for heaven's sake!).
         */
        if (lock & MTX_LINKSPIN) {
                ++mtx_collision_count;

        nlock = lock | MTX_SHWANTED | MTX_LINKSPIN;
        if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock) == 0) {

        /*
         * Check for early abort.
         */
        if (link->state == MTX_LINK_ABORTED) {
                if (mtx->mtx_shlink == NULL) {
                        atomic_clear_int(&mtx->mtx_lock,
                                         MTX_LINKSPIN | MTX_SHWANTED);
                } else {
                        atomic_clear_int(&mtx->mtx_lock,
                                         MTX_LINKSPIN);
                }
                link->state = MTX_LINK_IDLE;

        /*
         * Add our link to the shlink list and release LINKSPIN.
         */
        link->state = MTX_LINK_LINKED_SH;
        if (mtx->mtx_shlink) {
                link->next = mtx->mtx_shlink;
                link->prev = link->next->prev;
                link->next->prev = link;
                link->prev->next = link;
        } else {
                link->next = link;
                link->prev = link;
                mtx->mtx_shlink = link;
        }
        isasync = (link->callback != NULL);
        atomic_clear_int(&mtx->mtx_lock, MTX_LINKSPIN);

        /*
         * If asynchronous lock request return without
         * blocking, leave link structure linked.
         */

        error = mtx_wait_link(mtx, link, flags, to);

int
_mtx_lock_sh_link(mtx_t *mtx, mtx_link_t *link, int flags, int to)
{
        return(__mtx_lock_sh(mtx, link, flags, to));
}
int
_mtx_lock_sh(mtx_t *mtx, int flags, int to)
{
        mtx_link_t link;

        mtx_link_init(&link);
        return(__mtx_lock_sh(mtx, &link, flags, to));
}
int
_mtx_lock_sh_quick(mtx_t *mtx)
{
        mtx_link_t link;

        mtx_link_init(&link);
        return(__mtx_lock_sh(mtx, &link, 0, 0));
}
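
/*
 * Illustrative blocking shared lock with an interruptible timeout (not part
 * of this file).  The flags/to parameters are assumed to be tsleep() flags
 * and a timeout in ticks, based on how they are forwarded to tsleep() in
 * mtx_wait_link(); m is an already-initialized mtx_t:
 *
 *	int error;
 *
 *	error = _mtx_lock_sh(&m, PCATCH, hz);	// wait at most about 1 second
 *	if (error == 0) {
 *		// ... shared critical section ...
 *		_mtx_unlock(&m);
 *	} else {
 *		// EINTR/ERESTART from a signal, or EWOULDBLOCK on timeout
 *	}
 */
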
/*
 * Get an exclusive spinlock the hard way.
 */
_mtx_spinlock(mtx_t *mtx)
        lock = mtx->mtx_lock;
        nlock = MTX_EXCLUSIVE | 1;
        if (atomic_cmpset_int(&mtx->mtx_lock, 0, nlock)) {
                mtx->mtx_owner = curthread;
        } else if ((lock & MTX_EXCLUSIVE) &&
                   mtx->mtx_owner == curthread) {
                KKASSERT((lock & MTX_MASK) != MTX_MASK);
                if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))

        for (bo = 0; bo < bb; ++bo)

        ++mtx_contention_count;

        ++mtx_collision_count;

/*
 * Attempt to acquire a spinlock, if we fail we must undo the
 * gd->gd_spinlocks/gd->gd_curthread->td_critcount predisposition.
 *
 * Returns 0 on success, EAGAIN on failure.
 */
_mtx_spinlock_try(mtx_t *mtx)
        globaldata_t gd = mycpu;

        lock = mtx->mtx_lock;
        nlock = MTX_EXCLUSIVE | 1;
        if (atomic_cmpset_int(&mtx->mtx_lock, 0, nlock)) {
                mtx->mtx_owner = gd->gd_curthread;
        } else if ((lock & MTX_EXCLUSIVE) &&
                   mtx->mtx_owner == gd->gd_curthread) {
                KKASSERT((lock & MTX_MASK) != MTX_MASK);
                if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))

        --gd->gd_curthread->td_critcount;

        ++mtx_collision_count;
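
/*
 * Sketch of how a try-style spin wrapper might predispose the per-cpu
 * accounting before calling _mtx_spinlock_try() (illustrative only; the
 * real wrappers live in sys/mutex[2].h, and the gd_spinlocks field and the
 * wrapper name here are assumptions taken from the comment above):
 *
 *	static __inline int
 *	example_spinlock_try(mtx_t *mtx)
 *	{
 *		globaldata_t gd = mycpu;
 *
 *		++gd->gd_curthread->td_critcount;	// predispose
 *		++gd->gd_spinlocks;			// assumed accounting field
 *		if (_mtx_spinlock_try(mtx) == 0)
 *			return(0);
 *		// on EAGAIN the predisposition has been undone by the callee
 *		return(EAGAIN);
 *	}
 */
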
_mtx_spinlock_sh(mtx_t *mtx)
        lock = mtx->mtx_lock;
        if ((lock & MTX_EXCLUSIVE) == 0) {
                KKASSERT((lock & MTX_MASK) != MTX_MASK);
                if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))

        for (bo = 0; bo < bb; ++bo)

        ++mtx_contention_count;

        ++mtx_collision_count;

_mtx_lock_ex_try(mtx_t *mtx)
        lock = mtx->mtx_lock;
        nlock = MTX_EXCLUSIVE | 1;
        if (atomic_cmpset_int(&mtx->mtx_lock, 0, nlock)) {
                mtx->mtx_owner = curthread;
        } else if ((lock & MTX_EXCLUSIVE) &&
                   mtx->mtx_owner == curthread) {
                KKASSERT((lock & MTX_MASK) != MTX_MASK);
                if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {

        ++mtx_collision_count;

_mtx_lock_sh_try(mtx_t *mtx)
        lock = mtx->mtx_lock;
        if ((lock & MTX_EXCLUSIVE) == 0) {
                KKASSERT((lock & MTX_MASK) != MTX_MASK);
                if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))

        ++mtx_collision_count;
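
/*
 * Illustrative non-blocking usage of the try variants (not part of this
 * file).  The failure errno is an assumption; by analogy with
 * _mtx_spinlock_try() above it is expected to be EAGAIN when the lock
 * cannot be obtained immediately:
 *
 *	if (_mtx_lock_ex_try(&m) == 0) {
 *		// ... got the exclusive lock without blocking ...
 *		_mtx_unlock(&m);
 *	} else {
 *		// lock is busy; fall back to a blocking or async path
 *	}
 */
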
/*
 * Downgrade an exclusive lock to a shared lock.
 *
 * If the lock is held exclusively it must be owned by the caller.  If the
 * lock is already a shared lock this operation is a NOP.  A panic will
 * occur if the lock is not held either shared or exclusive.
 *
 * The exclusive count is converted to a shared count.
 */
_mtx_downgrade(mtx_t *mtx)
        lock = mtx->mtx_lock;

        /*
         * NOP if already shared.
         */
        if ((lock & MTX_EXCLUSIVE) == 0) {
                KKASSERT((lock & MTX_MASK) > 0);

        /*
         * Transfer count to shared.  Any additional pending shared
         * waiters must be woken up.
         */
        if (lock & MTX_SHWANTED) {
                if (mtx_chain_link_sh(mtx, lock, 1))

        nlock = lock & ~MTX_EXCLUSIVE;
        if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))

        ++mtx_collision_count;
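
/*
 * Illustrative downgrade pattern (not part of this file): start exclusive,
 * modify the protected data, then keep only a shared hold so other readers
 * can proceed:
 *
 *	if (_mtx_lock_ex_quick(&m) == 0) {
 *		// ... modify the protected structure exclusively ...
 *		_mtx_downgrade(&m);	// exclusive count becomes a shared count
 *		// ... read-only work, concurrent with other shared holders ...
 *		_mtx_unlock(&m);
 *	}
 */
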
/*
 * Upgrade a shared lock to an exclusive lock.  The upgrade will fail if
 * the shared lock has a count other than 1.  Optimize the most likely case
 * but note that a single cmpset can fail due to WANTED races.
 *
 * If the lock is held exclusively it must be owned by the caller and
 * this function will simply return without doing anything.  A panic will
 * occur if the lock is held exclusively by someone other than the caller.
 *
 * Returns 0 on success, EDEADLK on failure.
 */
_mtx_upgrade_try(mtx_t *mtx)
        lock = mtx->mtx_lock;

        if ((lock & ~MTX_EXWANTED) == 1) {
                nlock = lock | MTX_EXCLUSIVE;
                if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
                        mtx->mtx_owner = curthread;
        } else if (lock & MTX_EXCLUSIVE) {
                KKASSERT(mtx->mtx_owner == curthread);

        ++mtx_collision_count;
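
/*
 * Illustrative upgrade pattern (not part of this file).  Per the comment
 * above, the upgrade only succeeds when we are the sole shared holder; on
 * EDEADLK the usual fallback is to drop the shared lock and reacquire
 * exclusively:
 *
 *	if (_mtx_lock_sh_quick(&m) == 0) {
 *		if (_mtx_upgrade_try(&m) != 0) {
 *			_mtx_unlock(&m);		// still shared, back off
 *			(void)_mtx_lock_ex_quick(&m);	// reacquire exclusively
 *		}
 *		// ... exclusive critical section either way ...
 *		_mtx_unlock(&m);
 *	}
 */
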
/*
 * Unlock a lock.  The caller must hold the lock either shared or exclusive.
 *
 * On the last release we handle any pending chains.
 */
_mtx_unlock(mtx_t *mtx)
        lock = mtx->mtx_lock;

        case MTX_EXCLUSIVE | 1:
                /*
                 * Last release, exclusive lock.
                 * No exclusive or shared requests pending.
                 */
                mtx->mtx_owner = NULL;
                if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))

        case MTX_EXCLUSIVE | MTX_EXWANTED | 1:
        case MTX_EXCLUSIVE | MTX_EXWANTED | MTX_SHWANTED | 1:
                /*
                 * Last release, exclusive lock.
                 * Exclusive requests pending.
                 * Exclusive requests have priority over shared reqs.
                 */
                if (mtx_chain_link_ex(mtx, lock))

        case MTX_EXCLUSIVE | MTX_SHWANTED | 1:
                /*
                 * Last release, exclusive lock.
                 *
                 * Shared requests are pending.  Transfer our count (1)
                 * to the first shared request, wakeup all shared reqs.
                 */
                if (mtx_chain_link_sh(mtx, lock, 0))

                /*
                 * Last release, shared lock.
                 * No exclusive or shared requests pending.
                 */
                if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))

        case MTX_EXWANTED | 1:
        case MTX_EXWANTED | MTX_SHWANTED | 1:
                /*
                 * Last release, shared lock.
                 *
                 * Exclusive requests are pending.  Transfer our
                 * count (1) to the next exclusive request.
                 *
                 * Exclusive requests have priority over shared reqs.
                 */
                if (mtx_chain_link_ex(mtx, lock))

        case MTX_SHWANTED | 1:
                /*
                 * Last release, shared lock.
                 * Shared requests pending.
                 */
                if (mtx_chain_link_sh(mtx, lock, 0))

                /*
                 * We have to loop if this is the last release but
                 * someone is fiddling with LINKSPIN.
                 */
                if ((lock & MTX_MASK) == 1) {
                        KKASSERT(lock & MTX_LINKSPIN);

                /*
                 * Not the last release (shared or exclusive)
                 */
                KKASSERT((nlock & MTX_MASK) != MTX_MASK);
                if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))

        ++mtx_collision_count;
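
/*
 * Illustrative note on recursion (not part of this file): exclusive holds
 * recurse on the count field, so every lock call must be paired with an
 * unlock and only the last release hands the lock to pending waiters:
 *
 *	if (_mtx_lock_ex_quick(&m) == 0) {
 *		(void)_mtx_lock_ex_quick(&m);	// recursive acquire, count is 2
 *		_mtx_unlock(&m);		// count drops to 1, still held
 *		_mtx_unlock(&m);		// last release, chains any waiters
 *	}
 */
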
/*
 * Chain pending links.  Called on the last release of an exclusive or
 * shared lock when the appropriate WANTED bit is set.  mtx_lock old state
 * is passed in with the count left at 1, which we can inherit, and other
 * bits which we must adjust in a single atomic operation.
 *
 * Return non-zero on success, 0 if caller needs to retry.
 *
 * NOTE: It's ok if MTX_EXWANTED is in an indeterminate state while we are
 *	 acquiring LINKSPIN as all other cases will also need to acquire
 *	 LINKSPIN when handling the EXWANTED case.
 */
static int
mtx_chain_link_ex(mtx_t *mtx, u_int olock)
        thread_t td = curthread;

        olock &= ~MTX_LINKSPIN;
        nlock = olock | MTX_LINKSPIN | MTX_EXCLUSIVE;
        if (atomic_cmpset_int(&mtx->mtx_lock, olock, nlock)) {
                link = mtx->mtx_exlink;
                KKASSERT(link != NULL);
                if (link->next == link) {
                        mtx->mtx_exlink = NULL;
                        nlock = MTX_LINKSPIN | MTX_EXWANTED;	/* to clear */
                } else {
                        mtx->mtx_exlink = link->next;
                        link->next->prev = link->prev;
                        link->prev->next = link->next;
                        nlock = MTX_LINKSPIN;			/* to clear */
                }
                KKASSERT(link->state == MTX_LINK_LINKED_EX);
                mtx->mtx_owner = link->owner;

                /*
                 * WARNING! The callback can only be safely
                 *	    made with LINKSPIN still held
                 *	    and in a critical section.
                 *
                 * WARNING! The link can go away after the
                 *	    state is set, or after the
                 *	    callback returns.
                 */
                if (link->callback) {
                        link->state = MTX_LINK_CALLEDBACK;
                        link->callback(link, link->arg, 0);
                } else {
                        link->state = MTX_LINK_ACQUIRED;

                atomic_clear_int(&mtx->mtx_lock, nlock);

/*
 * Flush waiting shared locks.  The lock's prior state is passed in and must
 * be adjusted atomically only if it matches.
 *
 * If addcount is 0, the count for the first shared lock in the chain is
 * assumed to have already been accounted for.
 *
 * If addcount is 1, the count for the first shared lock in the chain has
 * not yet been accounted for.
 */
static int
mtx_chain_link_sh(mtx_t *mtx, u_int olock, int addcount)
        thread_t td = curthread;

        olock &= ~MTX_LINKSPIN;
        nlock = olock | MTX_LINKSPIN;
        nlock &= ~MTX_EXCLUSIVE;
        if (atomic_cmpset_int(&mtx->mtx_lock, olock, nlock)) {
                KKASSERT(mtx->mtx_shlink != NULL);

                link = mtx->mtx_shlink;
                atomic_add_int(&mtx->mtx_lock, addcount);
                KKASSERT(link->state == MTX_LINK_LINKED_SH);
                if (link->next == link) {
                        mtx->mtx_shlink = NULL;

                        /*
                         * WARNING! The callback can only be safely
                         *	    made with LINKSPIN still held
                         *	    and in a critical section.
                         *
                         * WARNING! The link can go away after the
                         *	    state is set, or after the
                         *	    callback returns.
                         */
                        if (link->callback) {
                                link->state = MTX_LINK_CALLEDBACK;
                                link->callback(link, link->arg, 0);
                        } else {
                                link->state = MTX_LINK_ACQUIRED;

                mtx->mtx_shlink = link->next;
                link->next->prev = link->prev;
                link->prev->next = link->next;
                link->state = MTX_LINK_ACQUIRED;
                /* link can go away */

                atomic_clear_int(&mtx->mtx_lock, MTX_LINKSPIN |
                                                 MTX_SHWANTED);

/*
 * Delete a link structure after tsleep has failed.  This code is not
 * in the critical path as most exclusive waits are chained.
 */
static void
mtx_delete_link(mtx_t *mtx, mtx_link_t *link)
        thread_t td = curthread;

        /*
         * Acquire MTX_LINKSPIN.
         *
         * Do not use cmpxchg to wait for LINKSPIN to clear as this might
         * result in too much cpu cache traffic.
         */
        lock = mtx->mtx_lock;
        if (lock & MTX_LINKSPIN) {
                ++mtx_collision_count;

        nlock = lock | MTX_LINKSPIN;
        if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))

        ++mtx_collision_count;

        /*
         * Delete the link and release LINKSPIN.
         */
        nlock = MTX_LINKSPIN;	/* to clear */

        switch(link->state) {
        case MTX_LINK_LINKED_EX:
                if (link->next == link) {
                        mtx->mtx_exlink = NULL;
                        nlock |= MTX_EXWANTED;	/* to clear */
                } else {
                        mtx->mtx_exlink = link->next;
                        link->next->prev = link->prev;
                        link->prev->next = link->next;
                }
                break;
        case MTX_LINK_LINKED_SH:
                if (link->next == link) {
                        mtx->mtx_shlink = NULL;
                        nlock |= MTX_SHWANTED;	/* to clear */
                } else {
                        mtx->mtx_shlink = link->next;
                        link->next->prev = link->prev;
                        link->prev->next = link->next;
                }
                break;
        }
        atomic_clear_int(&mtx->mtx_lock, nlock);

/*
 * Wait for async lock completion or abort.  Returns ENOLCK if an abort
 * occurred.
 */
int
mtx_wait_link(mtx_t *mtx, mtx_link_t *link, int flags, int to)
        /*
         * Sleep.  Handle false wakeups, interruptions, etc.
         * The link may also have been aborted.
         */
        while (link->state & MTX_LINK_LINKED) {
                tsleep_interlock(link, 0);
                if (link->state & MTX_LINK_LINKED) {
                        ++mtx_contention_count;
                        if (link->state & MTX_LINK_LINKED_SH)
                                mycpu->gd_cnt.v_lock_name[0] = 'S';
                        else
                                mycpu->gd_cnt.v_lock_name[0] = 'X';
                        strncpy(mycpu->gd_cnt.v_lock_name + 1, /* ... */
                                sizeof(mycpu->gd_cnt.v_lock_name) - 2);
                        ++mycpu->gd_cnt.v_lock_colls;

                        error = tsleep(link, flags | PINTERLOCKED, /* ... */);

        /*
         * We are done, make sure the link structure is unlinked.
         * It may still be on the list due to e.g. EINTR or
         * EWOULDBLOCK.
         *
         * It is possible for the tsleep to race an ABORT and cause
         * error to be 0.
         *
         * The tsleep() can be woken up for numerous reasons and error
         * might be zero in situations where we intend to return an error.
         *
         * (This is the synchronous case so state cannot be CALLEDBACK)
         */
        switch(link->state) {
        case MTX_LINK_ACQUIRED:
        case MTX_LINK_CALLEDBACK:
                error = 0;
                break;
        case MTX_LINK_ABORTED:
                error = ENOLCK;
                break;
        case MTX_LINK_LINKED_EX:
        case MTX_LINK_LINKED_SH:
                mtx_delete_link(mtx, link);
                /* fall through */
        default:
                if (error == 0)
                        error = EWOULDBLOCK;
                break;
        }

        /*
         * Clear state on status returned.
         */
        link->state = MTX_LINK_IDLE;

/*
 * Abort a mutex locking operation, causing mtx_lock_ex_link() to
 * return ENOLCK.  This may be called at any time after the mtx_link
 * is initialized or the status from a previous lock has been
 * returned.  If called prior to the next (non-try) lock attempt, the
 * next lock attempt using this link structure will abort instantly.
 *
 * Caller must still wait for the operation to complete, either from a
 * blocking call that is still in progress or by calling mtx_wait_link().
 *
 * If an asynchronous lock request is possibly in-progress, the caller
 * should call mtx_wait_link() synchronously.  Note that the asynchronous
 * lock callback will NOT be called if a successful abort occurred. XXX
 */
mtx_abort_link(mtx_t *mtx, mtx_link_t *link)
        thread_t td = curthread;

        /*
         * Acquire MTX_LINKSPIN
         */
        lock = mtx->mtx_lock;
        if (lock & MTX_LINKSPIN) {
                ++mtx_collision_count;

        nlock = lock | MTX_LINKSPIN;
        if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))

        ++mtx_collision_count;

        /*
         * WARNING! Link structure can disappear once link->state is set.
         */
        nlock = MTX_LINKSPIN;	/* to clear */

        switch(link->state) {
        case MTX_LINK_IDLE:
                /*
                 * Link not started yet
                 */
                link->state = MTX_LINK_ABORTED;

        case MTX_LINK_LINKED_EX:
                /*
                 * de-link, mark aborted, and potentially wakeup the thread
                 * or issue the callback.
                 */
                if (link->next == link) {
                        if (mtx->mtx_exlink == link) {
                                mtx->mtx_exlink = NULL;
                                nlock |= MTX_EXWANTED;	/* to clear */
                        }
                } else {
                        if (mtx->mtx_exlink == link)
                                mtx->mtx_exlink = link->next;
                        link->next->prev = link->prev;
                        link->prev->next = link->next;
                }

                /*
                 * When aborting the async callback is still made.  We must
                 * not set the link status to ABORTED in the callback case
                 * since there is nothing else to clear its status if the
                 * callback is made.
                 */
                if (link->callback) {
                        link->state = MTX_LINK_CALLEDBACK;
                        link->callback(link, link->arg, ENOLCK);
                } else {
                        link->state = MTX_LINK_ABORTED;

        case MTX_LINK_LINKED_SH:
                /*
                 * de-link, mark aborted, and potentially wakeup the thread
                 * or issue the callback.
                 */
                if (link->next == link) {
                        if (mtx->mtx_shlink == link) {
                                mtx->mtx_shlink = NULL;
                                nlock |= MTX_SHWANTED;	/* to clear */
                        }
                } else {
                        if (mtx->mtx_shlink == link)
                                mtx->mtx_shlink = link->next;
                        link->next->prev = link->prev;
                        link->prev->next = link->next;
                }

                /*
                 * When aborting the async callback is still made.  We must
                 * not set the link status to ABORTED in the callback case
                 * since there is nothing else to clear its status if the
                 * callback is made.
                 */
                if (link->callback) {
                        link->state = MTX_LINK_CALLEDBACK;
                        link->callback(link, link->arg, ENOLCK);
                } else {
                        link->state = MTX_LINK_ABORTED;

        case MTX_LINK_ACQUIRED:
        case MTX_LINK_CALLEDBACK:
                /*
                 * Too late, the lock was acquired.  Let it complete.
                 */

                /*
                 * link already aborted, do nothing.
                 */
        }
        atomic_clear_int(&mtx->mtx_lock, nlock);
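
/*
 * Illustrative abort of a pending request (not part of this file).  Per the
 * comment above mtx_abort_link(), aborting a link before the next non-try
 * lock attempt makes that attempt fail with ENOLCK instead of blocking; m
 * is an already-initialized mtx_t:
 *
 *	mtx_link_t link;
 *	int error;
 *
 *	mtx_link_init(&link);
 *	mtx_abort_link(&m, &link);
 *	error = _mtx_lock_ex_link(&m, &link, 0, 0);
 *	// if the lock could not be taken on the fast path, the early-abort
 *	// check in __mtx_lock_ex() sees MTX_LINK_ABORTED and error is ENOLCK
 */
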