/*
 * Copyright (c) 2009 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Implement fast persistent locks based on atomic_cmpset_int() with
 * semantics similar to lockmgr locks but faster and taking up much less
 * space.  Taken from HAMMER's lock implementation.
 *
 * These are meant to complement our LWKT tokens.  Tokens are only held
 * while the thread is running.  Mutexes can be held across blocking
 * conditions.
 *
 * - Exclusive priority over shared to prevent SMP starvation.
 * - locks can be aborted (async callback, if any, will be made w/ENOLCK).
 * - locks can be asynchronous.
 * - synchronous fast path if no blocking occurs (async callback is not
 *   made in this case).
 *
 * Generally speaking any caller-supplied link state must be properly
 * initialized before use.
 *
 * Most of the support is in sys/mutex[2].h.  We mostly provide backoff
 * code here.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/thread.h>

#include <machine/cpufunc.h>

#include <sys/thread2.h>
#include <sys/mutex2.h>
static int mtx_chain_link_ex(mtx_t *mtx, u_int olock);
static int mtx_chain_link_sh(mtx_t *mtx, u_int olock, int addcount);
static void mtx_delete_link(mtx_t *mtx, mtx_link_t *link);
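
/*
 * Illustrative sketch, not part of the original implementation: the most
 * common synchronous use of the exclusive-lock API implemented below.  It
 * assumes the mtx_t was already initialized elsewhere (e.g. with the
 * mtx_init() macro from sys/mutex2.h); the example function name is
 * hypothetical.
 */
#if 0
static void
example_mtx_exclusive(mtx_t *mtx)
{
	/* Blocks until acquired; recursion by the owning thread is allowed. */
	_mtx_lock_ex_quick(mtx);

	/* ... critical section protected by the exclusive lock ... */

	/* The last release chains the lock to any pending waiters. */
	_mtx_unlock(mtx);
}
#endif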
/*
 * Exclusive-lock a mutex, block until acquired unless link is async.
 * Recursion is allowed.
 *
 * Returns 0 on success, the tsleep() return code on failure, EINPROGRESS
 * if async.  If immediately successful an async exclusive lock will return 0
 * and not issue the async callback or link the link structure.  The caller
 * must handle this case (typically this is an optimal code path).
 *
 * A tsleep() error can only be returned if PCATCH is specified in the flags.
 */
static int
__mtx_lock_ex(mtx_t *mtx, mtx_link_t *link, int flags, int to)
{
	u_int	lock;
	u_int	nlock;
	int	error;
	int	isasync;

	for (;;) {
		lock = mtx->mtx_lock;

		if (lock == 0) {
			nlock = MTX_EXCLUSIVE | 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, 0, nlock)) {
				mtx->mtx_owner = curthread;
				link->state = MTX_LINK_ACQUIRED;
				error = 0;
				break;
			}
			continue;
		}
		if ((lock & MTX_EXCLUSIVE) && mtx->mtx_owner == curthread) {
			KKASSERT((lock & MTX_MASK) != MTX_MASK);
			nlock = lock + 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
				link->state = MTX_LINK_ACQUIRED;
				error = 0;
				break;
			}
			continue;
		}

		/*
		 * We need MTX_LINKSPIN to manipulate exlink or
		 * shlink structures.
		 *
		 * We must set MTX_EXWANTED with MTX_LINKSPIN to indicate
		 * pending exclusive requests.  It cannot be set as a separate
		 * operation prior to acquiring MTX_LINKSPIN.
		 *
		 * To avoid unnecessary cpu cache traffic we poll
		 * for collisions.  It is also possible that EXWANTED
		 * state failing the above test was spurious, so all the
		 * tests must be repeated if we cannot obtain LINKSPIN
		 * with the prior state tests intact (i.e. don't reload
		 * the (lock) variable here, for heaven's sake!).
		 */
		if (lock & MTX_LINKSPIN) {
			cpu_pause();
			continue;
		}
		nlock = lock | MTX_EXWANTED | MTX_LINKSPIN;
		if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock) == 0) {
			cpu_pause();
			continue;
		}

		/*
		 * Check for early abort.
		 */
		if (link->state == MTX_LINK_ABORTED) {
			if (mtx->mtx_exlink == NULL) {
				atomic_clear_int(&mtx->mtx_lock,
						 MTX_LINKSPIN | MTX_EXWANTED);
			} else {
				atomic_clear_int(&mtx->mtx_lock,
						 MTX_LINKSPIN);
			}
			link->state = MTX_LINK_IDLE;
			error = ENOLCK;
			break;
		}

		/*
		 * Add our link to the exlink list and release LINKSPIN.
		 */
		link->owner = curthread;
		link->state = MTX_LINK_LINKED_EX;
		if (mtx->mtx_exlink) {
			link->next = mtx->mtx_exlink;
			link->prev = link->next->prev;
			link->next->prev = link;
			link->prev->next = link;
		} else {
			link->next = link;
			link->prev = link;
			mtx->mtx_exlink = link;
		}
		isasync = (link->callback != NULL);
		atomic_clear_int(&mtx->mtx_lock, MTX_LINKSPIN);

		/*
		 * If asynchronous lock request return without
		 * blocking, leave link structure linked.
		 */
		if (isasync) {
			error = EINPROGRESS;
			break;
		}
		error = mtx_wait_link(mtx, link, flags, to);
		break;
	}
	return (error);
}
int
_mtx_lock_ex_link(mtx_t *mtx, mtx_link_t *link, int flags, int to)
{
	return(__mtx_lock_ex(mtx, link, flags, to));
}
int
_mtx_lock_ex(mtx_t *mtx, int flags, int to)
{
	mtx_link_t link;

	mtx_link_init(&link);
	return(__mtx_lock_ex(mtx, &link, flags, to));
}
int
_mtx_lock_ex_quick(mtx_t *mtx)
{
	mtx_link_t link;

	mtx_link_init(&link);
	return(__mtx_lock_ex(mtx, &link, 0, 0));
}
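
/*
 * Illustrative sketch, not part of the original implementation: using the
 * flags/timeout arguments of _mtx_lock_ex().  PCATCH and the hz-based tick
 * count are the usual tsleep() parameters; the helper name is hypothetical.
 */
#if 0
static int
example_mtx_lock_interruptible(mtx_t *mtx)
{
	int error;

	/*
	 * PCATCH lets a signal interrupt the sleep and the timeout is in
	 * ticks (roughly one second here).  A non-zero return is the
	 * tsleep() error code.
	 */
	error = _mtx_lock_ex(mtx, PCATCH, hz);
	if (error)
		return (error);

	/* ... exclusive critical section ... */

	_mtx_unlock(mtx);
	return (0);
}
#endif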
/*
 * Share-lock a mutex, block until acquired.  Recursion is allowed.
 *
 * Returns 0 on success, or the tsleep() return code on failure.
 * An error can only be returned if PCATCH is specified in the flags.
 *
 * NOTE: Shared locks get a mass-wakeup so if the tsleep fails we
 *	 do not have to chain the wakeup().
 */
static int
__mtx_lock_sh(mtx_t *mtx, mtx_link_t *link, int flags, int to)
{
	u_int	lock;
	u_int	nlock;
	int	error;
	int	isasync;

	for (;;) {
		lock = mtx->mtx_lock;

		if (lock == 0) {
			nlock = 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, 0, nlock)) {
				link->state = MTX_LINK_ACQUIRED;
				error = 0;
				break;
			}
			continue;
		}
		if ((lock & (MTX_EXCLUSIVE | MTX_EXWANTED)) == 0) {
			KKASSERT((lock & MTX_MASK) != MTX_MASK);
			nlock = lock + 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
				link->state = MTX_LINK_ACQUIRED;
				error = 0;
				break;
			}
			continue;
		}

		/*
		 * We need MTX_LINKSPIN to manipulate exlink or
		 * shlink structures.
		 *
		 * We must set MTX_SHWANTED with MTX_LINKSPIN to indicate
		 * pending shared requests.  It cannot be set as a separate
		 * operation prior to acquiring MTX_LINKSPIN.
		 *
		 * To avoid unnecessary cpu cache traffic we poll
		 * for collisions.  It is also possible that EXWANTED
		 * state failing the above test was spurious, so all the
		 * tests must be repeated if we cannot obtain LINKSPIN
		 * with the prior state tests intact (i.e. don't reload
		 * the (lock) variable here, for heaven's sake!).
		 */
		if (lock & MTX_LINKSPIN) {
			cpu_pause();
			continue;
		}
		nlock = lock | MTX_SHWANTED | MTX_LINKSPIN;
		if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock) == 0) {
			cpu_pause();
			continue;
		}

		/*
		 * Check for early abort.
		 */
		if (link->state == MTX_LINK_ABORTED) {
			if (mtx->mtx_exlink == NULL) {
				atomic_clear_int(&mtx->mtx_lock,
						 MTX_LINKSPIN | MTX_SHWANTED);
			} else {
				atomic_clear_int(&mtx->mtx_lock,
						 MTX_LINKSPIN);
			}
			link->state = MTX_LINK_IDLE;
			error = ENOLCK;
			break;
		}

		/*
		 * Add our link to the shlink list and release LINKSPIN.
		 */
		link->state = MTX_LINK_LINKED_SH;
		if (mtx->mtx_shlink) {
			link->next = mtx->mtx_shlink;
			link->prev = link->next->prev;
			link->next->prev = link;
			link->prev->next = link;
		} else {
			link->next = link;
			link->prev = link;
			mtx->mtx_shlink = link;
		}
		isasync = (link->callback != NULL);
		atomic_clear_int(&mtx->mtx_lock, MTX_LINKSPIN);

		/*
		 * If asynchronous lock request return without
		 * blocking, leave link structure linked.
		 */
		if (isasync) {
			error = EINPROGRESS;
			break;
		}
		error = mtx_wait_link(mtx, link, flags, to);
		break;
	}
	return (error);
}
int
_mtx_lock_sh_link(mtx_t *mtx, mtx_link_t *link, int flags, int to)
{
	return(__mtx_lock_sh(mtx, link, flags, to));
}
int
_mtx_lock_sh(mtx_t *mtx, int flags, int to)
{
	mtx_link_t link;

	mtx_link_init(&link);
	return(__mtx_lock_sh(mtx, &link, flags, to));
}
int
_mtx_lock_sh_quick(mtx_t *mtx)
{
	mtx_link_t link;

	mtx_link_init(&link);
	return(__mtx_lock_sh(mtx, &link, 0, 0));
}
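
/*
 * Illustrative sketch, not part of the original implementation: shared
 * (reader) locking with the quick-path wrapper above.  The example function
 * name is hypothetical and the mtx_t is assumed to be initialized elsewhere.
 */
#if 0
static void
example_mtx_shared(mtx_t *mtx)
{
	/* Multiple threads may hold the shared lock concurrently. */
	_mtx_lock_sh_quick(mtx);

	/* ... read-only access to the protected data ... */

	_mtx_unlock(mtx);
}
#endif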
/*
 * Get an exclusive spinlock the hard way.
 */
void
_mtx_spinlock(mtx_t *mtx)
{
	u_int	lock;
	u_int	nlock;
	int	bb = 1;
	int	bo;

	for (;;) {
		lock = mtx->mtx_lock;
		if (lock == 0) {
			nlock = MTX_EXCLUSIVE | 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, 0, nlock)) {
				mtx->mtx_owner = curthread;
				break;
			}
		} else if ((lock & MTX_EXCLUSIVE) &&
			   mtx->mtx_owner == curthread) {
			KKASSERT((lock & MTX_MASK) != MTX_MASK);
			nlock = lock + 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
				break;
		} else {
			/* simple spin/backoff delay before retrying */
			++bb;
			cpu_pause();
			for (bo = 0; bo < bb; ++bo)
				;
		}
		cpu_pause();
	}
}
/*
 * Attempt to acquire a spinlock, if we fail we must undo the
 * gd->gd_spinlocks/gd->gd_curthread->td_critcount predisposition.
 *
 * Returns 0 on success, EAGAIN on failure.
 */
int
_mtx_spinlock_try(mtx_t *mtx)
{
	globaldata_t gd = mycpu;
	u_int	lock;
	u_int	nlock;

	for (;;) {
		lock = mtx->mtx_lock;
		if (lock == 0) {
			nlock = MTX_EXCLUSIVE | 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, 0, nlock)) {
				mtx->mtx_owner = gd->gd_curthread;
				break;
			}
		} else if ((lock & MTX_EXCLUSIVE) &&
			   mtx->mtx_owner == gd->gd_curthread) {
			KKASSERT((lock & MTX_MASK) != MTX_MASK);
			nlock = lock + 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
				break;
		} else {
			/* failure, undo the spinlock/critcount predisposition */
			--gd->gd_spinlocks;
			--gd->gd_curthread->td_critcount;
			return (EAGAIN);
		}
		cpu_pause();
	}
	return (0);
}
void
_mtx_spinlock_sh(mtx_t *mtx)
{
	u_int	lock;
	u_int	nlock;
	int	bb = 1;
	int	bo;

	for (;;) {
		lock = mtx->mtx_lock;
		if ((lock & MTX_EXCLUSIVE) == 0) {
			KKASSERT((lock & MTX_MASK) != MTX_MASK);
			nlock = lock + 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
				break;
		} else {
			/* simple spin/backoff delay before retrying */
			++bb;
			cpu_pause();
			for (bo = 0; bo < bb; ++bo)
				;
		}
		cpu_pause();
	}
}
int
_mtx_lock_ex_try(mtx_t *mtx)
{
	u_int	lock;
	u_int	nlock;
	int	error;

	for (;;) {
		lock = mtx->mtx_lock;
		if (lock == 0) {
			nlock = MTX_EXCLUSIVE | 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, 0, nlock)) {
				mtx->mtx_owner = curthread;
				error = 0;
				break;
			}
		} else if ((lock & MTX_EXCLUSIVE) &&
			   mtx->mtx_owner == curthread) {
			KKASSERT((lock & MTX_MASK) != MTX_MASK);
			nlock = lock + 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
				error = 0;
				break;
			}
		} else {
			error = EAGAIN;
			break;
		}
		cpu_pause();
	}
	return (error);
}
int
_mtx_lock_sh_try(mtx_t *mtx)
{
	u_int	lock;
	u_int	nlock;
	int	error;

	for (;;) {
		lock = mtx->mtx_lock;
		if ((lock & MTX_EXCLUSIVE) == 0) {
			KKASSERT((lock & MTX_MASK) != MTX_MASK);
			nlock = lock + 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
				error = 0;
				break;
			}
		} else {
			error = EAGAIN;
			break;
		}
		cpu_pause();
	}
	return (error);
}
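
/*
 * Illustrative sketch, not part of the original implementation: the
 * non-blocking try pattern.  The try variants return 0 on success and a
 * non-zero error when the lock cannot be acquired immediately; the example
 * function name is hypothetical.
 */
#if 0
static int
example_mtx_trylock(mtx_t *mtx)
{
	int error;

	error = _mtx_lock_ex_try(mtx);
	if (error)
		return (error);	/* contended, caller can fall back */

	/* ... exclusive critical section ... */

	_mtx_unlock(mtx);
	return (0);
}
#endif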
/*
 * If the lock is held exclusively it must be owned by the caller.  If the
 * lock is already a shared lock this operation is a NOP.  A panic will
 * occur if the lock is not held either shared or exclusive.
 *
 * The exclusive count is converted to a shared count.
 */
void
_mtx_downgrade(mtx_t *mtx)
{
	u_int	lock;
	u_int	nlock;

	for (;;) {
		lock = mtx->mtx_lock;

		/*
		 * NOP if already shared.
		 */
		if ((lock & MTX_EXCLUSIVE) == 0) {
			KKASSERT((lock & MTX_MASK) > 0);
			break;
		}

		/*
		 * Transfer count to shared.  Any additional pending shared
		 * waiters must be woken up.
		 */
		if (lock & MTX_SHWANTED) {
			if (mtx_chain_link_sh(mtx, lock, 1))
				break;
		} else {
			nlock = lock & ~MTX_EXCLUSIVE;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
				break;
		}
		cpu_pause();
	}
}
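
/*
 * Illustrative sketch, not part of the original implementation: perform an
 * exclusive update and then continue with read-only access by downgrading
 * in place.  The example function name is hypothetical.
 */
#if 0
static void
example_mtx_downgrade(mtx_t *mtx)
{
	_mtx_lock_ex_quick(mtx);
	/* ... modify the protected structure exclusively ... */

	/*
	 * Convert the exclusive hold into a shared hold without ever
	 * dropping the lock; pending shared waiters are woken up.
	 */
	_mtx_downgrade(mtx);
	/* ... continue with read-only access ... */

	_mtx_unlock(mtx);
}
#endif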
/*
 * Upgrade a shared lock to an exclusive lock.  The upgrade will fail if
 * the shared lock has a count other than 1.  Optimize the most likely case
 * but note that a single cmpset can fail due to WANTED races.
 *
 * If the lock is held exclusively it must be owned by the caller and
 * this function will simply return without doing anything.  A panic will
 * occur if the lock is held exclusively by someone other than the caller.
 *
 * Returns 0 on success, EDEADLK on failure.
 */
int
_mtx_upgrade_try(mtx_t *mtx)
{
	u_int	lock;
	u_int	nlock;
	int	error = 0;

	for (;;) {
		lock = mtx->mtx_lock;

		if ((lock & ~MTX_EXWANTED) == 1) {
			nlock = lock | MTX_EXCLUSIVE;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
				mtx->mtx_owner = curthread;
				break;
			}
		} else if (lock & MTX_EXCLUSIVE) {
			KKASSERT(mtx->mtx_owner == curthread);
			break;
		} else {
			error = EDEADLK;
			break;
		}
		cpu_pause();
	}
	return (error);
}
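
/*
 * Illustrative sketch, not part of the original implementation: optimistic
 * upgrade from shared to exclusive with the usual fallback when other
 * shared holders exist.  The example function name is hypothetical.
 */
#if 0
static void
example_mtx_upgrade(mtx_t *mtx)
{
	_mtx_lock_sh_quick(mtx);
	/* ... read-only examination of the protected data ... */

	if (_mtx_upgrade_try(mtx) == 0) {
		/* Upgrade succeeded; we now hold the lock exclusively. */
		/* ... modify the protected structure ... */
	} else {
		/*
		 * EDEADLK: other shared holders exist.  Drop the lock,
		 * reacquire exclusively and revalidate any cached state.
		 */
		_mtx_unlock(mtx);
		_mtx_lock_ex_quick(mtx);
		/* ... revalidate, then modify ... */
	}
	_mtx_unlock(mtx);
}
#endif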
/*
 * Unlock a lock.  The caller must hold the lock either shared or exclusive.
 *
 * On the last release we handle any pending chains.
 */
void
_mtx_unlock(mtx_t *mtx)
{
	u_int	lock;
	u_int	nlock;

	for (;;) {
		lock = mtx->mtx_lock;

		switch(lock) {
		case MTX_EXCLUSIVE | 1:
			/*
			 * Last release, exclusive lock.
			 * No exclusive or shared requests pending.
			 */
			mtx->mtx_owner = NULL;
			nlock = 0;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
				return;
			break;
		case MTX_EXCLUSIVE | MTX_EXWANTED | 1:
		case MTX_EXCLUSIVE | MTX_EXWANTED | MTX_SHWANTED | 1:
			/*
			 * Last release, exclusive lock.
			 * Exclusive requests pending.
			 * Exclusive requests have priority over shared reqs.
			 */
			if (mtx_chain_link_ex(mtx, lock))
				return;
			break;
		case MTX_EXCLUSIVE | MTX_SHWANTED | 1:
			/*
			 * Last release, exclusive lock.
			 *
			 * Shared requests are pending.  Transfer our count (1)
			 * to the first shared request, wakeup all shared reqs.
			 */
			if (mtx_chain_link_sh(mtx, lock, 0))
				return;
			break;
		case 1:
			/*
			 * Last release, shared lock.
			 * No exclusive or shared requests pending.
			 */
			nlock = 0;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
				return;
			break;
		case MTX_EXWANTED | 1:
		case MTX_EXWANTED | MTX_SHWANTED | 1:
			/*
			 * Last release, shared lock.
			 *
			 * Exclusive requests are pending.  Transfer our
			 * count (1) to the next exclusive request.
			 *
			 * Exclusive requests have priority over shared reqs.
			 */
			if (mtx_chain_link_ex(mtx, lock))
				return;
			break;
		case MTX_SHWANTED | 1:
			/*
			 * Last release, shared lock.
			 * Shared requests pending.
			 */
			if (mtx_chain_link_sh(mtx, lock, 0))
				return;
			break;
		default:
			/*
			 * We have to loop if this is the last release but
			 * someone is fiddling with LINKSPIN.
			 */
			if ((lock & MTX_MASK) == 1) {
				KKASSERT(lock & MTX_LINKSPIN);
				break;
			}

			/*
			 * Not the last release (shared or exclusive)
			 */
			nlock = lock - 1;
			KKASSERT((nlock & MTX_MASK) != MTX_MASK);
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
				return;
			break;
		}
		cpu_pause();
	}
}
/*
 * Chain pending links.  Called on the last release of an exclusive or
 * shared lock when the appropriate WANTED bit is set.  mtx_lock old state
 * is passed in with the count left at 1, which we can inherit, and other
 * bits which we must adjust in a single atomic operation.
 *
 * Return non-zero on success, 0 if caller needs to retry.
 *
 * NOTE: It's ok if MTX_EXWANTED is in an indeterminate state while we are
 *	 acquiring LINKSPIN as all other cases will also need to acquire
 *	 LINKSPIN when handling the EXWANTED case.
 */
static int
mtx_chain_link_ex(mtx_t *mtx, u_int olock)
{
	thread_t td = curthread;
	mtx_link_t *link;
	u_int	nlock;

	olock &= ~MTX_LINKSPIN;
	nlock = olock | MTX_LINKSPIN | MTX_EXCLUSIVE;
	if (atomic_cmpset_int(&mtx->mtx_lock, olock, nlock)) {
		link = mtx->mtx_exlink;
		KKASSERT(link != NULL);
		if (link->next == link) {
			mtx->mtx_exlink = NULL;
			nlock = MTX_LINKSPIN | MTX_EXWANTED;	/* to clear */
		} else {
			mtx->mtx_exlink = link->next;
			link->next->prev = link->prev;
			link->prev->next = link->next;
			nlock = MTX_LINKSPIN;			/* to clear */
		}
		KKASSERT(link->state == MTX_LINK_LINKED_EX);
		mtx->mtx_owner = link->owner;

		/*
		 * WARNING! The callback can only be safely
		 *	    made with LINKSPIN still held
		 *	    and in a critical section.
		 *
		 * WARNING! The link can go away after the
		 *	    state is set, or after the
		 *	    callback is made.
		 */
		if (link->callback) {
			link->state = MTX_LINK_CALLEDBACK;
			link->callback(link, link->arg, 0);
		} else {
			link->state = MTX_LINK_ACQUIRED;
			wakeup(link);
		}
		atomic_clear_int(&mtx->mtx_lock, nlock);
		return (1);
	}
	return (0);
}
/*
 * Flush waiting shared locks.  The lock's prior state is passed in and must
 * be adjusted atomically only if it matches.
 *
 * If addcount is 0, the count for the first shared lock in the chain is
 * assumed to have already been accounted for.
 *
 * If addcount is 1, the count for the first shared lock in the chain has
 * not yet been accounted for.
 */
static int
mtx_chain_link_sh(mtx_t *mtx, u_int olock, int addcount)
{
	thread_t td = curthread;
	mtx_link_t *link;
	u_int	nlock;

	olock &= ~MTX_LINKSPIN;
	nlock = olock | MTX_LINKSPIN;
	nlock &= ~MTX_EXCLUSIVE;
	if (atomic_cmpset_int(&mtx->mtx_lock, olock, nlock)) {
		KKASSERT(mtx->mtx_shlink != NULL);

		link = mtx->mtx_shlink;
		atomic_add_int(&mtx->mtx_lock, addcount);
		KKASSERT(link->state == MTX_LINK_LINKED_SH);
		if (link->next == link) {
			mtx->mtx_shlink = NULL;

			/*
			 * WARNING! The callback can only be safely
			 *	    made with LINKSPIN still held
			 *	    and in a critical section.
			 *
			 * WARNING! The link can go away after the
			 *	    state is set, or after the
			 *	    callback is made.
			 */
			if (link->callback) {
				link->state = MTX_LINK_CALLEDBACK;
				link->callback(link, link->arg, 0);
			} else {
				link->state = MTX_LINK_ACQUIRED;
				wakeup(link);
			}
		} else {
			mtx->mtx_shlink = link->next;
			link->next->prev = link->prev;
			link->prev->next = link->next;
			link->state = MTX_LINK_ACQUIRED;
			/* link can go away */
		}
		atomic_clear_int(&mtx->mtx_lock, MTX_LINKSPIN |
						 MTX_SHWANTED);
		return (1);
	}
	return (0);
}
/*
 * Delete a link structure after tsleep has failed.  This code is not
 * in the critical path as most exclusive waits are chained.
 */
static void
mtx_delete_link(mtx_t *mtx, mtx_link_t *link)
{
	thread_t td = curthread;
	u_int	lock;
	u_int	nlock;

	/*
	 * Acquire MTX_LINKSPIN.
	 *
	 * Do not use cmpxchg to wait for LINKSPIN to clear as this might
	 * result in too much cpu cache traffic.
	 */
	for (;;) {
		lock = mtx->mtx_lock;
		if (lock & MTX_LINKSPIN) {
			cpu_pause();
			continue;
		}
		nlock = lock | MTX_LINKSPIN;
		if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
			break;
		cpu_pause();
	}

	/*
	 * Delete the link and release LINKSPIN.
	 */
	nlock = MTX_LINKSPIN;	/* to clear */

	switch(link->state) {
	case MTX_LINK_LINKED_EX:
		if (link->next == link) {
			mtx->mtx_exlink = NULL;
			nlock |= MTX_EXWANTED;	/* to clear */
		} else {
			mtx->mtx_exlink = link->next;
			link->next->prev = link->prev;
			link->prev->next = link->next;
		}
		break;
	case MTX_LINK_LINKED_SH:
		if (link->next == link) {
			mtx->mtx_shlink = NULL;
			nlock |= MTX_SHWANTED;	/* to clear */
		} else {
			mtx->mtx_shlink = link->next;
			link->next->prev = link->prev;
			link->prev->next = link->next;
		}
		break;
	default:
		break;
	}
	atomic_clear_int(&mtx->mtx_lock, nlock);
}
/*
 * Wait for async lock completion or abort.  Returns ENOLCK if an abort
 * occurred.
 */
int
mtx_wait_link(mtx_t *mtx, mtx_link_t *link, int flags, int to)
{
	int error;

	/*
	 * Sleep.  Handle false wakeups, interruptions, etc.
	 * The link may also have been aborted.
	 */
	error = 0;
	while (link->state & MTX_LINK_LINKED) {
		tsleep_interlock(link, 0);
		if (link->state & MTX_LINK_LINKED) {
			if (link->state & MTX_LINK_LINKED_SH)
				mycpu->gd_cnt.v_lock_name[0] = 'S';
			else
				mycpu->gd_cnt.v_lock_name[0] = 'X';
			strncpy(mycpu->gd_cnt.v_lock_name + 1,
				mtx->mtx_ident,
				sizeof(mycpu->gd_cnt.v_lock_name) - 2);
			++mycpu->gd_cnt.v_lock_colls;

			error = tsleep(link, flags | PINTERLOCKED,
				       mtx->mtx_ident, to);
			if (error)
				break;
		}
	}

	/*
	 * We are done, make sure the link structure is unlinked.
	 * It may still be on the list due to e.g. EINTR or
	 * EWOULDBLOCK.
	 *
	 * It is possible for the tsleep to race an ABORT and cause
	 * error to be 0.
	 *
	 * The tsleep() can be woken up for numerous reasons and error
	 * might be zero in situations where we intend to return an error.
	 *
	 * (This is the synchronous case so state cannot be CALLEDBACK)
	 */
	switch(link->state) {
	case MTX_LINK_ACQUIRED:
	case MTX_LINK_CALLEDBACK:
		error = 0;
		break;
	case MTX_LINK_ABORTED:
		error = ENOLCK;
		break;
	case MTX_LINK_LINKED_EX:
	case MTX_LINK_LINKED_SH:
		mtx_delete_link(mtx, link);
		/* fall through */
	default:
		if (error == 0)
			error = EWOULDBLOCK;
		break;
	}

	/*
	 * Clear state on status returned.
	 */
	link->state = MTX_LINK_IDLE;

	return (error);
}
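
/*
 * Illustrative sketch, not part of the original implementation: an
 * asynchronous exclusive lock request using a caller-supplied link and
 * callback, synchronized later with mtx_wait_link().  The example names are
 * hypothetical and the callback/arg fields are assumed to be set directly;
 * a real caller would use whatever initializer sys/mutex2.h provides.
 */
#if 0
static void example_lock_callback(mtx_link_t *link, void *arg, int error);

static int
example_mtx_async(mtx_t *mtx)
{
	mtx_link_t link;
	int error;

	mtx_link_init(&link);
	link.callback = example_lock_callback;
	link.arg = NULL;

	/*
	 * With a callback installed the request is asynchronous: 0 means
	 * the lock was acquired immediately (no callback is issued),
	 * EINPROGRESS means the request was queued on the link.
	 */
	error = _mtx_lock_ex_link(mtx, &link, 0, 0);
	if (error == EINPROGRESS) {
		/* ... do other work, then block until the request resolves ... */
		error = mtx_wait_link(mtx, &link, 0, 0);
	}
	return (error);
}
#endif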
/*
 * Abort a mutex locking operation, causing mtx_lock_ex_link() to
 * return ENOLCK.  This may be called at any time after the mtx_link
 * is initialized or the status from a previous lock has been
 * returned.  If called prior to the next (non-try) lock attempt, the
 * next lock attempt using this link structure will abort instantly.
 *
 * Caller must still wait for the operation to complete, either from a
 * blocking call that is still in progress or by calling mtx_wait_link().
 *
 * If an asynchronous lock request is possibly in-progress, the caller
 * should call mtx_wait_link() synchronously.  Note that the asynchronous
 * lock callback will NOT be called if a successful abort occurred. XXX
 */
void
mtx_abort_link(mtx_t *mtx, mtx_link_t *link)
{
	thread_t td = curthread;
	u_int	lock;
	u_int	nlock;

	/*
	 * Acquire MTX_LINKSPIN
	 */
	for (;;) {
		lock = mtx->mtx_lock;
		if (lock & MTX_LINKSPIN) {
			cpu_pause();
			continue;
		}
		nlock = lock | MTX_LINKSPIN;
		if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
			break;
		cpu_pause();
	}

	/*
	 * WARNING! Link structure can disappear once link->state is set.
	 */
	nlock = MTX_LINKSPIN;	/* to clear */

	switch(link->state) {
	case MTX_LINK_IDLE:
		/*
		 * Link not started yet
		 */
		link->state = MTX_LINK_ABORTED;
		break;
	case MTX_LINK_LINKED_EX:
		/*
		 * de-link, mark aborted, and potentially wakeup the thread
		 * or issue the callback.
		 */
		if (link->next == link) {
			if (mtx->mtx_exlink == link) {
				mtx->mtx_exlink = NULL;
				nlock |= MTX_EXWANTED;	/* to clear */
			}
		} else {
			if (mtx->mtx_exlink == link)
				mtx->mtx_exlink = link->next;
			link->next->prev = link->prev;
			link->prev->next = link->next;
		}

		/*
		 * When aborting the async callback is still made.  We must
		 * not set the link status to ABORTED in the callback case
		 * since there is nothing else to clear its status if the
		 * link is reused.
		 */
		if (link->callback) {
			link->state = MTX_LINK_CALLEDBACK;
			link->callback(link, link->arg, ENOLCK);
		} else {
			link->state = MTX_LINK_ABORTED;
			wakeup(link);
		}
		break;
	case MTX_LINK_LINKED_SH:
		/*
		 * de-link, mark aborted, and potentially wakeup the thread
		 * or issue the callback.
		 */
		if (link->next == link) {
			if (mtx->mtx_shlink == link) {
				mtx->mtx_shlink = NULL;
				nlock |= MTX_SHWANTED;	/* to clear */
			}
		} else {
			if (mtx->mtx_shlink == link)
				mtx->mtx_shlink = link->next;
			link->next->prev = link->prev;
			link->prev->next = link->next;
		}

		/*
		 * When aborting the async callback is still made.  We must
		 * not set the link status to ABORTED in the callback case
		 * since there is nothing else to clear its status if the
		 * link is reused.
		 */
		if (link->callback) {
			link->state = MTX_LINK_CALLEDBACK;
			link->callback(link, link->arg, ENOLCK);
		} else {
			link->state = MTX_LINK_ABORTED;
			wakeup(link);
		}
		break;
	case MTX_LINK_ACQUIRED:
	case MTX_LINK_CALLEDBACK:
		/*
		 * Too late, the lock was acquired.  Let it complete.
		 */
		break;
	default:
		/*
		 * link already aborted, do nothing.
		 */
		break;
	}
	atomic_clear_int(&mtx->mtx_lock, nlock);
}
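
/*
 * Illustrative sketch, not part of the original implementation: aborting an
 * asynchronous request that this caller queued earlier (see the previous
 * sketch), assuming no other thread is sleeping on the link.  The example
 * function name is hypothetical.
 */
#if 0
static int
example_mtx_abort(mtx_t *mtx, mtx_link_t *link)
{
	/*
	 * Ask that the pending (or next) lock attempt on this link fail
	 * with ENOLCK, then synchronize with the request; ENOLCK here
	 * indicates the abort took effect, 0 that the lock was acquired
	 * instead (and must still be released).
	 */
	mtx_abort_link(mtx, link);
	return (mtx_wait_link(mtx, link, 0, 0));
}
#endif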