/*
 * Copyright (c) 2009 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Implement fast persistent locks based on atomic_cmpset_int() with
 * semantics similar to lockmgr locks but faster and taking up much less
 * space.  Taken from HAMMER's lock implementation.
 *
 * These are meant to complement our LWKT tokens.  Tokens are only held
 * while the thread is running.  Mutexes can be held across blocking
 * conditions.
 *
 * - Exclusive priority over shared to prevent SMP starvation.
 * - locks can be aborted (async callback, if any, will be made w/ENOLCK).
 * - locks can be asynchronous.
 * - synchronous fast path if no blocking occurs (async callback is not
 *   made in that case).
 *
 * Generally speaking any caller-supplied link state must be properly
 * initialized before use.
 *
 * Most of the support is in sys/mutex[2].h.  We mostly provide backoff
 * and retry loops.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/indefinite.h>
#include <sys/thread.h>

#include <machine/cpufunc.h>

#include <sys/thread2.h>
#include <sys/mutex2.h>
#include <sys/indefinite2.h>
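/*
 * Illustrative sketch (not part of the original file): the basic
 * synchronous pattern using the entry points defined below.  mtx_init()
 * is assumed to be the usual initializer from sys/mutex2.h; the _mtx_*()
 * calls are the functions implemented in this file (callers normally
 * reach them through the mtx_*() inlines).
 */
#if 0
static mtx_t example_mtx;

static void
example_basic_use(void)
{
        mtx_init(&example_mtx, "exmtx");        /* assumed initializer */

        _mtx_lock_ex_quick(&example_mtx);       /* blocking exclusive lock */
        /* ... modify data protected by example_mtx ... */
        _mtx_unlock(&example_mtx);
}
#endif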
static int mtx_chain_link_ex(mtx_t *mtx, u_int olock);
static int mtx_chain_link_sh(mtx_t *mtx, u_int olock);
static void mtx_delete_link(mtx_t *mtx, mtx_link_t *link);
/*
 * Exclusive-lock a mutex, block until acquired unless link is async.
 * Recursion is allowed.
 *
 * Returns 0 on success, the tsleep() return code on failure, EINPROGRESS
 * if async.  If immediately successful an async exclusive lock will return 0
 * and not issue the async callback or link the link structure.  The caller
 * must handle this case (typically this is an optimal code path).
 *
 * A tsleep() error can only be returned if PCATCH is specified in the flags.
 */
static int
__mtx_lock_ex(mtx_t *mtx, mtx_link_t *link, int flags, int to)
{
        nlock = MTX_EXCLUSIVE | 1;
        if (atomic_cmpset_int(&mtx->mtx_lock, 0, nlock)) {
                mtx->mtx_owner = curthread;
                link->state = MTX_LINK_ACQUIRED;
        }
        if ((lock & MTX_EXCLUSIVE) && mtx->mtx_owner == curthread) {
                KKASSERT((lock & MTX_MASK) != MTX_MASK);
                if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
                        link->state = MTX_LINK_ACQUIRED;
                }
        }

        /*
         * We need MTX_LINKSPIN to manipulate exlink or shlink.
         *
         * We must set MTX_EXWANTED with MTX_LINKSPIN to indicate
         * pending exclusive requests.  It cannot be set as a separate
         * operation prior to acquiring MTX_LINKSPIN.
         *
         * To avoid unnecessary cpu cache traffic we poll
         * for collisions.  It is also possible that EXWANTED
         * state failing the above test was spurious, so all the
         * tests must be repeated if we cannot obtain LINKSPIN
         * with the prior state tests intact (i.e. don't reload
         * the (lock) variable here, for heaven's sake!).
         */
        if (lock & MTX_LINKSPIN) {

        nlock = lock | MTX_EXWANTED | MTX_LINKSPIN;
        if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock) == 0) {

        /*
         * Check for early abort.
         */
        if (link->state == MTX_LINK_ABORTED) {
                if (mtx->mtx_exlink == NULL) {
                        atomic_clear_int(&mtx->mtx_lock,
                                         MTX_LINKSPIN | MTX_EXWANTED);
                } else {
                        atomic_clear_int(&mtx->mtx_lock,
                                         MTX_LINKSPIN);
                }
                link->state = MTX_LINK_IDLE;
        }

        /*
         * Add our link to the exlink list and release LINKSPIN.
         */
        link->state = MTX_LINK_LINKED_EX;
        if (mtx->mtx_exlink) {
                link->next = mtx->mtx_exlink;
                link->prev = link->next->prev;
                link->next->prev = link;
                link->prev->next = link;
        } else {
                link->next = link;
                link->prev = link;
                mtx->mtx_exlink = link;
        }
        isasync = (link->callback != NULL);
        atomic_clear_int(&mtx->mtx_lock, MTX_LINKSPIN);

        /*
         * If this is an asynchronous lock request, return without
         * blocking and leave the link structure linked.
         */

        error = mtx_wait_link(mtx, link, flags, to);
int
_mtx_lock_ex_link(mtx_t *mtx, mtx_link_t *link, int flags, int to)
{
        return(__mtx_lock_ex(mtx, link, flags, to));
}

int
_mtx_lock_ex(mtx_t *mtx, int flags, int to)
{
        mtx_link_t link;

        mtx_link_init(&link);
        return(__mtx_lock_ex(mtx, &link, flags, to));
}

int
_mtx_lock_ex_quick(mtx_t *mtx)
{
        mtx_link_t link;

        mtx_link_init(&link);
        return(__mtx_lock_ex(mtx, &link, 0, 0));
}
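/*
 * Illustrative sketch (not part of the original file): an asynchronous
 * exclusive request as described above __mtx_lock_ex().  A return of 0
 * means the lock was acquired immediately and the callback will NOT be
 * made; EINPROGRESS means the link was queued and the callback fires
 * later.  The callback signature and the callback/arg fields follow
 * their use elsewhere in this file; the link must remain valid until
 * the request completes or is aborted.
 */
#if 0
static void
example_ex_callback(mtx_link_t *link, void *arg, int error)
{
        /* error is 0 on acquisition, ENOLCK if the request was aborted */
}

static int
example_async_ex(mtx_t *mtx, mtx_link_t *link)
{
        int error;

        mtx_link_init(link);
        link->callback = example_ex_callback;
        link->arg = NULL;

        error = _mtx_lock_ex_link(mtx, link, 0, 0);
        if (error == 0) {
                /* fast path: already exclusive, no callback will occur */
        } else if (error == EINPROGRESS) {
                /* queued: example_ex_callback() runs when the lock chains */
        }
        return (error);
}
#endif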
/*
 * Share-lock a mutex, block until acquired.  Recursion is allowed.
 *
 * Returns 0 on success, or the tsleep() return code on failure.
 * An error can only be returned if PCATCH is specified in the flags.
 *
 * NOTE: Shared locks get a mass-wakeup so if the tsleep fails we
 *	 do not have to chain the wakeup().
 */
static int
__mtx_lock_sh(mtx_t *mtx, mtx_link_t *link, int flags, int to)
{
        lock = mtx->mtx_lock;
        if (atomic_cmpset_int(&mtx->mtx_lock, 0, nlock)) {
                link->state = MTX_LINK_ACQUIRED;
        }
        if ((lock & (MTX_EXCLUSIVE | MTX_EXWANTED)) == 0) {
                KKASSERT((lock & MTX_MASK) != MTX_MASK);
                if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
                        link->state = MTX_LINK_ACQUIRED;
                }
        }

        /*
         * We need MTX_LINKSPIN to manipulate exlink or shlink.
         *
         * We must set MTX_SHWANTED with MTX_LINKSPIN to indicate
         * pending shared requests.  It cannot be set as a separate
         * operation prior to acquiring MTX_LINKSPIN.
         *
         * To avoid unnecessary cpu cache traffic we poll
         * for collisions.  It is also possible that EXWANTED
         * state failing the above test was spurious, so all the
         * tests must be repeated if we cannot obtain LINKSPIN
         * with the prior state tests intact (i.e. don't reload
         * the (lock) variable here, for heaven's sake!).
         */
        if (lock & MTX_LINKSPIN) {

        nlock = lock | MTX_SHWANTED | MTX_LINKSPIN;
        if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock) == 0) {

        /*
         * Check for early abort.  Other shared lock requestors
         * could have sneaked in before we set LINKSPIN so make
         * sure we undo the state properly.
         */
        if (link->state == MTX_LINK_ABORTED) {
                if (mtx->mtx_shlink) {
                        atomic_clear_int(&mtx->mtx_lock,
                                         MTX_LINKSPIN);
                } else {
                        atomic_clear_int(&mtx->mtx_lock,
                                         MTX_LINKSPIN | MTX_SHWANTED);
                }
                link->state = MTX_LINK_IDLE;
        }

        /*
         * Add our link to the shlink list and release LINKSPIN.
         */
        link->state = MTX_LINK_LINKED_SH;
        if (mtx->mtx_shlink) {
                link->next = mtx->mtx_shlink;
                link->prev = link->next->prev;
                link->next->prev = link;
                link->prev->next = link;
        } else {
                link->next = link;
                link->prev = link;
                mtx->mtx_shlink = link;
        }
        isasync = (link->callback != NULL);
        atomic_clear_int(&mtx->mtx_lock, MTX_LINKSPIN);

        /*
         * If this is an asynchronous lock request, return without
         * blocking and leave the link structure linked.
         */

        error = mtx_wait_link(mtx, link, flags, to);
int
_mtx_lock_sh_link(mtx_t *mtx, mtx_link_t *link, int flags, int to)
{
        return(__mtx_lock_sh(mtx, link, flags, to));
}

int
_mtx_lock_sh(mtx_t *mtx, int flags, int to)
{
        mtx_link_t link;

        mtx_link_init(&link);
        return(__mtx_lock_sh(mtx, &link, flags, to));
}

int
_mtx_lock_sh_quick(mtx_t *mtx)
{
        mtx_link_t link;

        mtx_link_init(&link);
        return(__mtx_lock_sh(mtx, &link, 0, 0));
}
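/*
 * Illustrative sketch (not part of the original file): a synchronous
 * shared lock with PCATCH and a timeout, matching the semantics
 * described above __mtx_lock_sh().  A non-zero return is the tsleep()
 * error, e.g. EINTR (PCATCH) or EWOULDBLOCK (timeout).
 */
#if 0
static int
example_shared_read(mtx_t *mtx)
{
        int error;

        error = _mtx_lock_sh(mtx, PCATCH, hz);  /* interruptible, ~1 second */
        if (error)
                return (error);
        /* ... read-only access to the protected data ... */
        _mtx_unlock(mtx);
        return (0);
}
#endif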
/*
 * Get an exclusive spinlock the hard way.
 */
void
_mtx_spinlock(mtx_t *mtx)
{
        lock = mtx->mtx_lock;
        nlock = MTX_EXCLUSIVE | 1;
        if (atomic_cmpset_int(&mtx->mtx_lock, 0, nlock)) {
                mtx->mtx_owner = curthread;
        } else if ((lock & MTX_EXCLUSIVE) &&
                   mtx->mtx_owner == curthread) {
                KKASSERT((lock & MTX_MASK) != MTX_MASK);
                if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
                        break;
        }
        for (bo = 0; bo < bb; ++bo)
                cpu_pause();
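/*
 * Illustrative sketch (not part of the original file): exclusive spin
 * usage.  The mtx_spinlock()/mtx_spinunlock() names are assumed to be
 * the sys/mutex2.h inlines that wrap _mtx_spinlock(); spin sections
 * must stay short and must not block.
 */
#if 0
static void
example_spin_section(mtx_t *mtx)
{
        mtx_spinlock(mtx);              /* assumed sys/mutex2.h inline */
        /* ... short, non-blocking critical section ... */
        mtx_spinunlock(mtx);            /* assumed sys/mutex2.h inline */
}
#endif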
/*
 * Attempt to acquire a spinlock, if we fail we must undo the
 * gd->gd_spinlocks/gd->gd_curthread->td_critcount predisposition.
 *
 * Returns 0 on success, EAGAIN on failure.
 */
int
_mtx_spinlock_try(mtx_t *mtx)
{
        globaldata_t gd = mycpu;

        lock = mtx->mtx_lock;
        nlock = MTX_EXCLUSIVE | 1;
        if (atomic_cmpset_int(&mtx->mtx_lock, 0, nlock)) {
                mtx->mtx_owner = gd->gd_curthread;
        } else if ((lock & MTX_EXCLUSIVE) &&
                   mtx->mtx_owner == gd->gd_curthread) {
                KKASSERT((lock & MTX_MASK) != MTX_MASK);
                if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
                        break;
        }
        crit_exit_raw(gd->gd_curthread);
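/*
 * Illustrative sketch (not part of the original file): the try variant
 * for contexts that cannot spin indefinitely.  mtx_spinunlock() is
 * assumed to be the matching release inline in sys/mutex2.h.
 */
#if 0
static int
example_try_spin(mtx_t *mtx)
{
        int error;

        error = _mtx_spinlock_try(mtx);
        if (error)
                return (error);         /* EAGAIN, predisposition undone */
        /* ... short, non-blocking critical section ... */
        mtx_spinunlock(mtx);            /* assumed sys/mutex2.h inline */
        return (0);
}
#endif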
void
_mtx_spinlock_sh(mtx_t *mtx)
{
        lock = mtx->mtx_lock;
        if ((lock & MTX_EXCLUSIVE) == 0) {
                KKASSERT((lock & MTX_MASK) != MTX_MASK);
                if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
                        break;
        }
        for (bo = 0; bo < bb; ++bo)
                cpu_pause();
int
_mtx_lock_ex_try(mtx_t *mtx)
{
        lock = mtx->mtx_lock;
        nlock = MTX_EXCLUSIVE | 1;
        if (atomic_cmpset_int(&mtx->mtx_lock, 0, nlock)) {
                mtx->mtx_owner = curthread;
        } else if ((lock & MTX_EXCLUSIVE) &&
                   mtx->mtx_owner == curthread) {
                KKASSERT((lock & MTX_MASK) != MTX_MASK);
                if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
                        break;
                }
        }
int
_mtx_lock_sh_try(mtx_t *mtx)
{
        lock = mtx->mtx_lock;
        if ((lock & MTX_EXCLUSIVE) == 0) {
                KKASSERT((lock & MTX_MASK) != MTX_MASK);
                if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
                        break;
        }
/*
 * If the lock is held exclusively it must be owned by the caller.  If the
 * lock is already a shared lock this operation is a NOP.  A panic will
 * occur if the lock is not held either shared or exclusive.
 *
 * The exclusive count is converted to a shared count.
 */
void
_mtx_downgrade(mtx_t *mtx)
{
        lock = mtx->mtx_lock;

        /*
         * NOP if already shared.
         */
        if ((lock & MTX_EXCLUSIVE) == 0) {
                KKASSERT((lock & MTX_MASK) > 0);
                break;
        }

        /*
         * Transfer count to shared.  Any additional pending shared
         * waiters must be woken up.
         */
        if (lock & MTX_SHWANTED) {
                if (mtx_chain_link_sh(mtx, lock))
                        break;
        }
        nlock = lock & ~MTX_EXCLUSIVE;
        if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
                break;
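/*
 * Illustrative sketch (not part of the original file): the
 * modify-then-downgrade pattern the comment above describes.  The
 * single exclusive count becomes a shared count, letting other readers
 * in while the caller continues reading.
 */
#if 0
static void
example_downgrade(mtx_t *mtx)
{
        _mtx_lock_ex_quick(mtx);
        /* ... modify the protected data ... */
        _mtx_downgrade(mtx);            /* exclusive count -> shared count */
        /* ... continue with read-only access; other readers may enter ... */
        _mtx_unlock(mtx);
}
#endif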
/*
 * Upgrade a shared lock to an exclusive lock.  The upgrade will fail if
 * the shared lock has a count other than 1.  Optimize the most likely case
 * but note that a single cmpset can fail due to WANTED races.
 *
 * If the lock is held exclusively it must be owned by the caller and
 * this function will simply return without doing anything.  A panic will
 * occur if the lock is held exclusively by someone other than the caller.
 *
 * Returns 0 on success, EDEADLK on failure.
 */
int
_mtx_upgrade_try(mtx_t *mtx)
{
        lock = mtx->mtx_lock;
        if ((lock & ~MTX_EXWANTED) == 1) {
                nlock = lock | MTX_EXCLUSIVE;
                if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
                        mtx->mtx_owner = curthread;
                }
        } else if (lock & MTX_EXCLUSIVE) {
                KKASSERT(mtx->mtx_owner == curthread);
        }
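/*
 * Illustrative sketch (not part of the original file): the usual
 * pattern around _mtx_upgrade_try().  Because the upgrade fails with
 * EDEADLK whenever the shared count is not exactly 1, callers typically
 * fall back to dropping the shared lock, relocking exclusively, and
 * revalidating whatever they examined under the shared lock.
 */
#if 0
static void
example_upgrade(mtx_t *mtx)
{
        _mtx_lock_sh_quick(mtx);
        if (_mtx_upgrade_try(mtx) == EDEADLK) {
                _mtx_unlock(mtx);
                _mtx_lock_ex_quick(mtx);
                /* ... revalidate state examined under the shared lock ... */
        }
        /* ... exclusive access ... */
        _mtx_unlock(mtx);
}
#endif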
/*
 * Unlock a lock.  The caller must hold the lock either shared or exclusive.
 *
 * On the last release we handle any pending chains.
 */
void
_mtx_unlock(mtx_t *mtx)
{
        lock = mtx->mtx_lock;
        switch(lock) {
        case MTX_EXCLUSIVE | 1:
                /*
                 * Last release, exclusive lock.
                 * No exclusive or shared requests pending.
                 */
                mtx->mtx_owner = NULL;
                nlock = 0;
                if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
                        return;
                break;
        case MTX_EXCLUSIVE | MTX_EXWANTED | 1:
        case MTX_EXCLUSIVE | MTX_EXWANTED | MTX_SHWANTED | 1:
                /*
                 * Last release, exclusive lock.
                 * Exclusive requests pending.
                 * Exclusive requests have priority over shared reqs.
                 */
                if (mtx_chain_link_ex(mtx, lock))
                        return;
                break;
        case MTX_EXCLUSIVE | MTX_SHWANTED | 1:
                /*
                 * Last release, exclusive lock.
                 *
                 * Shared requests are pending.  Transfer our count (1)
                 * to the first shared request, wakeup all shared reqs.
                 */
                if (mtx_chain_link_sh(mtx, lock))
                        return;
                break;
        case 1:
                /*
                 * Last release, shared lock.
                 * No exclusive or shared requests pending.
                 */
                nlock = 0;
                if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
                        return;
                break;
        case MTX_EXWANTED | 1:
        case MTX_EXWANTED | MTX_SHWANTED | 1:
                /*
                 * Last release, shared lock.
                 *
                 * Exclusive requests are pending.  Upgrade this
                 * final shared lock to exclusive and transfer our
                 * count (1) to the next exclusive request.
                 *
                 * Exclusive requests have priority over shared reqs.
                 */
                if (mtx_chain_link_ex(mtx, lock))
                        return;
                break;
        case MTX_SHWANTED | 1:
                /*
                 * Last release, shared lock.
                 * Shared requests pending.
                 */
                if (mtx_chain_link_sh(mtx, lock))
                        return;
                break;
        default:
                /*
                 * We have to loop if this is the last release but
                 * someone is fiddling with LINKSPIN.
                 */
                if ((lock & MTX_MASK) == 1) {
                        KKASSERT(lock & MTX_LINKSPIN);
                        break;
                }

                /*
                 * Not the last release (shared or exclusive)
                 */
                nlock = lock - 1;
                KKASSERT((nlock & MTX_MASK) != MTX_MASK);
                if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
                        return;
                break;
        }
/*
 * Chain pending links.  Called on the last release of an exclusive or
 * shared lock when the appropriate WANTED bit is set.  mtx_lock old state
 * is passed in with the count left at 1, which we can inherit, and other
 * bits which we must adjust in a single atomic operation.
 *
 * Return non-zero on success, 0 if caller needs to retry.
 *
 * NOTE: It's ok if MTX_EXWANTED is in an indeterminate state while we are
 *	 acquiring LINKSPIN as all other cases will also need to acquire
 *	 LINKSPIN when handling the EXWANTED case.
 */
static int
mtx_chain_link_ex(mtx_t *mtx, u_int olock)
{
        thread_t td = curthread;

        olock &= ~MTX_LINKSPIN;
        nlock = olock | MTX_LINKSPIN | MTX_EXCLUSIVE;   /* upgrade if necc */
        if (atomic_cmpset_int(&mtx->mtx_lock, olock, nlock)) {
                link = mtx->mtx_exlink;
                KKASSERT(link != NULL);
                if (link->next == link) {
                        mtx->mtx_exlink = NULL;
                        nlock = MTX_LINKSPIN | MTX_EXWANTED;    /* to clear */
                } else {
                        mtx->mtx_exlink = link->next;
                        link->next->prev = link->prev;
                        link->prev->next = link->next;
                        nlock = MTX_LINKSPIN;                   /* to clear */
                }
                KKASSERT(link->state == MTX_LINK_LINKED_EX);
                mtx->mtx_owner = link->owner;

                /*
                 * WARNING! The callback can only be safely
                 *	    made with LINKSPIN still held
                 *	    and in a critical section.
                 *
                 * WARNING! The link can go away after the
                 *	    state is set, or after the
                 *	    callback is made.
                 */
                if (link->callback) {
                        link->state = MTX_LINK_CALLEDBACK;
                        link->callback(link, link->arg, 0);
                } else {
                        link->state = MTX_LINK_ACQUIRED;
                }
                atomic_clear_int(&mtx->mtx_lock, nlock);
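/*
 * Illustrative sketch (not part of the original file) of the handoff
 * idea used by mtx_chain_link_ex(): the releaser leaves the count at 1
 * and a single compare-and-swap re-tags that count with new flag bits,
 * so ownership transfers without the count ever dropping to zero.
 * Shown as a userspace analogy with C11 atomics and stand-in bit
 * definitions; the kernel code uses atomic_cmpset_int() on
 * mtx->mtx_lock with the real MTX_* flags.
 */
#if 0
#include <stdatomic.h>
#include <stdbool.h>

#define X_EXCLUSIVE     0x80000000u     /* stand-in for MTX_EXCLUSIVE */
#define X_LINKSPIN      0x40000000u     /* stand-in for MTX_LINKSPIN */

static bool
handoff_exclusive(_Atomic unsigned int *lockp, unsigned int olock)
{
        unsigned int nlock;

        /*
         * olock still carries the releaser's count of 1.  One CAS both
         * claims the link spinlock and converts the lock to exclusive,
         * inheriting that count for the first waiter instead of
         * releasing it.
         */
        olock &= ~X_LINKSPIN;
        nlock = olock | X_LINKSPIN | X_EXCLUSIVE;
        return atomic_compare_exchange_strong(lockp, &olock, nlock);
}
#endif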
/*
 * Flush waiting shared locks.  The lock's prior state is passed in and must
 * be adjusted atomically only if it matches and LINKSPIN is not set.
 *
 * IMPORTANT! The caller has left one active count on the lock for us to
 *	      consume.  We will apply this to the first link, but must add
 *	      additional counts for any other links.
 */
static int
mtx_chain_link_sh(mtx_t *mtx, u_int olock)
{
        thread_t td = curthread;

        olock &= ~MTX_LINKSPIN;
        nlock = olock | MTX_LINKSPIN;
        nlock &= ~MTX_EXCLUSIVE;
        if (atomic_cmpset_int(&mtx->mtx_lock, olock, nlock)) {
                /*
                 * It should not be possible for SHWANTED to be set without
                 * at least one link on the shlink list.
                 */
                KKASSERT(mtx->mtx_shlink != NULL);

                /*
                 * We have to process the count for all shared locks before
                 * we process any of the links.  Count the additional shared
                 * locks beyond the first link (which is already accounted
                 * for) and associate the full count with the lock.
                 */
                for (link = mtx->mtx_shlink->next; link != mtx->mtx_shlink;
                     link = link->next) {
                        ++addcount;
                }
                atomic_add_int(&mtx->mtx_lock, addcount);

                /*
                 * We can wakeup all waiting shared locks.
                 */
                while ((link = mtx->mtx_shlink) != NULL) {
                        KKASSERT(link->state == MTX_LINK_LINKED_SH);
                        if (link->next == link) {
                                mtx->mtx_shlink = NULL;
                        } else {
                                mtx->mtx_shlink = link->next;
                                link->next->prev = link->prev;
                                link->prev->next = link->next;
                        }
                        if (link->callback) {
                                link->state = MTX_LINK_CALLEDBACK;
                                link->callback(link, link->arg, 0);
                        } else {
                                link->state = MTX_LINK_ACQUIRED;
                        }
                }
                atomic_clear_int(&mtx->mtx_lock, MTX_LINKSPIN |
                                                 MTX_SHWANTED);
/*
 * Delete a link structure after tsleep has failed.  This code is not
 * in the critical path as most exclusive waits are chained.
 */
static void
mtx_delete_link(mtx_t *mtx, mtx_link_t *link)
{
        thread_t td = curthread;

        /*
         * Acquire MTX_LINKSPIN.
         *
         * Do not use cmpxchg to wait for LINKSPIN to clear as this might
         * result in too much cpu cache traffic.
         */
        lock = mtx->mtx_lock;
        if (lock & MTX_LINKSPIN) {

        nlock = lock | MTX_LINKSPIN;
        if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
                break;

        /*
         * Delete the link and release LINKSPIN.
         */
        nlock = MTX_LINKSPIN;   /* to clear */

        switch(link->state) {
        case MTX_LINK_LINKED_EX:
                if (link->next == link) {
                        mtx->mtx_exlink = NULL;
                        nlock |= MTX_EXWANTED;  /* to clear */
                } else {
                        mtx->mtx_exlink = link->next;
                        link->next->prev = link->prev;
                        link->prev->next = link->next;
                }
                break;
        case MTX_LINK_LINKED_SH:
                if (link->next == link) {
                        mtx->mtx_shlink = NULL;
                        nlock |= MTX_SHWANTED;  /* to clear */
                } else {
                        mtx->mtx_shlink = link->next;
                        link->next->prev = link->prev;
                        link->prev->next = link->next;
                }
                break;
        }
        atomic_clear_int(&mtx->mtx_lock, nlock);
/*
 * Wait for async lock completion or abort.  Returns ENOLCK if an abort
 * occurred.
 */
int
mtx_wait_link(mtx_t *mtx, mtx_link_t *link, int flags, int to)
{
        indefinite_info_t info;

        indefinite_init(&info, mtx->mtx_ident, 1,
                        ((link->state & MTX_LINK_LINKED_SH) ? 'm' : 'M'));

        /*
         * Sleep.  Handle false wakeups, interruptions, etc.
         * The link may also have been aborted.  The LINKED
         * bit was set by this cpu so we can test it without
         * atomic ops.
         */
        while (link->state & MTX_LINK_LINKED) {
                tsleep_interlock(link, 0);
                if (link->state & MTX_LINK_LINKED) {
                        error = tsleep(link, flags | PINTERLOCKED,
                                       mtx->mtx_ident, to);
                }
                if ((mtx->mtx_flags & MTXF_NOCOLLSTATS) == 0)
                        indefinite_check(&info);
        }

        /*
         * We need at least a lfence (load fence) to ensure our cpu does not
         * reorder loads (of data outside the lock structure) prior to the
         * remote cpu's release, since the above test may have run without
         * any atomic interactions.
         *
         * If we do not do this then state updated by the other cpu before
         * releasing its lock may not be read cleanly by our cpu when this
         * function returns.  Even though the other cpu ordered its stores,
         * our loads can still be out of order.
         */

        /*
         * We are done, make sure the link structure is unlinked.
         * It may still be on the list due to e.g. EINTR or
         * EWOULDBLOCK.
         *
         * It is possible for the tsleep to race an ABORT and cause
         * error to be 0.
         *
         * The tsleep() can be woken up for numerous reasons and error
         * might be zero in situations where we intend to return an error.
         *
         * (This is the synchronous case so state cannot be CALLEDBACK)
         */
        switch(link->state) {
        case MTX_LINK_ACQUIRED:
        case MTX_LINK_CALLEDBACK:
                error = 0;
                break;
        case MTX_LINK_ABORTED:
                error = ENOLCK;
                break;
        case MTX_LINK_LINKED_EX:
        case MTX_LINK_LINKED_SH:
                mtx_delete_link(mtx, link);
                /* fall through */
        default:
                if (error == 0)
                        error = EWOULDBLOCK;
                break;
        }

        /*
         * Clear state on status returned.
         */
        link->state = MTX_LINK_IDLE;

        if ((mtx->mtx_flags & MTXF_NOCOLLSTATS) == 0)
                indefinite_done(&info);
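/*
 * Illustrative sketch (not part of the original file) of the ordering
 * point made above: after a plain (non-atomic) flag poll indicates that
 * another cpu has published data, a load fence is required before
 * reading that data.  cpu_lfence()/cpu_pause() are assumed from
 * <machine/cpufunc.h>, which is already included above.
 */
#if 0
static volatile int example_flag;       /* set by another cpu after it fills example_data */
static int example_data;

static int
example_poll_then_read(void)
{
        while (example_flag == 0)       /* plain load, no atomic op */
                cpu_pause();
        cpu_lfence();                   /* order the data load after the flag load */
        return (example_data);
}
#endif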
/*
 * Abort a mutex locking operation, causing mtx_lock_ex_link() to
 * return ENOLCK.  This may be called at any time after the mtx_link
 * is initialized or the status from a previous lock has been
 * returned.  If called prior to the next (non-try) lock attempt, the
 * next lock attempt using this link structure will abort instantly.
 *
 * Caller must still wait for the operation to complete, either from a
 * blocking call that is still in progress or by calling mtx_wait_link().
 *
 * If an asynchronous lock request is possibly in-progress, the caller
 * should call mtx_wait_link() synchronously.  Note that the asynchronous
 * lock callback will NOT be called if a successful abort occurred. XXX
 */
void
mtx_abort_link(mtx_t *mtx, mtx_link_t *link)
{
        thread_t td = curthread;

        /*
         * Acquire MTX_LINKSPIN
         */
        lock = mtx->mtx_lock;
        if (lock & MTX_LINKSPIN) {

        nlock = lock | MTX_LINKSPIN;
        if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
                break;

        /*
         * WARNING! Link structure can disappear once link->state is set.
         */
        nlock = MTX_LINKSPIN;   /* to clear */

        switch(link->state) {
        case MTX_LINK_IDLE:
                /*
                 * Link not started yet
                 */
                link->state = MTX_LINK_ABORTED;
                break;
        case MTX_LINK_LINKED_EX:
                /*
                 * de-link, mark aborted, and potentially wakeup the thread
                 * or issue the callback.
                 */
                if (link->next == link) {
                        if (mtx->mtx_exlink == link) {
                                mtx->mtx_exlink = NULL;
                                nlock |= MTX_EXWANTED;  /* to clear */
                        }
                } else {
                        if (mtx->mtx_exlink == link)
                                mtx->mtx_exlink = link->next;
                        link->next->prev = link->prev;
                        link->prev->next = link->next;
                }

                /*
                 * When aborting the async callback is still made.  We must
                 * not set the link status to ABORTED in the callback case
                 * since there is nothing else to clear its status if the
                 * callback is made.
                 */
                if (link->callback) {
                        link->state = MTX_LINK_CALLEDBACK;
                        link->callback(link, link->arg, ENOLCK);
                } else {
                        link->state = MTX_LINK_ABORTED;
                }
                break;
        case MTX_LINK_LINKED_SH:
                /*
                 * de-link, mark aborted, and potentially wakeup the thread
                 * or issue the callback.
                 */
                if (link->next == link) {
                        if (mtx->mtx_shlink == link) {
                                mtx->mtx_shlink = NULL;
                                nlock |= MTX_SHWANTED;  /* to clear */
                        }
                } else {
                        if (mtx->mtx_shlink == link)
                                mtx->mtx_shlink = link->next;
                        link->next->prev = link->prev;
                        link->prev->next = link->next;
                }

                /*
                 * When aborting the async callback is still made.  We must
                 * not set the link status to ABORTED in the callback case
                 * since there is nothing else to clear its status if the
                 * callback is made.
                 */
                if (link->callback) {
                        link->state = MTX_LINK_CALLEDBACK;
                        link->callback(link, link->arg, ENOLCK);
                } else {
                        link->state = MTX_LINK_ABORTED;
                }
                break;
        case MTX_LINK_ACQUIRED:
        case MTX_LINK_CALLEDBACK:
                /*
                 * Too late, the lock was acquired.  Let it complete.
                 */
                break;
        case MTX_LINK_ABORTED:
                /*
                 * link already aborted, do nothing.
                 */
                break;
        }
        atomic_clear_int(&mtx->mtx_lock, nlock);
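/*
 * Illustrative sketch (not part of the original file): cancelling an
 * asynchronous request, per the comment above mtx_abort_link().  If the
 * abort wins the race the callback reports ENOLCK; if the lock was
 * already granted the acquisition simply completes and the caller is
 * responsible for releasing it.
 */
#if 0
static void
example_abort_async(mtx_t *mtx, mtx_link_t *link)
{
        /*
         * link was previously handed to _mtx_lock_ex_link() with a
         * callback and returned EINPROGRESS.
         */
        mtx_abort_link(mtx, link);
}
#endif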