/* Modified by Broadcom Corp. Portions Copyright (c) Broadcom Corp, 2012. */
/*
 * arch/arm/include/asm/mutex.h
 *
 * ARM optimized mutex locking primitives
 *
 * Please look into asm-generic/mutex-xchg.h for a formal definition.
 */
#ifndef _ASM_MUTEX_H
#define _ASM_MUTEX_H
#if __LINUX_ARM_ARCH__ < 6
/* On pre-ARMv6 hardware the swp-based implementation is the most efficient. */
# include <asm-generic/mutex-xchg.h>
#else
/*
 * Attempting to lock a mutex on ARMv6+ can be done with a bastardized
 * atomic decrement (it is not a reliable atomic decrement, but it satisfies
 * the defined semantics for our purpose while being smaller and faster
 * than a real atomic decrement or atomic swap).  The idea is to attempt
 * decrementing the lock value only once.  If, once decremented, it isn't
 * zero, or if its store-back fails due to a dispute on the exclusive store,
 * we simply bail out immediately through the slow path where the lock will
 * be reattempted until it succeeds.
 */
static inline void
__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
	int __ex_flag, __res;

	/* Single-shot ldrex/strex decrement: deliberately no retry loop. */
	__asm__ (

		"ldrex	%0, [%2]	\n\t"
		"sub	%0, %0, #1	\n\t"
		"strex	%1, %0, [%2]	"

		: "=&r" (__res), "=&r" (__ex_flag)
		: "r" (&(count)->counter)
		: "cc", "memory");

	/* A non-zero result or a lost exclusive store sends us to the slow path. */
	__res |= __ex_flag;
	if (unlikely(__res != 0))
		fail_fn(count);
}
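/*
 * A plain-C sketch of the fastpath above (illustrative only, not compiled;
 * ll() and sc() are hypothetical stand-ins for ldrex/strex, with sc()
 * returning non-zero when the exclusive store is lost):
 *
 *	int v  = ll(&count->counter);		// ldrex
 *	int ex = sc(&count->counter, v - 1);	// strex, exactly one attempt
 *	if ((v - 1) != 0 || ex)			// contended, or store-back lost
 *		fail_fn(count);			// slow path retries until the lock is held
 */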
static inline int
__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
{
	int __ex_flag, __res;

	__asm__ (

		"ldrex	%0, [%2]	\n\t"
		"sub	%0, %0, #1	\n\t"
		"strex	%1, %0, [%2]	"

		: "=&r" (__res), "=&r" (__ex_flag)
		: "r" (&(count)->counter)
		: "cc", "memory");

	/* On contention or a lost exclusive store, let fail_fn() decide. */
	__res |= __ex_flag;
	if (unlikely(__res != 0))
		__res = fail_fn(count);

	return __res;
}
/*
 * The same trick is used for the unlock fast path.  However, the original
 * value, rather than the result, is used to test for success in order to
 * get better generated assembly.
 */
static inline void
__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
	int __ex_flag, __res, __orig;

	__asm__ (

		"ldrex	%0, [%3]	\n\t"
		"add	%1, %0, #1	\n\t"
		"strex	%2, %1, [%3]	"

		: "=&r" (__orig), "=&r" (__res), "=&r" (__ex_flag)
		: "r" (&(count)->counter)
		: "cc", "memory");

	/* Test the pre-increment value: non-zero means waiters, or a lost store. */
	__orig |= __ex_flag;
	if (unlikely(__orig != 0))
		fail_fn(count);
}
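/*
 * Sketch of the unlock fastpath in the same style (illustrative only;
 * ll()/sc() are the hypothetical helpers used in the sketch above):
 *
 *	int orig = ll(&count->counter);		// ldrex
 *	int ex = sc(&count->counter, orig + 1);	// strex, exactly one attempt
 *	if (orig != 0 || ex)			// waiters present, or store lost
 *		fail_fn(count);			// slow path completes the unlock
 *
 * Testing orig rather than the incremented result is what the comment above
 * refers to as giving better generated assembly.
 */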
/*
 * If the unlock was done on a contended lock, or if the unlock simply fails,
 * then the mutex remains locked.
 */
#define __mutex_slowpath_needs_to_unlock()	1
/*
 * For __mutex_fastpath_trylock we use another construct which could be
 * described as a "single value cmpxchg".
 *
 * This provides the needed trylock semantics like cmpxchg would, but it is
 * lighter and less generic than a true cmpxchg implementation.
 */
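/*
 * Rough C equivalent of that "single value cmpxchg" (illustrative only, not
 * compiled; ll()/sc() are hypothetical ldrex/strex helpers as in the
 * sketches above):
 *
 *	do {
 *		orig = ll(&count->counter);	// ldrex
 *		if (orig != 1) {		// already locked (0 or negative)
 *			orig = 0;		// report failure
 *			break;			// no store attempted
 *		}
 *	} while (sc(&count->counter, 0));	// strexeq; retry only on a lost store
 *	return orig;				// 1: lock acquired, 0: failed
 */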
static inline int
__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
{
	int __ex_flag, __res, __orig;

	/* Only the 1 -> 0 transition may succeed; retry only if the store is lost. */
	__asm__ (

		"1:	ldrex	%0, [%3]	\n\t"
		"subs	%1, %0, #1	\n\t"
		"strexeq	%2, %1, [%3]	\n\t"
		"movlt	%0, #0		\n\t"
		"cmpeq	%2, #0		\n\t"
		"bgt	1b		"

		: "=&r" (__orig), "=&r" (__res), "=&r" (__ex_flag)
		: "r" (&count->counter)