#include <linux/percpu.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include "mcs_spinlock.h"

/*
 * An MCS-like lock especially tailored for optimistic spinning for sleeping
 * lock implementations (mutex, rwsem, etc).
 *
 * Using a single mcs node per CPU is safe because sleeping locks should not be
 * called from interrupt context and we have preemption disabled while
 * spinning.
 */
static DEFINE_PER_CPU_SHARED_ALIGNED(struct optimistic_spin_queue, osq_node);

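/*
 * For reference, each per-CPU node declared above carries roughly the
 * following fields (a sketch of the definition in mcs_spinlock.h, not
 * redefined here):
 *
 *	struct optimistic_spin_queue {
 *		struct optimistic_spin_queue *next, *prev;
 *		int locked;
 *	};
 *
 * @locked becomes 1 once the lock has been handed to this node.
 */
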
/*
 * Get a stable @node->next pointer, either for unlock() or unqueue() purposes.
 * Can return NULL in case we were the last queued and we updated @lock instead.
 */
static inline struct optimistic_spin_queue *
osq_wait_next(struct optimistic_spin_queue **lock,
	      struct optimistic_spin_queue *node,
	      struct optimistic_spin_queue *prev)
{
	struct optimistic_spin_queue *next = NULL;

	for (;;) {
		if (*lock == node && cmpxchg(lock, node, prev) == node) {
			/*
			 * We were the last queued, we moved @lock back. @prev
			 * will now observe @lock and will complete its
			 * unlock()/unqueue().
			 */
			break;
		}

		/*
		 * We must xchg() the @node->next value, because if we were to
		 * leave it in, a concurrent unlock()/unqueue() from
		 * @node->next might complete Step-A and think its @prev is
		 * still valid.
		 *
		 * If the concurrent unlock()/unqueue() wins the race, we'll
		 * wait for either @lock to point to us, through its Step-B, or
		 * wait for a new @node->next from its Step-C.
		 */
		if (node->next) {
			next = xchg(&node->next, NULL);
			if (next)
				break;
		}

		arch_mutex_cpu_relax();
	}

	return next;
}

bool osq_lock(struct optimistic_spin_queue **lock)
{
	struct optimistic_spin_queue *node = this_cpu_ptr(&osq_node);
	struct optimistic_spin_queue *prev, *next;

	node->locked = 0;
	node->next = NULL;

	node->prev = prev = xchg(lock, node);
	if (likely(prev == NULL))
		return true;

	ACCESS_ONCE(prev->next) = node;

	/*
	 * Normally @prev is untouchable after the above store, because at that
	 * moment unlock can proceed and wipe the node element from the stack.
	 *
	 * However, since our nodes are static per-cpu storage, we're
	 * guaranteed their existence -- this allows us to apply
	 * cmpxchg in an attempt to undo our queueing.
	 */

	while (!smp_load_acquire(&node->locked)) {
		/*
		 * If we need to reschedule, bail... so we can block.
		 */
		if (need_resched())
			goto unqueue;

		arch_mutex_cpu_relax();
	}
	return true;

unqueue:
	/*
	 * Step - A -- stabilize @prev
	 *
	 * Undo our @prev->next assignment; this will make @prev's
	 * unlock()/unqueue() wait for a next pointer since @lock points to us
	 * (or later).
	 */

	for (;;) {
		if (prev->next == node &&
		    cmpxchg(&prev->next, node, NULL) == node)
			break;

		/*
		 * We can only fail the cmpxchg() racing against an unlock(),
		 * in which case we should observe @node->locked becoming
		 * true.
		 */
		if (smp_load_acquire(&node->locked))
			return true;

		arch_mutex_cpu_relax();

		/*
		 * Or we race against a concurrent unqueue()'s step-B, in which
		 * case its step-C will write us a new @node->prev pointer.
		 */
		prev = ACCESS_ONCE(node->prev);
	}

	/*
	 * Step - B -- stabilize @next
	 *
	 * Similar to unlock(), wait for @node->next or move @lock from @node
	 * back to @prev.
	 */

	next = osq_wait_next(lock, node, prev);
	if (!next)
		return false;

	/*
	 * Step - C -- unlink
	 *
	 * @prev is stable because it's still waiting for a new @prev->next
	 * pointer, @next is stable because our @node->next pointer is NULL and
	 * it will wait in Step-A.
	 */

	ACCESS_ONCE(next->prev) = prev;
	ACCESS_ONCE(prev->next) = next;

	return false;
}

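/*
 * Illustrative only -- a rough before/after picture of the unqueue path
 * above, assuming @node gives up while sitting between @prev and @next:
 *
 *	initially:	prev <-> node <-> next	(@lock points at the tail)
 *	after Step-A:	@prev->next is cleared, so @prev's own
 *			unlock()/unqueue() stalls in osq_wait_next()
 *	after Step-B:	@node->next is taken (or, if @node was the tail,
 *			@lock is moved back to @prev and we are done)
 *	after Step-C:	prev <-> next		(@node is fully unlinked)
 */
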
void osq_unlock(struct optimistic_spin_queue **lock)
{
	struct optimistic_spin_queue *node = this_cpu_ptr(&osq_node);
	struct optimistic_spin_queue *next;

	/*
	 * Fast path for the uncontended case.
	 */
	if (likely(cmpxchg(lock, node, NULL) == node))
		return;

	/*
	 * Second most likely case.
	 */
	next = xchg(&node->next, NULL);
	if (next) {
		ACCESS_ONCE(next->locked) = 1;
		return;
	}

	next = osq_wait_next(lock, node, NULL);
	if (next)
		ACCESS_ONCE(next->locked) = 1;
}
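
/*
 * Illustrative only -- a minimal sketch (not built) of how a sleeping lock's
 * slowpath is expected to drive the OSQ.  'struct my_sleeping_lock', its
 * 'osq' field and the helpers owner_running()/try_acquire() are hypothetical
 * placeholders, not part of this file or of the kernel API.
 */
#if 0
static bool my_optimistic_spin(struct my_sleeping_lock *lock)
{
	bool acquired = false;

	/* Join the per-lock spinner queue; bail if we should reschedule. */
	if (!osq_lock(&lock->osq))
		return false;

	/* Spin as queue head while the owner is still running. */
	while (owner_running(lock)) {		/* hypothetical */
		if (try_acquire(lock)) {	/* hypothetical trylock */
			acquired = true;
			break;
		}
		if (need_resched())
			break;
		arch_mutex_cpu_relax();
	}

	/* Hand the queue-head position to the next spinner, if any. */
	osq_unlock(&lock->osq);
	return acquired;
}
#endif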