/* rwsem.c: R/W semaphores: contention handling functions
 *
 * Written by David Howells (dhowells@redhat.com).
 * Derived from arch/i386/kernel/semaphore.c
 *
 * Writer lock-stealing by Alex Shi <alex.shi@intel.com>
 * and Michel Lespinasse <walken@google.com>
 */
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/export.h>
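
/* Layout of sem->count, for reference (the constants come from the arch
 * rwsem.h headers; 32-bit values shown):
 *
 *	RWSEM_UNLOCKED_VALUE	0x00000000	no activity
 *	RWSEM_ACTIVE_BIAS	0x00000001	one active locker
 *	RWSEM_ACTIVE_MASK	0x0000ffff	the 'active part'
 *	RWSEM_WAITING_BIAS	(-0x00010000)	the 'waiting part'
 *	RWSEM_ACTIVE_READ_BIAS	RWSEM_ACTIVE_BIAS
 *	RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
 */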

/*
 * Initialize an rwsem:
 */
void __init_rwsem(struct rw_semaphore *sem, const char *name,
		  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held semaphore:
	 */
	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
	lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
	sem->count = RWSEM_UNLOCKED_VALUE;
	raw_spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
}

EXPORT_SYMBOL(__init_rwsem);

enum rwsem_waiter_type {
	RWSEM_WAITING_FOR_WRITE,
	RWSEM_WAITING_FOR_READ
};

struct rwsem_waiter {
	struct list_head list;
	struct task_struct *task;
	enum rwsem_waiter_type type;
};

enum rwsem_wake_type {
	RWSEM_WAKE_ANY,		/* Wake whatever's at head of wait list */
	RWSEM_WAKE_READERS,	/* Wake readers only */
	RWSEM_WAKE_READ_OWNED	/* Waker thread holds the read lock */
};

/*
 * handle the lock release when processes blocked on it can now run
 * - if we come here from up_xxxx(), then:
 *   - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed)
 *   - the 'waiting part' of count (&0xffff0000) is -ve (and will still be so)
 * - there must be someone on the queue
 * - the spinlock must be held by the caller
 * - woken process blocks are discarded from the list after having task zeroed
 * - writers are only woken if downgrading is false
 */
static struct rw_semaphore *
__rwsem_do_wake(struct rw_semaphore *sem, enum rwsem_wake_type wake_type)
{
	struct rwsem_waiter *waiter;
	struct task_struct *tsk;
	struct list_head *next;
	long oldcount, woken, loop, adjustment;

	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
	if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
		if (wake_type == RWSEM_WAKE_ANY)
			/* Wake writer at the front of the queue, but do not
			 * grant it the lock yet as we want other writers
			 * to be able to steal it. Readers, on the other hand,
			 * will block as they will notice the queued writer.
			 */
			wake_up_process(waiter->task);
		goto out;
	}

	/* Writers might steal the lock before we grant it to the next reader.
	 * We prefer to do the first reader grant before counting readers
	 * so we can bail out early if a writer stole the lock.
	 */
	adjustment = 0;
	if (wake_type != RWSEM_WAKE_READ_OWNED) {
		adjustment = RWSEM_ACTIVE_READ_BIAS;
 try_reader_grant:
		oldcount = rwsem_atomic_update(adjustment, sem) - adjustment;
		if (unlikely(oldcount < RWSEM_WAITING_BIAS)) {
			/* A writer stole the lock. Undo our reader grant. */
			if (rwsem_atomic_update(-adjustment, sem) &
						RWSEM_ACTIVE_MASK)
				goto out;
			/* Last active locker left. Retry waking readers. */
			goto try_reader_grant;
		}
	}
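
	/* Once the grant above sticks, the active part of count is non-zero
	 * (in the RWSEM_WAKE_READ_OWNED case the waker's own read lock
	 * already guarantees that), so writer stealing can no longer race
	 * with the reader counting below. */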

	/* Grant an infinite number of read locks to the readers at the front
	 * of the queue. Note we increment the 'active part' of the count by
	 * the number of readers before waking any processes up.
	 */
	woken = 0;
	do {
		woken++;

		if (waiter->list.next == &sem->wait_list)
			break;

		waiter = list_entry(waiter->list.next,
				    struct rwsem_waiter, list);

	} while (waiter->type != RWSEM_WAITING_FOR_WRITE);

	adjustment = woken * RWSEM_ACTIVE_READ_BIAS - adjustment;
	if (waiter->type != RWSEM_WAITING_FOR_WRITE)
		/* hit end of list above */
		adjustment -= RWSEM_WAITING_BIAS;

	if (adjustment)
		rwsem_atomic_add(adjustment, sem);
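
	/* Clearing waiter->task is what actually hands the lock to a reader:
	 * each waiter spins in rwsem_down_read_failed() until its task field
	 * goes NULL. Fetch the task pointer before the NULL store (ordered by
	 * the smp_mb() below), since once task is NULL the waiter may return
	 * and its on-stack waiter block may vanish. */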
	next = sem->wait_list.next;
	loop = woken;
	do {
		waiter = list_entry(next, struct rwsem_waiter, list);
		next = waiter->list.next;
		tsk = waiter->task;
		smp_mb();
		waiter->task = NULL;
		wake_up_process(tsk);
		put_task_struct(tsk);
	} while (--loop);

	sem->wait_list.next = next;
	next->prev = &sem->wait_list;

 out:
	return sem;
}

/*
 * wait for the read lock to be granted
 */
struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
{
	long count, adjustment = -RWSEM_ACTIVE_READ_BIAS;
	struct rwsem_waiter waiter;
	struct task_struct *tsk = current;

	/* set up my own style of waitqueue */
	waiter.task = tsk;
	waiter.type = RWSEM_WAITING_FOR_READ;
	get_task_struct(tsk);

	raw_spin_lock_irq(&sem->wait_lock);
	if (list_empty(&sem->wait_list))
		adjustment += RWSEM_WAITING_BIAS;
	list_add_tail(&waiter.list, &sem->wait_list);

	/* we're now waiting on the lock, but no longer actively locking */
	count = rwsem_atomic_update(adjustment, sem);
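
	/* Note: count == RWSEM_WAITING_BIAS means no lock is held at all,
	 * while adjustment != -RWSEM_ACTIVE_READ_BIAS means the list was
	 * empty before we queued, i.e. we are now at the head of the queue. */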

	/* If there are no active locks, wake the front queued process(es).
	 *
	 * If there are no writers and we are first in the queue,
	 * wake our own waiter to join the existing active readers !
	 */
	if (count == RWSEM_WAITING_BIAS ||
	    (count > RWSEM_WAITING_BIAS &&
	     adjustment != -RWSEM_ACTIVE_READ_BIAS))
		sem = __rwsem_do_wake(sem, RWSEM_WAKE_ANY);

	raw_spin_unlock_irq(&sem->wait_lock);
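
	/* The waker clears waiter.task (paired with its smp_mb()) once the
	 * read lock has been granted to us; after that nobody else touches
	 * the waiter block and we can simply return. */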

	/* wait to be given the lock */
	while (true) {
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
		if (!waiter.task)
			break;
		schedule();
	}

	tsk->state = TASK_RUNNING;

	return sem;
}

/*
 * wait until we successfully acquire the write lock
 */
struct rw_semaphore __sched *rwsem_down_write_failed(struct rw_semaphore *sem)
{
	long count, adjustment = -RWSEM_ACTIVE_WRITE_BIAS;
	struct rwsem_waiter waiter;
	struct task_struct *tsk = current;

	/* set up my own style of waitqueue */
	waiter.task = tsk;
	waiter.type = RWSEM_WAITING_FOR_WRITE;

	raw_spin_lock_irq(&sem->wait_lock);
	if (list_empty(&sem->wait_list))
		adjustment += RWSEM_WAITING_BIAS;
	list_add_tail(&waiter.list, &sem->wait_list);

	/* we're now waiting on the lock, but no longer actively locking */
	count = rwsem_atomic_update(adjustment, sem);
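
	/* Here adjustment == -RWSEM_ACTIVE_WRITE_BIAS means we were not the
	 * first waiter, and count > RWSEM_WAITING_BIAS then implies active
	 * readers rather than an active writer. */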

	/* If there were already threads queued before us and there are no
	 * active writers, the lock must be read owned; so we try to wake
	 * any readers that were queued ahead of us. */
	if (count > RWSEM_WAITING_BIAS &&
	    adjustment == -RWSEM_ACTIVE_WRITE_BIAS)
		sem = __rwsem_do_wake(sem, RWSEM_WAKE_READERS);
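
	/* Unlike readers, a writer is never handed the lock by the waker: it
	 * competes for it below, under wait_lock, via cmpxchg(). Testing
	 * sem->count first skips the more expensive atomic operation when
	 * the attempt would fail anyway. */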

	/* wait until we successfully acquire the lock */
	set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	while (true) {
		if (!(count & RWSEM_ACTIVE_MASK)) {
			/* Try acquiring the write lock. */
			count = RWSEM_ACTIVE_WRITE_BIAS;
			if (!list_is_singular(&sem->wait_list))
				count += RWSEM_WAITING_BIAS;

			if (sem->count == RWSEM_WAITING_BIAS &&
			    cmpxchg(&sem->count, RWSEM_WAITING_BIAS, count) ==
							RWSEM_WAITING_BIAS)
				break;
		}

		raw_spin_unlock_irq(&sem->wait_lock);

		/* Block until there are no active lockers. */
		do {
			schedule();
			set_task_state(tsk, TASK_UNINTERRUPTIBLE);
		} while ((count = sem->count) & RWSEM_ACTIVE_MASK);

		raw_spin_lock_irq(&sem->wait_lock);
	}

	list_del(&waiter.list);
	raw_spin_unlock_irq(&sem->wait_lock);
	tsk->state = TASK_RUNNING;

	return sem;
}

/*
 * handle waking up a waiter on the semaphore
 * - up_read/up_write has decremented the active part of count if we come here
 */
struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&sem->wait_lock, flags);

	/* do nothing if list empty */
	if (!list_empty(&sem->wait_list))
		sem = __rwsem_do_wake(sem, RWSEM_WAKE_ANY);

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

	return sem;
}

/*
 * downgrade a write lock into a read lock
 * - caller incremented waiting part of count and discovered it still negative
 * - just wake up any readers at the front of the queue
 */
struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&sem->wait_lock, flags);

	/* do nothing if list empty */
	if (!list_empty(&sem->wait_list))
		sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED);

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

	return sem;
}

EXPORT_SYMBOL(rwsem_down_read_failed);
EXPORT_SYMBOL(rwsem_down_write_failed);
EXPORT_SYMBOL(rwsem_wake);
EXPORT_SYMBOL(rwsem_downgrade_wake);
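
/* For reference, these slow paths are entered only from the arch fast
 * paths. A sketch of the read fast path, modelled on
 * include/asm-generic/rwsem.h (illustrative only; some arches hand-code
 * this in assembly instead):
 *
 *	static inline void __down_read(struct rw_semaphore *sem)
 *	{
 *		if (unlikely(atomic_long_inc_return(
 *				(atomic_long_t *)&sem->count) <= 0))
 *			rwsem_down_read_failed(sem);
 *	}
 */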