/* rwsem.c: R/W semaphores: contention handling functions
 *
 * Written by David Howells (dhowells@redhat.com).
 * Derived from arch/i386/kernel/semaphore.c
 *
 * Writer lock-stealing by Alex Shi <alex.shi@intel.com>
 */
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/export.h>
/*
 * Initialize an rwsem:
 */
void __init_rwsem(struct rw_semaphore *sem, const char *name,
                  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        /*
         * Make sure we are not reinitializing a held semaphore:
         */
        debug_check_no_locks_freed((void *)sem, sizeof(*sem));
        lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
        sem->count = RWSEM_UNLOCKED_VALUE;
        raw_spin_lock_init(&sem->wait_lock);
        INIT_LIST_HEAD(&sem->wait_list);
}

EXPORT_SYMBOL(__init_rwsem);
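
/*
 * Usage sketch (illustrative only; "my_sem" is a made-up name).  Callers use
 * the wrappers declared in <linux/rwsem.h>; the functions in this file are
 * only the contended slow paths behind them:
 *
 *      static DECLARE_RWSEM(my_sem);           // or init_rwsem(&my_sem)
 *
 *      down_read(&my_sem);     // contended case -> rwsem_down_read_failed()
 *      up_read(&my_sem);       // waiters queued -> rwsem_wake()
 *
 *      down_write(&my_sem);    // contended case -> rwsem_down_write_failed()
 *      downgrade_write(&my_sem); // waiters queued -> rwsem_downgrade_wake()
 *      up_read(&my_sem);
 */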
enum rwsem_waiter_type {
        RWSEM_WAITING_FOR_WRITE,
        RWSEM_WAITING_FOR_READ
};

struct rwsem_waiter {
        struct list_head list;
        struct task_struct *task;
        enum rwsem_waiter_type type;
};
/* Wake types for __rwsem_do_wake().  Note that RWSEM_WAKE_NO_ACTIVE and
 * RWSEM_WAKE_READ_OWNED imply that the spinlock must have been kept held
 * since the rwsem value was observed.
 */
#define RWSEM_WAKE_ANY        0 /* Wake whatever's at head of wait list */
#define RWSEM_WAKE_NO_ACTIVE  1 /* rwsem was observed with no active thread */
#define RWSEM_WAKE_READ_OWNED 2 /* rwsem was observed to be read owned */
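
/*
 * For reference, the bias constants used below come from the per-arch (or
 * asm-generic) <asm/rwsem.h>.  With the 32-bit layout assumed by the comments
 * in this file they are typically:
 *
 *      RWSEM_UNLOCKED_VALUE     0x00000000
 *      RWSEM_ACTIVE_BIAS        0x00000001
 *      RWSEM_ACTIVE_MASK        0x0000ffff
 *      RWSEM_WAITING_BIAS      -0x00010000
 *      RWSEM_ACTIVE_READ_BIAS   RWSEM_ACTIVE_BIAS
 *      RWSEM_ACTIVE_WRITE_BIAS  (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
 *
 * i.e. the low half of sem->count counts active lockers and the high half
 * goes negative while anything is queued; 64-bit configurations use wider
 * halves but follow the same scheme.
 */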
/*
 * handle the lock release when processes blocked on it can now run
 * - if we come here from up_xxxx(), then:
 *   - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed)
 *   - the 'waiting part' of count (&0xffff0000) is -ve (and will still be so)
 *   - there must be someone on the queue
 * - the spinlock must be held by the caller
 * - woken readers are removed from the list after having their task zeroed;
 *   a woken writer stays queued and must grab the lock itself
 * - a queued writer is only woken if wake_type is not RWSEM_WAKE_READ_OWNED
 *   (i.e. we are not downgrading)
 */
static struct rw_semaphore *
__rwsem_do_wake(struct rw_semaphore *sem, int wake_type)
{
        struct rwsem_waiter *waiter;
        struct task_struct *tsk;
        struct list_head *next;
        signed long woken, loop, adjustment;

        waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
        if (waiter->type != RWSEM_WAITING_FOR_WRITE)
                goto readers_only;

        if (wake_type == RWSEM_WAKE_READ_OWNED)
                /* Another active reader was observed, so wakeup is not
                 * likely to succeed. Save the atomic op.
                 */
                goto out;

        /* Wake up the writing waiter and let the task grab the sem: */
        wake_up_process(waiter->task);
        goto out;

 readers_only:
        /* If we come here from up_xxxx(), another thread might have reached
         * rwsem_down_read_failed()/rwsem_down_write_failed() before we
         * acquired the spinlock and woken up a waiter, making it now active.
         * We prefer to check for this first in order to not spend too much
         * time with the spinlock held if we're not going to be able to wake
         * up readers in the end.
         *
         * Note that we do not need to update the rwsem count: any writer
         * trying to acquire rwsem will run rwsem_down_write_failed() due
         * to the waiting threads and block trying to acquire the spinlock.
         *
         * We use a dummy atomic update in order to acquire the cache line
         * exclusively since we expect to succeed and run the final rwsem
         * count adjustment pretty soon.
         */
        if (wake_type == RWSEM_WAKE_ANY &&
            rwsem_atomic_update(0, sem) < RWSEM_WAITING_BIAS)
                /* Someone grabbed the sem for write already */
                goto out;
        /* Grant an infinite number of read locks to the readers at the front
         * of the queue.  Note we increment the 'active part' of the count by
         * the number of readers before waking any processes up.
         */
        woken = 0;
        do {
                woken++;

                if (waiter->list.next == &sem->wait_list)
                        break;

                waiter = list_entry(waiter->list.next,
                                    struct rwsem_waiter, list);

        } while (waiter->type != RWSEM_WAITING_FOR_WRITE);
        adjustment = woken * RWSEM_ACTIVE_READ_BIAS;
        if (waiter->type != RWSEM_WAITING_FOR_WRITE)
                /* hit end of list above */
                adjustment -= RWSEM_WAITING_BIAS;

        rwsem_atomic_add(adjustment, sem);

        next = sem->wait_list.next;
        for (loop = woken; loop > 0; loop--) {
                waiter = list_entry(next, struct rwsem_waiter, list);
                next = waiter->list.next;
                tsk = waiter->task;
                smp_mb();
                /* The waiter can return and reuse its on-stack rwsem_waiter
                 * as soon as it observes ->task == NULL, so everything above
                 * must be read first. */
                waiter->task = NULL;
                wake_up_process(tsk);
                put_task_struct(tsk);
        }

        sem->wait_list.next = next;
        next->prev = &sem->wait_list;

 out:
        return sem;
}
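
/*
 * Worked example for the reader-grant path above (using the typical 32-bit
 * bias values; illustrative only): with three readers at the head of the
 * queue followed by a writer, woken == 3, so adjustment is
 * 3 * RWSEM_ACTIVE_READ_BIAS == 3 and RWSEM_WAITING_BIAS is left in place
 * for the still-queued writer.  If instead the whole queue was readers, the
 * loop hits the list head, adjustment becomes 3 - RWSEM_WAITING_BIAS (the
 * waiting contribution is removed as well), and the wait list ends up empty.
 */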
/*
 * wait for the read lock to be granted
 */
struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
{
        signed long adjustment = -RWSEM_ACTIVE_READ_BIAS;
        struct rwsem_waiter waiter;
        struct task_struct *tsk = current;
        signed long count;

        /* set up my own style of waitqueue */
        waiter.task = tsk;
        waiter.type = RWSEM_WAITING_FOR_READ;
        get_task_struct(tsk);

        raw_spin_lock_irq(&sem->wait_lock);
        if (list_empty(&sem->wait_list))
                adjustment += RWSEM_WAITING_BIAS;
        list_add_tail(&waiter.list, &sem->wait_list);

        /* we're now waiting on the lock, but no longer actively locking */
        count = rwsem_atomic_update(adjustment, sem);

        /* If there are no active locks, wake the front queued process(es). */
        if (count == RWSEM_WAITING_BIAS)
                sem = __rwsem_do_wake(sem, RWSEM_WAKE_NO_ACTIVE);

        raw_spin_unlock_irq(&sem->wait_lock);

        /* wait to be given the lock */
        while (true) {
                set_task_state(tsk, TASK_UNINTERRUPTIBLE);
                if (!waiter.task)
                        break;
                schedule();
        }

        tsk->state = TASK_RUNNING;

        return sem;
}
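
/*
 * How this slow path is reached (rough sketch of the asm-generic fast path;
 * the exact code is per-arch):
 *
 *      static inline void __down_read(struct rw_semaphore *sem)
 *      {
 *              if (unlikely(atomic_long_inc_return(
 *                              (atomic_long_t *)&sem->count) <= 0))
 *                      rwsem_down_read_failed(sem);
 *      }
 *
 * i.e. a reader only ends up here if a writer is active or waiters are
 * already queued; it then sleeps until __rwsem_do_wake() clears waiter.task.
 */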
/*
 * wait until we successfully acquire the write lock
 */
struct rw_semaphore __sched *rwsem_down_write_failed(struct rw_semaphore *sem)
{
        signed long adjustment = -RWSEM_ACTIVE_WRITE_BIAS;
        struct rwsem_waiter waiter;
        struct task_struct *tsk = current;
        signed long count;

        /* set up my own style of waitqueue */
        waiter.task = tsk;
        waiter.type = RWSEM_WAITING_FOR_WRITE;

        raw_spin_lock_irq(&sem->wait_lock);
        if (list_empty(&sem->wait_list))
                adjustment += RWSEM_WAITING_BIAS;
        list_add_tail(&waiter.list, &sem->wait_list);

        /* we're now waiting on the lock, but no longer actively locking */
        count = rwsem_atomic_update(adjustment, sem);

        /* If there were already threads queued before us and there are no
         * active writers, the lock must be read owned; so we try to wake
         * any read locks that were queued ahead of us. */
        if (count > RWSEM_WAITING_BIAS &&
            adjustment == -RWSEM_ACTIVE_WRITE_BIAS)
                sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED);

        /* wait until we successfully acquire the lock */
        set_task_state(tsk, TASK_UNINTERRUPTIBLE);
        while (true) {
                if (!(count & RWSEM_ACTIVE_MASK)) {
                        /* Try acquiring the write lock. */
                        count = RWSEM_ACTIVE_WRITE_BIAS;
                        if (!list_is_singular(&sem->wait_list))
                                count += RWSEM_WAITING_BIAS;
                        if (cmpxchg(&sem->count, RWSEM_WAITING_BIAS, count) ==
                                                        RWSEM_WAITING_BIAS)
                                break;
                }

                raw_spin_unlock_irq(&sem->wait_lock);

                /* Block until there are no active lockers. */
                do {
                        schedule();
                        set_task_state(tsk, TASK_UNINTERRUPTIBLE);
                } while ((count = sem->count) & RWSEM_ACTIVE_MASK);

                raw_spin_lock_irq(&sem->wait_lock);
        }

        list_del(&waiter.list);
        raw_spin_unlock_irq(&sem->wait_lock);
        tsk->state = TASK_RUNNING;

        return sem;
}
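
/*
 * Worked example for the cmpxchg above (typical 32-bit bias values;
 * illustrative only): once no active lockers remain, sem->count is
 * RWSEM_WAITING_BIAS (-0x10000), since the waiting bias is applied only once
 * however many tasks are queued.  The cmpxchg replaces it with
 * RWSEM_ACTIVE_WRITE_BIAS (-0xffff), adding RWSEM_WAITING_BIAS back in when
 * other waiters remain queued so they are not forgotten.  If a reader or
 * another writer got in first, sem->count no longer matches, the cmpxchg
 * fails and we go back to sleep; this is the writer lock-stealing noted in
 * the header comment.
 */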
/*
 * handle waking up a waiter on the semaphore
 * - up_read/up_write has decremented the active part of count if we come here
 */
struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&sem->wait_lock, flags);

        /* do nothing if list empty */
        if (!list_empty(&sem->wait_list))
                sem = __rwsem_do_wake(sem, RWSEM_WAKE_ANY);

        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

        return sem;
}
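
/*
 * For reference, the unlock fast paths call in here only when the count
 * shows queued waiters and no remaining active lockers, roughly (asm-generic
 * variant; per-arch code may differ):
 *
 *      tmp = atomic_long_dec_return((atomic_long_t *)&sem->count);
 *      if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0))
 *              rwsem_wake(sem);                // __up_read()
 */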
/*
 * downgrade a write lock into a read lock
 * - caller incremented waiting part of count and discovered it still negative
 * - just wake up any readers at the front of the queue
 */
struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&sem->wait_lock, flags);

        /* do nothing if list empty */
        if (!list_empty(&sem->wait_list))
                sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED);

        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

        return sem;
}
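
/*
 * Worked example (typical 32-bit bias values; illustrative only):
 * downgrade_write() converts the held write lock into a read lock by adding
 * -RWSEM_WAITING_BIAS to sem->count, turning -0xffff into +1 ("one active
 * reader, nobody queued").  A still-negative result means waiters are
 * queued, so it calls in here and the readers among them are woken to share
 * the lock immediately; a queued writer keeps waiting.
 */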
EXPORT_SYMBOL(rwsem_down_read_failed);
EXPORT_SYMBOL(rwsem_down_write_failed);
EXPORT_SYMBOL(rwsem_wake);
EXPORT_SYMBOL(rwsem_downgrade_wake);