/* rwsem.c: R/W semaphores: contention handling functions
 *
 * Written by David Howells (dhowells@redhat.com).
 * Derived from arch/i386/kernel/semaphore.c
 */
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/module.h>
/*
 * Initialize an rwsem:
 */
void __init_rwsem(struct rw_semaphore *sem, const char *name,
		  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held semaphore:
	 */
	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
	lockdep_init_map(&sem->dep_map, name, key);
#endif
	sem->count = RWSEM_UNLOCKED_VALUE;
	spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
}

EXPORT_SYMBOL(__init_rwsem);
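/* Usage sketch (illustrative; not part of this file): callers normally reach
 * __init_rwsem() through the init_rwsem() macro, which supplies a distinct
 * lock class key per initialization site, roughly:
 *
 *	#define init_rwsem(sem)					\
 *	do {							\
 *		static struct lock_class_key __key;		\
 *		__init_rwsem((sem), #sem, &__key);		\
 *	} while (0)
 */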
struct rwsem_waiter {
	struct list_head list;
	struct task_struct *task;
	unsigned int flags;
#define RWSEM_WAITING_FOR_READ	0x00000001
#define RWSEM_WAITING_FOR_WRITE	0x00000002
};
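/* For reference (bias values as defined by the i386 rwsem.h layout; other
 * architectures may differ): the low 16 bits of sem->count form the 'active
 * part' and the high 16 bits the 'waiting part':
 *
 *	RWSEM_UNLOCKED_VALUE	0x00000000
 *	RWSEM_ACTIVE_BIAS	0x00000001
 *	RWSEM_ACTIVE_MASK	0x0000ffff
 *	RWSEM_WAITING_BIAS	(-0x00010000)
 */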
/*
 * handle the lock release when processes blocked on it can now run
 * - if we come here from up_xxxx(), then:
 *   - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed)
 *   - the 'waiting part' of count (&0xffff0000) is -ve (and will still be so)
 * - there must be someone on the queue
 * - the spinlock must be held by the caller
 * - woken process blocks are discarded from the list after having task zeroed
 * - writers are only woken if downgrading is false
 */
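/* Example entry state (i386 bias values, illustrative): with one queued
 * waiter and no active holders, count reads 0xffff0000 - the active part is
 * zero and the waiting part negative, matching the conditions above.
 */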
static inline struct rw_semaphore *
__rwsem_do_wake(struct rw_semaphore *sem, int downgrading)
{
	struct rwsem_waiter *waiter;
	struct task_struct *tsk;
	struct list_head *next;
	signed long oldcount, woken, loop;

	if (downgrading)
		goto dont_wake_writers;
	/* if we came through an up_xxxx() call, we only wake someone up
	 * if we can transition the active part of the count from 0 -> 1
	 */
 try_again:
	oldcount = rwsem_atomic_update(RWSEM_ACTIVE_BIAS, sem)
						- RWSEM_ACTIVE_BIAS;
	if (oldcount & RWSEM_ACTIVE_MASK)
		goto undo;

	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);

	/* try to grant a single write lock if there's a writer at the front
	 * of the queue - note we leave the 'active part' of the count
	 * incremented by 1 and the waiting part incremented by 0x00010000
	 */
	if (!(waiter->flags & RWSEM_WAITING_FOR_WRITE))
		goto readers_only;

	/* We must be careful not to touch 'waiter' after we set ->task = NULL.
	 * It is allocated on the waiter's stack and may become invalid at
	 * any time after that point (due to a wakeup from another source).
	 */
	list_del(&waiter->list);
	tsk = waiter->task;
	smp_mb();
	waiter->task = NULL;
	wake_up_process(tsk);
	put_task_struct(tsk);
	goto out;
	/* don't want to wake any writers */
 dont_wake_writers:
	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
	if (waiter->flags & RWSEM_WAITING_FOR_WRITE)
		goto out;
	/* grant an infinite number of read locks to the readers at the front
	 * of the queue
	 * - note we increment the 'active part' of the count by the number of
	 *   readers before waking any processes up
	 */
 readers_only:
	woken = 0;
	do {
		woken++;

		if (waiter->list.next == &sem->wait_list)
			break;

		waiter = list_entry(waiter->list.next,
					struct rwsem_waiter, list);

	} while (waiter->flags & RWSEM_WAITING_FOR_READ);

	loop = woken;
	woken *= RWSEM_ACTIVE_BIAS - RWSEM_WAITING_BIAS;
	if (!downgrading)
		/* we'd already done one increment earlier */
		woken -= RWSEM_ACTIVE_BIAS;
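	/* Worked example (i386 bias values, illustrative): waking N readers
	 * adds N * (RWSEM_ACTIVE_BIAS - RWSEM_WAITING_BIAS) = N * 0x00010001:
	 * each reader gains one active count and sheds the RWSEM_WAITING_BIAS
	 * it contributed when queuing. For N = 3 that is 0x00030003, less one
	 * RWSEM_ACTIVE_BIAS in the up_xxxx() path because try_again already
	 * took that increment.
	 */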
	rwsem_atomic_add(woken, sem);

	next = sem->wait_list.next;
	for (; loop > 0; loop--) {
		waiter = list_entry(next, struct rwsem_waiter, list);
		next = waiter->list.next;
		tsk = waiter->task;
		smp_mb();
		waiter->task = NULL;
		wake_up_process(tsk);
		put_task_struct(tsk);
	}

	sem->wait_list.next = next;
	next->prev = &sem->wait_list;

 out:
	return sem;

	/* undo the change to count, but check for a transition 1->0 */
 undo:
	if (rwsem_atomic_update(-RWSEM_ACTIVE_BIAS, sem) != 0)
		goto out;
	goto try_again;
}
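/* Note on the undo path: if removing the speculative increment leaves a
 * non-zero count, another party remains to trigger a later wakeup; if it
 * drops the count to zero, nobody else will, so we loop back to try_again
 * and attempt the wakeup ourselves.
 */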
/*
 * wait for a lock to be granted
 */
static struct rw_semaphore *
rwsem_down_failed_common(struct rw_semaphore *sem,
			struct rwsem_waiter *waiter, signed long adjustment)
{
	struct task_struct *tsk = current;
	signed long count;

	set_task_state(tsk, TASK_UNINTERRUPTIBLE);

	/* set up my own style of waitqueue */
	spin_lock_irq(&sem->wait_lock);
	waiter->task = tsk;
	get_task_struct(tsk);

	list_add_tail(&waiter->list, &sem->wait_list);

	/* we're now waiting on the lock, but no longer actively read-locking */
	count = rwsem_atomic_update(adjustment, sem);

	/* if there are no active locks, wake the front queued process(es) up */
	if (!(count & RWSEM_ACTIVE_MASK))
		sem = __rwsem_do_wake(sem, 0);

	spin_unlock_irq(&sem->wait_lock);

	/* wait to be given the lock */
	for (;;) {
		if (!waiter->task)
			break;
		schedule();
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	}

	tsk->state = TASK_RUNNING;

	return sem;
}
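/* The NULL handshake: __rwsem_do_wake() zeroes waiter->task (after an
 * smp_mb()) to signal that the lock has been granted; the loop above
 * re-checks it after every schedule(), so a spurious wakeup simply goes
 * back to sleep.
 */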
/*
 * wait for the read lock to be granted
 */
struct rw_semaphore fastcall __sched *
rwsem_down_read_failed(struct rw_semaphore *sem)
{
	struct rwsem_waiter waiter;

	waiter.flags = RWSEM_WAITING_FOR_READ;
	rwsem_down_failed_common(sem, &waiter,
				RWSEM_WAITING_BIAS - RWSEM_ACTIVE_BIAS);
	return sem;
}
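/* The adjustment above undoes the RWSEM_ACTIVE_BIAS that down_read()'s fast
 * path speculatively added and replaces it with RWSEM_WAITING_BIAS, marking
 * this task as queued rather than active (assuming the usual fast path that
 * increments count before falling back here).
 */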
/*
 * wait for the write lock to be granted
 */
struct rw_semaphore fastcall __sched *
rwsem_down_write_failed(struct rw_semaphore *sem)
{
	struct rwsem_waiter waiter;

	waiter.flags = RWSEM_WAITING_FOR_WRITE;
	rwsem_down_failed_common(sem, &waiter, -RWSEM_ACTIVE_BIAS);

	return sem;
}
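/* Here only -RWSEM_ACTIVE_BIAS is applied: down_write()'s fast path added
 * RWSEM_ACTIVE_WRITE_BIAS (active bias plus waiting bias on the i386
 * layout), so dropping the active part alone leaves the waiting bias in
 * place for this queued writer.
 */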
/*
 * handle waking up a waiter on the semaphore
 * - up_read/up_write has decremented the active part of count if we come here
 */
struct rw_semaphore fastcall *rwsem_wake(struct rw_semaphore *sem)
{
	unsigned long flags;

	spin_lock_irqsave(&sem->wait_lock, flags);

	/* do nothing if list empty */
	if (!list_empty(&sem->wait_list))
		sem = __rwsem_do_wake(sem, 0);

	spin_unlock_irqrestore(&sem->wait_lock, flags);

	return sem;
}
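/* How this is reached (architecture fast paths vary): up_read() and
 * up_write() atomically remove their active bias from sem->count and only
 * branch into rwsem_wake() when the adjusted count suggests queued waiters
 * with no remaining active holders.
 */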
/*
 * downgrade a write lock into a read lock
 * - caller incremented waiting part of count and discovered it still negative
 * - just wake up any readers at the front of the queue
 */
struct rw_semaphore fastcall *rwsem_downgrade_wake(struct rw_semaphore *sem)
{
	unsigned long flags;

	spin_lock_irqsave(&sem->wait_lock, flags);

	/* do nothing if list empty */
	if (!list_empty(&sem->wait_list))
		sem = __rwsem_do_wake(sem, 1);

	spin_unlock_irqrestore(&sem->wait_lock, flags);

	return sem;
}
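/* Worked example (i386 bias values, hypothetical counts): a write lock with
 * one queued waiter reads 0xfffe0001; downgrade_write() adds 0x00010000
 * (i.e. subtracts RWSEM_WAITING_BIAS), giving 0xffff0001, which is still
 * negative, so the caller lands here to wake the readers at the front.
 */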
EXPORT_SYMBOL(rwsem_down_read_failed);
EXPORT_SYMBOL(rwsem_down_write_failed);
EXPORT_SYMBOL(rwsem_wake);
EXPORT_SYMBOL(rwsem_downgrade_wake);