/* rwsem-spinlock.c: R/W semaphores: contention handling functions for
 * generic spinlock implementation
 *
 * Copyright (c) 2001 David Howells (dhowells@redhat.com).
 * - Derived partially from idea by Andrea Arcangeli <andrea@suse.de>
 * - Derived also from comments by Linus
 */
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/module.h>
struct rwsem_waiter {
	struct list_head list;
	struct task_struct *task;
	unsigned int flags;
#define RWSEM_WAITING_FOR_READ	0x00000001
#define RWSEM_WAITING_FOR_WRITE	0x00000002
};
/*
 * initialise the semaphore
 */
void __init_rwsem(struct rw_semaphore *sem, const char *name,
		  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held semaphore:
	 */
	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
	lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
	sem->activity = 0;
	spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
}
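/*
 * Usage sketch (illustrative only): callers normally reach __init_rwsem()
 * through the DECLARE_RWSEM() / init_rwsem() wrappers in <linux/rwsem.h>
 * rather than calling it directly. The names example_sem and example_setup
 * below are hypothetical.
 */
static DECLARE_RWSEM(example_sem);	/* statically initialised semaphore */

static void __maybe_unused example_setup(struct rw_semaphore *dynamic_sem)
{
	/* run-time initialisation of an embedded or dynamically allocated rwsem */
	init_rwsem(dynamic_sem);
}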
/*
 * handle the lock release when processes blocked on it that can now run
 * - if we come here, then:
 *   - the 'active count' _reached_ zero
 *   - the 'waiting count' is non-zero
 *   - the spinlock must be held by the caller
 * - woken process blocks are discarded from the list after having task zeroed
 * - writers are only woken if wakewrite is non-zero
 */
static inline struct rw_semaphore *
__rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
{
	struct rwsem_waiter *waiter;
	struct task_struct *tsk;
	int woken;

	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);

	if (!wakewrite) {
		if (waiter->flags & RWSEM_WAITING_FOR_WRITE)
			goto out;
		goto dont_wake_writers;
	}

	/* if we are allowed to wake writers try to grant a single write lock
	 * if there's a writer at the front of the queue
	 * - we leave the 'waiting count' incremented to signify potential
	 *   contention
	 */
	if (waiter->flags & RWSEM_WAITING_FOR_WRITE) {
		sem->activity = -1;
		list_del(&waiter->list);
		tsk = waiter->task;
		/* Don't touch waiter after ->task has been NULLed */
		smp_mb();
		waiter->task = NULL;
		wake_up_process(tsk);
		put_task_struct(tsk);
		goto out;
	}

	/* grant an infinite number of read locks to the front of the queue */
 dont_wake_writers:
	woken = 0;
	while (waiter->flags & RWSEM_WAITING_FOR_READ) {
		struct list_head *next = waiter->list.next;

		list_del(&waiter->list);
		tsk = waiter->task;
		smp_mb();
		waiter->task = NULL;
		wake_up_process(tsk);
		put_task_struct(tsk);
		woken++;
		if (list_empty(&sem->wait_list))
			break;
		waiter = list_entry(next, struct rwsem_waiter, list);
	}

	sem->activity += woken;

 out:
	return sem;
}
/*
 * wake a single writer
 */
static inline struct rw_semaphore *
__rwsem_wake_one_writer(struct rw_semaphore *sem)
{
	struct rwsem_waiter *waiter;
	struct task_struct *tsk;

	sem->activity = -1;

	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
	list_del(&waiter->list);

	tsk = waiter->task;
	smp_mb();
	waiter->task = NULL;
	wake_up_process(tsk);
	put_task_struct(tsk);
	return sem;
}
/*
 * get a read lock on the semaphore
 */
void __sched __down_read(struct rw_semaphore *sem)
{
	struct rwsem_waiter waiter;
	struct task_struct *tsk;

	spin_lock_irq(&sem->wait_lock);

	if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
		/* granted */
		sem->activity++;
		spin_unlock_irq(&sem->wait_lock);
		goto out;
	}

	tsk = current;
	set_task_state(tsk, TASK_UNINTERRUPTIBLE);

	/* set up my own style of waitqueue */
	waiter.task = tsk;
	waiter.flags = RWSEM_WAITING_FOR_READ;
	get_task_struct(tsk);

	list_add_tail(&waiter.list, &sem->wait_list);

	/* we don't need to touch the semaphore struct anymore */
	spin_unlock_irq(&sem->wait_lock);

	/* wait to be given the lock */
	for (;;) {
		if (!waiter.task)
			break;
		schedule();
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	}

	tsk->state = TASK_RUNNING;
 out:
	;
}
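/*
 * Usage sketch (illustrative only): readers normally use the down_read() /
 * up_read() wrappers in <linux/rwsem.h>, which map onto __down_read() and
 * __up_read() in this implementation. example_data and example_reader are
 * hypothetical names.
 */
static int example_data;

static int __maybe_unused example_reader(void)
{
	int val;

	down_read(&example_sem);	/* may sleep while a writer holds the lock */
	val = example_data;		/* read-side critical section */
	up_read(&example_sem);		/* may wake a waiting writer */
	return val;
}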
/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
int __down_read_trylock(struct rw_semaphore *sem)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&sem->wait_lock, flags);

	if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
		/* granted */
		sem->activity++;
		ret = 1;
	}

	spin_unlock_irqrestore(&sem->wait_lock, flags);

	return ret;
}
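/*
 * Usage sketch (illustrative only): down_read_trylock() from <linux/rwsem.h>
 * mirrors __down_read_trylock() and never sleeps, so it suits contexts that
 * must not block. example_peek is a hypothetical name.
 */
static int __maybe_unused example_peek(int *out)
{
	if (!down_read_trylock(&example_sem))
		return 0;		/* contended: give up rather than sleep */
	*out = example_data;
	up_read(&example_sem);
	return 1;
}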
/*
 * get a write lock on the semaphore
 * - we increment the waiting count anyway to indicate an exclusive lock
 */
void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
{
	struct rwsem_waiter waiter;
	struct task_struct *tsk;

	spin_lock_irq(&sem->wait_lock);

	if (sem->activity == 0 && list_empty(&sem->wait_list)) {
		/* granted */
		sem->activity = -1;
		spin_unlock_irq(&sem->wait_lock);
		goto out;
	}

	tsk = current;
	set_task_state(tsk, TASK_UNINTERRUPTIBLE);

	/* set up my own style of waitqueue */
	waiter.task = tsk;
	waiter.flags = RWSEM_WAITING_FOR_WRITE;
	get_task_struct(tsk);

	list_add_tail(&waiter.list, &sem->wait_list);

	/* we don't need to touch the semaphore struct anymore */
	spin_unlock_irq(&sem->wait_lock);

	/* wait to be given the lock */
	for (;;) {
		if (!waiter.task)
			break;
		schedule();
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	}

	tsk->state = TASK_RUNNING;
 out:
	;
}
void __sched __down_write(struct rw_semaphore *sem)
{
	__down_write_nested(sem, 0);
}
/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
int __down_write_trylock(struct rw_semaphore *sem)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&sem->wait_lock, flags);

	if (sem->activity == 0 && list_empty(&sem->wait_list)) {
		/* granted */
		sem->activity = -1;
		ret = 1;
	}

	spin_unlock_irqrestore(&sem->wait_lock, flags);

	return ret;
}
/*
 * release a read lock on the semaphore
 */
void __up_read(struct rw_semaphore *sem)
{
	unsigned long flags;

	spin_lock_irqsave(&sem->wait_lock, flags);

	if (--sem->activity == 0 && !list_empty(&sem->wait_list))
		sem = __rwsem_wake_one_writer(sem);

	spin_unlock_irqrestore(&sem->wait_lock, flags);
}
/*
 * release a write lock on the semaphore
 */
void __up_write(struct rw_semaphore *sem)
{
	unsigned long flags;

	spin_lock_irqsave(&sem->wait_lock, flags);

	sem->activity = 0;
	if (!list_empty(&sem->wait_list))
		sem = __rwsem_do_wake(sem, 1);

	spin_unlock_irqrestore(&sem->wait_lock, flags);
}
/*
 * downgrade a write lock into a read lock
 * - just wake up any readers at the front of the queue
 */
void __downgrade_write(struct rw_semaphore *sem)
{
	unsigned long flags;

	spin_lock_irqsave(&sem->wait_lock, flags);

	sem->activity = 1;
	if (!list_empty(&sem->wait_list))
		sem = __rwsem_do_wake(sem, 0);

	spin_unlock_irqrestore(&sem->wait_lock, flags);
}
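/*
 * Usage sketch (illustrative only): a writer can publish an update and keep
 * reading without letting another writer in by using downgrade_write() from
 * <linux/rwsem.h>, which maps onto __downgrade_write() here. example_update
 * is a hypothetical name.
 */
static void __maybe_unused example_update(int val)
{
	down_write(&example_sem);	/* exclusive access */
	example_data = val;
	downgrade_write(&example_sem);	/* keep a read lock, admit other readers */
	/* read-side work that must observe the value just written */
	up_read(&example_sem);
}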
EXPORT_SYMBOL(__init_rwsem);
EXPORT_SYMBOL(__down_read);
EXPORT_SYMBOL(__down_read_trylock);
EXPORT_SYMBOL(__down_write_nested);
EXPORT_SYMBOL(__down_write);
EXPORT_SYMBOL(__down_write_trylock);
EXPORT_SYMBOL(__up_read);
EXPORT_SYMBOL(__up_write);
EXPORT_SYMBOL(__downgrade_write);