/* rwsem.c: R/W semaphores: contention handling functions
 *
 * Written by David Howells (dhowells@redhat.com).
 * Derived from arch/i386/kernel/semaphore.c
 */
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/module.h>
12 struct list_head list
;
13 struct task_struct
*task
;
15 #define RWSEM_WAITING_FOR_READ 0x00000001
16 #define RWSEM_WAITING_FOR_WRITE 0x00000002
21 void rwsemtrace(struct rw_semaphore
*sem
, const char *str
)
23 printk("sem=%p\n", sem
);
24 printk("(sem)=%08lx\n", sem
->count
);
26 printk("[%d] %s({%08lx})\n", current
->pid
, str
, sem
->count
);
31 * handle the lock release when processes blocked on it that can now run
32 * - if we come here from up_xxxx(), then:
33 * - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed)
34 * - the 'waiting part' of count (&0xffff0000) is -ve (and will still be so)
35 * - there must be someone on the queue
36 * - the spinlock must be held by the caller
37 * - woken process blocks are discarded from the list after having task zeroed
38 * - writers are only woken if downgrading is false
40 static inline struct rw_semaphore
*
41 __rwsem_do_wake(struct rw_semaphore
*sem
, int downgrading
)
43 struct rwsem_waiter
*waiter
;
44 struct task_struct
*tsk
;
45 struct list_head
*next
;
46 signed long oldcount
, woken
, loop
;
48 rwsemtrace(sem
, "Entering __rwsem_do_wake");
51 goto dont_wake_writers
;
53 /* if we came through an up_xxxx() call, we only only wake someone up
54 * if we can transition the active part of the count from 0 -> 1
57 oldcount
= rwsem_atomic_update(RWSEM_ACTIVE_BIAS
, sem
)
59 if (oldcount
& RWSEM_ACTIVE_MASK
)
62 waiter
= list_entry(sem
->wait_list
.next
, struct rwsem_waiter
, list
);
64 /* try to grant a single write lock if there's a writer at the front
65 * of the queue - note we leave the 'active part' of the count
66 * incremented by 1 and the waiting part incremented by 0x00010000
68 if (!(waiter
->flags
& RWSEM_WAITING_FOR_WRITE
))
71 /* We must be careful not to touch 'waiter' after we set ->task = NULL.
72 * It is an allocated on the waiter's stack and may become invalid at
73 * any time after that point (due to a wakeup from another source).
75 list_del(&waiter
->list
);
83 /* don't want to wake any writers */
85 waiter
= list_entry(sem
->wait_list
.next
, struct rwsem_waiter
, list
);
86 if (waiter
->flags
& RWSEM_WAITING_FOR_WRITE
)
89 /* grant an infinite number of read locks to the readers at the front
91 * - note we increment the 'active part' of the count by the number of
92 * readers before waking any processes up
99 if (waiter
->list
.next
== &sem
->wait_list
)
102 waiter
= list_entry(waiter
->list
.next
,
103 struct rwsem_waiter
, list
);
105 } while (waiter
->flags
& RWSEM_WAITING_FOR_READ
);
108 woken
*= RWSEM_ACTIVE_BIAS
- RWSEM_WAITING_BIAS
;
110 /* we'd already done one increment earlier */
111 woken
-= RWSEM_ACTIVE_BIAS
;
113 rwsem_atomic_add(woken
, sem
);
115 next
= sem
->wait_list
.next
;
116 for (; loop
> 0; loop
--) {
117 waiter
= list_entry(next
, struct rwsem_waiter
, list
);
118 next
= waiter
->list
.next
;
122 wake_up_process(tsk
);
123 put_task_struct(tsk
);
126 sem
->wait_list
.next
= next
;
127 next
->prev
= &sem
->wait_list
;
130 rwsemtrace(sem
, "Leaving __rwsem_do_wake");
133 /* undo the change to count, but check for a transition 1->0 */
135 if (rwsem_atomic_update(-RWSEM_ACTIVE_BIAS
, sem
) != 0)
141 * wait for a lock to be granted
143 static inline struct rw_semaphore
*
144 rwsem_down_failed_common(struct rw_semaphore
*sem
,
145 struct rwsem_waiter
*waiter
, signed long adjustment
)
147 struct task_struct
*tsk
= current
;
150 set_task_state(tsk
, TASK_UNINTERRUPTIBLE
);
152 /* set up my own style of waitqueue */
153 spin_lock_irq(&sem
->wait_lock
);
155 get_task_struct(tsk
);
157 list_add_tail(&waiter
->list
, &sem
->wait_list
);
159 /* we're now waiting on the lock, but no longer actively read-locking */
160 count
= rwsem_atomic_update(adjustment
, sem
);
162 /* if there are no active locks, wake the front queued process(es) up */
163 if (!(count
& RWSEM_ACTIVE_MASK
))
164 sem
= __rwsem_do_wake(sem
, 0);
166 spin_unlock_irq(&sem
->wait_lock
);
168 /* wait to be given the lock */
173 set_task_state(tsk
, TASK_UNINTERRUPTIBLE
);
176 tsk
->state
= TASK_RUNNING
;
182 * wait for the read lock to be granted
184 struct rw_semaphore fastcall __sched
*
185 rwsem_down_read_failed(struct rw_semaphore
*sem
)
187 struct rwsem_waiter waiter
;
189 rwsemtrace(sem
, "Entering rwsem_down_read_failed");
191 waiter
.flags
= RWSEM_WAITING_FOR_READ
;
192 rwsem_down_failed_common(sem
, &waiter
,
193 RWSEM_WAITING_BIAS
- RWSEM_ACTIVE_BIAS
);
195 rwsemtrace(sem
, "Leaving rwsem_down_read_failed");
200 * wait for the write lock to be granted
202 struct rw_semaphore fastcall __sched
*
203 rwsem_down_write_failed(struct rw_semaphore
*sem
)
205 struct rwsem_waiter waiter
;
207 rwsemtrace(sem
, "Entering rwsem_down_write_failed");
209 waiter
.flags
= RWSEM_WAITING_FOR_WRITE
;
210 rwsem_down_failed_common(sem
, &waiter
, -RWSEM_ACTIVE_BIAS
);
212 rwsemtrace(sem
, "Leaving rwsem_down_write_failed");
217 * handle waking up a waiter on the semaphore
218 * - up_read/up_write has decremented the active part of count if we come here
220 struct rw_semaphore fastcall
*rwsem_wake(struct rw_semaphore
*sem
)
224 rwsemtrace(sem
, "Entering rwsem_wake");
226 spin_lock_irqsave(&sem
->wait_lock
, flags
);
228 /* do nothing if list empty */
229 if (!list_empty(&sem
->wait_list
))
230 sem
= __rwsem_do_wake(sem
, 0);
232 spin_unlock_irqrestore(&sem
->wait_lock
, flags
);
234 rwsemtrace(sem
, "Leaving rwsem_wake");
240 * downgrade a write lock into a read lock
241 * - caller incremented waiting part of count and discovered it still negative
242 * - just wake up any readers at the front of the queue
244 struct rw_semaphore fastcall
*rwsem_downgrade_wake(struct rw_semaphore
*sem
)
248 rwsemtrace(sem
, "Entering rwsem_downgrade_wake");
250 spin_lock_irqsave(&sem
->wait_lock
, flags
);
252 /* do nothing if list empty */
253 if (!list_empty(&sem
->wait_list
))
254 sem
= __rwsem_do_wake(sem
, 1);
256 spin_unlock_irqrestore(&sem
->wait_lock
, flags
);
258 rwsemtrace(sem
, "Leaving rwsem_downgrade_wake");
/* export the slow-path entry points for modules using rwsems */
EXPORT_SYMBOL(rwsem_down_read_failed);
EXPORT_SYMBOL(rwsem_down_write_failed);
EXPORT_SYMBOL(rwsem_wake);
EXPORT_SYMBOL(rwsem_downgrade_wake);
/* NOTE(review): mainline guards this export with #if RWSEM_DEBUG;
 * kept unconditional here to match the unguarded definition above. */
EXPORT_SYMBOL(rwsemtrace);