/* rwsem.c: R/W semaphores: contention handling functions
 *
 * Written by David Howells (dhowells@redhat.com).
 * Derived from arch/i386/kernel/semaphore.c
 */
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/module.h>

struct rwsem_waiter {
	struct list_head	list;
	struct task_struct	*task;
	unsigned int		flags;
#define RWSEM_WAITING_FOR_READ	0x00000001
#define RWSEM_WAITING_FOR_WRITE	0x00000002
};
#if RWSEM_DEBUG
#undef rwsemtrace
void rwsemtrace(struct rw_semaphore *sem, const char *str)
{
	printk("sem=%p\n", sem);
	printk("(sem)=%08lx\n", sem->count);
	if (sem->debug)
		printk("[%d] %s({%08lx})\n", current->pid, str, sem->count);
}
#endif
/*
 * handle the lock being released whilst there are processes blocked on it that can now run
 * - if we come here, then:
 *   - the 'active part' of the count (&0x0000ffff) reached zero but has been re-incremented
 *   - the 'waiting part' of the count (&0xffff0000) is negative (and will still be so)
 *   - there must be someone on the queue
 * - the spinlock must be held by the caller
 * - woken process blocks are discarded from the list after having flags zeroised
 * - writers are only woken if wakewrite is non-zero
 */
static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
{
	struct rwsem_waiter *waiter;
	struct list_head *next;
	signed long oldcount, woken, loop;

	rwsemtrace(sem, "Entering __rwsem_do_wake");

	if (!wakewrite)
		goto dont_wake_writers;

	/* only wake someone up if we can transition the active part of the count from 0 -> 1 */
 try_again:
	oldcount = rwsem_atomic_update(RWSEM_ACTIVE_BIAS, sem) - RWSEM_ACTIVE_BIAS;
	if (oldcount & RWSEM_ACTIVE_MASK)
		goto undo;

	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);

	/* try to grant a single write lock if there's a writer at the front of the queue
	 * - note we leave the 'active part' of the count incremented by 1 and the waiting part
	 *   incremented by 0x00010000
	 */
	if (!(waiter->flags & RWSEM_WAITING_FOR_WRITE))
		goto readers_only;

	list_del(&waiter->list);
	waiter->flags = 0;
	wake_up_process(waiter->task);
	goto out;

	/* don't want to wake any writers */
 dont_wake_writers:
	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
	if (waiter->flags & RWSEM_WAITING_FOR_WRITE)
		goto out;

	/* grant an infinite number of read locks to the readers at the front of the queue
	 * - note we increment the 'active part' of the count by the number of readers (less one
	 *   for the activity decrement we've already done) before waking any processes up
	 */
 readers_only:
	woken = 0;
	do {
		woken++;

		if (waiter->list.next == &sem->wait_list)
			break;

		waiter = list_entry(waiter->list.next, struct rwsem_waiter, list);

	} while (waiter->flags & RWSEM_WAITING_FOR_READ);

	loop = woken;
	woken *= RWSEM_ACTIVE_BIAS - RWSEM_WAITING_BIAS;
	woken -= RWSEM_ACTIVE_BIAS;
	rwsem_atomic_add(woken, sem);

	next = sem->wait_list.next;
	for (; loop > 0; loop--) {
		waiter = list_entry(next, struct rwsem_waiter, list);
		next = waiter->list.next;
		waiter->flags = 0;
		wake_up_process(waiter->task);
	}

	sem->wait_list.next = next;
	next->prev = &sem->wait_list;

 out:
	rwsemtrace(sem, "Leaving __rwsem_do_wake");
	return sem;

	/* undo the change to count, but check for a transition 1->0 */
 undo:
	if (rwsem_atomic_update(-RWSEM_ACTIVE_BIAS, sem) != 0)
		goto out;
	goto try_again;
}
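
/* Worked example of the reader-wake arithmetic above (a sketch only; the
 * exact bias values are assumptions taken from the usual 32-bit layout in
 * <asm/rwsem.h>, i.e. RWSEM_ACTIVE_BIAS == 0x00000001 and
 * RWSEM_WAITING_BIAS == -0x00010000):
 *
 *   - suppose three readers are queued, so the do/while loop leaves woken == 3
 *   - woken *= RWSEM_ACTIVE_BIAS - RWSEM_WAITING_BIAS
 *       => woken = 3 * 0x00010001 = 0x00030003
 *     (each woken reader gains one active count and sheds one waiting bias)
 *   - woken -= RWSEM_ACTIVE_BIAS
 *       => woken = 0x00030002
 *     (the 0 -> 1 transition at try_again already supplied one active count)
 *   - rwsem_atomic_add(woken, sem) therefore leaves the active part at 3 and
 *     cancels the three waiting biases in a single atomic update
 */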
/*
 * wait for a lock to be granted
 */
static inline struct rw_semaphore *rwsem_down_failed_common(struct rw_semaphore *sem,
							    struct rwsem_waiter *waiter,
							    signed long adjustment)
{
	struct task_struct *tsk = current;
	signed long count;

	set_task_state(tsk, TASK_UNINTERRUPTIBLE);

	/* set up my own style of waitqueue */
	spin_lock(&sem->wait_lock);
	waiter->task = tsk;

	list_add_tail(&waiter->list, &sem->wait_list);

	/* note that we're now waiting on the lock, but no longer actively read-locking */
	count = rwsem_atomic_update(adjustment, sem);

	/* if there are no longer active locks, wake the front queued process(es) up
	 * - it might even be this process, since the waker takes a more active part
	 */
	if (!(count & RWSEM_ACTIVE_MASK))
		sem = __rwsem_do_wake(sem, 1);

	spin_unlock(&sem->wait_lock);

	/* wait to be given the lock */
	for (;;) {
		if (!waiter->flags)
			break;
		schedule();
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	}

	tsk->state = TASK_RUNNING;

	return sem;
}
/*
 * wait for the read lock to be granted
 */
struct rw_semaphore fastcall *rwsem_down_read_failed(struct rw_semaphore *sem)
{
	struct rwsem_waiter waiter;

	rwsemtrace(sem, "Entering rwsem_down_read_failed");

	waiter.flags = RWSEM_WAITING_FOR_READ;
	rwsem_down_failed_common(sem, &waiter, RWSEM_WAITING_BIAS - RWSEM_ACTIVE_BIAS);

	rwsemtrace(sem, "Leaving rwsem_down_read_failed");
	return sem;
}
/*
 * wait for the write lock to be granted
 */
struct rw_semaphore fastcall *rwsem_down_write_failed(struct rw_semaphore *sem)
{
	struct rwsem_waiter waiter;

	rwsemtrace(sem, "Entering rwsem_down_write_failed");

	waiter.flags = RWSEM_WAITING_FOR_WRITE;
	rwsem_down_failed_common(sem, &waiter, -RWSEM_ACTIVE_BIAS);

	rwsemtrace(sem, "Leaving rwsem_down_write_failed");
	return sem;
}
/*
 * handle waking up a waiter on the semaphore
 * - up_read has decremented the active part of the count if we come here
 */
struct rw_semaphore fastcall *rwsem_wake(struct rw_semaphore *sem)
{
	rwsemtrace(sem, "Entering rwsem_wake");

	spin_lock(&sem->wait_lock);

	/* do nothing if list empty */
	if (!list_empty(&sem->wait_list))
		sem = __rwsem_do_wake(sem, 1);

	spin_unlock(&sem->wait_lock);

	rwsemtrace(sem, "Leaving rwsem_wake");

	return sem;
}
/*
 * downgrade a write lock into a read lock
 * - caller incremented waiting part of count, and discovered it to be still negative
 * - just wake up any readers at the front of the queue
 */
struct rw_semaphore fastcall *rwsem_downgrade_wake(struct rw_semaphore *sem)
{
	rwsemtrace(sem, "Entering rwsem_downgrade_wake");

	spin_lock(&sem->wait_lock);

	/* do nothing if list empty */
	if (!list_empty(&sem->wait_list))
		sem = __rwsem_do_wake(sem, 0);

	spin_unlock(&sem->wait_lock);

	rwsemtrace(sem, "Leaving rwsem_downgrade_wake");

	return sem;
}
EXPORT_SYMBOL_NOVERS(rwsem_down_read_failed);
EXPORT_SYMBOL_NOVERS(rwsem_down_write_failed);
EXPORT_SYMBOL_NOVERS(rwsem_wake);
EXPORT_SYMBOL_NOVERS(rwsem_downgrade_wake);
#if RWSEM_DEBUG
EXPORT_SYMBOL(rwsemtrace);
#endif