/* rwsem-spinlock.c: R/W semaphores: contention handling functions for
 * generic spinlock implementation
 *
 * Copyright (c) 2001 David Howells (dhowells@redhat.com).
 * - Derived partially from idea by Andrea Arcangeli <andrea@suse.de>
 * - Derived also from comments by Linus
 */
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/export.h>
struct rwsem_waiter {
        struct list_head list;
        struct task_struct *task;
        unsigned int flags;
#define RWSEM_WAITING_FOR_READ  0x00000001
#define RWSEM_WAITING_FOR_WRITE 0x00000002
};
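/*
 * sem->activity encodes the lock state: 0 means unlocked, a positive
 * value is the number of readers currently holding the lock, and -1
 * means a writer holds it.  Both the count and the wait list are
 * protected by sem->wait_lock.
 */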
int rwsem_is_locked(struct rw_semaphore *sem)
{
        int ret = 1;
        unsigned long flags;

        if (raw_spin_trylock_irqsave(&sem->wait_lock, flags)) {
                ret = (sem->activity != 0);
                raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
        }
        return ret;
}
EXPORT_SYMBOL(rwsem_is_locked);
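/*
 * Illustrative sketch (not part of the original file): rwsem_is_locked()
 * is deliberately conservative -- if the trylock above fails, someone else
 * holds wait_lock and the semaphore is reported as locked.  A hypothetical
 * debug assertion built on it might look like this:
 */
#if 0   /* example only */
static void example_assert_rwsem_held(struct rw_semaphore *sem)
{
        WARN_ON(!rwsem_is_locked(sem));
}
#endif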
/*
 * initialise the semaphore
 */
void __init_rwsem(struct rw_semaphore *sem, const char *name,
                  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        /*
         * Make sure we are not reinitializing a held semaphore:
         */
        debug_check_no_locks_freed((void *)sem, sizeof(*sem));
        lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
        sem->activity = 0;
        raw_spin_lock_init(&sem->wait_lock);
        INIT_LIST_HEAD(&sem->wait_list);
}
EXPORT_SYMBOL(__init_rwsem);
/*
 * Handle the lock being released when there are processes blocked on it
 * that can now run.
 * - if we come here, then:
 *   - the 'active count' _reached_ zero
 *   - the 'waiting count' is non-zero
 * - the spinlock must be held by the caller
 * - woken process blocks are discarded from the list after having task zeroed
 * - writers are only woken if wakewrite is non-zero
 */
static inline struct rw_semaphore *
__rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
{
        struct rwsem_waiter *waiter;
        struct task_struct *tsk;
        int woken;

        waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);

        if (!wakewrite) {
                if (waiter->flags & RWSEM_WAITING_FOR_WRITE)
                        goto out;
                goto dont_wake_writers;
        }
        /*
         * As we support write lock stealing, we can't set sem->activity
         * to -1 here to indicate that we acquired the lock.  Instead, we
         * wake the writer up and let it try to take the lock again.
         */
        if (waiter->flags & RWSEM_WAITING_FOR_WRITE) {
                wake_up_process(waiter->task);
                goto out;
        }

        /* grant an infinite number of read locks to the front of the queue */
 dont_wake_writers:
        woken = 0;
        while (waiter->flags & RWSEM_WAITING_FOR_READ) {
                struct list_head *next = waiter->list.next;

                list_del(&waiter->list);
                tsk = waiter->task;
                smp_mb();
                waiter->task = NULL;
                wake_up_process(tsk);
                put_task_struct(tsk);
                woken++;
                if (list_empty(&sem->wait_list))
                        break;
                waiter = list_entry(next, struct rwsem_waiter, list);
        }

        sem->activity += woken;

 out:
        return sem;
}
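/*
 * Note on the wakeup handshake above: the woken reader spins on
 * waiter.task in __down_read() below, and its on-stack rwsem_waiter may
 * vanish as soon as it observes NULL.  The smp_mb() ensures the
 * list_del() and the read of waiter->task complete before the NULL store
 * becomes visible; get/put_task_struct() keeps the task pinned across
 * wake_up_process().
 */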
/*
 * wake a single writer
 */
static inline struct rw_semaphore *
__rwsem_wake_one_writer(struct rw_semaphore *sem)
{
        struct rwsem_waiter *waiter;

        waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
        wake_up_process(waiter->task);

        return sem;
}
/*
 * get a read lock on the semaphore
 */
void __sched __down_read(struct rw_semaphore *sem)
{
        struct rwsem_waiter waiter;
        struct task_struct *tsk;
        unsigned long flags;

        raw_spin_lock_irqsave(&sem->wait_lock, flags);

        if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
                /* granted */
                sem->activity++;
                raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
                goto out;
        }

        tsk = current;
        set_task_state(tsk, TASK_UNINTERRUPTIBLE);

        /* set up my own style of waitqueue */
        waiter.task = tsk;
        waiter.flags = RWSEM_WAITING_FOR_READ;
        get_task_struct(tsk);

        list_add_tail(&waiter.list, &sem->wait_list);

        /* we don't need to touch the semaphore struct anymore */
        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

        /* wait to be given the lock */
        for (;;) {
                if (!waiter.task)
                        break;
                schedule();
                set_task_state(tsk, TASK_UNINTERRUPTIBLE);
        }

        tsk->state = TASK_RUNNING;
 out:
        ;
}
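/*
 * Illustrative sketch (not part of the original file): callers normally
 * reach __down_read()/__up_read() through the down_read()/up_read()
 * wrappers.  A hypothetical reader-side critical section:
 */
#if 0   /* example only */
static int example_read_counter(struct rw_semaphore *sem, const int *counter)
{
        int val;

        down_read(sem);         /* fast path above if no writer or waiters */
        val = *counter;         /* shared data is stable while the lock is held */
        up_read(sem);
        return val;
}
#endif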
/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
int __down_read_trylock(struct rw_semaphore *sem)
{
        unsigned long flags;
        int ret = 0;

        raw_spin_lock_irqsave(&sem->wait_lock, flags);

        if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
                /* granted */
                sem->activity++;
                ret = 1;
        }

        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

        return ret;
}
/*
 * get a write lock on the semaphore
 */
void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
{
        struct rwsem_waiter waiter;
        struct task_struct *tsk;
        unsigned long flags;

        raw_spin_lock_irqsave(&sem->wait_lock, flags);

        /* set up my own style of waitqueue */
        tsk = current;
        waiter.task = tsk;
        waiter.flags = RWSEM_WAITING_FOR_WRITE;
        list_add_tail(&waiter.list, &sem->wait_list);

        /* wait for someone to release the lock */
        for (;;) {
                /*
                 * This is the key to write lock stealing: it allows a
                 * task already on a CPU to take the lock immediately,
                 * rather than putting itself to sleep and waiting to be
                 * woken, ahead of whoever is at the head of the wait list.
                 */
                if (sem->activity == 0)
                        break;
                set_task_state(tsk, TASK_UNINTERRUPTIBLE);
                raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
                schedule();
                raw_spin_lock_irqsave(&sem->wait_lock, flags);
        }
        /* got the lock */
        sem->activity = -1;
        list_del(&waiter.list);

        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}
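/*
 * Note on lock stealing (added context, consistent with the comments
 * above): when __up_write() wakes a queued writer, the lock is not handed
 * to it directly -- sem->activity stays 0 until some writer's loop above
 * observes it under wait_lock.  Any task already inside
 * __down_write_nested() in that window can therefore take the lock ahead
 * of the queued waiter, trading strict FIFO fairness for fewer
 * sleep/wakeup cycles.
 */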
void __sched __down_write(struct rw_semaphore *sem)
{
        __down_write_nested(sem, 0);
}
/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
int __down_write_trylock(struct rw_semaphore *sem)
{
        unsigned long flags;
        int ret = 0;

        raw_spin_lock_irqsave(&sem->wait_lock, flags);

        if (sem->activity == 0) {
                /* got the lock */
                sem->activity = -1;
                ret = 1;
        }

        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

        return ret;
}
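/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * non-blocking update using the trylock path via down_write_trylock():
 */
#if 0   /* example only */
static bool example_try_update(struct rw_semaphore *sem, int *counter)
{
        if (!down_write_trylock(sem))
                return false;   /* contended: a reader or writer holds it */
        (*counter)++;
        up_write(sem);
        return true;
}
#endif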
/*
 * release a read lock on the semaphore
 */
void __up_read(struct rw_semaphore *sem)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&sem->wait_lock, flags);

        if (--sem->activity == 0 && !list_empty(&sem->wait_list))
                sem = __rwsem_wake_one_writer(sem);

        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}
/*
 * release a write lock on the semaphore
 */
void __up_write(struct rw_semaphore *sem)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&sem->wait_lock, flags);

        sem->activity = 0;
        if (!list_empty(&sem->wait_list))
                sem = __rwsem_do_wake(sem, 1);

        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}
/*
 * downgrade a write lock into a read lock
 * - just wake up any readers at the front of the queue
 */
void __downgrade_write(struct rw_semaphore *sem)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&sem->wait_lock, flags);

        sem->activity = 1;
        if (!list_empty(&sem->wait_list))
                sem = __rwsem_do_wake(sem, 0);

        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}
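/*
 * Illustrative sketch (not part of the original file): the classic use of
 * downgrade_write() -- build a result under the write lock, then let other
 * readers in while continuing to read it.  Names here are hypothetical:
 */
#if 0   /* example only */
static void example_publish_then_read(struct rw_semaphore *sem, int *shared)
{
        down_write(sem);
        *shared = 42;           /* exclusive: initialise the shared data */
        downgrade_write(sem);   /* sem->activity becomes 1; readers may join */
        /* *shared is still safe to read; writers remain excluded */
        up_read(sem);
}
#endif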