/*
 * Generic waiting primitives.
 *
 * (C) 2004 William Irwin, Oracle
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/wait.h>
#include <linux/hash.h>
void init_waitqueue_head(wait_queue_head_t *q)
{
	spin_lock_init(&q->lock);
	INIT_LIST_HEAD(&q->task_list);
}

EXPORT_SYMBOL(init_waitqueue_head);
void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
{
	unsigned long flags;

	wait->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&q->lock, flags);
	__add_wait_queue(q, wait);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue);
void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
{
	unsigned long flags;

	wait->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&q->lock, flags);
	__add_wait_queue_tail(q, wait);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue_exclusive);
void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
{
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	__remove_wait_queue(q, wait);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(remove_wait_queue);
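
/*
 * Illustrative sketch, not part of this file: the add/remove primitives
 * above are rarely called directly.  Most code declares a waitqueue head
 * and uses the higher-level wait_event()/wake_up() helpers from
 * <linux/wait.h>, which queue the waiter and manage task state
 * internally.  "my_wq" and "my_condition" are hypothetical names.
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(my_wq);
 *
 *	waiter:
 *		wait_event(my_wq, my_condition);
 *
 *	waker:
 *		my_condition = 1;
 *		wake_up(&my_wq);
 */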
/*
 * Note: we use "set_current_state()" _after_ the wait-queue add,
 * because we need a memory barrier there on SMP, so that any
 * wake-function that tests for the wait-queue being active
 * will be guaranteed to see waitqueue addition _or_ subsequent
 * tests in this thread will see the wakeup having taken place.
 *
 * The spin_unlock() itself is semi-permeable and only protects
 * one way (it only protects stuff inside the critical region and
 * stops them from bleeding out - it would still allow subsequent
 * loads to move into the critical region).
 */
void
prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
{
	unsigned long flags;

	wait->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&q->lock, flags);
	if (list_empty(&wait->task_list))
		__add_wait_queue(q, wait);
	/*
	 * don't alter the task state if this is just going to
	 * queue an async wait queue callback
	 */
	if (is_sync_wait(wait))
		set_current_state(state);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait);
void
prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state)
{
	unsigned long flags;

	wait->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&q->lock, flags);
	if (list_empty(&wait->task_list))
		__add_wait_queue_tail(q, wait);
	/*
	 * don't alter the task state if this is just going to
	 * queue an async wait queue callback
	 */
	if (is_sync_wait(wait))
		set_current_state(state);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait_exclusive);
/**
 * finish_wait - clean up after waiting in a queue
 * @q: waitqueue waited on
 * @wait: wait descriptor
 *
 * Sets current thread back to running state and removes
 * the wait descriptor from the given waitqueue if still
 * queued.
 */
void finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
{
	unsigned long flags;

	__set_current_state(TASK_RUNNING);
	/*
	 * We can check for list emptiness outside the lock
	 * IFF:
	 *  - we use the "careful" check that verifies both
	 *    the next and prev pointers, so that there cannot
	 *    be any half-pending updates in progress on other
	 *    CPU's that we haven't seen yet (and that might
	 *    still change the stack area).
	 * and
	 *  - all other users take the lock (ie we can only
	 *    have _one_ other CPU that looks at or modifies
	 *    the list).
	 */
	if (!list_empty_careful(&wait->task_list)) {
		spin_lock_irqsave(&q->lock, flags);
		list_del_init(&wait->task_list);
		spin_unlock_irqrestore(&q->lock, flags);
	}
}
EXPORT_SYMBOL(finish_wait);
/**
 * abort_exclusive_wait - abort exclusive waiting in a queue
 * @q: waitqueue waited on
 * @wait: wait descriptor
 * @mode: runstate of the waiter to be woken
 * @key: key to identify a wait bit queue or %NULL
 *
 * Sets current thread back to running state and removes
 * the wait descriptor from the given waitqueue if still
 * queued.
 *
 * Wakes up the next waiter if the caller is concurrently
 * woken up through the queue.
 *
 * This prevents waiter starvation where an exclusive waiter
 * aborts and is woken up concurrently and no one wakes up
 * the next waiter.
 */
void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait,
			unsigned int mode, void *key)
{
	unsigned long flags;

	__set_current_state(TASK_RUNNING);
	spin_lock_irqsave(&q->lock, flags);
	if (!list_empty(&wait->task_list))
		list_del_init(&wait->task_list);
	else if (waitqueue_active(q))
		__wake_up_common(q, mode, 1, 0, key);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(abort_exclusive_wait);
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	int ret = default_wake_function(wait, mode, sync, key);

	if (ret)
		list_del_init(&wait->task_list);
	return ret;
}
EXPORT_SYMBOL(autoremove_wake_function);
int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *arg)
{
	struct wait_bit_key *key = arg;
	struct wait_bit_queue *wait_bit
		= container_of(wait, struct wait_bit_queue, wait);

	if (wait_bit->key.flags != key->flags ||
			wait_bit->key.bit_nr != key->bit_nr ||
			test_bit(key->bit_nr, key->flags))
		return 0;
	else
		return autoremove_wake_function(wait, mode, sync, key);
}
EXPORT_SYMBOL(wake_bit_function);
/*
 * To allow interruptible waiting and asynchronous (i.e. nonblocking)
 * waiting, the action functions passed to __wait_on_bit() and
 * __wait_on_bit_lock() may return nonzero codes.  A nonzero return code
 * halts waiting and is propagated back to the caller.
 */
int __sched
__wait_on_bit(wait_queue_head_t *wq, struct wait_bit_queue *q,
			int (*action)(void *), unsigned mode)
{
	int ret = 0;

	do {
		prepare_to_wait(wq, &q->wait, mode);
		if (test_bit(q->key.bit_nr, q->key.flags))
			ret = (*action)(q->key.flags);
	} while (test_bit(q->key.bit_nr, q->key.flags) && !ret);
	finish_wait(wq, &q->wait);
	return ret;
}
EXPORT_SYMBOL(__wait_on_bit);
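
/*
 * Illustrative sketch, not part of this file: an action callback of the
 * kind __wait_on_bit()/__wait_on_bit_lock() expect.  It is invoked with a
 * pointer to the word being waited on and should sleep; a nonzero return
 * (e.g. on a signal) aborts the wait and is passed back to the caller.
 * "my_bit_wait" is a hypothetical name.
 *
 *	static int my_bit_wait(void *word)
 *	{
 *		if (signal_pending(current))
 *			return -ERESTARTSYS;
 *		schedule();
 *		return 0;
 *	}
 */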
int __sched out_of_line_wait_on_bit(void *word, int bit,
					int (*action)(void *), unsigned mode)
{
	wait_queue_head_t *wq = bit_waitqueue(word, bit);
	DEFINE_WAIT_BIT(wait, word, bit);

	return __wait_on_bit(wq, &wait, action, mode);
}
EXPORT_SYMBOL(out_of_line_wait_on_bit);
int __sched
__wait_on_bit_lock(wait_queue_head_t *wq, struct wait_bit_queue *q,
			int (*action)(void *), unsigned mode)
{
	do {
		int ret;

		prepare_to_wait_exclusive(wq, &q->wait, mode);
		if (!test_bit(q->key.bit_nr, q->key.flags))
			continue;
		ret = action(q->key.flags);
		if (!ret)
			continue;
		abort_exclusive_wait(wq, &q->wait, mode, &q->key);
		return ret;
	} while (test_and_set_bit(q->key.bit_nr, q->key.flags));
	finish_wait(wq, &q->wait);
	return 0;
}
EXPORT_SYMBOL(__wait_on_bit_lock);
int __sched out_of_line_wait_on_bit_lock(void *word, int bit,
					int (*action)(void *), unsigned mode)
{
	wait_queue_head_t *wq = bit_waitqueue(word, bit);
	DEFINE_WAIT_BIT(wait, word, bit);

	return __wait_on_bit_lock(wq, &wait, action, mode);
}
EXPORT_SYMBOL(out_of_line_wait_on_bit_lock);
void __wake_up_bit(wait_queue_head_t *wq, void *word, int bit)
{
	struct wait_bit_key key = __WAIT_BIT_KEY_INITIALIZER(word, bit);

	if (waitqueue_active(wq))
		__wake_up(wq, TASK_NORMAL, 1, &key);
}
EXPORT_SYMBOL(__wake_up_bit);
/**
 * wake_up_bit - wake up a waiter on a bit
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that wakes up waiters
 * on a bit. For instance, if one were to have waiters on a bitflag,
 * one would call wake_up_bit() after clearing the bit.
 *
 * In order for this to function properly, as it uses waitqueue_active()
 * internally, some kind of memory barrier must be done prior to calling
 * this. Typically, this will be smp_mb__after_clear_bit(), but in some
 * cases where bitflags are manipulated non-atomically under a lock, one
 * may need to use a less regular barrier, such as fs/inode.c's smp_mb(),
 * because spin_unlock() does not guarantee a memory barrier.
 */
void wake_up_bit(void *word, int bit)
{
	__wake_up_bit(bit_waitqueue(word, bit), word, bit);
}
EXPORT_SYMBOL(wake_up_bit);
wait_queue_head_t *bit_waitqueue(void *word, int bit)
{
	const int shift = BITS_PER_LONG == 32 ? 5 : 6;
	const struct zone *zone = page_zone(virt_to_page(word));
	unsigned long val = (unsigned long)word << shift | bit;

	return &zone->wait_table[hash_long(val, zone->wait_table_bits)];
}
EXPORT_SYMBOL(bit_waitqueue);