kernel/wait.c [linux-2.6.git]
/*
 * Generic waiting primitives.
 *
 * (C) 2004 William Irwin, Oracle
 */
#include <linux/config.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/wait.h>
#include <linux/hash.h>
void fastcall add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
{
        unsigned long flags;

        wait->flags &= ~WQ_FLAG_EXCLUSIVE;
        spin_lock_irqsave(&q->lock, flags);
        __add_wait_queue(q, wait);
        spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue);
void fastcall add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
{
        unsigned long flags;

        wait->flags |= WQ_FLAG_EXCLUSIVE;
        spin_lock_irqsave(&q->lock, flags);
        __add_wait_queue_tail(q, wait);
        spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue_exclusive);
void fastcall remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
{
        unsigned long flags;

        spin_lock_irqsave(&q->lock, flags);
        __remove_wait_queue(q, wait);
        spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(remove_wait_queue);
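/*
 * Illustrative sketch (not part of the original file): the classic
 * open-coded sleep built directly on the primitives above.  "wq" and
 * "condition" are assumed to be supplied by the caller.  The
 * prepare_to_wait()/finish_wait() pair below is the preferred modern
 * form of the same loop.
 *
 *      DECLARE_WAITQUEUE(wait, current);
 *
 *      add_wait_queue(&wq, &wait);
 *      for (;;) {
 *              set_current_state(TASK_UNINTERRUPTIBLE);
 *              if (condition)
 *                      break;
 *              schedule();
 *      }
 *      set_current_state(TASK_RUNNING);
 *      remove_wait_queue(&wq, &wait);
 */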
/*
 * Note: we use "set_current_state()" _after_ the wait-queue add,
 * because we need a memory barrier there on SMP, so that any
 * wake-function that tests for the wait-queue being active
 * will be guaranteed to see waitqueue addition _or_ subsequent
 * tests in this thread will see the wakeup having taken place.
 *
 * The spin_unlock() itself is semi-permeable and only protects
 * one way (it only protects stuff inside the critical region and
 * stops them from bleeding out - it would still allow subsequent
 * loads to move into the critical region).
 */
void fastcall
prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
{
        unsigned long flags;

        wait->flags &= ~WQ_FLAG_EXCLUSIVE;
        spin_lock_irqsave(&q->lock, flags);
        if (list_empty(&wait->task_list))
                __add_wait_queue(q, wait);
        /*
         * don't alter the task state if this is just going to
         * queue an async wait queue callback
         */
        if (is_sync_wait(wait))
                set_current_state(state);
        spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait);
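/*
 * Illustrative sketch (not part of the original file): the canonical
 * wait loop built on prepare_to_wait()/finish_wait().  The condition
 * check must follow prepare_to_wait(), so that the barrier implied by
 * set_current_state() orders it against the waker, as the note above
 * explains.  "wq" and "condition" are assumed to be supplied by the
 * caller; DEFINE_WAIT() installs autoremove_wake_function() (below).
 *
 *      DEFINE_WAIT(wait);
 *
 *      for (;;) {
 *              prepare_to_wait(&wq, &wait, TASK_INTERRUPTIBLE);
 *              if (condition || signal_pending(current))
 *                      break;
 *              schedule();
 *      }
 *      finish_wait(&wq, &wait);
 */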
void fastcall
prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state)
{
        unsigned long flags;

        wait->flags |= WQ_FLAG_EXCLUSIVE;
        spin_lock_irqsave(&q->lock, flags);
        if (list_empty(&wait->task_list))
                __add_wait_queue_tail(q, wait);
        /*
         * don't alter the task state if this is just going to
         * queue an async wait queue callback
         */
        if (is_sync_wait(wait))
                set_current_state(state);
        spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait_exclusive);
void fastcall finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
{
        unsigned long flags;

        __set_current_state(TASK_RUNNING);
        /*
         * We can check for list emptiness outside the lock
         * IFF:
         *  - we use the "careful" check that verifies both
         *    the next and prev pointers, so that there cannot
         *    be any half-pending updates in progress on other
         *    CPU's that we haven't seen yet (and that might
         *    still change the stack area).
         * and
         *  - all other users take the lock (ie we can only
         *    have _one_ other CPU that looks at or modifies
         *    the list).
         */
        if (!list_empty_careful(&wait->task_list)) {
                spin_lock_irqsave(&q->lock, flags);
                list_del_init(&wait->task_list);
                spin_unlock_irqrestore(&q->lock, flags);
        }
}
EXPORT_SYMBOL(finish_wait);
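/*
 * For reference (a sketch of include/linux/list.h, not part of this
 * file): the "careful" emptiness test mentioned above reads both the
 * next and prev pointers, so a half-completed list_del_init() running
 * on another CPU is never mistaken for a stable empty list.
 *
 *      static inline int list_empty_careful(const struct list_head *head)
 *      {
 *              struct list_head *next = head->next;
 *              return (next == head) && (next == head->prev);
 *      }
 */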
/*
 * The wake function installed by DEFINE_WAIT(): on a successful wakeup
 * it also removes the waiter from the queue, so finish_wait() can
 * usually skip taking the queue lock.
 */
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
        int ret = default_wake_function(wait, mode, sync, key);

        if (ret)
                list_del_init(&wait->task_list);
        return ret;
}
EXPORT_SYMBOL(autoremove_wake_function);
/*
 * Wake function for bit waiters: wake only a waiter whose (word, bit)
 * key matches, and only once the bit has actually been cleared.
 */
int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *arg)
{
        struct wait_bit_key *key = arg;
        struct wait_bit_queue *wait_bit
                = container_of(wait, struct wait_bit_queue, wait);

        if (wait_bit->key.flags != key->flags ||
                        wait_bit->key.bit_nr != key->bit_nr ||
                        test_bit(key->bit_nr, key->flags))
                return 0;
        else
                return autoremove_wake_function(wait, mode, sync, key);
}
EXPORT_SYMBOL(wake_bit_function);
/*
 * To allow interruptible waiting and asynchronous (i.e. nonblocking)
 * waiting, the actions passed to __wait_on_bit() and __wait_on_bit_lock()
 * may return nonzero codes.  A nonzero return code halts the wait and
 * is propagated back to the caller.
 */
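/*
 * Illustrative sketch (not part of the original file): a typical
 * interruptible action, used here with the out-of-line helper defined
 * below.  "obj" and MY_FLAG_BIT are hypothetical names.
 *
 *      static int my_wait_action(void *word)
 *      {
 *              if (signal_pending(current))
 *                      return -EINTR;
 *              schedule();
 *              return 0;
 *      }
 *
 *      err = out_of_line_wait_on_bit(&obj->flags, MY_FLAG_BIT,
 *                                    my_wait_action, TASK_INTERRUPTIBLE);
 */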
int __sched fastcall
__wait_on_bit(wait_queue_head_t *wq, struct wait_bit_queue *q,
                        int (*action)(void *), unsigned mode)
{
        int ret = 0;

        do {
                prepare_to_wait(wq, &q->wait, mode);
                if (test_bit(q->key.bit_nr, q->key.flags))
                        ret = (*action)(q->key.flags);
        } while (test_bit(q->key.bit_nr, q->key.flags) && !ret);
        finish_wait(wq, &q->wait);
        return ret;
}
EXPORT_SYMBOL(__wait_on_bit);
int __sched fastcall out_of_line_wait_on_bit(void *word, int bit,
                                        int (*action)(void *), unsigned mode)
{
        wait_queue_head_t *wq = bit_waitqueue(word, bit);
        DEFINE_WAIT_BIT(wait, word, bit);

        return __wait_on_bit(wq, &wait, action, mode);
}
EXPORT_SYMBOL(out_of_line_wait_on_bit);
int __sched fastcall
__wait_on_bit_lock(wait_queue_head_t *wq, struct wait_bit_queue *q,
                        int (*action)(void *), unsigned mode)
{
        int ret = 0;

        do {
                prepare_to_wait_exclusive(wq, &q->wait, mode);
                if (test_bit(q->key.bit_nr, q->key.flags)) {
                        if ((ret = (*action)(q->key.flags)))
                                break;
                }
        } while (test_and_set_bit(q->key.bit_nr, q->key.flags));
        finish_wait(wq, &q->wait);
        return ret;
}
EXPORT_SYMBOL(__wait_on_bit_lock);
int __sched fastcall out_of_line_wait_on_bit_lock(void *word, int bit,
                                        int (*action)(void *), unsigned mode)
{
        wait_queue_head_t *wq = bit_waitqueue(word, bit);
        DEFINE_WAIT_BIT(wait, word, bit);

        return __wait_on_bit_lock(wq, &wait, action, mode);
}
EXPORT_SYMBOL(out_of_line_wait_on_bit_lock);
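/*
 * Illustrative sketch (not part of the original file): using a flag bit
 * as a simple lock.  Acquisition loops until test_and_set_bit()
 * succeeds; release is the clear/barrier/wake sequence shown after
 * wake_up_bit() below.  "obj", MY_LOCK_BIT and my_wait_action are
 * hypothetical; callers normally reach this through the
 * wait_on_bit_lock() wrapper in linux/wait.h, which only takes the
 * out-of-line path when the fast-path test_and_set_bit() fails.
 *
 *      out_of_line_wait_on_bit_lock(&obj->flags, MY_LOCK_BIT,
 *                                   my_wait_action, TASK_UNINTERRUPTIBLE);
 */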
void fastcall __wake_up_bit(wait_queue_head_t *wq, void *word, int bit)
{
        struct wait_bit_key key = __WAIT_BIT_KEY_INITIALIZER(word, bit);
        if (waitqueue_active(wq))
                __wake_up(wq, TASK_INTERRUPTIBLE|TASK_UNINTERRUPTIBLE, 1, &key);
}
EXPORT_SYMBOL(__wake_up_bit);
/**
 * wake_up_bit - wake up a waiter on a bit
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that wakes up waiters
 * on a bit. For instance, if one were to have waiters on a bitflag,
 * one would call wake_up_bit() after clearing the bit.
 *
 * In order for this to function properly, as it uses waitqueue_active()
 * internally, some kind of memory barrier must be done prior to calling
 * this. Typically, this will be smp_mb__after_clear_bit(), but in some
 * cases where bitflags are manipulated non-atomically under a lock, one
 * may need to use a less regular barrier, such as fs/inode.c's smp_mb(),
 * because spin_unlock() does not guarantee a memory barrier.
 */
void fastcall wake_up_bit(void *word, int bit)
{
        __wake_up_bit(bit_waitqueue(word, bit), word, bit);
}
EXPORT_SYMBOL(wake_up_bit);
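/*
 * Illustrative sketch (not part of the original file): the
 * clear-then-wake sequence the comment above prescribes.  The explicit
 * barrier keeps the clear_bit() store ordered before the
 * waitqueue_active() read inside __wake_up_bit().  "obj" and
 * MY_BUSY_BIT are hypothetical names.
 *
 *      clear_bit(MY_BUSY_BIT, &obj->flags);
 *      smp_mb__after_clear_bit();
 *      wake_up_bit(&obj->flags, MY_BUSY_BIT);
 */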
/*
 * Hash a (word, bit) pair into the per-zone wait table.  The word
 * address is shifted up by log2(BITS_PER_LONG) so the bit number can
 * be mixed into the freed low bits, giving a distinct hash input for
 * every (word, bit) pair.
 */
fastcall wait_queue_head_t *bit_waitqueue(void *word, int bit)
{
        const int shift = BITS_PER_LONG == 32 ? 5 : 6;
        const struct zone *zone = page_zone(virt_to_page(word));
        unsigned long val = (unsigned long)word << shift | bit;

        return &zone->wait_table[hash_long(val, zone->wait_table_bits)];
}
EXPORT_SYMBOL(bit_waitqueue);