/*
 * include/linux/wait.h
 *
 * Wait queue declarations and wait_event*() / wake_up*() primitives.
 */
1 #ifndef _LINUX_WAIT_H
2 #define _LINUX_WAIT_H
4 #define WNOHANG 0x00000001
5 #define WUNTRACED 0x00000002
6 #define WSTOPPED WUNTRACED
7 #define WEXITED 0x00000004
8 #define WCONTINUED 0x00000008
9 #define WNOWAIT 0x01000000 /* Don't reap, just poll status. */
11 #define __WNOTHREAD 0x20000000 /* Don't wait on children of other threads in this group */
12 #define __WALL 0x40000000 /* Wait on all children, regardless of type */
13 #define __WCLONE 0x80000000 /* Wait only on non-SIGCHLD children */
15 /* First argument to waitid: */
16 #define P_ALL 0
17 #define P_PID 1
18 #define P_PGID 2
20 #ifdef __KERNEL__
22 #include <linux/list.h>
23 #include <linux/stddef.h>
24 #include <linux/spinlock.h>
25 #include <asm/system.h>
26 #include <asm/current.h>
28 typedef struct __wait_queue wait_queue_t;
29 typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key);
30 int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key);
32 struct __wait_queue {
33 unsigned int flags;
34 #define WQ_FLAG_EXCLUSIVE 0x01
35 void *private;
36 wait_queue_func_t func;
37 struct list_head task_list;
40 struct wait_bit_key {
41 void *flags;
42 int bit_nr;
45 struct wait_bit_queue {
46 struct wait_bit_key key;
47 wait_queue_t wait;
50 struct __wait_queue_head {
51 spinlock_t lock;
52 struct list_head task_list;
54 typedef struct __wait_queue_head wait_queue_head_t;
56 struct task_struct;
/*
 * Macros for declaration and initialisaton of the datatypes
 */

#define __WAITQUEUE_INITIALIZER(name, tsk) {				\
	.private	= tsk,						\
	.func		= default_wake_function,			\
	.task_list	= { NULL, NULL } }

#define DECLARE_WAITQUEUE(name, tsk)					\
	wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk)

#define __WAIT_QUEUE_HEAD_INITIALIZER(name) {				\
	.lock		= __SPIN_LOCK_UNLOCKED(name.lock),		\
	.task_list	= { &(name).task_list, &(name).task_list } }

#define DECLARE_WAIT_QUEUE_HEAD(name) \
	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name)

#define __WAIT_BIT_KEY_INITIALIZER(word, bit)				\
	{ .flags = word, .bit_nr = bit, }
80 extern void __init_waitqueue_head(wait_queue_head_t *q, struct lock_class_key *);
82 #define init_waitqueue_head(q) \
83 do { \
84 static struct lock_class_key __key; \
86 __init_waitqueue_head((q), &__key); \
87 } while (0)
89 #ifdef CONFIG_LOCKDEP
90 # define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
91 ({ init_waitqueue_head(&name); name; })
92 # define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
93 wait_queue_head_t name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
94 #else
95 # define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
96 #endif
98 static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
100 q->flags = 0;
101 q->private = p;
102 q->func = default_wake_function;
105 static inline void init_waitqueue_func_entry(wait_queue_t *q,
106 wait_queue_func_t func)
108 q->flags = 0;
109 q->private = NULL;
110 q->func = func;
113 static inline int waitqueue_active(wait_queue_head_t *q)
115 return !list_empty(&q->task_list);
118 extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
119 extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait);
120 extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
122 static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
124 list_add(&new->task_list, &head->task_list);
128 * Used for wake-one threads:
130 static inline void __add_wait_queue_exclusive(wait_queue_head_t *q,
131 wait_queue_t *wait)
133 wait->flags |= WQ_FLAG_EXCLUSIVE;
134 __add_wait_queue(q, wait);
137 static inline void __add_wait_queue_tail(wait_queue_head_t *head,
138 wait_queue_t *new)
140 list_add_tail(&new->task_list, &head->task_list);
143 static inline void __add_wait_queue_tail_exclusive(wait_queue_head_t *q,
144 wait_queue_t *wait)
146 wait->flags |= WQ_FLAG_EXCLUSIVE;
147 __add_wait_queue_tail(q, wait);
150 static inline void __remove_wait_queue(wait_queue_head_t *head,
151 wait_queue_t *old)
153 list_del(&old->task_list);
156 void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
157 void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
158 void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr,
159 void *key);
160 void __wake_up_locked(wait_queue_head_t *q, unsigned int mode);
161 void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
162 void __wake_up_bit(wait_queue_head_t *, void *, int);
163 int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
164 int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
165 void wake_up_bit(void *, int);
166 int out_of_line_wait_on_bit(void *, int, int (*)(void *), unsigned);
167 int out_of_line_wait_on_bit_lock(void *, int, int (*)(void *), unsigned);
168 wait_queue_head_t *bit_waitqueue(void *, int);
#define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
#define wake_up_nr(x, nr)		__wake_up(x, TASK_NORMAL, nr, NULL)
#define wake_up_all(x)			__wake_up(x, TASK_NORMAL, 0, NULL)
#define wake_up_locked(x)		__wake_up_locked((x), TASK_NORMAL)

#define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_interruptible_nr(x, nr)	__wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_interruptible_all(x)	__wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
#define wake_up_interruptible_sync(x)	__wake_up_sync((x), TASK_INTERRUPTIBLE, 1)

/*
 * Wakeup macros to be used to report events to the targets.
 */
#define wake_up_poll(x, m)				\
	__wake_up(x, TASK_NORMAL, 1, (void *) (m))
#define wake_up_locked_poll(x, m)				\
	__wake_up_locked_key((x), TASK_NORMAL, (void *) (m))
#define wake_up_interruptible_poll(x, m)			\
	__wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
#define wake_up_interruptible_sync_poll(x, m)				\
	__wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m))
#define __wait_event(wq, condition) 					\
do {									\
	DEFINE_WAIT(__wait);						\
									\
	for (;;) {							\
		prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE);	\
		if (condition)						\
			break;						\
		schedule();						\
	}								\
	finish_wait(&wq, &__wait);					\
} while (0)

/**
 * wait_event - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event(wq, condition) 					\
do {									\
	if (condition)	 						\
		break;							\
	__wait_event(wq, condition);					\
} while (0)
#define __wait_event_timeout(wq, condition, ret)			\
do {									\
	DEFINE_WAIT(__wait);						\
									\
	for (;;) {							\
		prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE);	\
		if (condition)						\
			break;						\
		ret = schedule_timeout(ret);				\
		if (!ret)						\
			break;						\
	}								\
	finish_wait(&wq, &__wait);					\
} while (0)

/**
 * wait_event_timeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if the @timeout elapsed, and the remaining
 * jiffies if the condition evaluated to true before the timeout elapsed.
 */
#define wait_event_timeout(wq, condition, timeout)			\
({									\
	long __ret = timeout;						\
	if (!(condition)) 						\
		__wait_event_timeout(wq, condition, __ret);		\
	__ret;								\
})
#define __wait_event_interruptible(wq, condition, ret)			\
do {									\
	DEFINE_WAIT(__wait);						\
									\
	for (;;) {							\
		prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE);	\
		if (condition)						\
			break;						\
		if (!signal_pending(current)) {				\
			schedule();					\
			continue;					\
		}							\
		ret = -ERESTARTSYS;					\
		break;							\
	}								\
	finish_wait(&wq, &__wait);					\
} while (0)

/**
 * wait_event_interruptible - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible(wq, condition)				\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__wait_event_interruptible(wq, condition, __ret);	\
	__ret;								\
})
#define __wait_event_interruptible_timeout(wq, condition, ret)		\
do {									\
	DEFINE_WAIT(__wait);						\
									\
	for (;;) {							\
		prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE);	\
		if (condition)						\
			break;						\
		if (!signal_pending(current)) {				\
			ret = schedule_timeout(ret);			\
			if (!ret)					\
				break;					\
			continue;					\
		}							\
		ret = -ERESTARTSYS;					\
		break;							\
	}								\
	finish_wait(&wq, &__wait);					\
} while (0)

/**
 * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
 * was interrupted by a signal, and the remaining jiffies otherwise
 * if the condition evaluated to true before the timeout elapsed.
 */
#define wait_event_interruptible_timeout(wq, condition, timeout)	\
({									\
	long __ret = timeout;						\
	if (!(condition))						\
		__wait_event_interruptible_timeout(wq, condition, __ret); \
	__ret;								\
})
#define __wait_event_interruptible_exclusive(wq, condition, ret)	\
do {									\
	DEFINE_WAIT(__wait);						\
									\
	for (;;) {							\
		prepare_to_wait_exclusive(&wq, &__wait,			\
					TASK_INTERRUPTIBLE);		\
		if (condition) {					\
			finish_wait(&wq, &__wait);			\
			break;						\
		}							\
		if (!signal_pending(current)) {				\
			schedule();					\
			continue;					\
		}							\
		ret = -ERESTARTSYS;					\
		/* Hand a pending wakeup to another waiter instead	\
		 * of losing it when we abort.				\
		 */							\
		abort_exclusive_wait(&wq, &__wait, 			\
				TASK_INTERRUPTIBLE, NULL);		\
		break;							\
	}								\
} while (0)

#define wait_event_interruptible_exclusive(wq, condition)		\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__wait_event_interruptible_exclusive(wq, condition, __ret);\
	__ret;								\
})
/*
 * Common implementation for the *_locked wait_event variants. Caller holds
 * wq.lock; @irq selects spin_{un}lock_irq vs spin_{un}lock around schedule(),
 * @exclusive marks the waiter WQ_FLAG_EXCLUSIVE for wake-one semantics.
 */
#define __wait_event_interruptible_locked(wq, condition, exclusive, irq) \
({									\
	int __ret = 0;							\
	DEFINE_WAIT(__wait);						\
	if (exclusive)							\
		__wait.flags |= WQ_FLAG_EXCLUSIVE;			\
	do {								\
		if (likely(list_empty(&__wait.task_list)))		\
			__add_wait_queue_tail(&(wq), &__wait);		\
		set_current_state(TASK_INTERRUPTIBLE);			\
		if (signal_pending(current)) {				\
			__ret = -ERESTARTSYS;				\
			break;						\
		}							\
		if (irq)						\
			spin_unlock_irq(&(wq).lock);			\
		else							\
			spin_unlock(&(wq).lock);			\
		schedule();						\
		if (irq)						\
			spin_lock_irq(&(wq).lock);			\
		else							\
			spin_lock(&(wq).lock);				\
	} while (!(condition));						\
	__remove_wait_queue(&(wq), &__wait);				\
	__set_current_state(TASK_RUNNING);				\
	__ret;								\
})
/**
 * wait_event_interruptible_locked - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held.  This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked(wq, condition)			\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 0))
/**
 * wait_event_interruptible_locked_irq - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held.  This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked_irq(wq, condition)		\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 1))
/**
 * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held.  This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with an WQ_FLAG_EXCLUSIVE flag
 * set thus when other process waits process on the list if this
 * process is awaken further processes are not considered.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked(wq, condition)	\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 0))
/**
 * wait_event_interruptible_exclusive_locked_irq - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held.  This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with an WQ_FLAG_EXCLUSIVE flag
 * set thus when other process waits process on the list if this
 * process is awaken further processes are not considered.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked_irq(wq, condition)	\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 1))
#define __wait_event_killable(wq, condition, ret)			\
do {									\
	DEFINE_WAIT(__wait);						\
									\
	for (;;) {							\
		prepare_to_wait(&wq, &__wait, TASK_KILLABLE);		\
		if (condition)						\
			break;						\
		if (!fatal_signal_pending(current)) {			\
			schedule();					\
			continue;					\
		}							\
		ret = -ERESTARTSYS;					\
		break;							\
	}								\
	finish_wait(&wq, &__wait);					\
} while (0)

/**
 * wait_event_killable - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_KILLABLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_killable(wq, condition)				\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__wait_event_killable(wq, condition, __ret);		\
	__ret;								\
})
570 * These are the old interfaces to sleep waiting for an event.
571 * They are racy. DO NOT use them, use the wait_event* interfaces above.
572 * We plan to remove these interfaces.
574 extern void sleep_on(wait_queue_head_t *q);
575 extern long sleep_on_timeout(wait_queue_head_t *q,
576 signed long timeout);
577 extern void interruptible_sleep_on(wait_queue_head_t *q);
578 extern long interruptible_sleep_on_timeout(wait_queue_head_t *q,
579 signed long timeout);
582 * Waitqueues which are removed from the waitqueue_head at wakeup time
584 void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
585 void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state);
586 void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
587 void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait,
588 unsigned int mode, void *key);
589 int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
590 int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
/* On-stack waiter whose wakeup callback is @function. */
#define DEFINE_WAIT_FUNC(name, function)				\
	wait_queue_t name = {						\
		.private	= current,				\
		.func		= function,				\
		.task_list	= LIST_HEAD_INIT((name).task_list),	\
	}

#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)

#define DEFINE_WAIT_BIT(name, word, bit)				\
	struct wait_bit_queue name = {					\
		.key = __WAIT_BIT_KEY_INITIALIZER(word, bit),		\
		.wait	= {						\
			.private	= current,			\
			.func		= wake_bit_function,		\
			.task_list	=				\
				LIST_HEAD_INIT((name).wait.task_list),	\
		},							\
	}

#define init_wait(wait)							\
	do {								\
		(wait)->private = current;				\
		(wait)->func = autoremove_wake_function;		\
		INIT_LIST_HEAD(&(wait)->task_list);			\
	} while (0)
/**
 * wait_on_bit - wait for a bit to be cleared
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that waits on a bit.
 * For instance, if one were to have waiters on a bitflag, one would
 * call wait_on_bit() in threads waiting for the bit to clear.
 * One uses wait_on_bit() where one is waiting for the bit to clear,
 * but has no intention of setting it.
 */
static inline int wait_on_bit(void *word, int bit,
				int (*action)(void *), unsigned mode)
{
	/* Fast path: bit already clear, no need for the out-of-line wait. */
	if (!test_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit(word, bit, action, mode);
}
/**
 * wait_on_bit_lock - wait for a bit to be cleared, when wanting to set it
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that waits on a bit
 * when one intends to set it, for instance, trying to lock bitflags.
 * For instance, if one were to have waiters trying to set bitflag
 * and waiting for it to clear before setting it, one would call
 * wait_on_bit() in threads waiting to be able to set the bit.
 * One uses wait_on_bit_lock() where one is waiting for the bit to
 * clear with the intention of setting it, and when done, clearing it.
 */
static inline int wait_on_bit_lock(void *word, int bit,
				int (*action)(void *), unsigned mode)
{
	/* Fast path: atomically acquired the bit without contention. */
	if (!test_and_set_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit_lock(word, bit, action, mode);
}
665 #endif /* __KERNEL__ */
667 #endif