/* include/linux/wait.h */
#ifndef _LINUX_WAIT_H
#define _LINUX_WAIT_H

#include <linux/list.h>
#include <linux/stddef.h>
#include <linux/spinlock.h>
#include <asm/current.h>
#include <uapi/linux/wait.h>

typedef struct __wait_queue wait_queue_t;
typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key);
int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key);

struct __wait_queue {
        unsigned int flags;
#define WQ_FLAG_EXCLUSIVE       0x01
        void *private;
        wait_queue_func_t func;
        struct list_head task_list;
};

struct wait_bit_key {
        void *flags;
        int bit_nr;
#define WAIT_ATOMIC_T_BIT_NR    -1
};

struct wait_bit_queue {
        struct wait_bit_key key;
        wait_queue_t wait;
};

struct __wait_queue_head {
        spinlock_t lock;
        struct list_head task_list;
};
typedef struct __wait_queue_head wait_queue_head_t;

struct task_struct;
/*
 * Macros for declaration and initialisation of the datatypes
 */
#define __WAITQUEUE_INITIALIZER(name, tsk) {                            \
        .private        = tsk,                                          \
        .func           = default_wake_function,                        \
        .task_list      = { NULL, NULL } }

#define DECLARE_WAITQUEUE(name, tsk)                                    \
        wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk)

#define __WAIT_QUEUE_HEAD_INITIALIZER(name) {                           \
        .lock           = __SPIN_LOCK_UNLOCKED(name.lock),              \
        .task_list      = { &(name).task_list, &(name).task_list } }

#define DECLARE_WAIT_QUEUE_HEAD(name) \
        wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name)

#define __WAIT_BIT_KEY_INITIALIZER(word, bit)                           \
        { .flags = word, .bit_nr = bit, }

#define __WAIT_ATOMIC_T_KEY_INITIALIZER(p)                              \
        { .flags = p, .bit_nr = WAIT_ATOMIC_T_BIT_NR, }

extern void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *);

#define init_waitqueue_head(q)                                  \
        do {                                                    \
                static struct lock_class_key __key;             \
                                                                \
                __init_waitqueue_head((q), #q, &__key);         \
        } while (0)

#ifdef CONFIG_LOCKDEP
# define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
        ({ init_waitqueue_head(&name); name; })
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
        wait_queue_head_t name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
#else
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
#endif
static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
{
        q->flags = 0;
        q->private = p;
        q->func = default_wake_function;
}

static inline void init_waitqueue_func_entry(wait_queue_t *q,
                                             wait_queue_func_t func)
{
        q->flags = 0;
        q->private = NULL;
        q->func = func;
}

static inline int waitqueue_active(wait_queue_head_t *q)
{
        return !list_empty(&q->task_list);
}

extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait);
extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);

static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
{
        list_add(&new->task_list, &head->task_list);
}

/*
 * Used for wake-one threads:
 */
static inline void __add_wait_queue_exclusive(wait_queue_head_t *q,
                                              wait_queue_t *wait)
{
        wait->flags |= WQ_FLAG_EXCLUSIVE;
        __add_wait_queue(q, wait);
}

static inline void __add_wait_queue_tail(wait_queue_head_t *head,
                                         wait_queue_t *new)
{
        list_add_tail(&new->task_list, &head->task_list);
}

static inline void __add_wait_queue_tail_exclusive(wait_queue_head_t *q,
                                                   wait_queue_t *wait)
{
        wait->flags |= WQ_FLAG_EXCLUSIVE;
        __add_wait_queue_tail(q, wait);
}

static inline void __remove_wait_queue(wait_queue_head_t *head,
                                       wait_queue_t *old)
{
        list_del(&old->task_list);
}
void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr,
                        void *key);
void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_bit(wait_queue_head_t *, void *, int);
int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
void wake_up_bit(void *, int);
void wake_up_atomic_t(atomic_t *);
int out_of_line_wait_on_bit(void *, int, int (*)(void *), unsigned);
int out_of_line_wait_on_bit_lock(void *, int, int (*)(void *), unsigned);
int out_of_line_wait_on_atomic_t(atomic_t *, int (*)(atomic_t *), unsigned);
wait_queue_head_t *bit_waitqueue(void *, int);

#define wake_up(x)                      __wake_up(x, TASK_NORMAL, 1, NULL)
#define wake_up_nr(x, nr)               __wake_up(x, TASK_NORMAL, nr, NULL)
#define wake_up_all(x)                  __wake_up(x, TASK_NORMAL, 0, NULL)
#define wake_up_locked(x)               __wake_up_locked((x), TASK_NORMAL, 1)
#define wake_up_all_locked(x)           __wake_up_locked((x), TASK_NORMAL, 0)

#define wake_up_interruptible(x)        __wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_interruptible_nr(x, nr) __wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_interruptible_all(x)    __wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
#define wake_up_interruptible_sync(x)   __wake_up_sync((x), TASK_INTERRUPTIBLE, 1)

/*
 * Wakeup macros to be used to report events to the targets.
 */
#define wake_up_poll(x, m)                              \
        __wake_up(x, TASK_NORMAL, 1, (void *) (m))
#define wake_up_locked_poll(x, m)                       \
        __wake_up_locked_key((x), TASK_NORMAL, (void *) (m))
#define wake_up_interruptible_poll(x, m)                \
        __wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
#define wake_up_interruptible_sync_poll(x, m)           \
        __wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m))
#define __wait_event(wq, condition)                                     \
do {                                                                    \
        DEFINE_WAIT(__wait);                                            \
                                                                        \
        for (;;) {                                                      \
                prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE);    \
                if (condition)                                          \
                        break;                                          \
                schedule();                                             \
        }                                                               \
        finish_wait(&wq, &__wait);                                      \
} while (0)
/**
 * wait_event - sleep until a condition becomes true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event(wq, condition)                                       \
do {                                                                    \
        if (condition)                                                  \
                break;                                                  \
        __wait_event(wq, condition);                                    \
} while (0)
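/*
 * Illustrative usage sketch (not part of this header; the identifiers
 * ex_wq and ex_done are invented for the example): a waiter blocks in
 * wait_event() until a waker makes the condition true and calls wake_up().
 *
 *      static DECLARE_WAIT_QUEUE_HEAD(ex_wq);
 *      static int ex_done;
 *
 *      waiter:
 *              wait_event(ex_wq, ex_done != 0);
 *
 *      waker:
 *              ex_done = 1;
 *              wake_up(&ex_wq);
 */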
#define __wait_event_timeout(wq, condition, ret)                        \
do {                                                                    \
        DEFINE_WAIT(__wait);                                            \
                                                                        \
        for (;;) {                                                      \
                prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE);    \
                if (condition)                                          \
                        break;                                          \
                ret = schedule_timeout(ret);                            \
                if (!ret)                                               \
                        break;                                          \
        }                                                               \
        if (!ret && (condition))                                        \
                ret = 1;                                                \
        finish_wait(&wq, &__wait);                                      \
} while (0)
/**
 * wait_event_timeout - sleep until a condition becomes true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if the @timeout elapsed, or the remaining
 * jiffies (at least 1) if the @condition evaluated to %true before
 * the @timeout elapsed.
 */
#define wait_event_timeout(wq, condition, timeout)                      \
({                                                                      \
        long __ret = timeout;                                           \
        if (!(condition))                                               \
                __wait_event_timeout(wq, condition, __ret);             \
        __ret;                                                          \
})
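/*
 * Illustrative sketch, reusing the hypothetical ex_wq/ex_done from above:
 * wait up to one second; a return value of 0 means the timeout elapsed
 * before the condition became true.
 *
 *      long left = wait_event_timeout(ex_wq, ex_done != 0, HZ);
 *
 *      if (!left)
 *              pr_warn("timed out waiting for ex_done\n");
 */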
#define __wait_event_interruptible(wq, condition, ret)                  \
do {                                                                    \
        DEFINE_WAIT(__wait);                                            \
                                                                        \
        for (;;) {                                                      \
                prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE);      \
                if (condition)                                          \
                        break;                                          \
                if (!signal_pending(current)) {                         \
                        schedule();                                     \
                        continue;                                       \
                }                                                       \
                ret = -ERESTARTSYS;                                     \
                break;                                                  \
        }                                                               \
        finish_wait(&wq, &__wait);                                      \
} while (0)
/**
 * wait_event_interruptible - sleep until a condition becomes true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible(wq, condition)                         \
({                                                                      \
        int __ret = 0;                                                  \
        if (!(condition))                                               \
                __wait_event_interruptible(wq, condition, __ret);       \
        __ret;                                                          \
})
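/*
 * Illustrative sketch (hypothetical names as above): an interruptible
 * wait usually propagates its error back to the caller so the syscall
 * can be restarted once the signal has been handled.
 *
 *      int err = wait_event_interruptible(ex_wq, ex_done != 0);
 *
 *      if (err)
 *              return err;             ... can only be -ERESTARTSYS here
 */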
#define __wait_event_interruptible_timeout(wq, condition, ret)          \
do {                                                                    \
        DEFINE_WAIT(__wait);                                            \
                                                                        \
        for (;;) {                                                      \
                prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE);      \
                if (condition)                                          \
                        break;                                          \
                if (!signal_pending(current)) {                         \
                        ret = schedule_timeout(ret);                    \
                        if (!ret)                                       \
                                break;                                  \
                        continue;                                       \
                }                                                       \
                ret = -ERESTARTSYS;                                     \
                break;                                                  \
        }                                                               \
        if (!ret && (condition))                                        \
                ret = 1;                                                \
        finish_wait(&wq, &__wait);                                      \
} while (0)
/**
 * wait_event_interruptible_timeout - sleep until a condition becomes true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @timeout elapsed, -%ERESTARTSYS if it was interrupted by
 * a signal, or the remaining jiffies (at least 1) if the @condition
 * evaluated to %true before the @timeout elapsed.
 */
#define wait_event_interruptible_timeout(wq, condition, timeout)        \
({                                                                      \
        long __ret = timeout;                                           \
        if (!(condition))                                               \
                __wait_event_interruptible_timeout(wq, condition, __ret); \
        __ret;                                                          \
})
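/*
 * Illustrative sketch (hypothetical names as above) distinguishing the
 * three possible outcomes:
 *
 *      long ret = wait_event_interruptible_timeout(ex_wq, ex_done, HZ);
 *
 *      if (ret < 0)
 *              return ret;             ... interrupted, -ERESTARTSYS
 *      if (ret == 0)
 *              return -ETIMEDOUT;      ... the full second elapsed
 *      ... otherwise the condition became true with "ret" jiffies left
 */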
#define __wait_event_hrtimeout(wq, condition, timeout, state)           \
({                                                                      \
        int __ret = 0;                                                  \
        DEFINE_WAIT(__wait);                                            \
        struct hrtimer_sleeper __t;                                     \
                                                                        \
        hrtimer_init_on_stack(&__t.timer, CLOCK_MONOTONIC,              \
                              HRTIMER_MODE_REL);                        \
        hrtimer_init_sleeper(&__t, current);                            \
        if ((timeout).tv64 != KTIME_MAX)                                \
                hrtimer_start_range_ns(&__t.timer, timeout,             \
                                       current->timer_slack_ns,         \
                                       HRTIMER_MODE_REL);               \
                                                                        \
        for (;;) {                                                      \
                prepare_to_wait(&wq, &__wait, state);                   \
                if (condition)                                          \
                        break;                                          \
                if (state == TASK_INTERRUPTIBLE &&                      \
                    signal_pending(current)) {                          \
                        __ret = -ERESTARTSYS;                           \
                        break;                                          \
                }                                                       \
                if (!__t.task) {                                        \
                        __ret = -ETIME;                                 \
                        break;                                          \
                }                                                       \
                schedule();                                             \
        }                                                               \
                                                                        \
        hrtimer_cancel(&__t.timer);                                     \
        destroy_hrtimer_on_stack(&__t.timer);                           \
        finish_wait(&wq, &__wait);                                      \
        __ret;                                                          \
})
/**
 * wait_event_hrtimeout - sleep until a condition becomes true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true or the timeout elapses.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, or -ETIME if the timeout
 * elapsed.
 */
#define wait_event_hrtimeout(wq, condition, timeout)                    \
({                                                                      \
        int __ret = 0;                                                  \
        if (!(condition))                                               \
                __ret = __wait_event_hrtimeout(wq, condition, timeout,  \
                                               TASK_UNINTERRUPTIBLE);   \
        __ret;                                                          \
})
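/*
 * Illustrative sketch (hypothetical names as above): wait for ex_done
 * with a 100 us high-resolution timeout built with ktime_set().
 *
 *      int err = wait_event_hrtimeout(ex_wq, ex_done,
 *                                     ktime_set(0, 100 * NSEC_PER_USEC));
 *
 *      if (err == -ETIME)
 *              ... the timeout expired before ex_done became true
 */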
/**
 * wait_event_interruptible_hrtimeout - sleep until a condition becomes true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, -ERESTARTSYS if it was
 * interrupted by a signal, or -ETIME if the timeout elapsed.
 */
#define wait_event_interruptible_hrtimeout(wq, condition, timeout)      \
({                                                                      \
        long __ret = 0;                                                 \
        if (!(condition))                                               \
                __ret = __wait_event_hrtimeout(wq, condition, timeout,  \
                                               TASK_INTERRUPTIBLE);     \
        __ret;                                                          \
})
#define __wait_event_interruptible_exclusive(wq, condition, ret)        \
do {                                                                    \
        DEFINE_WAIT(__wait);                                            \
                                                                        \
        for (;;) {                                                      \
                prepare_to_wait_exclusive(&wq, &__wait,                 \
                                          TASK_INTERRUPTIBLE);          \
                if (condition) {                                        \
                        finish_wait(&wq, &__wait);                      \
                        break;                                          \
                }                                                       \
                if (!signal_pending(current)) {                         \
                        schedule();                                     \
                        continue;                                       \
                }                                                       \
                ret = -ERESTARTSYS;                                     \
                abort_exclusive_wait(&wq, &__wait,                      \
                                     TASK_INTERRUPTIBLE, NULL);         \
                break;                                                  \
        }                                                               \
} while (0)

#define wait_event_interruptible_exclusive(wq, condition)               \
({                                                                      \
        int __ret = 0;                                                  \
        if (!(condition))                                               \
                __wait_event_interruptible_exclusive(wq, condition, __ret); \
        __ret;                                                          \
})
#define __wait_event_interruptible_locked(wq, condition, exclusive, irq) \
({                                                                      \
        int __ret = 0;                                                  \
        DEFINE_WAIT(__wait);                                            \
        if (exclusive)                                                  \
                __wait.flags |= WQ_FLAG_EXCLUSIVE;                      \
        do {                                                            \
                if (likely(list_empty(&__wait.task_list)))              \
                        __add_wait_queue_tail(&(wq), &__wait);          \
                set_current_state(TASK_INTERRUPTIBLE);                  \
                if (signal_pending(current)) {                          \
                        __ret = -ERESTARTSYS;                           \
                        break;                                          \
                }                                                       \
                if (irq)                                                \
                        spin_unlock_irq(&(wq).lock);                    \
                else                                                    \
                        spin_unlock(&(wq).lock);                        \
                schedule();                                             \
                if (irq)                                                \
                        spin_lock_irq(&(wq).lock);                      \
                else                                                    \
                        spin_lock(&(wq).lock);                          \
        } while (!(condition));                                         \
        __remove_wait_queue(&(wq), &__wait);                            \
        __set_current_state(TASK_RUNNING);                              \
        __ret;                                                          \
})
/**
 * wait_event_interruptible_locked - sleep until a condition becomes true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock held. The spinlock is unlocked while
 * sleeping, but @condition is tested with the lock held, and the lock
 * is held again when this macro exits.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked(wq, condition)                  \
        ((condition)                                                    \
         ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 0))
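/*
 * Illustrative sketch (hypothetical names): the caller already holds
 * ex_wq.lock, taken with plain spin_lock(); the macro drops and retakes
 * it only around schedule(), and the waker uses the same lock together
 * with wake_up_locked().
 *
 *      waiter:
 *              spin_lock(&ex_wq.lock);
 *              err = wait_event_interruptible_locked(ex_wq, ex_done);
 *              spin_unlock(&ex_wq.lock);
 *
 *      waker:
 *              spin_lock(&ex_wq.lock);
 *              ex_done = 1;
 *              wake_up_locked(&ex_wq);
 *              spin_unlock(&ex_wq.lock);
 */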
/**
 * wait_event_interruptible_locked_irq - sleep until a condition becomes true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock held. The spinlock is unlocked while
 * sleeping, but @condition is tested with the lock held, and the lock
 * is held again when this macro exits.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked_irq(wq, condition)              \
        ((condition)                                                    \
         ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 1))
/**
 * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition becomes true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock held. The spinlock is unlocked while
 * sleeping, but @condition is tested with the lock held, and the lock
 * is held again when this macro exits.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so if this process is woken up, further processes waiting on the
 * list are not considered.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked(wq, condition)        \
        ((condition)                                                    \
         ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 0))
/**
 * wait_event_interruptible_exclusive_locked_irq - sleep exclusively until a condition becomes true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock held. The spinlock is unlocked while
 * sleeping, but @condition is tested with the lock held, and the lock
 * is held again when this macro exits.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so if this process is woken up, further processes waiting on the
 * list are not considered.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked_irq(wq, condition)    \
        ((condition)                                                    \
         ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 1))
#define __wait_event_killable(wq, condition, ret)                       \
do {                                                                    \
        DEFINE_WAIT(__wait);                                            \
                                                                        \
        for (;;) {                                                      \
                prepare_to_wait(&wq, &__wait, TASK_KILLABLE);           \
                if (condition)                                          \
                        break;                                          \
                if (!fatal_signal_pending(current)) {                   \
                        schedule();                                     \
                        continue;                                       \
                }                                                       \
                ret = -ERESTARTSYS;                                     \
                break;                                                  \
        }                                                               \
        finish_wait(&wq, &__wait);                                      \
} while (0)
/**
 * wait_event_killable - sleep until a condition becomes true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_KILLABLE) until the
 * @condition evaluates to true or a fatal signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_killable(wq, condition)                              \
({                                                                      \
        int __ret = 0;                                                  \
        if (!(condition))                                               \
                __wait_event_killable(wq, condition, __ret);            \
        __ret;                                                          \
})
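/*
 * Illustrative sketch (hypothetical names as above): like the
 * interruptible variant, but only a fatal signal (e.g. SIGKILL)
 * breaks the wait; ordinary signals are ignored.
 *
 *      int err = wait_event_killable(ex_wq, ex_done);
 *
 *      if (err)
 *              return err;             ... -ERESTARTSYS, task is being killed
 */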
#define __wait_event_lock_irq(wq, condition, lock, cmd)                 \
do {                                                                    \
        DEFINE_WAIT(__wait);                                            \
                                                                        \
        for (;;) {                                                      \
                prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE);    \
                if (condition)                                          \
                        break;                                          \
                spin_unlock_irq(&lock);                                 \
                cmd;                                                    \
                schedule();                                             \
                spin_lock_irq(&lock);                                   \
        }                                                               \
        finish_wait(&wq, &__wait);                                      \
} while (0)
/**
 * wait_event_lock_irq_cmd - sleep until a condition becomes true. The
 *                           condition is checked under the lock. This
 *                           is expected to be called with the lock
 *                           taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd
 *        and schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *       sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 */
#define wait_event_lock_irq_cmd(wq, condition, lock, cmd)               \
do {                                                                    \
        if (condition)                                                  \
                break;                                                  \
        __wait_event_lock_irq(wq, condition, lock, cmd);                \
} while (0)
/**
 * wait_event_lock_irq - sleep until a condition becomes true. The
 *                       condition is checked under the lock. This
 *                       is expected to be called with the lock
 *                       taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *        and reacquired afterwards.
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 */
#define wait_event_lock_irq(wq, condition, lock)                        \
do {                                                                    \
        if (condition)                                                  \
                break;                                                  \
        __wait_event_lock_irq(wq, condition, lock, );                   \
} while (0)
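/*
 * Illustrative sketch (the names ex_lock, ex_wq and ex_count are
 * invented): @condition is re-tested with ex_lock held; the lock is
 * dropped only across schedule().
 *
 *      spin_lock_irq(&ex_lock);
 *      wait_event_lock_irq(ex_wq, ex_count > 0, ex_lock);
 *      ex_count--;
 *      spin_unlock_irq(&ex_lock);
 */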
#define __wait_event_interruptible_lock_irq(wq, condition,              \
                                            lock, ret, cmd)             \
do {                                                                    \
        DEFINE_WAIT(__wait);                                            \
                                                                        \
        for (;;) {                                                      \
                prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE);      \
                if (condition)                                          \
                        break;                                          \
                if (signal_pending(current)) {                          \
                        ret = -ERESTARTSYS;                             \
                        break;                                          \
                }                                                       \
                spin_unlock_irq(&lock);                                 \
                cmd;                                                    \
                schedule();                                             \
                spin_lock_irq(&lock);                                   \
        }                                                               \
        finish_wait(&wq, &__wait);                                      \
} while (0)
/**
 * wait_event_interruptible_lock_irq_cmd - sleep until a condition becomes true.
 *              The condition is checked under the lock. This is expected to
 *              be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd and
 *        schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *       sleep
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq_cmd(wq, condition, lock, cmd) \
({                                                                      \
        int __ret = 0;                                                  \
                                                                        \
        if (!(condition))                                               \
                __wait_event_interruptible_lock_irq(wq, condition,      \
                                                    lock, __ret, cmd);  \
        __ret;                                                          \
})
/**
 * wait_event_interruptible_lock_irq - sleep until a condition becomes true.
 *              The condition is checked under the lock. This is expected
 *              to be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *        and reacquired afterwards.
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq(wq, condition, lock)          \
({                                                                      \
        int __ret = 0;                                                  \
                                                                        \
        if (!(condition))                                               \
                __wait_event_interruptible_lock_irq(wq, condition,      \
                                                    lock, __ret, );     \
        __ret;                                                          \
})
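/*
 * Illustrative sketch (hypothetical names as above): as with
 * wait_event_lock_irq(), but the wait can also be broken by a signal,
 * in which case -ERESTARTSYS is returned with ex_lock still held.
 *
 *      spin_lock_irq(&ex_lock);
 *      err = wait_event_interruptible_lock_irq(ex_wq, ex_count > 0, ex_lock);
 *      if (!err)
 *              ex_count--;
 *      spin_unlock_irq(&ex_lock);
 */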
/*
 * These are the old interfaces to sleep waiting for an event.
 * They are racy.  DO NOT use them, use the wait_event* interfaces above.
 * We plan to remove these interfaces.
 */
extern void sleep_on(wait_queue_head_t *q);
extern long sleep_on_timeout(wait_queue_head_t *q,
                             signed long timeout);
extern void interruptible_sleep_on(wait_queue_head_t *q);
extern long interruptible_sleep_on_timeout(wait_queue_head_t *q,
                                           signed long timeout);

/*
 * Waitqueues which are removed from the waitqueue_head at wakeup time
 */
void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state);
void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait,
                          unsigned int mode, void *key);
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
#define DEFINE_WAIT_FUNC(name, function)                                \
        wait_queue_t name = {                                           \
                .private        = current,                              \
                .func           = function,                             \
                .task_list      = LIST_HEAD_INIT((name).task_list),     \
        }

#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)
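/*
 * Illustrative sketch of the open-coded wait loop that the wait_event*
 * macros expand to; DEFINE_WAIT(), prepare_to_wait() and finish_wait()
 * are meant for callers that need a custom loop like this one
 * (hypothetical ex_wq/ex_done as above):
 *
 *      DEFINE_WAIT(wait);
 *
 *      for (;;) {
 *              prepare_to_wait(&ex_wq, &wait, TASK_INTERRUPTIBLE);
 *              if (ex_done)
 *                      break;
 *              if (signal_pending(current))
 *                      break;
 *              schedule();
 *      }
 *      finish_wait(&ex_wq, &wait);
 */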
#define DEFINE_WAIT_BIT(name, word, bit)                                \
        struct wait_bit_queue name = {                                  \
                .key = __WAIT_BIT_KEY_INITIALIZER(word, bit),           \
                .wait   = {                                             \
                        .private        = current,                      \
                        .func           = wake_bit_function,            \
                        .task_list      =                               \
                                LIST_HEAD_INIT((name).wait.task_list),  \
                },                                                      \
        }

#define init_wait(wait)                                                 \
        do {                                                            \
                (wait)->private = current;                              \
                (wait)->func = autoremove_wake_function;                \
                INIT_LIST_HEAD(&(wait)->task_list);                     \
                (wait)->flags = 0;                                      \
        } while (0)
/**
 * wait_on_bit - wait for a bit to be cleared
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that waits on a bit.
 * For instance, if one were to have waiters on a bitflag, one would
 * call wait_on_bit() in threads waiting for the bit to clear.
 * One uses wait_on_bit() where one is waiting for the bit to clear,
 * but has no intention of setting it.
 */
static inline int wait_on_bit(void *word, int bit,
                              int (*action)(void *), unsigned mode)
{
        if (!test_bit(bit, word))
                return 0;
        return out_of_line_wait_on_bit(word, bit, action, mode);
}
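/*
 * Illustrative sketch (EX_BUSY, ex_flags and ex_wait_action are invented):
 * sleep until bit EX_BUSY clears in ex_flags. A minimal @action simply
 * schedules and reports no error.
 *
 *      #define EX_BUSY 0
 *      static unsigned long ex_flags;
 *
 *      static int ex_wait_action(void *word)
 *      {
 *              schedule();
 *              return 0;
 *      }
 *
 *      wait_on_bit(&ex_flags, EX_BUSY, ex_wait_action, TASK_UNINTERRUPTIBLE);
 */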
/**
 * wait_on_bit_lock - wait for a bit to be cleared, when wanting to set it
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that waits on a bit
 * when one intends to set it, for instance, trying to lock bitflags.
 * For instance, if one were to have waiters trying to set a bitflag
 * and waiting for it to clear before setting it, one would call
 * wait_on_bit_lock() in threads waiting to be able to set the bit.
 * One uses wait_on_bit_lock() where one is waiting for the bit to
 * clear with the intention of setting it, and when done, clearing it.
 */
static inline int wait_on_bit_lock(void *word, int bit,
                                   int (*action)(void *), unsigned mode)
{
        if (!test_and_set_bit(bit, word))
                return 0;
        return out_of_line_wait_on_bit_lock(word, bit, action, mode);
}
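/*
 * Illustrative sketch (names as in the previous example): take the bit
 * "lock", sleeping while another thread owns it, then release it and
 * wake any waiters, mirroring the pattern used by unlock_page().
 *
 *      wait_on_bit_lock(&ex_flags, EX_BUSY, ex_wait_action,
 *                       TASK_UNINTERRUPTIBLE);
 *      ... EX_BUSY is now set and owned by this thread ...
 *      clear_bit_unlock(EX_BUSY, &ex_flags);
 *      smp_mb__after_clear_bit();
 *      wake_up_bit(&ex_flags, EX_BUSY);
 */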
/**
 * wait_on_atomic_t - Wait for an atomic_t to become 0
 * @val: The atomic value being waited on, a kernel virtual address
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * Wait for an atomic_t to become 0. We abuse the bit-wait waitqueue table for
 * the purpose of getting a waitqueue, but we set the key to a bit number
 * outside of the target 'word'.
 */
static inline
int wait_on_atomic_t(atomic_t *val, int (*action)(atomic_t *), unsigned mode)
{
        if (atomic_read(val) == 0)
                return 0;
        return out_of_line_wait_on_atomic_t(val, action, mode);
}
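/*
 * Illustrative sketch (ex_users and ex_atomic_action are invented):
 * block until a hypothetical reference count drops to zero; the last
 * dropper wakes the waiter with wake_up_atomic_t().
 *
 *      static atomic_t ex_users = ATOMIC_INIT(0);
 *
 *      static int ex_atomic_action(atomic_t *val)
 *      {
 *              schedule();
 *              return 0;
 *      }
 *
 *      waiter:
 *              wait_on_atomic_t(&ex_users, ex_atomic_action,
 *                               TASK_UNINTERRUPTIBLE);
 *
 *      waker:
 *              if (atomic_dec_and_test(&ex_users))
 *                      wake_up_atomic_t(&ex_users);
 */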
#endif /* _LINUX_WAIT_H */