/*
 * Source: Linux 2.3.18pre1, include/linux/tqueue.h
 * (davej-history.git, blob b02f0766560c648f5192dd36dbeaf64bd4f8af83)
 */
/*
 * tqueue.h --- task queue handling for Linux.
 *
 * Mostly based on a proposed bottom-half replacement code written by
 * Kai Petzke, wpp@marie.physik.tu-berlin.de.
 *
 * Modified for use in the Linux kernel by Theodore Ts'o,
 * tytso@mit.edu.  Any bugs are my fault, not Kai's.
 *
 * The original comment follows below.
 */
13 #ifndef _LINUX_TQUEUE_H
14 #define _LINUX_TQUEUE_H
16 #include <linux/spinlock.h>
17 #include <asm/bitops.h>
18 #include <asm/system.h>
/*
 * New proposed "bottom half" handlers:
 * (C) 1994 Kai Petzke, wpp@marie.physik.tu-berlin.de
 *
 * Advantages:
 * - Bottom halfs are implemented as a linked list.  You can have as many
 *   of them, as you want.
 * - No more scanning of a bit field is required upon call of a bottom half.
 * - Support for chained bottom half lists.  The run_task_queue() function can be
 *   used as a bottom half handler.  This is for example useful for bottom
 *   halfs, which want to be delayed until the next clock tick.
 *
 * Problems:
 * - The queue_task_irq() inline function is only atomic with respect to itself.
 *   Problems can occur, when queue_task_irq() is called from a normal system
 *   call, and an interrupt comes in.  No problems occur, when queue_task_irq()
 *   is called from an interrupt or bottom half, and interrupted, as run_task_queue()
 *   will not be executed/continued before the last interrupt returns.  If in
 *   doubt, use queue_task(), not queue_task_irq().
 * - Bottom halfs are called in the reverse order that they were linked into
 *   the list.
 */
/*
 * One deferred-work element.  "sync" acts as an on-queue flag: queue_task()
 * sets bit 0 with test_and_set_bit() before linking the element in, so the
 * same element can never sit on a queue twice, and run_task_queue() clears
 * it again just before invoking "routine".
 */
struct tq_struct {
	struct tq_struct *next;		/* linked list of active bh's */
	unsigned long sync;		/* must be initialized to zero */
	void (*routine)(void *);	/* function to call */
	void *data;			/* argument to function */
};

/* A task queue is just the head of a singly linked list of elements. */
typedef struct tq_struct * task_queue;

/* Define (and NULL-initialize) a task queue head. */
#define DECLARE_TASK_QUEUE(q)  task_queue q = NULL

/* The system-wide predefined queues. */
extern task_queue tq_timer, tq_immediate, tq_scheduler, tq_disk;
/*
 * To implement your own list of active bottom halfs, use the following
 * two definitions:
 *
 * struct tq_struct *my_bh = NULL;
 * struct tq_struct run_my_bh = {
 *	0, 0, (void (*)(void *)) run_task_queue, &my_bh
 * };
 *
 * To activate a bottom half on your list, use:
 *
 *	queue_task(tq_pointer, &my_bh);
 *
 * To run the bottom halfs on your list put them on the immediate list by:
 *
 *	queue_task(&run_my_bh, &tq_immediate);
 *
 * This allows you to do deferred processing.  For example, you could
 * have a bottom half list tq_timer, which is marked active by the timer
 * interrupt.
 */
78 extern spinlock_t tqueue_lock;
81 * queue_task
83 extern __inline__ void queue_task(struct tq_struct *bh_pointer,
84 task_queue *bh_list)
86 if (!test_and_set_bit(0,&bh_pointer->sync)) {
87 unsigned long flags;
88 spin_lock_irqsave(&tqueue_lock, flags);
89 bh_pointer->next = *bh_list;
90 *bh_list = bh_pointer;
91 spin_unlock_irqrestore(&tqueue_lock, flags);
96 * Call all "bottom halfs" on a given list.
98 extern __inline__ void run_task_queue(task_queue *list)
100 if (*list) {
101 unsigned long flags;
102 struct tq_struct *p;
104 spin_lock_irqsave(&tqueue_lock, flags);
105 p = *list;
106 *list = NULL;
107 spin_unlock_irqrestore(&tqueue_lock, flags);
109 while (p) {
110 void *arg;
111 void (*f) (void *);
112 struct tq_struct *save_p;
113 arg = p -> data;
114 f = p -> routine;
115 save_p = p;
116 p = p -> next;
117 mb();
118 save_p -> sync = 0;
119 (*f)(arg);
124 #endif /* _LINUX_TQUEUE_H */