/*
 *	linux/kernel/softirq.c
 *
 *	Copyright (C) 1992 Linus Torvalds
 *
 * Fixed a disable_bh()/enable_bh() race (was causing a console lockup)
 * due to non-atomic handling of bh_mask_count. Copyright (C) 1998 Andrea Arcangeli
 *
 * Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 */
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if a softirq is serialized, only the local cpu is marked for
     execution. Hence, we get something sort of weak cpu binding.
     Though it is still not clear whether it will result in better
     locality or not.
   - These softirqs are not masked by global cli() and start_bh_atomic()
     (for clear reasons). Hence, old parts of code still using global locks
     MUST NOT use softirqs, but insert interfacing routines acquiring
     global locks. E.g. look at the BHs implementation.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: each serialized wrt itself.
   - Bottom halves: globally serialized, grr...
 */
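/* Per-cpu softirq state: `mask' holds the softirqs enabled on that cpu,
 * `active' the ones pending there; do_softirq() runs active & mask.
 */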
struct softirq_state softirq_state[NR_CPUS];
static struct softirq_action softirq_vec[32];
asmlinkage void do_softirq()
{
	int cpu = smp_processor_id();
	__u32 active, mask;

	/* Never run softirqs nested inside hard irq or bh context;
	 * the outermost level handles everything pending.
	 */
	if (in_interrupt())
		return;

	local_bh_disable();

	local_irq_disable();
	mask = softirq_state[cpu].mask;
	active = softirq_state[cpu].active & mask;

	if (active) {
		struct softirq_action *h;

restart:
		/* Reset active bitmask before enabling irqs */
		softirq_state[cpu].active &= ~active;

		local_irq_enable();

		h = softirq_vec;
		mask &= ~active;

		do {
			if (active & 1)
				h->action(h);
			h++;
			active >>= 1;
		} while (active);

		local_irq_disable();

		active = softirq_state[cpu].active;
		if ((active &= mask) != 0)
			goto retry;
	}

	local_bh_enable();

	/* Leave with locally disabled hard irqs. It is critical to close
	 * the window for infinite recursion: while we held the local bh
	 * count, it protected us. Now we are defenceless.
	 */
	return;

retry:
	goto restart;
}
static spinlock_t softirq_mask_lock = SPIN_LOCK_UNLOCKED;

void open_softirq(int nr, void (*action)(struct softirq_action*), void *data)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&softirq_mask_lock, flags);
	softirq_vec[nr].data = data;
	softirq_vec[nr].action = action;

	for (i=0; i<NR_CPUS; i++)
		softirq_state[i].mask |= (1<<nr);
	spin_unlock_irqrestore(&softirq_mask_lock, flags);
}
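/* Example sketch of how a subsystem might register and raise a softirq
 * with this API. EXAMPLE_SOFTIRQ and the handler are hypothetical; real
 * users (e.g. NET_TX_SOFTIRQ, NET_RX_SOFTIRQ) have reserved numbers in
 * <linux/interrupt.h>.
 */
#if 0
static void example_action(struct softirq_action *h)
{
	/* Runs with hard irqs enabled; may run concurrently on other cpus. */
}

static void example_setup(void)
{
	unsigned long flags;

	open_softirq(EXAMPLE_SOFTIRQ, example_action, NULL);

	/* Mark the softirq pending on this cpu; it runs at the next
	 * do_softirq() invocation (e.g. on irq exit). The active bitmask
	 * is modified with irqs off, as the callers in this file do.
	 */
	local_irq_save(flags);
	__cpu_raise_softirq(smp_processor_id(), EXAMPLE_SOFTIRQ);
	local_irq_restore(flags);
}
#endif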
/* Tasklets */

struct tasklet_head tasklet_vec[NR_CPUS] __cacheline_aligned;

static void tasklet_action(struct softirq_action *a)
{
	int cpu = smp_processor_id();
	struct tasklet_struct *list;

	local_irq_disable();
	list = tasklet_vec[cpu].list;
	tasklet_vec[cpu].list = NULL;
	local_irq_enable();

	while (list != NULL) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (atomic_read(&t->count) == 0) {
				clear_bit(TASKLET_STATE_SCHED, &t->state);

				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		/* Either running on another cpu or disabled: requeue it
		 * and let the softirq retry later.
		 */
		local_irq_disable();
		t->next = tasklet_vec[cpu].list;
		tasklet_vec[cpu].list = t;
		__cpu_raise_softirq(cpu, TASKLET_SOFTIRQ);
		local_irq_enable();
	}
}
/* Same as tasklet_action(), but for the high-priority HI_SOFTIRQ list. */

struct tasklet_head tasklet_hi_vec[NR_CPUS] __cacheline_aligned;

static void tasklet_hi_action(struct softirq_action *a)
{
	int cpu = smp_processor_id();
	struct tasklet_struct *list;

	local_irq_disable();
	list = tasklet_hi_vec[cpu].list;
	tasklet_hi_vec[cpu].list = NULL;
	local_irq_enable();

	while (list != NULL) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (atomic_read(&t->count) == 0) {
				clear_bit(TASKLET_STATE_SCHED, &t->state);

				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = tasklet_hi_vec[cpu].list;
		tasklet_hi_vec[cpu].list = t;
		__cpu_raise_softirq(cpu, HI_SOFTIRQ);
		local_irq_enable();
	}
}
void tasklet_init(struct tasklet_struct *t,
		  void (*func)(unsigned long), unsigned long data)
{
	t->func = func;
	t->data = data;
	t->state = 0;
	atomic_set(&t->count, 0);
}
void tasklet_kill(struct tasklet_struct *t)
{
	if (in_interrupt())
		printk("Attempt to kill tasklet from interrupt\n");

	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
		current->state = TASK_RUNNING;
		do {
			current->policy |= SCHED_YIELD;
			schedule();
		} while (test_bit(TASKLET_STATE_SCHED, &t->state));
	}
	tasklet_unlock_wait(t);
	clear_bit(TASKLET_STATE_SCHED, &t->state);
}
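/* Example sketch of a typical tasklet life cycle with this API. The
 * names example_tasklet/example_handler are hypothetical;
 * tasklet_schedule() is declared in <linux/interrupt.h>.
 */
#if 0
static void example_handler(unsigned long data)
{
	/* Serialized wrt itself; runs with hard irqs enabled. */
}

static struct tasklet_struct example_tasklet;

static void example_start(void)
{
	tasklet_init(&example_tasklet, example_handler, 0);
	tasklet_schedule(&example_tasklet);	/* queue on TASKLET_SOFTIRQ */
}

static void example_stop(void)
{
	tasklet_kill(&example_tasklet);		/* wait out a pending run */
}
#endif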
/* Old style BHs */

static void (*bh_base[32])(void);
struct tasklet_struct bh_task_vec[32];

/* BHs are serialized by the spinlock global_bh_lock.

   It is still possible to implement synchronize_bh() as
   spin_unlock_wait(&global_bh_lock). That operation is not used
   by the kernel now; the lock remains non-private only because
   of wait_on_irq().

   It can be removed only after auditing all the BHs.
 */
spinlock_t global_bh_lock = SPIN_LOCK_UNLOCKED;
static void bh_action(unsigned long nr)
{
	int cpu = smp_processor_id();

	if (!spin_trylock(&global_bh_lock))
		goto resched;

	if (!hardirq_trylock(cpu))
		goto resched_unlock;

	if (bh_base[nr])
		bh_base[nr]();

	hardirq_endlock(cpu);
	spin_unlock(&global_bh_lock);
	return;

resched_unlock:
	spin_unlock(&global_bh_lock);
resched:
	mark_bh(nr);
}
void init_bh(int nr, void (*routine)(void))
{
	bh_base[nr] = routine;
	mb();
}

void remove_bh(int nr)
{
	tasklet_kill(bh_task_vec+nr);
	bh_base[nr] = NULL;
}
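/* Example sketch of the old-style BH interface layered on the tasklets
 * above. EXAMPLE_BH and example_bh_routine are hypothetical; the real
 * slots (TIMER_BH, TQUEUE_BH, ...) are enumerated in <linux/interrupt.h>.
 */
#if 0
static void example_bh_routine(void)
{
	/* Runs under global_bh_lock: globally serialized with all BHs. */
}

static void example_bh_setup(void)
{
	init_bh(EXAMPLE_BH, example_bh_routine);
	mark_bh(EXAMPLE_BH);	/* schedule the backing tasklet */
}

static void example_bh_teardown(void)
{
	remove_bh(EXAMPLE_BH);
}
#endif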
void __init softirq_init()
{
	int i;

	for (i=0; i<32; i++)
		tasklet_init(bh_task_vec+i, bh_action, i);

	open_softirq(TASKLET_SOFTIRQ, tasklet_action, NULL);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action, NULL);
}