/*
 *	linux/kernel/softirq.c
 *
 *	Copyright (C) 1992 Linus Torvalds
 *
 * Fixed a disable_bh()/enable_bh() race (was causing a console lockup)
 * due to bh_mask_count not being handled atomically. Copyright (C) 1998 Andrea Arcangeli
 *
 * Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 */

#include <linux/config.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/smp_lock.h>
#include <linux/init.h>

/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if a softirq is serialized, only the local cpu is marked for
     execution. Hence, we get something sort of weak cpu binding.
     Though it is still not clear whether it will result in better
     locality or not.
   - These softirqs are not masked by global cli() and start_bh_atomic()
     (for clear reasons). Hence, old parts of code still using global locks
     MUST NOT use softirqs, but insert interfacing routines acquiring
     global locks. F.e. look at the BHs implementation.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized wrt itself.
   - Bottom halves: globally serialized, grr...
 */
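
/*
 * Illustrative sketch (not part of this file): how a driver might defer
 * work from its hard irq handler into softirq context with a tasklet.
 * All names here (my_tasklet, my_dev_work, my_irq_handler, my_driver_init)
 * are hypothetical; tasklet_schedule() is assumed to be the scheduling
 * helper from <linux/interrupt.h>.
 */
#if 0
static struct tasklet_struct my_tasklet;

static void my_dev_work(unsigned long data)
{
	/* Runs later, in softirq context, on the cpu that scheduled it.
	   A given tasklet never runs concurrently with itself. */
}

static void my_irq_handler(int irq, void *dev_id, struct pt_regs *regs)
{
	/* Do the minimum in hard irq context, defer the rest. */
	tasklet_schedule(&my_tasklet);
}

static int __init my_driver_init(void)
{
	tasklet_init(&my_tasklet, my_dev_work, 0);
	return 0;
}
#endif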

/* No separate irq_stat for s390, it is part of PSA */
#if !defined(CONFIG_ARCH_S390)
irq_cpustat_t irq_stat[NR_CPUS];
#endif	/* CONFIG_ARCH_S390 */

static struct softirq_action softirq_vec[32] __cacheline_aligned;

asmlinkage void do_softirq()
{
	int cpu = smp_processor_id();
	__u32 active, mask;

	if (in_interrupt())
		return;

	local_bh_disable();

	local_irq_disable();
	mask = softirq_mask(cpu);
	active = softirq_active(cpu) & mask;

	if (active) {
		struct softirq_action *h;

restart:
		/* Reset active bitmask before enabling irqs */
		softirq_active(cpu) &= ~active;

		local_irq_enable();

		h = softirq_vec;
		mask &= ~active;

		do {
			if (active & 1)
				h->action(h);
			h++;
			active >>= 1;
		} while (active);

		local_irq_disable();

		active = softirq_active(cpu);
		if ((active &= mask) != 0)
			goto retry;
	}

	local_bh_enable();

	/* Leave with locally disabled hard irqs.  It is critical to close
	 * the window for infinite recursion: while we held the local bh
	 * count, it protected us.  Now we are defenceless.
	 */
	return;

retry:
	goto restart;
}

static spinlock_t softirq_mask_lock = SPIN_LOCK_UNLOCKED;

void open_softirq(int nr, void (*action)(struct softirq_action*), void *data)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&softirq_mask_lock, flags);
	softirq_vec[nr].data = data;
	softirq_vec[nr].action = action;

	for (i=0; i<NR_CPUS; i++)
		softirq_mask(i) |= (1<<nr);
	spin_unlock_irqrestore(&softirq_mask_lock, flags);
}
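
/*
 * Illustrative sketch (not part of this file): how a subsystem might
 * register and raise its own softirq.  MY_SOFTIRQ, my_action() and
 * my_subsys_init() are hypothetical; a real caller uses one of the
 * softirq numbers defined in <linux/interrupt.h> (e.g. NET_RX_SOFTIRQ).
 */
#if 0
static void my_action(struct softirq_action *h)
{
	/* Called from do_softirq() with hard irqs enabled, on the
	   cpu that marked it active. */
}

static void __init my_subsys_init(void)
{
	open_softirq(MY_SOFTIRQ, my_action, NULL);
}

static void my_kick(void)
{
	/* Mark the softirq pending on this cpu; called from irq
	   context or with local irqs otherwise disabled. */
	__cpu_raise_softirq(smp_processor_id(), MY_SOFTIRQ);
}
#endif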

/* Tasklets */

struct tasklet_head tasklet_vec[NR_CPUS] __cacheline_aligned;

static void tasklet_action(struct softirq_action *a)
{
	int cpu = smp_processor_id();
	struct tasklet_struct *list;

	local_irq_disable();
	list = tasklet_vec[cpu].list;
	tasklet_vec[cpu].list = NULL;
	local_irq_enable();

	while (list != NULL) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (atomic_read(&t->count) == 0) {
				clear_bit(TASKLET_STATE_SCHED, &t->state);

				t->func(t->data);
				/*
				 * tasklet_trylock() uses test_and_set_bit, which implies
				 * an mb when it returns zero, thus we need the explicit
				 * mb only here: while closing the critical section.
				 */
#ifdef CONFIG_SMP
				smp_mb__before_clear_bit();
#endif
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}
		/* Tasklet is running elsewhere or disabled; put it back on
		   the list and re-raise the softirq so it is retried. */
		local_irq_disable();
		t->next = tasklet_vec[cpu].list;
		tasklet_vec[cpu].list = t;
		__cpu_raise_softirq(cpu, TASKLET_SOFTIRQ);
		local_irq_enable();
	}
}

struct tasklet_head tasklet_hi_vec[NR_CPUS] __cacheline_aligned;

static void tasklet_hi_action(struct softirq_action *a)
{
	int cpu = smp_processor_id();
	struct tasklet_struct *list;

	local_irq_disable();
	list = tasklet_hi_vec[cpu].list;
	tasklet_hi_vec[cpu].list = NULL;
	local_irq_enable();

	while (list != NULL) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (atomic_read(&t->count) == 0) {
				clear_bit(TASKLET_STATE_SCHED, &t->state);

				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}
		local_irq_disable();
		t->next = tasklet_hi_vec[cpu].list;
		tasklet_hi_vec[cpu].list = t;
		__cpu_raise_softirq(cpu, HI_SOFTIRQ);
		local_irq_enable();
	}
}

void tasklet_init(struct tasklet_struct *t,
		  void (*func)(unsigned long), unsigned long data)
{
	t->func = func;
	t->data = data;
	t->state = 0;
	atomic_set(&t->count, 0);
}

void tasklet_kill(struct tasklet_struct *t)
{
	if (in_interrupt())
		printk("Attempt to kill tasklet from interrupt\n");

	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
		current->state = TASK_RUNNING;
		do {
			current->policy |= SCHED_YIELD;
			schedule();
		} while (test_bit(TASKLET_STATE_SCHED, &t->state));
	}
	tasklet_unlock_wait(t);
	clear_bit(TASKLET_STATE_SCHED, &t->state);
}
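
/*
 * Illustrative sketch (not part of this file): tearing down a tasklet
 * safely from process context, e.g. in a hypothetical module cleanup
 * routine.  my_tasklet and my_driver_cleanup are assumed names.
 */
#if 0
static struct tasklet_struct my_tasklet;	/* as in the sketch near the top */

static void my_driver_cleanup(void)
{
	/* Make sure nothing can call tasklet_schedule() on it any more
	   (e.g. the irq has been freed), then wait for a pending or
	   running instance to finish.  Must not be called from
	   interrupt context. */
	tasklet_kill(&my_tasklet);
}
#endif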

/* Old style BHs */

static void (*bh_base[32])(void);
struct tasklet_struct bh_task_vec[32];

/* BHs are serialized by spinlock global_bh_lock.

   It is still possible to implement synchronize_bh() as
   spin_unlock_wait(&global_bh_lock).  That operation is not used
   by the kernel now; the lock is kept non-private only because of
   wait_on_irq().

   It can be removed only after auditing all the BHs.
   (A usage sketch for the BH interface follows remove_bh() below.)
 */
spinlock_t global_bh_lock = SPIN_LOCK_UNLOCKED;

static void bh_action(unsigned long nr)
{
	int cpu = smp_processor_id();

	if (!spin_trylock(&global_bh_lock))
		goto resched;

	if (!hardirq_trylock(cpu))
		goto resched_unlock;

	if (bh_base[nr])
		bh_base[nr]();

	hardirq_endlock(cpu);
	spin_unlock(&global_bh_lock);
	return;

resched_unlock:
	spin_unlock(&global_bh_lock);
resched:
	mark_bh(nr);
}

void init_bh(int nr, void (*routine)(void))
{
	bh_base[nr] = routine;
	mb();
}

void remove_bh(int nr)
{
	tasklet_kill(bh_task_vec+nr);
	bh_base[nr] = NULL;
}
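
/*
 * Illustrative sketch (not part of this file): how an old-style driver
 * uses the BH interface above.  MY_BH, my_bh_handler and the my_bh_*
 * wrappers are hypothetical; mark_bh() is assumed to come from
 * <linux/interrupt.h> and to schedule the corresponding bh_task_vec[]
 * tasklet.
 */
#if 0
static void my_bh_handler(void)
{
	/* Runs globally serialized against all other BHs. */
}

static void __init my_bh_setup(void)
{
	init_bh(MY_BH, my_bh_handler);
}

static void my_bh_irq_handler(int irq, void *dev_id, struct pt_regs *regs)
{
	/* Ask for the BH to be run after irq handling. */
	mark_bh(MY_BH);
}

static void my_bh_teardown(void)
{
	remove_bh(MY_BH);
}
#endif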

void __init softirq_init()
{
	int i;

	for (i=0; i<32; i++)
		tasklet_init(bh_task_vec+i, bh_action, i);

	open_softirq(TASKLET_SOFTIRQ, tasklet_action, NULL);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action, NULL);
}