linux-2.6/history.git: kernel/softirq.c (blob 8e1ea53cc032f939b7646bfd6bfb170e23a8aa25)
/*
 * linux/kernel/softirq.c
 *
 * Copyright (C) 1992 Linus Torvalds
 *
 * Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 */

#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if softirq is serialized, only the local cpu is marked for
     execution. Hence, we get something sort of weak cpu binding.
     Though it is still not clear whether it will result in better
     locality or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized wrt itself.
 */
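/*
 * Illustrative sketch (not part of this file) of the model described above:
 * a driver defers work from its hard interrupt path to a tasklet.  Because a
 * tasklet is serialized wrt itself, my_tasklet_fn never races with another
 * instance of itself, and scheduling it only marks the local cpu.  The names
 * my_dev, my_tasklet_fn and my_dev_interrupt are made up for the example.
 */
#if 0
struct my_dev {
        int pending_events;
};

static struct my_dev my_dev;

static void my_tasklet_fn(unsigned long data)
{
        struct my_dev *dev = (struct my_dev *) data;

        /* Softirq context, on the cpu that scheduled the tasklet; never
           run concurrently with itself. */
        dev->pending_events = 0;
}

static DECLARE_TASKLET(my_tasklet, my_tasklet_fn, (unsigned long) &my_dev);

/* Called from the device's hard interrupt handler: */
static void my_dev_interrupt(void)
{
        my_dev.pending_events++;
        /* Defer the heavy lifting; only the local cpu is marked pending. */
        tasklet_schedule(&my_tasklet);
}
#endif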
irq_cpustat_t irq_stat[NR_CPUS];

static struct softirq_action softirq_vec[32] __cacheline_aligned_in_smp;

/*
 * To avoid userspace starvation we cannot loop indefinitely in do_softirq(),
 * but we also don't want to introduce a worst case 1/HZ latency to the
 * pending events, so let the scheduler balance the softirq load for us by
 * waking ksoftirqd.
 */
static inline void wakeup_softirqd(unsigned cpu)
{
        struct task_struct *tsk = ksoftirqd_task(cpu);

        if (tsk && tsk->state != TASK_RUNNING)
                wake_up_process(tsk);
}
asmlinkage void do_softirq()
{
        __u32 pending;
        unsigned long flags;
        __u32 mask;
        int cpu;

        /* Softirq processing is not reentrant; bail out if we are already
           in hard or soft interrupt context. */
        if (in_interrupt())
                return;

        local_irq_save(flags);
        cpu = smp_processor_id();

        pending = softirq_pending(cpu);

        if (pending) {
                struct softirq_action *h;

                mask = ~pending;
                local_bh_disable();
restart:
                /* Reset the pending bitmask before enabling irqs */
                softirq_pending(cpu) = 0;

                local_irq_enable();

                h = softirq_vec;

                do {
                        if (pending & 1)
                                h->action(h);
                        h++;
                        pending >>= 1;
                } while (pending);

                local_irq_disable();

                /* Handle softirqs raised in the meantime, but do not loop
                   forever: once only re-raised softirqs remain, defer them
                   to ksoftirqd. */
                pending = softirq_pending(cpu);
                if (pending & mask) {
                        mask &= ~pending;
                        goto restart;
                }
                __local_bh_enable();

                if (pending)
                        wakeup_softirqd(cpu);
        }

        local_irq_restore(flags);
}
/*
 * This function must run with irqs disabled!
 */
inline void cpu_raise_softirq(unsigned int cpu, unsigned int nr)
{
        __cpu_raise_softirq(cpu, nr);

        /*
         * If we're in an interrupt or softirq, we're done
         * (this also catches softirq-disabled code). We will
         * actually run the softirq once we return from
         * the irq or softirq.
         *
         * Otherwise we wake up ksoftirqd to make sure we
         * schedule the softirq soon.
         */
        if (!in_interrupt())
                wakeup_softirqd(cpu);
}

void raise_softirq(unsigned int nr)
{
        unsigned long flags;

        local_irq_save(flags);
        cpu_raise_softirq(smp_processor_id(), nr);
        local_irq_restore(flags);
}

void open_softirq(int nr, void (*action)(struct softirq_action*), void *data)
{
        softirq_vec[nr].data = data;
        softirq_vec[nr].action = action;
}
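/*
 * Illustrative sketch (not part of this file): how a subsystem could claim
 * one of the 32 softirq_vec slots with open_softirq() and raise it later.
 * MY_SOFTIRQ is a made-up slot name; real slots are the enum values in
 * <linux/interrupt.h> (HI_SOFTIRQ, NET_TX_SOFTIRQ, NET_RX_SOFTIRQ, ...),
 * and my_softirq_action / my_pending_work are invented for the example.
 */
#if 0
static int my_pending_work;

static void my_softirq_action(struct softirq_action *h)
{
        int *work = (int *) h->data;

        /* Runs with irqs enabled and bottom halves disabled, on the cpu
           where the softirq was raised. */
        *work = 0;
}

static void my_subsys_init(void)
{
        open_softirq(MY_SOFTIRQ, my_softirq_action, &my_pending_work);
}

static void my_subsys_kick(void)
{
        my_pending_work = 1;
        /* Mark the softirq pending on the local cpu; outside interrupt
           context this also wakes ksoftirqd. */
        raise_softirq(MY_SOFTIRQ);
}
#endif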
/* Tasklets */
struct tasklet_head
{
        struct tasklet_struct *list;
};

/* Some compilers disobey section attribute on statics when not
   initialized -- RR */
static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec) = { NULL };
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec) = { NULL };

void __tasklet_schedule(struct tasklet_struct *t)
{
        unsigned long flags;

        /* The caller (tasklet_schedule) has already set TASKLET_STATE_SCHED;
           queue the tasklet on this cpu's list and mark TASKLET_SOFTIRQ. */
        local_irq_save(flags);
        t->next = __get_cpu_var(tasklet_vec).list;
        __get_cpu_var(tasklet_vec).list = t;
        cpu_raise_softirq(smp_processor_id(), TASKLET_SOFTIRQ);
        local_irq_restore(flags);
}

void __tasklet_hi_schedule(struct tasklet_struct *t)
{
        unsigned long flags;

        local_irq_save(flags);
        t->next = __get_cpu_var(tasklet_hi_vec).list;
        __get_cpu_var(tasklet_hi_vec).list = t;
        cpu_raise_softirq(smp_processor_id(), HI_SOFTIRQ);
        local_irq_restore(flags);
}
static void tasklet_action(struct softirq_action *a)
{
        struct tasklet_struct *list;

        /* Atomically (wrt local interrupts) take over this cpu's whole list. */
        local_irq_disable();
        list = __get_cpu_var(tasklet_vec).list;
        __get_cpu_var(tasklet_vec).list = NULL;
        local_irq_enable();

        while (list) {
                struct tasklet_struct *t = list;

                list = list->next;

                if (tasklet_trylock(t)) {
                        if (!atomic_read(&t->count)) {
                                if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
                                        BUG();
                                t->func(t->data);
                                tasklet_unlock(t);
                                continue;
                        }
                        tasklet_unlock(t);
                }

                /* Tasklet is disabled or already running on another cpu:
                   requeue it and re-raise the softirq. */
                local_irq_disable();
                t->next = __get_cpu_var(tasklet_vec).list;
                __get_cpu_var(tasklet_vec).list = t;
                __cpu_raise_softirq(smp_processor_id(), TASKLET_SOFTIRQ);
                local_irq_enable();
        }
}
static void tasklet_hi_action(struct softirq_action *a)
{
        struct tasklet_struct *list;

        local_irq_disable();
        list = __get_cpu_var(tasklet_hi_vec).list;
        __get_cpu_var(tasklet_hi_vec).list = NULL;
        local_irq_enable();

        while (list) {
                struct tasklet_struct *t = list;

                list = list->next;

                if (tasklet_trylock(t)) {
                        if (!atomic_read(&t->count)) {
                                if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
                                        BUG();
                                t->func(t->data);
                                tasklet_unlock(t);
                                continue;
                        }
                        tasklet_unlock(t);
                }

                local_irq_disable();
                t->next = __get_cpu_var(tasklet_hi_vec).list;
                __get_cpu_var(tasklet_hi_vec).list = t;
                __cpu_raise_softirq(smp_processor_id(), HI_SOFTIRQ);
                local_irq_enable();
        }
}
void tasklet_init(struct tasklet_struct *t,
                  void (*func)(unsigned long), unsigned long data)
{
        t->next = NULL;
        t->state = 0;
        atomic_set(&t->count, 0);
        t->func = func;
        t->data = data;
}

void tasklet_kill(struct tasklet_struct *t)
{
        if (in_interrupt())
                printk("Attempt to kill tasklet from interrupt\n");

        /* Wait for any scheduled run to start, grab TASKLET_STATE_SCHED so
           the tasklet cannot be queued again, wait for a running instance
           to finish, then drop the bit. */
        while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
                do
                        yield();
                while (test_bit(TASKLET_STATE_SCHED, &t->state));
        }
        tasklet_unlock_wait(t);
        clear_bit(TASKLET_STATE_SCHED, &t->state);
}
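/*
 * Illustrative sketch (not part of this file): the dynamic counterpart of
 * DECLARE_TASKLET, pairing tasklet_init() at setup time with tasklet_kill()
 * at teardown so the callback can never run on freed memory.  struct my_dev
 * and its helpers are made up, and the kmalloc/kfree calls assume the usual
 * <linux/slab.h> include.
 */
#if 0
struct my_dev {
        struct tasklet_struct rx_tasklet;
        int rx_count;
};

static void my_dev_rx(unsigned long data)
{
        struct my_dev *dev = (struct my_dev *) data;

        dev->rx_count++;
}

static struct my_dev *my_dev_setup(void)
{
        struct my_dev *dev = kmalloc(sizeof(*dev), GFP_KERNEL);

        if (dev) {
                memset(dev, 0, sizeof(*dev));
                tasklet_init(&dev->rx_tasklet, my_dev_rx, (unsigned long) dev);
        }
        return dev;
}

static void my_dev_teardown(struct my_dev *dev)
{
        /* Not from interrupt context: tasklet_kill() may yield while it
           waits for a scheduled or running instance to finish. */
        tasklet_kill(&dev->rx_tasklet);
        kfree(dev);
}
#endif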
void __init softirq_init()
{
        open_softirq(TASKLET_SOFTIRQ, tasklet_action, NULL);
        open_softirq(HI_SOFTIRQ, tasklet_hi_action, NULL);
}

static int ksoftirqd(void * __bind_cpu)
{
        int cpu = (int) (long) __bind_cpu;

        daemonize();
        set_user_nice(current, 19);
        current->flags |= PF_IOTHREAD;
        sigfillset(&current->blocked);

        /* Migrate to the right CPU */
        set_cpus_allowed(current, 1UL << cpu);
        if (smp_processor_id() != cpu)
                BUG();

        sprintf(current->comm, "ksoftirqd_CPU%d", cpu);

        __set_current_state(TASK_INTERRUPTIBLE);
        mb();

        ksoftirqd_task(cpu) = current;

        /* Run pending softirqs that were deferred to us, sleeping whenever
           nothing is pending on this cpu. */
        for (;;) {
                if (!softirq_pending(cpu))
                        schedule();

                __set_current_state(TASK_RUNNING);

                while (softirq_pending(cpu)) {
                        do_softirq();
                        cond_resched();
                }

                __set_current_state(TASK_INTERRUPTIBLE);
        }
}
static int __devinit cpu_callback(struct notifier_block *nfb,
                                  unsigned long action,
                                  void *hcpu)
{
        int hotcpu = (unsigned long)hcpu;

        if (action == CPU_ONLINE) {
                if (kernel_thread(ksoftirqd, hcpu, CLONE_KERNEL) < 0) {
                        printk("ksoftirqd for %i failed\n", hotcpu);
                        return NOTIFY_BAD;
                }

                /* Wait until the new thread has published itself via
                   ksoftirqd_task(). */
                while (!ksoftirqd_task(hotcpu))
                        yield();
                return NOTIFY_OK;
        }
        return NOTIFY_BAD;
}

static struct notifier_block cpu_nfb = { &cpu_callback, NULL, 0 };

__init int spawn_ksoftirqd(void)
{
        cpu_callback(&cpu_nfb, CPU_ONLINE, (void *)(long)smp_processor_id());
        register_cpu_notifier(&cpu_nfb);
        return 0;
}