Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/sparc-2.6
[linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git] / kernel / softirq.c
blob336f92d64e2ec04bb56d7146a4fc1e1eeb1141f5
/*
 *	linux/kernel/softirq.c
 *
 *	Copyright (C) 1992 Linus Torvalds
 *
 * Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 */
9 #include <linux/module.h>
10 #include <linux/kernel_stat.h>
11 #include <linux/interrupt.h>
12 #include <linux/init.h>
13 #include <linux/mm.h>
14 #include <linux/notifier.h>
15 #include <linux/percpu.h>
16 #include <linux/cpu.h>
17 #include <linux/kthread.h>
18 #include <linux/rcupdate.h>
19 #include <linux/smp.h>
21 #include <asm/irq.h>
/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if softirq is serialized, only local cpu is marked for
     execution. Hence, we get something sort of weak cpu binding.
     Though it is still not clear, will it result in better locality
     or will not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized wrt itself.
 */
40 #ifndef __ARCH_IRQ_STAT
41 irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
42 EXPORT_SYMBOL(irq_stat);
43 #endif
45 static struct softirq_action softirq_vec[32] __cacheline_aligned_in_smp;
47 static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
50 * we cannot loop indefinitely here to avoid userspace starvation,
51 * but we also don't want to introduce a worst case 1/HZ latency
52 * to the pending events, so lets the scheduler to balance
53 * the softirq load for us.
55 static inline void wakeup_softirqd(void)
57 /* Interrupts are disabled: no need to stop preemption */
58 struct task_struct *tsk = __get_cpu_var(ksoftirqd);
60 if (tsk && tsk->state != TASK_RUNNING)
61 wake_up_process(tsk);
65 * We restart softirq processing MAX_SOFTIRQ_RESTART times,
66 * and we fall back to softirqd after that.
68 * This number has been established via experimentation.
69 * The two things to balance is latency against fairness -
70 * we want to handle softirqs as soon as possible, but they
71 * should not be able to lock up the box.
73 #define MAX_SOFTIRQ_RESTART 10
75 asmlinkage void __do_softirq(void)
77 struct softirq_action *h;
78 __u32 pending;
79 int max_restart = MAX_SOFTIRQ_RESTART;
80 int cpu;
82 pending = local_softirq_pending();
84 local_bh_disable();
85 cpu = smp_processor_id();
86 restart:
87 /* Reset the pending bitmask before enabling irqs */
88 set_softirq_pending(0);
90 local_irq_enable();
92 h = softirq_vec;
94 do {
95 if (pending & 1) {
96 h->action(h);
97 rcu_bh_qsctr_inc(cpu);
99 h++;
100 pending >>= 1;
101 } while (pending);
103 local_irq_disable();
105 pending = local_softirq_pending();
106 if (pending && --max_restart)
107 goto restart;
109 if (pending)
110 wakeup_softirqd();
112 __local_bh_enable();
115 #ifndef __ARCH_HAS_DO_SOFTIRQ
117 asmlinkage void do_softirq(void)
119 __u32 pending;
120 unsigned long flags;
122 if (in_interrupt())
123 return;
125 local_irq_save(flags);
127 pending = local_softirq_pending();
129 if (pending)
130 __do_softirq();
132 local_irq_restore(flags);
135 EXPORT_SYMBOL(do_softirq);
137 #endif
139 void local_bh_enable(void)
141 WARN_ON(irqs_disabled());
143 * Keep preemption disabled until we are done with
144 * softirq processing:
146 sub_preempt_count(SOFTIRQ_OFFSET - 1);
148 if (unlikely(!in_interrupt() && local_softirq_pending()))
149 do_softirq();
151 dec_preempt_count();
152 preempt_check_resched();
154 EXPORT_SYMBOL(local_bh_enable);
/* If the arch exits irqs with interrupts still disabled we can call
   __do_softirq() directly; otherwise go through do_softirq(). */
#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
# define invoke_softirq()	__do_softirq()
#else
# define invoke_softirq()	do_softirq()
#endif
163 * Exit an interrupt context. Process softirqs if needed and possible:
165 void irq_exit(void)
167 account_system_vtime(current);
168 sub_preempt_count(IRQ_EXIT_OFFSET);
169 if (!in_interrupt() && local_softirq_pending())
170 invoke_softirq();
171 preempt_enable_no_resched();
175 * This function must run with irqs disabled!
177 inline fastcall void raise_softirq_irqoff(unsigned int nr)
179 __raise_softirq_irqoff(nr);
182 * If we're in an interrupt or softirq, we're done
183 * (this also catches softirq-disabled code). We will
184 * actually run the softirq once we return from
185 * the irq or softirq.
187 * Otherwise we wake up ksoftirqd to make sure we
188 * schedule the softirq soon.
190 if (!in_interrupt())
191 wakeup_softirqd();
194 EXPORT_SYMBOL(raise_softirq_irqoff);
196 void fastcall raise_softirq(unsigned int nr)
198 unsigned long flags;
200 local_irq_save(flags);
201 raise_softirq_irqoff(nr);
202 local_irq_restore(flags);
205 void open_softirq(int nr, void (*action)(struct softirq_action*), void *data)
207 softirq_vec[nr].data = data;
208 softirq_vec[nr].action = action;
211 EXPORT_SYMBOL(open_softirq);
213 /* Tasklets */
214 struct tasklet_head
216 struct tasklet_struct *list;
219 /* Some compilers disobey section attribute on statics when not
220 initialized -- RR */
221 static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec) = { NULL };
222 static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec) = { NULL };
224 void fastcall __tasklet_schedule(struct tasklet_struct *t)
226 unsigned long flags;
228 local_irq_save(flags);
229 t->next = __get_cpu_var(tasklet_vec).list;
230 __get_cpu_var(tasklet_vec).list = t;
231 raise_softirq_irqoff(TASKLET_SOFTIRQ);
232 local_irq_restore(flags);
235 EXPORT_SYMBOL(__tasklet_schedule);
237 void fastcall __tasklet_hi_schedule(struct tasklet_struct *t)
239 unsigned long flags;
241 local_irq_save(flags);
242 t->next = __get_cpu_var(tasklet_hi_vec).list;
243 __get_cpu_var(tasklet_hi_vec).list = t;
244 raise_softirq_irqoff(HI_SOFTIRQ);
245 local_irq_restore(flags);
248 EXPORT_SYMBOL(__tasklet_hi_schedule);
250 static void tasklet_action(struct softirq_action *a)
252 struct tasklet_struct *list;
254 local_irq_disable();
255 list = __get_cpu_var(tasklet_vec).list;
256 __get_cpu_var(tasklet_vec).list = NULL;
257 local_irq_enable();
259 while (list) {
260 struct tasklet_struct *t = list;
262 list = list->next;
264 if (tasklet_trylock(t)) {
265 if (!atomic_read(&t->count)) {
266 if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
267 BUG();
268 t->func(t->data);
269 tasklet_unlock(t);
270 continue;
272 tasklet_unlock(t);
275 local_irq_disable();
276 t->next = __get_cpu_var(tasklet_vec).list;
277 __get_cpu_var(tasklet_vec).list = t;
278 __raise_softirq_irqoff(TASKLET_SOFTIRQ);
279 local_irq_enable();
283 static void tasklet_hi_action(struct softirq_action *a)
285 struct tasklet_struct *list;
287 local_irq_disable();
288 list = __get_cpu_var(tasklet_hi_vec).list;
289 __get_cpu_var(tasklet_hi_vec).list = NULL;
290 local_irq_enable();
292 while (list) {
293 struct tasklet_struct *t = list;
295 list = list->next;
297 if (tasklet_trylock(t)) {
298 if (!atomic_read(&t->count)) {
299 if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
300 BUG();
301 t->func(t->data);
302 tasklet_unlock(t);
303 continue;
305 tasklet_unlock(t);
308 local_irq_disable();
309 t->next = __get_cpu_var(tasklet_hi_vec).list;
310 __get_cpu_var(tasklet_hi_vec).list = t;
311 __raise_softirq_irqoff(HI_SOFTIRQ);
312 local_irq_enable();
317 void tasklet_init(struct tasklet_struct *t,
318 void (*func)(unsigned long), unsigned long data)
320 t->next = NULL;
321 t->state = 0;
322 atomic_set(&t->count, 0);
323 t->func = func;
324 t->data = data;
327 EXPORT_SYMBOL(tasklet_init);
329 void tasklet_kill(struct tasklet_struct *t)
331 if (in_interrupt())
332 printk("Attempt to kill tasklet from interrupt\n");
334 while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
336 yield();
337 while (test_bit(TASKLET_STATE_SCHED, &t->state));
339 tasklet_unlock_wait(t);
340 clear_bit(TASKLET_STATE_SCHED, &t->state);
343 EXPORT_SYMBOL(tasklet_kill);
345 void __init softirq_init(void)
347 open_softirq(TASKLET_SOFTIRQ, tasklet_action, NULL);
348 open_softirq(HI_SOFTIRQ, tasklet_hi_action, NULL);
351 static int ksoftirqd(void * __bind_cpu)
353 set_user_nice(current, 19);
354 current->flags |= PF_NOFREEZE;
356 set_current_state(TASK_INTERRUPTIBLE);
358 while (!kthread_should_stop()) {
359 preempt_disable();
360 if (!local_softirq_pending()) {
361 preempt_enable_no_resched();
362 schedule();
363 preempt_disable();
366 __set_current_state(TASK_RUNNING);
368 while (local_softirq_pending()) {
369 /* Preempt disable stops cpu going offline.
370 If already offline, we'll be on wrong CPU:
371 don't process */
372 if (cpu_is_offline((long)__bind_cpu))
373 goto wait_to_die;
374 do_softirq();
375 preempt_enable_no_resched();
376 cond_resched();
377 preempt_disable();
379 preempt_enable();
380 set_current_state(TASK_INTERRUPTIBLE);
382 __set_current_state(TASK_RUNNING);
383 return 0;
385 wait_to_die:
386 preempt_enable();
387 /* Wait for kthread_stop */
388 set_current_state(TASK_INTERRUPTIBLE);
389 while (!kthread_should_stop()) {
390 schedule();
391 set_current_state(TASK_INTERRUPTIBLE);
393 __set_current_state(TASK_RUNNING);
394 return 0;
397 #ifdef CONFIG_HOTPLUG_CPU
399 * tasklet_kill_immediate is called to remove a tasklet which can already be
400 * scheduled for execution on @cpu.
402 * Unlike tasklet_kill, this function removes the tasklet
403 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
405 * When this function is called, @cpu must be in the CPU_DEAD state.
407 void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
409 struct tasklet_struct **i;
411 BUG_ON(cpu_online(cpu));
412 BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));
414 if (!test_bit(TASKLET_STATE_SCHED, &t->state))
415 return;
417 /* CPU is dead, so no lock needed. */
418 for (i = &per_cpu(tasklet_vec, cpu).list; *i; i = &(*i)->next) {
419 if (*i == t) {
420 *i = t->next;
421 return;
424 BUG();
427 static void takeover_tasklets(unsigned int cpu)
429 struct tasklet_struct **i;
431 /* CPU is dead, so no lock needed. */
432 local_irq_disable();
434 /* Find end, append list for that CPU. */
435 for (i = &__get_cpu_var(tasklet_vec).list; *i; i = &(*i)->next);
436 *i = per_cpu(tasklet_vec, cpu).list;
437 per_cpu(tasklet_vec, cpu).list = NULL;
438 raise_softirq_irqoff(TASKLET_SOFTIRQ);
440 for (i = &__get_cpu_var(tasklet_hi_vec).list; *i; i = &(*i)->next);
441 *i = per_cpu(tasklet_hi_vec, cpu).list;
442 per_cpu(tasklet_hi_vec, cpu).list = NULL;
443 raise_softirq_irqoff(HI_SOFTIRQ);
445 local_irq_enable();
447 #endif /* CONFIG_HOTPLUG_CPU */
449 static int cpu_callback(struct notifier_block *nfb,
450 unsigned long action,
451 void *hcpu)
453 int hotcpu = (unsigned long)hcpu;
454 struct task_struct *p;
456 switch (action) {
457 case CPU_UP_PREPARE:
458 BUG_ON(per_cpu(tasklet_vec, hotcpu).list);
459 BUG_ON(per_cpu(tasklet_hi_vec, hotcpu).list);
460 p = kthread_create(ksoftirqd, hcpu, "ksoftirqd/%d", hotcpu);
461 if (IS_ERR(p)) {
462 printk("ksoftirqd for %i failed\n", hotcpu);
463 return NOTIFY_BAD;
465 kthread_bind(p, hotcpu);
466 per_cpu(ksoftirqd, hotcpu) = p;
467 break;
468 case CPU_ONLINE:
469 wake_up_process(per_cpu(ksoftirqd, hotcpu));
470 break;
471 #ifdef CONFIG_HOTPLUG_CPU
472 case CPU_UP_CANCELED:
473 /* Unbind so it can run. Fall thru. */
474 kthread_bind(per_cpu(ksoftirqd, hotcpu),
475 any_online_cpu(cpu_online_map));
476 case CPU_DEAD:
477 p = per_cpu(ksoftirqd, hotcpu);
478 per_cpu(ksoftirqd, hotcpu) = NULL;
479 kthread_stop(p);
480 takeover_tasklets(hotcpu);
481 break;
482 #endif /* CONFIG_HOTPLUG_CPU */
484 return NOTIFY_OK;
487 static struct notifier_block cpu_nfb = {
488 .notifier_call = cpu_callback
491 __init int spawn_ksoftirqd(void)
493 void *cpu = (void *)(long)smp_processor_id();
494 cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
495 cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
496 register_cpu_notifier(&cpu_nfb);
497 return 0;
#ifdef CONFIG_SMP
/*
 * Call a function on all processors
 */
int on_each_cpu(void (*func) (void *info), void *info, int retry, int wait)
{
	int ret = 0;

	preempt_disable();
	/* Run on all other CPUs, then locally with irqs off to match
	   the IPI context the other CPUs see. */
	ret = smp_call_function(func, info, retry, wait);
	local_irq_disable();
	func(info);
	local_irq_enable();
	preempt_enable();
	return ret;
}
EXPORT_SYMBOL(on_each_cpu);
#endif