/*
 *      linux/kernel/softirq.c
 *
 *      Copyright (C) 1992 Linus Torvalds
 *
 *      Distribute under GPLv2.
 *
 *      Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 */

#include <linux/module.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/smp.h>
#include <linux/tick.h>

#include <asm/irq.h>

/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if softirq is serialized, only local cpu is marked for
     execution. Hence, we get something sort of weak cpu binding.
     Though it is still not clear whether it will result in better
     locality or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized wrt itself.
 */

#ifndef __ARCH_IRQ_STAT
irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
EXPORT_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[32] __cacheline_aligned_in_smp;

static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

/*
 * We cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst-case 1/HZ latency
 * to the pending events, so let the scheduler balance
 * the softirq load for us.
 */
static inline void wakeup_softirqd(void)
{
        /* Interrupts are disabled: no need to stop preemption */
        struct task_struct *tsk = __get_cpu_var(ksoftirqd);

        if (tsk && tsk->state != TASK_RUNNING)
                wake_up_process(tsk);
}

/*
 * This one is for softirq.c-internal use,
 * where hardirqs are disabled legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS
static void __local_bh_disable(unsigned long ip)
{
        unsigned long flags;

        WARN_ON_ONCE(in_irq());

        raw_local_irq_save(flags);
        add_preempt_count(SOFTIRQ_OFFSET);
        /*
         * Were softirqs turned off above:
         */
        if (softirq_count() == SOFTIRQ_OFFSET)
                trace_softirqs_off(ip);
        raw_local_irq_restore(flags);
}
#else /* !CONFIG_TRACE_IRQFLAGS */
static inline void __local_bh_disable(unsigned long ip)
{
        add_preempt_count(SOFTIRQ_OFFSET);
        barrier();
}
#endif /* CONFIG_TRACE_IRQFLAGS */

void local_bh_disable(void)
{
        __local_bh_disable((unsigned long)__builtin_return_address(0));
}

EXPORT_SYMBOL(local_bh_disable);

void __local_bh_enable(void)
{
        WARN_ON_ONCE(in_irq());

        /*
         * softirqs should never be enabled by __local_bh_enable(),
         * it always nests inside local_bh_enable() sections:
         */
        WARN_ON_ONCE(softirq_count() == SOFTIRQ_OFFSET);

        sub_preempt_count(SOFTIRQ_OFFSET);
}
EXPORT_SYMBOL_GPL(__local_bh_enable);

/*
 * Special-case - softirqs can safely be enabled in
 * cond_resched_softirq(), or by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
        WARN_ON_ONCE(in_irq());
        WARN_ON_ONCE(!irqs_disabled());

        if (softirq_count() == SOFTIRQ_OFFSET)
                trace_softirqs_on((unsigned long)__builtin_return_address(0));
        sub_preempt_count(SOFTIRQ_OFFSET);
}

EXPORT_SYMBOL(_local_bh_enable);

void local_bh_enable(void)
{
#ifdef CONFIG_TRACE_IRQFLAGS
        unsigned long flags;

        WARN_ON_ONCE(in_irq());
#endif
        WARN_ON_ONCE(irqs_disabled());

#ifdef CONFIG_TRACE_IRQFLAGS
        local_irq_save(flags);
#endif
        /*
         * Are softirqs going to be turned on now:
         */
        if (softirq_count() == SOFTIRQ_OFFSET)
                trace_softirqs_on((unsigned long)__builtin_return_address(0));
        /*
         * Keep preemption disabled until we are done with
         * softirq processing:
         */
        sub_preempt_count(SOFTIRQ_OFFSET - 1);

        if (unlikely(!in_interrupt() && local_softirq_pending()))
                do_softirq();

        dec_preempt_count();
#ifdef CONFIG_TRACE_IRQFLAGS
        local_irq_restore(flags);
#endif
        preempt_check_resched();
}
EXPORT_SYMBOL(local_bh_enable);

void local_bh_enable_ip(unsigned long ip)
{
#ifdef CONFIG_TRACE_IRQFLAGS
        unsigned long flags;

        WARN_ON_ONCE(in_irq());

        local_irq_save(flags);
#endif
        /*
         * Are softirqs going to be turned on now:
         */
        if (softirq_count() == SOFTIRQ_OFFSET)
                trace_softirqs_on(ip);
        /*
         * Keep preemption disabled until we are done with
         * softirq processing:
         */
        sub_preempt_count(SOFTIRQ_OFFSET - 1);

        if (unlikely(!in_interrupt() && local_softirq_pending()))
                do_softirq();

        dec_preempt_count();
#ifdef CONFIG_TRACE_IRQFLAGS
        local_irq_restore(flags);
#endif
        preempt_check_resched();
}
EXPORT_SYMBOL(local_bh_enable_ip);

/*
 * We restart softirq processing MAX_SOFTIRQ_RESTART times,
 * and we fall back to softirqd after that.
 *
 * This number has been established via experimentation.
 * The two things to balance are latency against fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_RESTART 10

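/*
 * The core softirq loop: snapshot and clear the pending bitmask with
 * irqs off, then walk softirq_vec with irqs enabled, calling ->action
 * for every bit that was set.  Bits raised while the handlers run are
 * picked up by the restart logic above; whatever survives all the
 * restarts is deferred to ksoftirqd.
 */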
asmlinkage void __do_softirq(void)
{
        struct softirq_action *h;
        __u32 pending;
        int max_restart = MAX_SOFTIRQ_RESTART;
        int cpu;

        pending = local_softirq_pending();
        account_system_vtime(current);

        __local_bh_disable((unsigned long)__builtin_return_address(0));
        trace_softirq_enter();

        cpu = smp_processor_id();
restart:
        /* Reset the pending bitmask before enabling irqs */
        set_softirq_pending(0);

        local_irq_enable();

        h = softirq_vec;

        do {
                if (pending & 1) {
                        h->action(h);
                        rcu_bh_qsctr_inc(cpu);
                }
                h++;
                pending >>= 1;
        } while (pending);

        local_irq_disable();

        pending = local_softirq_pending();
        if (pending && --max_restart)
                goto restart;

        if (pending)
                wakeup_softirqd();

        trace_softirq_exit();

        account_system_vtime(current);
        _local_bh_enable();
}

#ifndef __ARCH_HAS_DO_SOFTIRQ

asmlinkage void do_softirq(void)
{
        __u32 pending;
        unsigned long flags;

        if (in_interrupt())
                return;

        local_irq_save(flags);

        pending = local_softirq_pending();

        if (pending)
                __do_softirq();

        local_irq_restore(flags);
}

#endif

/*
 * Enter an interrupt context.
 */
void irq_enter(void)
{
#ifdef CONFIG_NO_HZ
        int cpu = smp_processor_id();
        if (idle_cpu(cpu) && !in_interrupt())
                tick_nohz_stop_idle(cpu);
#endif
        __irq_enter();
#ifdef CONFIG_NO_HZ
        if (idle_cpu(cpu))
                tick_nohz_update_jiffies();
#endif
}

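/*
 * If the architecture guarantees that irqs are still disabled when
 * irq_exit() runs, we can invoke __do_softirq() directly and skip the
 * in_interrupt() check and irq save/restore that do_softirq() does.
 */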
#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
# define invoke_softirq()       __do_softirq()
#else
# define invoke_softirq()       do_softirq()
#endif

/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
        account_system_vtime(current);
        trace_hardirq_exit();
        sub_preempt_count(IRQ_EXIT_OFFSET);
        if (!in_interrupt() && local_softirq_pending())
                invoke_softirq();

#ifdef CONFIG_NO_HZ
        /* Make sure that timer wheel updates are propagated */
        if (!in_interrupt() && idle_cpu(smp_processor_id()) && !need_resched())
                tick_nohz_stop_sched_tick();
        rcu_irq_exit();
#endif
        preempt_enable_no_resched();
}

/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
        __raise_softirq_irqoff(nr);

        /*
         * If we're in an interrupt or softirq, we're done
         * (this also catches softirq-disabled code). We will
         * actually run the softirq once we return from
         * the irq or softirq.
         *
         * Otherwise we wake up ksoftirqd to make sure we
         * schedule the softirq soon.
         */
        if (!in_interrupt())
                wakeup_softirqd();
}

void raise_softirq(unsigned int nr)
{
        unsigned long flags;

        local_irq_save(flags);
        raise_softirq_irqoff(nr);
        local_irq_restore(flags);
}

void open_softirq(int nr, void (*action)(struct softirq_action*), void *data)
{
        softirq_vec[nr].data = data;
        softirq_vec[nr].action = action;
}

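/*
 * Typical usage (sketch): a subsystem registers its handler once at
 * init time and later raises the softirq, usually from irq context,
 * to get it run.  The networking TX path, for instance, does:
 *
 *      open_softirq(NET_TX_SOFTIRQ, net_tx_action, NULL);
 *      ...
 *      raise_softirq(NET_TX_SOFTIRQ);
 */
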
/* Tasklets */
struct tasklet_head
{
        struct tasklet_struct *head;
        struct tasklet_struct **tail;
};

/* Some compilers disobey section attribute on statics when not
   initialized -- RR */
static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec) = { NULL };
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec) = { NULL };

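/*
 * Each CPU keeps its scheduled tasklets on a singly-linked list;
 * "tail" points at the last node's next pointer, so appending is O(1)
 * and an empty list is simply tail == &head.
 */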
void __tasklet_schedule(struct tasklet_struct *t)
{
        unsigned long flags;

        local_irq_save(flags);
        t->next = NULL;
        *__get_cpu_var(tasklet_vec).tail = t;
        __get_cpu_var(tasklet_vec).tail = &(t->next);
        raise_softirq_irqoff(TASKLET_SOFTIRQ);
        local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_schedule);

void __tasklet_hi_schedule(struct tasklet_struct *t)
{
        unsigned long flags;

        local_irq_save(flags);
        t->next = NULL;
        *__get_cpu_var(tasklet_hi_vec).tail = t;
        __get_cpu_var(tasklet_hi_vec).tail = &(t->next);
        raise_softirq_irqoff(HI_SOFTIRQ);
        local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_hi_schedule);

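/*
 * The TASKLET_SOFTIRQ handler: detach the whole per-cpu list with irqs
 * off, then run every tasklet whose trylock succeeds and whose disable
 * count is zero.  A tasklet that is disabled, or still running on
 * another CPU, is put back on the list and the softirq is re-raised
 * so it gets another pass later.
 */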
static void tasklet_action(struct softirq_action *a)
{
        struct tasklet_struct *list;

        local_irq_disable();
        list = __get_cpu_var(tasklet_vec).head;
        __get_cpu_var(tasklet_vec).head = NULL;
        __get_cpu_var(tasklet_vec).tail = &__get_cpu_var(tasklet_vec).head;
        local_irq_enable();

        while (list) {
                struct tasklet_struct *t = list;

                list = list->next;

                if (tasklet_trylock(t)) {
                        if (!atomic_read(&t->count)) {
                                if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
                                        BUG();
                                t->func(t->data);
                                tasklet_unlock(t);
                                continue;
                        }
                        tasklet_unlock(t);
                }

                local_irq_disable();
                t->next = NULL;
                *__get_cpu_var(tasklet_vec).tail = t;
                __get_cpu_var(tasklet_vec).tail = &(t->next);
                __raise_softirq_irqoff(TASKLET_SOFTIRQ);
                local_irq_enable();
        }
}

static void tasklet_hi_action(struct softirq_action *a)
{
        struct tasklet_struct *list;

        local_irq_disable();
        list = __get_cpu_var(tasklet_hi_vec).head;
        __get_cpu_var(tasklet_hi_vec).head = NULL;
        __get_cpu_var(tasklet_hi_vec).tail = &__get_cpu_var(tasklet_hi_vec).head;
        local_irq_enable();

        while (list) {
                struct tasklet_struct *t = list;

                list = list->next;

                if (tasklet_trylock(t)) {
                        if (!atomic_read(&t->count)) {
                                if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
                                        BUG();
                                t->func(t->data);
                                tasklet_unlock(t);
                                continue;
                        }
                        tasklet_unlock(t);
                }

                local_irq_disable();
                t->next = NULL;
                *__get_cpu_var(tasklet_hi_vec).tail = t;
                __get_cpu_var(tasklet_hi_vec).tail = &(t->next);
                __raise_softirq_irqoff(HI_SOFTIRQ);
                local_irq_enable();
        }
}

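/*
 * tasklet_init - fill in a tasklet before first use.  A count of zero
 * means the tasklet starts out enabled.
 */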
void tasklet_init(struct tasklet_struct *t,
                  void (*func)(unsigned long), unsigned long data)
{
        t->next = NULL;
        t->state = 0;
        atomic_set(&t->count, 0);
        t->func = func;
        t->data = data;
}

EXPORT_SYMBOL(tasklet_init);

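/*
 * Typical driver usage (sketch; my_tasklet and my_tasklet_fn are
 * hypothetical names): initialize the tasklet once, schedule it from
 * the interrupt handler, and kill it on teardown:
 *
 *      tasklet_init(&my_tasklet, my_tasklet_fn, 0);    (at probe time)
 *      tasklet_schedule(&my_tasklet);                  (from the irq handler)
 *      tasklet_kill(&my_tasklet);                      (at remove time)
 */
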
void tasklet_kill(struct tasklet_struct *t)
{
        if (in_interrupt())
                printk("Attempt to kill tasklet from interrupt\n");

        while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
                do
                        yield();
                while (test_bit(TASKLET_STATE_SCHED, &t->state));
        }
        tasklet_unlock_wait(t);
        clear_bit(TASKLET_STATE_SCHED, &t->state);
}

EXPORT_SYMBOL(tasklet_kill);

void __init softirq_init(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                per_cpu(tasklet_vec, cpu).tail =
                        &per_cpu(tasklet_vec, cpu).head;
                per_cpu(tasklet_hi_vec, cpu).tail =
                        &per_cpu(tasklet_hi_vec, cpu).head;
        }

        open_softirq(TASKLET_SOFTIRQ, tasklet_action, NULL);
        open_softirq(HI_SOFTIRQ, tasklet_hi_action, NULL);
}

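/*
 * Per-cpu softirq daemon.  It sleeps until wakeup_softirqd() kicks it,
 * then keeps calling do_softirq() - briefly re-enabling preemption
 * between rounds so it cannot starve other tasks - until nothing is
 * pending on this CPU.
 */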
static int ksoftirqd(void * __bind_cpu)
{
        set_current_state(TASK_INTERRUPTIBLE);

        while (!kthread_should_stop()) {
                preempt_disable();
                if (!local_softirq_pending()) {
                        preempt_enable_no_resched();
                        schedule();
                        preempt_disable();
                }

                __set_current_state(TASK_RUNNING);

                while (local_softirq_pending()) {
                        /* Preempt disable stops cpu going offline.
                           If already offline, we'll be on wrong CPU:
                           don't process */
                        if (cpu_is_offline((long)__bind_cpu))
                                goto wait_to_die;
                        do_softirq();
                        preempt_enable_no_resched();
                        cond_resched();
                        preempt_disable();
                }
                preempt_enable();
                set_current_state(TASK_INTERRUPTIBLE);
        }
        __set_current_state(TASK_RUNNING);
        return 0;

wait_to_die:
        preempt_enable();
        /* Wait for kthread_stop */
        set_current_state(TASK_INTERRUPTIBLE);
        while (!kthread_should_stop()) {
                schedule();
                set_current_state(TASK_INTERRUPTIBLE);
        }
        __set_current_state(TASK_RUNNING);
        return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which can already be
 * scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
        struct tasklet_struct **i;

        BUG_ON(cpu_online(cpu));
        BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

        if (!test_bit(TASKLET_STATE_SCHED, &t->state))
                return;

        /* CPU is dead, so no lock needed. */
        for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
                if (*i == t) {
                        *i = t->next;
                        /* If this was the tail element, move the tail ptr */
                        if (*i == NULL)
                                per_cpu(tasklet_vec, cpu).tail = i;
                        return;
                }
        }
        BUG();
}

static void takeover_tasklets(unsigned int cpu)
{
        /* CPU is dead, so no lock needed. */
        local_irq_disable();

        /* Find end, append list for that CPU. */
        if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
                *(__get_cpu_var(tasklet_vec).tail) = per_cpu(tasklet_vec, cpu).head;
                __get_cpu_var(tasklet_vec).tail = per_cpu(tasklet_vec, cpu).tail;
                per_cpu(tasklet_vec, cpu).head = NULL;
                per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
        }
        raise_softirq_irqoff(TASKLET_SOFTIRQ);

        if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
                *__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).head;
                __get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).tail;
                per_cpu(tasklet_hi_vec, cpu).head = NULL;
                per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
        }
        raise_softirq_irqoff(HI_SOFTIRQ);

        local_irq_enable();
}
#endif /* CONFIG_HOTPLUG_CPU */

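/*
 * CPU hotplug callback: create and bind a ksoftirqd thread when a CPU
 * is being brought up, wake it once the CPU is online, and on CPU
 * death stop the thread and take over any tasklets still queued on
 * the dead CPU.
 */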
static int __cpuinit cpu_callback(struct notifier_block *nfb,
                                  unsigned long action,
                                  void *hcpu)
{
        int hotcpu = (unsigned long)hcpu;
        struct task_struct *p;

        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
                p = kthread_create(ksoftirqd, hcpu, "ksoftirqd/%d", hotcpu);
                if (IS_ERR(p)) {
                        printk("ksoftirqd for %i failed\n", hotcpu);
                        return NOTIFY_BAD;
                }
                kthread_bind(p, hotcpu);
                per_cpu(ksoftirqd, hotcpu) = p;
                break;
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
                wake_up_process(per_cpu(ksoftirqd, hotcpu));
                break;
#ifdef CONFIG_HOTPLUG_CPU
        case CPU_UP_CANCELED:
        case CPU_UP_CANCELED_FROZEN:
                if (!per_cpu(ksoftirqd, hotcpu))
                        break;
                /* Unbind so it can run.  Fall thru. */
                kthread_bind(per_cpu(ksoftirqd, hotcpu),
                             any_online_cpu(cpu_online_map));
        case CPU_DEAD:
        case CPU_DEAD_FROZEN: {
                struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

                p = per_cpu(ksoftirqd, hotcpu);
                per_cpu(ksoftirqd, hotcpu) = NULL;
                sched_setscheduler(p, SCHED_FIFO, &param);
                kthread_stop(p);
                takeover_tasklets(hotcpu);
                break;
        }
#endif /* CONFIG_HOTPLUG_CPU */
        }
        return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata cpu_nfb = {
        .notifier_call = cpu_callback
};

__init int spawn_ksoftirqd(void)
{
        void *cpu = (void *)(long)smp_processor_id();
        int err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);

        BUG_ON(err == NOTIFY_BAD);
        cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
        register_cpu_notifier(&cpu_nfb);
        return 0;
}

#ifdef CONFIG_SMP
/*
 * Call a function on all processors
 */
int on_each_cpu(void (*func) (void *info), void *info, int retry, int wait)
{
        int ret = 0;

        preempt_disable();
        ret = smp_call_function(func, info, retry, wait);
        local_irq_disable();
        func(info);
        local_irq_enable();
        preempt_enable();
        return ret;
}
EXPORT_SYMBOL(on_each_cpu);
#endif