/*
 *	linux/kernel/softirq.c
 *
 *	Copyright (C) 1992 Linus Torvalds
 *
 *	Distribute under GPLv2.
 *
 *	Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 *
 *	Remote softirq infrastructure is by Jens Axboe.
 */
#include <linux/module.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/tick.h>

#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>

#include <asm/irq.h>
/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if softirq is serialized, only local cpu is marked for
     execution. Hence, we get something sort of weak cpu binding.
     Though it is still not clear whether this will result in better
     locality or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized wrt itself.
 */
#ifndef __ARCH_IRQ_STAT
irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
EXPORT_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

char *softirq_to_name[NR_SOFTIRQS] = {
        "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
        "TASKLET", "SCHED", "HRTIMER", "RCU"
};
/*
 * We cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so let the scheduler balance
 * the softirq load for us.
 */
static void wakeup_softirqd(void)
{
        /* Interrupts are disabled: no need to stop preemption */
        struct task_struct *tsk = __this_cpu_read(ksoftirqd);

        if (tsk && tsk->state != TASK_RUNNING)
                wake_up_process(tsk);
}
/*
 * preempt_count and SOFTIRQ_OFFSET usage:
 * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
 *   softirq processing.
 * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
 *   on local_bh_disable or local_bh_enable.
 * This lets us distinguish between whether we are currently processing
 * softirq and whether we just have bh disabled.
 */
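/*
 * Illustrative sketch (assumes the usual helpers from <linux/hardirq.h>;
 * a rough picture, not a definitive statement of the API): the distinction
 * above is what lets in_softirq() and in_serving_softirq() report
 * different things.
 *
 *	local_bh_disable();
 *		softirq_count() == SOFTIRQ_DISABLE_OFFSET here, so
 *		in_softirq() is true but in_serving_softirq() is false.
 *	local_bh_enable();
 *
 *	While __do_softirq() runs a handler, SOFTIRQ_OFFSET itself is set,
 *	so in_softirq() and in_serving_softirq() are both true.
 */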
/*
 * This one is for softirq.c-internal use,
 * where hardirqs are disabled legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS
static void __local_bh_disable(unsigned long ip, unsigned int cnt)
{
        unsigned long flags;

        WARN_ON_ONCE(in_irq());

        raw_local_irq_save(flags);
        /*
         * The preempt tracer hooks into add_preempt_count and will break
         * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
         * is set and before current->softirq_enabled is cleared.
         * We must manually increment preempt_count here and manually
         * call the trace_preempt_off later.
         */
        preempt_count() += cnt;
        /*
         * Were softirqs turned off above:
         */
        if (softirq_count() == cnt)
                trace_softirqs_off(ip);
        raw_local_irq_restore(flags);

        if (preempt_count() == cnt)
                trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
}
#else /* !CONFIG_TRACE_IRQFLAGS */
static inline void __local_bh_disable(unsigned long ip, unsigned int cnt)
{
        add_preempt_count(cnt);
        barrier();
}
#endif /* CONFIG_TRACE_IRQFLAGS */
void local_bh_disable(void)
{
        __local_bh_disable((unsigned long)__builtin_return_address(0),
                           SOFTIRQ_DISABLE_OFFSET);
}

EXPORT_SYMBOL(local_bh_disable);
static void __local_bh_enable(unsigned int cnt)
{
        WARN_ON_ONCE(in_irq());
        WARN_ON_ONCE(!irqs_disabled());

        if (softirq_count() == cnt)
                trace_softirqs_on((unsigned long)__builtin_return_address(0));
        sub_preempt_count(cnt);
}
/*
 * Special-case - softirqs can safely be enabled in
 * cond_resched_softirq(), or by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
        __local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
}

EXPORT_SYMBOL(_local_bh_enable);
static inline void _local_bh_enable_ip(unsigned long ip)
{
        WARN_ON_ONCE(in_irq() || irqs_disabled());
#ifdef CONFIG_TRACE_IRQFLAGS
        local_irq_disable();
#endif
        /*
         * Are softirqs going to be turned on now:
         */
        if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
                trace_softirqs_on(ip);
        /*
         * Keep preemption disabled until we are done with
         * softirq processing:
         */
        sub_preempt_count(SOFTIRQ_DISABLE_OFFSET - 1);

        if (unlikely(!in_interrupt() && local_softirq_pending()))
                do_softirq();

        dec_preempt_count();
#ifdef CONFIG_TRACE_IRQFLAGS
        local_irq_enable();
#endif
        preempt_check_resched();
}
void local_bh_enable(void)
{
        _local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
EXPORT_SYMBOL(local_bh_enable);

void local_bh_enable_ip(unsigned long ip)
{
        _local_bh_enable_ip(ip);
}
EXPORT_SYMBOL(local_bh_enable_ip);
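/*
 * Usage sketch (illustrative only; my_data and my_softirq_safe_update() are
 * hypothetical names): callers pair local_bh_disable() with
 * local_bh_enable() around data shared with softirq handlers.
 *
 *	local_bh_disable();
 *	my_softirq_safe_update(&my_data);  // no softirq can run on this CPU
 *	local_bh_enable();                 // runs softirqs that became pending
 */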
/*
 * We restart softirq processing MAX_SOFTIRQ_RESTART times,
 * and we fall back to ksoftirqd after that.
 *
 * This number has been established via experimentation.
 * The two things to balance are latency and fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_RESTART 10
asmlinkage void __do_softirq(void)
{
        struct softirq_action *h;
        __u32 pending;
        int max_restart = MAX_SOFTIRQ_RESTART;
        int cpu;

        pending = local_softirq_pending();
        account_system_vtime(current);

        __local_bh_disable((unsigned long)__builtin_return_address(0),
                           SOFTIRQ_OFFSET);
        lockdep_softirq_enter();

        cpu = smp_processor_id();
restart:
        /* Reset the pending bitmask before enabling irqs */
        set_softirq_pending(0);

        local_irq_enable();

        h = softirq_vec;

        do {
                if (pending & 1) {
                        unsigned int vec_nr = h - softirq_vec;
                        int prev_count = preempt_count();

                        kstat_incr_softirqs_this_cpu(vec_nr);

                        trace_softirq_entry(vec_nr);
                        h->action(h);
                        trace_softirq_exit(vec_nr);
                        if (unlikely(prev_count != preempt_count())) {
                                printk(KERN_ERR "huh, entered softirq %u %s %p "
                                       "with preempt_count %08x, "
                                       "exited with %08x?\n", vec_nr,
                                       softirq_to_name[vec_nr], h->action,
                                       prev_count, preempt_count());
                                preempt_count() = prev_count;
                        }

                        rcu_bh_qs(cpu);
                }
                h++;
                pending >>= 1;
        } while (pending);

        local_irq_disable();

        pending = local_softirq_pending();
        if (pending && --max_restart)
                goto restart;

        if (pending)
                wakeup_softirqd();

        lockdep_softirq_exit();

        account_system_vtime(current);
        __local_bh_enable(SOFTIRQ_OFFSET);
}
#ifndef __ARCH_HAS_DO_SOFTIRQ

asmlinkage void do_softirq(void)
{
        __u32 pending;
        unsigned long flags;

        if (in_interrupt())
                return;

        local_irq_save(flags);

        pending = local_softirq_pending();

        if (pending)
                __do_softirq();

        local_irq_restore(flags);
}

#endif
/*
 * Enter an interrupt context.
 */
void irq_enter(void)
{
        int cpu = smp_processor_id();

        rcu_irq_enter();
        if (idle_cpu(cpu) && !in_interrupt()) {
                /*
                 * Prevent raise_softirq from needlessly waking up ksoftirqd
                 * here, as softirq will be serviced on return from interrupt.
                 */
                local_bh_disable();
                tick_check_idle(cpu);
                _local_bh_enable();
        }

        __irq_enter();
}
#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
static inline void invoke_softirq(void)
{
        if (!force_irqthreads)
                __do_softirq();
        else
                wakeup_softirqd();
}
#else
static inline void invoke_softirq(void)
{
        if (!force_irqthreads)
                do_softirq();
        else
                wakeup_softirqd();
}
#endif
/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
        account_system_vtime(current);
        trace_hardirq_exit();
        sub_preempt_count(IRQ_EXIT_OFFSET);
        if (!in_interrupt() && local_softirq_pending())
                invoke_softirq();

        rcu_irq_exit();
#ifdef CONFIG_NO_HZ
        /* Make sure that timer wheel updates are propagated */
        if (idle_cpu(smp_processor_id()) && !in_interrupt() && !need_resched())
                tick_nohz_stop_sched_tick(0);
#endif
        preempt_enable_no_resched();
}
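/*
 * Usage sketch (illustrative only): architecture interrupt entry code is
 * expected to bracket hardirq handling with irq_enter()/irq_exit(), roughly:
 *
 *	irq_enter();
 *	generic_handle_irq(irq);	// dispatch the mapped Linux irq
 *	irq_exit();			// may run pending softirqs here
 */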
/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
        __raise_softirq_irqoff(nr);

        /*
         * If we're in an interrupt or softirq, we're done
         * (this also catches softirq-disabled code). We will
         * actually run the softirq once we return from
         * the irq or softirq.
         *
         * Otherwise we wake up ksoftirqd to make sure we
         * schedule the softirq soon.
         */
        if (!in_interrupt())
                wakeup_softirqd();
}
void raise_softirq(unsigned int nr)
{
        unsigned long flags;

        local_irq_save(flags);
        raise_softirq_irqoff(nr);
        local_irq_restore(flags);
}

void open_softirq(int nr, void (*action)(struct softirq_action *))
{
        softirq_vec[nr].action = action;
}
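/*
 * Usage sketch (illustrative only; my_softirq_handler is a hypothetical
 * function, and in the real tree each vector already has an owner, e.g.
 * BLOCK_SOFTIRQ belongs to the block layer): a subsystem registers a handler
 * for one of the fixed NR_SOFTIRQS vectors at init time and raises that
 * vector when it has work.
 *
 *	static void my_softirq_handler(struct softirq_action *h)
 *	{
 *		// drain this CPU's pending work; runs in softirq context
 *	}
 *
 *	open_softirq(BLOCK_SOFTIRQ, my_softirq_handler);	// at init
 *	raise_softirq(BLOCK_SOFTIRQ);				// when work arrives
 */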
/*
 * Tasklets
 */
struct tasklet_head
{
        struct tasklet_struct *head;
        struct tasklet_struct **tail;
};

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);
void __tasklet_schedule(struct tasklet_struct *t)
{
        unsigned long flags;

        local_irq_save(flags);
        t->next = NULL;
        *__this_cpu_read(tasklet_vec.tail) = t;
        __this_cpu_write(tasklet_vec.tail, &(t->next));
        raise_softirq_irqoff(TASKLET_SOFTIRQ);
        local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_schedule);
void __tasklet_hi_schedule(struct tasklet_struct *t)
{
        unsigned long flags;

        local_irq_save(flags);
        t->next = NULL;
        *__this_cpu_read(tasklet_hi_vec.tail) = t;
        __this_cpu_write(tasklet_hi_vec.tail, &(t->next));
        raise_softirq_irqoff(HI_SOFTIRQ);
        local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_hi_schedule);
void __tasklet_hi_schedule_first(struct tasklet_struct *t)
{
        BUG_ON(!irqs_disabled());

        t->next = __this_cpu_read(tasklet_hi_vec.head);
        __this_cpu_write(tasklet_hi_vec.head, t);
        __raise_softirq_irqoff(HI_SOFTIRQ);
}

EXPORT_SYMBOL(__tasklet_hi_schedule_first);
static void tasklet_action(struct softirq_action *a)
{
        struct tasklet_struct *list;

        local_irq_disable();
        list = __this_cpu_read(tasklet_vec.head);
        __this_cpu_write(tasklet_vec.head, NULL);
        __this_cpu_write(tasklet_vec.tail, &__get_cpu_var(tasklet_vec).head);
        local_irq_enable();

        while (list) {
                struct tasklet_struct *t = list;

                list = list->next;

                if (tasklet_trylock(t)) {
                        if (!atomic_read(&t->count)) {
                                if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
                                        BUG();
                                t->func(t->data);
                                tasklet_unlock(t);
                                continue;
                        }
                        tasklet_unlock(t);
                }

                local_irq_disable();
                t->next = NULL;
                *__this_cpu_read(tasklet_vec.tail) = t;
                __this_cpu_write(tasklet_vec.tail, &(t->next));
                __raise_softirq_irqoff(TASKLET_SOFTIRQ);
                local_irq_enable();
        }
}
static void tasklet_hi_action(struct softirq_action *a)
{
        struct tasklet_struct *list;

        local_irq_disable();
        list = __this_cpu_read(tasklet_hi_vec.head);
        __this_cpu_write(tasklet_hi_vec.head, NULL);
        __this_cpu_write(tasklet_hi_vec.tail, &__get_cpu_var(tasklet_hi_vec).head);
        local_irq_enable();

        while (list) {
                struct tasklet_struct *t = list;

                list = list->next;

                if (tasklet_trylock(t)) {
                        if (!atomic_read(&t->count)) {
                                if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
                                        BUG();
                                t->func(t->data);
                                tasklet_unlock(t);
                                continue;
                        }
                        tasklet_unlock(t);
                }

                local_irq_disable();
                t->next = NULL;
                *__this_cpu_read(tasklet_hi_vec.tail) = t;
                __this_cpu_write(tasklet_hi_vec.tail, &(t->next));
                __raise_softirq_irqoff(HI_SOFTIRQ);
                local_irq_enable();
        }
}
void tasklet_init(struct tasklet_struct *t,
                  void (*func)(unsigned long), unsigned long data)
{
        t->next = NULL;
        t->state = 0;
        atomic_set(&t->count, 0);
        t->func = func;
        t->data = data;
}

EXPORT_SYMBOL(tasklet_init);
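/*
 * Usage sketch (illustrative only; my_dev and my_tasklet_fn are hypothetical
 * names): a driver typically initializes a tasklet once, schedules it from
 * its hardirq handler, and kills it on teardown.
 *
 *	static void my_tasklet_fn(unsigned long data)
 *	{
 *		struct my_dev *dev = (struct my_dev *)data;
 *		// deferred work runs here in softirq context
 *	}
 *
 *	tasklet_init(&dev->tasklet, my_tasklet_fn, (unsigned long)dev);
 *	tasklet_schedule(&dev->tasklet);	// from the interrupt handler
 *	tasklet_kill(&dev->tasklet);		// before freeing dev
 */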
void tasklet_kill(struct tasklet_struct *t)
{
        if (in_interrupt())
                printk("Attempt to kill tasklet from interrupt\n");

        while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
                do {
                        yield();
                } while (test_bit(TASKLET_STATE_SCHED, &t->state));
        }
        tasklet_unlock_wait(t);
        clear_bit(TASKLET_STATE_SCHED, &t->state);
}

EXPORT_SYMBOL(tasklet_kill);
/*
 * tasklet_hrtimer
 */

/*
 * The trampoline is called when the hrtimer expires. It schedules a tasklet
 * to run __tasklet_hrtimer_trampoline() which in turn will call the intended
 * hrtimer callback, but from softirq context.
 */
static enum hrtimer_restart __hrtimer_tasklet_trampoline(struct hrtimer *timer)
{
        struct tasklet_hrtimer *ttimer =
                container_of(timer, struct tasklet_hrtimer, timer);

        tasklet_hi_schedule(&ttimer->tasklet);
        return HRTIMER_NORESTART;
}
/*
 * Helper function which calls the hrtimer callback from
 * tasklet/softirq context
 */
static void __tasklet_hrtimer_trampoline(unsigned long data)
{
        struct tasklet_hrtimer *ttimer = (void *)data;
        enum hrtimer_restart restart;

        restart = ttimer->function(&ttimer->timer);
        if (restart != HRTIMER_NORESTART)
                hrtimer_restart(&ttimer->timer);
}
/**
 * tasklet_hrtimer_init - Init a tasklet/hrtimer combo for softirq callbacks
 * @ttimer:      tasklet_hrtimer which is initialized
 * @function:    hrtimer callback function which gets called from softirq context
 * @which_clock: clock id (CLOCK_MONOTONIC/CLOCK_REALTIME)
 * @mode:        hrtimer mode (HRTIMER_MODE_ABS/HRTIMER_MODE_REL)
 */
void tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
                          enum hrtimer_restart (*function)(struct hrtimer *),
                          clockid_t which_clock, enum hrtimer_mode mode)
{
        hrtimer_init(&ttimer->timer, which_clock, mode);
        ttimer->timer.function = __hrtimer_tasklet_trampoline;
        tasklet_init(&ttimer->tasklet, __tasklet_hrtimer_trampoline,
                     (unsigned long)ttimer);
        ttimer->function = function;
}
EXPORT_SYMBOL_GPL(tasklet_hrtimer_init);
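/*
 * Usage sketch (illustrative only; my_hrt and my_timer_cb are hypothetical,
 * and tasklet_hrtimer_start() is the helper declared in <linux/interrupt.h>):
 *
 *	static enum hrtimer_restart my_timer_cb(struct hrtimer *t)
 *	{
 *		// runs from HI_SOFTIRQ context via the trampolines above
 *		return HRTIMER_NORESTART;
 *	}
 *
 *	tasklet_hrtimer_init(&my_hrt, my_timer_cb,
 *			     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *	tasklet_hrtimer_start(&my_hrt, ktime_set(0, 10 * NSEC_PER_MSEC),
 *			      HRTIMER_MODE_REL);
 */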
/*
 * Remote softirq bits
 */

DEFINE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
EXPORT_PER_CPU_SYMBOL(softirq_work_list);

static void __local_trigger(struct call_single_data *cp, int softirq)
{
        struct list_head *head = &__get_cpu_var(softirq_work_list[softirq]);

        list_add_tail(&cp->list, head);

        /* Trigger the softirq only if the list was previously empty. */
        if (head->next == &cp->list)
                raise_softirq_irqoff(softirq);
}
#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
static void remote_softirq_receive(void *data)
{
        struct call_single_data *cp = data;
        unsigned long flags;
        int softirq;

        softirq = cp->priv;

        local_irq_save(flags);
        __local_trigger(cp, softirq);
        local_irq_restore(flags);
}

static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
        if (cpu_online(cpu)) {
                cp->func = remote_softirq_receive;
                cp->info = cp;
                cp->flags = 0;
                cp->priv = softirq;

                __smp_call_function_single(cpu, cp, 0);
                return 0;
        }
        return 1;
}
#else /* CONFIG_USE_GENERIC_SMP_HELPERS */
static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
        return 1;
}
#endif
/**
 * __send_remote_softirq - try to schedule softirq work on a remote cpu
 * @cp: private SMP call function data area
 * @cpu: the remote cpu
 * @this_cpu: the currently executing cpu
 * @softirq: the softirq for the work
 *
 * Attempt to schedule softirq work on a remote cpu.  If this cannot be
 * done, the work is instead queued up on the local cpu.
 *
 * Interrupts must be disabled.
 */
void __send_remote_softirq(struct call_single_data *cp, int cpu, int this_cpu, int softirq)
{
        if (cpu == this_cpu || __try_remote_softirq(cp, cpu, softirq))
                __local_trigger(cp, softirq);
}
EXPORT_SYMBOL(__send_remote_softirq);
/**
 * send_remote_softirq - try to schedule softirq work on a remote cpu
 * @cp: private SMP call function data area
 * @cpu: the remote cpu
 * @softirq: the softirq for the work
 *
 * Like __send_remote_softirq except that disabling interrupts and
 * computing the current cpu is done for the caller.
 */
void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
        unsigned long flags;
        int this_cpu;

        local_irq_save(flags);
        this_cpu = smp_processor_id();
        __send_remote_softirq(cp, cpu, this_cpu, softirq);
        local_irq_restore(flags);
}
EXPORT_SYMBOL(send_remote_softirq);
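/*
 * Usage sketch (illustrative only and rough; struct my_work, work, target_cpu
 * and nr are hypothetical): the caller embeds a struct call_single_data in
 * its own object and keeps it alive until the remote softirq has run; the
 * handler for vector nr on the target CPU is expected to drain the entry
 * from its per-cpu softirq_work_list[nr].
 *
 *	struct my_work {
 *		struct call_single_data csd;
 *		// payload describing the deferred work
 *	};
 *
 *	send_remote_softirq(&work->csd, target_cpu, nr);
 */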
static int __cpuinit remote_softirq_cpu_notify(struct notifier_block *self,
                                               unsigned long action, void *hcpu)
{
        /*
         * If a CPU goes away, splice its entries to the current CPU
         * and trigger a run of the softirq
         */
        if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
                int cpu = (unsigned long) hcpu;
                int i;

                local_irq_disable();
                for (i = 0; i < NR_SOFTIRQS; i++) {
                        struct list_head *head = &per_cpu(softirq_work_list[i], cpu);
                        struct list_head *local_head;

                        if (list_empty(head))
                                continue;

                        local_head = &__get_cpu_var(softirq_work_list[i]);
                        list_splice_init(head, local_head);
                        raise_softirq_irqoff(i);
                }
                local_irq_enable();
        }

        return NOTIFY_OK;
}
static struct notifier_block __cpuinitdata remote_softirq_cpu_notifier = {
        .notifier_call = remote_softirq_cpu_notify,
};
void __init softirq_init(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                int i;

                per_cpu(tasklet_vec, cpu).tail =
                        &per_cpu(tasklet_vec, cpu).head;
                per_cpu(tasklet_hi_vec, cpu).tail =
                        &per_cpu(tasklet_hi_vec, cpu).head;
                for (i = 0; i < NR_SOFTIRQS; i++)
                        INIT_LIST_HEAD(&per_cpu(softirq_work_list[i], cpu));
        }

        register_hotcpu_notifier(&remote_softirq_cpu_notifier);

        open_softirq(TASKLET_SOFTIRQ, tasklet_action);
        open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}
static int run_ksoftirqd(void * __bind_cpu)
{
        set_current_state(TASK_INTERRUPTIBLE);

        while (!kthread_should_stop()) {
                preempt_disable();
                if (!local_softirq_pending()) {
                        preempt_enable_no_resched();
                        schedule();
                        preempt_disable();
                }

                __set_current_state(TASK_RUNNING);

                while (local_softirq_pending()) {
                        /* Preempt disable stops cpu going offline.
                           If already offline, we'll be on wrong CPU:
                           don't process */
                        if (cpu_is_offline((long)__bind_cpu))
                                goto wait_to_die;
                        local_irq_disable();
                        if (local_softirq_pending())
                                __do_softirq();
                        local_irq_enable();
                        preempt_enable_no_resched();
                        cond_resched();
                        preempt_disable();
                        rcu_note_context_switch((long)__bind_cpu);
                }
                preempt_enable();
                set_current_state(TASK_INTERRUPTIBLE);
        }
        __set_current_state(TASK_RUNNING);
        return 0;

wait_to_die:
        preempt_enable();
        /* Wait for kthread_stop */
        set_current_state(TASK_INTERRUPTIBLE);
        while (!kthread_should_stop()) {
                schedule();
                set_current_state(TASK_INTERRUPTIBLE);
        }
        __set_current_state(TASK_RUNNING);
        return 0;
}
#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which can already be
 * scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
        struct tasklet_struct **i;

        BUG_ON(cpu_online(cpu));
        BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

        if (!test_bit(TASKLET_STATE_SCHED, &t->state))
                return;

        /* CPU is dead, so no lock needed. */
        for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
                if (*i == t) {
                        *i = t->next;
                        /* If this was the tail element, move the tail ptr */
                        if (*i == NULL)
                                per_cpu(tasklet_vec, cpu).tail = i;
                        return;
                }
        }
        BUG();
}
static void takeover_tasklets(unsigned int cpu)
{
        /* CPU is dead, so no lock needed. */
        local_irq_disable();

        /* Find end, append list for that CPU. */
        if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
                *__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
                this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
                per_cpu(tasklet_vec, cpu).head = NULL;
                per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
        }
        raise_softirq_irqoff(TASKLET_SOFTIRQ);

        if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
                *__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
                __this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
                per_cpu(tasklet_hi_vec, cpu).head = NULL;
                per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
        }
        raise_softirq_irqoff(HI_SOFTIRQ);

        local_irq_enable();
}
#endif /* CONFIG_HOTPLUG_CPU */
static int __cpuinit cpu_callback(struct notifier_block *nfb,
                                  unsigned long action,
                                  void *hcpu)
{
        int hotcpu = (unsigned long)hcpu;
        struct task_struct *p;

        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
                p = kthread_create_on_node(run_ksoftirqd,
                                           hcpu,
                                           cpu_to_node(hotcpu),
                                           "ksoftirqd/%d", hotcpu);
                if (IS_ERR(p)) {
                        printk("ksoftirqd for %i failed\n", hotcpu);
                        return notifier_from_errno(PTR_ERR(p));
                }
                kthread_bind(p, hotcpu);
                per_cpu(ksoftirqd, hotcpu) = p;
                break;
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
                wake_up_process(per_cpu(ksoftirqd, hotcpu));
                break;
#ifdef CONFIG_HOTPLUG_CPU
        case CPU_UP_CANCELED:
        case CPU_UP_CANCELED_FROZEN:
                if (!per_cpu(ksoftirqd, hotcpu))
                        break;
                /* Unbind so it can run.  Fall thru. */
                kthread_bind(per_cpu(ksoftirqd, hotcpu),
                             cpumask_any(cpu_online_mask));
        case CPU_DEAD:
        case CPU_DEAD_FROZEN: {
                static const struct sched_param param = {
                        .sched_priority = MAX_RT_PRIO-1
                };

                p = per_cpu(ksoftirqd, hotcpu);
                per_cpu(ksoftirqd, hotcpu) = NULL;
                sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
                kthread_stop(p);
                takeover_tasklets(hotcpu);
                break;
        }
#endif /* CONFIG_HOTPLUG_CPU */
        }
        return NOTIFY_OK;
}
static struct notifier_block __cpuinitdata cpu_nfb = {
        .notifier_call = cpu_callback
};

static __init int spawn_ksoftirqd(void)
{
        void *cpu = (void *)(long)smp_processor_id();
        int err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);

        BUG_ON(err != NOTIFY_OK);
        cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
        register_cpu_notifier(&cpu_nfb);
        return 0;
}
early_initcall(spawn_ksoftirqd);
/*
 * [ These __weak aliases are kept in a separate compilation unit, so that
 *   GCC does not inline them incorrectly. ]
 */

int __init __weak early_irq_init(void)
{
        return 0;
}

#ifdef CONFIG_GENERIC_HARDIRQS
int __init __weak arch_probe_nr_irqs(void)
{
        return NR_IRQS_LEGACY;
}

int __init __weak arch_early_irq_init(void)
{
        return 0;
}
#endif