/*
 *	linux/kernel/softirq.c
 *
 *	Copyright (C) 1992 Linus Torvalds
 *
 *	Distribute under GPLv2.
 *
 *	Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 *
 *	Remote softirq infrastructure is by Jens Axboe.
 */
#include <linux/module.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/tick.h>

#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>
/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if softirq is serialized, only local cpu is marked for
     execution. Hence, we get something sort of weak cpu binding.
     Though it is still not clear whether it will result in better
     locality or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized wrt itself.

   (An illustrative sketch of the CPU-local pattern follows below.)
 */
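/*
 * Illustrative sketch only (not part of the original file): the CPU-local
 * pattern described above, as a hypothetical softirq handler that drains a
 * per-CPU list.  All names here (my_work, my_queue, my_action) are made up,
 * and each CPU's my_queue is assumed to have been INIT_LIST_HEAD()'d at
 * init time.
 */
#if 0
struct my_work {
	struct list_head entry;
	void (*fn)(struct my_work *);
};

static DEFINE_PER_CPU(struct list_head, my_queue);

static void my_action(struct softirq_action *a)
{
	struct list_head local;

	/* Detach the CPU-local queue with interrupts off; no shared locks. */
	local_irq_disable();
	list_replace_init(&__get_cpu_var(my_queue), &local);
	local_irq_enable();

	while (!list_empty(&local)) {
		struct my_work *w = list_first_entry(&local, struct my_work, entry);

		list_del(&w->entry);
		w->fn(w);
	}
}
#endif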
#ifndef __ARCH_IRQ_STAT
irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
EXPORT_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

char *softirq_to_name[NR_SOFTIRQS] = {
	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK",
	"TASKLET", "SCHED", "HRTIMER", "RCU"
};
/*
 * We cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so let the scheduler balance the softirq
 * load for us.
 */
void wakeup_softirqd(void)
{
	/* Interrupts are disabled: no need to stop preemption */
	struct task_struct *tsk = __get_cpu_var(ksoftirqd);

	if (tsk && tsk->state != TASK_RUNNING)
		wake_up_process(tsk);
}
/*
 * This one is for softirq.c-internal use,
 * where hardirqs are disabled legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS
static void __local_bh_disable(unsigned long ip)
{
	unsigned long flags;

	WARN_ON_ONCE(in_irq());

	raw_local_irq_save(flags);
	/*
	 * The preempt tracer hooks into add_preempt_count and will break
	 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
	 * is set and before current->softirq_enabled is cleared.
	 * We must manually increment preempt_count here and manually
	 * call the trace_preempt_off later.
	 */
	preempt_count() += SOFTIRQ_OFFSET;
	/*
	 * Were softirqs turned off above:
	 */
	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_off(ip);
	raw_local_irq_restore(flags);

	if (preempt_count() == SOFTIRQ_OFFSET)
		trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
}
#else /* !CONFIG_TRACE_IRQFLAGS */
static inline void __local_bh_disable(unsigned long ip)
{
	add_preempt_count(SOFTIRQ_OFFSET);
	barrier();
}
#endif /* CONFIG_TRACE_IRQFLAGS */
void local_bh_disable(void)
{
	__local_bh_disable((unsigned long)__builtin_return_address(0));
}

EXPORT_SYMBOL(local_bh_disable);

/*
 * Special-case - softirqs can safely be enabled in
 * cond_resched_softirq(), or by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
	WARN_ON_ONCE(in_irq());
	WARN_ON_ONCE(!irqs_disabled());

	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_on((unsigned long)__builtin_return_address(0));
	sub_preempt_count(SOFTIRQ_OFFSET);
}

EXPORT_SYMBOL(_local_bh_enable);
static inline void _local_bh_enable_ip(unsigned long ip)
{
	WARN_ON_ONCE(in_irq() || irqs_disabled());
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_disable();
#endif
	/*
	 * Are softirqs going to be turned on now:
	 */
	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_on(ip);
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
	 */
	sub_preempt_count(SOFTIRQ_OFFSET - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending()))
		do_softirq();

	dec_preempt_count();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_enable();
#endif
	preempt_check_resched();
}

void local_bh_enable(void)
{
	_local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
EXPORT_SYMBOL(local_bh_enable);

void local_bh_enable_ip(unsigned long ip)
{
	_local_bh_enable_ip(ip);
}
EXPORT_SYMBOL(local_bh_enable_ip);
/*
 * We restart softirq processing MAX_SOFTIRQ_RESTART times,
 * and we fall back to softirqd after that.
 *
 * This number has been established via experimentation.
 * The two things to balance are latency and fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_RESTART 10
asmlinkage void __do_softirq(void)
{
	struct softirq_action *h;
	__u32 pending;
	int max_restart = MAX_SOFTIRQ_RESTART;
	int cpu;

	pending = local_softirq_pending();
	account_system_vtime(current);

	__local_bh_disable((unsigned long)__builtin_return_address(0));
	lockdep_softirq_enter();

	cpu = smp_processor_id();
restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);

	local_irq_enable();

	h = softirq_vec;

	do {
		if (pending & 1) {
			int prev_count = preempt_count();
			kstat_incr_softirqs_this_cpu(h - softirq_vec);

			trace_softirq_entry(h, softirq_vec);
			h->action(h);
			trace_softirq_exit(h, softirq_vec);
			if (unlikely(prev_count != preempt_count())) {
				printk(KERN_ERR "huh, entered softirq %td %s %p"
				       "with preempt_count %08x,"
				       " exited with %08x?\n", h - softirq_vec,
				       softirq_to_name[h - softirq_vec],
				       h->action, prev_count, preempt_count());
				preempt_count() = prev_count;
			}

			rcu_bh_qsctr_inc(cpu);
		}
		h++;
		pending >>= 1;
	} while (pending);

	local_irq_disable();

	pending = local_softirq_pending();
	if (pending && --max_restart)
		goto restart;

	if (pending)
		wakeup_softirqd();

	lockdep_softirq_exit();

	account_system_vtime(current);
	_local_bh_enable();
}
#ifndef __ARCH_HAS_DO_SOFTIRQ

asmlinkage void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	pending = local_softirq_pending();

	if (pending)
		__do_softirq();

	local_irq_restore(flags);
}

#endif
/*
 * Enter an interrupt context.
 */
void irq_enter(void)
{
	int cpu = smp_processor_id();

	rcu_irq_enter();
	if (idle_cpu(cpu) && !in_interrupt()) {
		__irq_enter();
		tick_check_idle(cpu);
	} else
		__irq_enter();
}

#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
# define invoke_softirq()	__do_softirq()
#else
# define invoke_softirq()	do_softirq()
#endif
/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
	account_system_vtime(current);
	trace_hardirq_exit();
	sub_preempt_count(IRQ_EXIT_OFFSET);
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();

#ifdef CONFIG_NO_HZ
	/* Make sure that timer wheel updates are propagated */
	rcu_irq_exit();
	if (idle_cpu(smp_processor_id()) && !in_interrupt() && !need_resched())
		tick_nohz_stop_sched_tick(0);
#endif
	preempt_enable_no_resched();
}
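/*
 * Illustrative sketch only (not part of the original file): how an
 * architecture's interrupt entry code typically brackets handler dispatch
 * with irq_enter()/irq_exit(), so that irq_exit() above can run pending
 * softirqs on the way out.  my_arch_do_IRQ is a made-up name; real arch
 * code lives outside this file.
 */
#if 0
void my_arch_do_IRQ(unsigned int irq)
{
	irq_enter();
	generic_handle_irq(irq);	/* run the registered handler(s) */
	irq_exit();			/* may call invoke_softirq() here */
}
#endif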
/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
	__raise_softirq_irqoff(nr);

	/*
	 * If we're in an interrupt or softirq, we're done
	 * (this also catches softirq-disabled code). We will
	 * actually run the softirq once we return from
	 * the irq or softirq.
	 *
	 * Otherwise we wake up ksoftirqd to make sure we
	 * schedule the softirq soon.
	 */
	if (!in_interrupt())
		wakeup_softirqd();
}

void raise_softirq(unsigned int nr)
{
	unsigned long flags;

	local_irq_save(flags);
	raise_softirq_irqoff(nr);
	local_irq_restore(flags);
}
void open_softirq(int nr, void (*action)(struct softirq_action *))
{
	softirq_vec[nr].action = action;
}
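/*
 * Illustrative sketch only (not part of the original file): a subsystem
 * registers its handler for one of the statically enumerated softirq
 * numbers with open_softirq() at init time and marks it pending later with
 * raise_softirq().  The handler and init names are made up, and
 * BLOCK_SOFTIRQ is used purely as a placeholder number.
 */
#if 0
static void my_softirq_action(struct softirq_action *h)
{
	/* drain whatever per-CPU work this softirq owns */
}

static int __init my_subsys_init(void)
{
	open_softirq(BLOCK_SOFTIRQ, my_softirq_action);
	return 0;
}

static void my_subsys_kick(void)
{
	/* callable from any context; wakes ksoftirqd when needed */
	raise_softirq(BLOCK_SOFTIRQ);
}
#endif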
struct tasklet_head
{
	struct tasklet_struct *head;
	struct tasklet_struct **tail;
};

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);
void __tasklet_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = NULL;
	*__get_cpu_var(tasklet_vec).tail = t;
	__get_cpu_var(tasklet_vec).tail = &(t->next);
	raise_softirq_irqoff(TASKLET_SOFTIRQ);
	local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_schedule);
void __tasklet_hi_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = NULL;
	*__get_cpu_var(tasklet_hi_vec).tail = t;
	__get_cpu_var(tasklet_hi_vec).tail = &(t->next);
	raise_softirq_irqoff(HI_SOFTIRQ);
	local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_hi_schedule);
void __tasklet_hi_schedule_first(struct tasklet_struct *t)
{
	BUG_ON(!irqs_disabled());

	t->next = __get_cpu_var(tasklet_hi_vec).head;
	__get_cpu_var(tasklet_hi_vec).head = t;
	__raise_softirq_irqoff(HI_SOFTIRQ);
}

EXPORT_SYMBOL(__tasklet_hi_schedule_first);
static void tasklet_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __get_cpu_var(tasklet_vec).head;
	__get_cpu_var(tasklet_vec).head = NULL;
	__get_cpu_var(tasklet_vec).tail = &__get_cpu_var(tasklet_vec).head;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*__get_cpu_var(tasklet_vec).tail = t;
		__get_cpu_var(tasklet_vec).tail = &(t->next);
		__raise_softirq_irqoff(TASKLET_SOFTIRQ);
		local_irq_enable();
	}
}
static void tasklet_hi_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __get_cpu_var(tasklet_hi_vec).head;
	__get_cpu_var(tasklet_hi_vec).head = NULL;
	__get_cpu_var(tasklet_hi_vec).tail = &__get_cpu_var(tasklet_hi_vec).head;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*__get_cpu_var(tasklet_hi_vec).tail = t;
		__get_cpu_var(tasklet_hi_vec).tail = &(t->next);
		__raise_softirq_irqoff(HI_SOFTIRQ);
		local_irq_enable();
	}
}
void tasklet_init(struct tasklet_struct *t,
		  void (*func)(unsigned long), unsigned long data)
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->func = func;
	t->data = data;
}

EXPORT_SYMBOL(tasklet_init);
void tasklet_kill(struct tasklet_struct *t)
{
	if (in_interrupt())
		printk("Attempt to kill tasklet from interrupt\n");

	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
		do {
			yield();
		} while (test_bit(TASKLET_STATE_SCHED, &t->state));
	}
	tasklet_unlock_wait(t);
	clear_bit(TASKLET_STATE_SCHED, &t->state);
}

EXPORT_SYMBOL(tasklet_kill);
/*
 * The trampoline is called when the hrtimer expires. If this is
 * called from the hrtimer interrupt then we schedule the tasklet as
 * the timer callback function expects to run in softirq context. If
 * it's called in softirq context anyway (i.e. high resolution timers
 * disabled) then the hrtimer callback is called right away.
 */
static enum hrtimer_restart __hrtimer_tasklet_trampoline(struct hrtimer *timer)
{
	struct tasklet_hrtimer *ttimer =
		container_of(timer, struct tasklet_hrtimer, timer);

	if (hrtimer_is_hres_active(timer)) {
		tasklet_hi_schedule(&ttimer->tasklet);
		return HRTIMER_NORESTART;
	}
	return ttimer->function(timer);
}
/*
 * Helper function which calls the hrtimer callback from
 * tasklet/softirq context
 */
static void __tasklet_hrtimer_trampoline(unsigned long data)
{
	struct tasklet_hrtimer *ttimer = (void *)data;
	enum hrtimer_restart restart;

	restart = ttimer->function(&ttimer->timer);
	if (restart != HRTIMER_NORESTART)
		hrtimer_restart(&ttimer->timer);
}
/**
 * tasklet_hrtimer_init - Init a tasklet/hrtimer combo for softirq callbacks
 * @ttimer:	 tasklet_hrtimer which is initialized
 * @function:	 hrtimer callback function which gets called from softirq context
 * @which_clock: clock id (CLOCK_MONOTONIC/CLOCK_REALTIME)
 * @mode:	 hrtimer mode (HRTIMER_MODE_ABS/HRTIMER_MODE_REL)
 */
void tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
			  enum hrtimer_restart (*function)(struct hrtimer *),
			  clockid_t which_clock, enum hrtimer_mode mode)
{
	hrtimer_init(&ttimer->timer, which_clock, mode);
	ttimer->timer.function = __hrtimer_tasklet_trampoline;
	tasklet_init(&ttimer->tasklet, __tasklet_hrtimer_trampoline,
		     (unsigned long)ttimer);
	ttimer->function = function;
}
EXPORT_SYMBOL_GPL(tasklet_hrtimer_init);
/*
 * Remote softirq bits
 */

DEFINE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
EXPORT_PER_CPU_SYMBOL(softirq_work_list);
static void __local_trigger(struct call_single_data *cp, int softirq)
{
	struct list_head *head = &__get_cpu_var(softirq_work_list[softirq]);

	list_add_tail(&cp->list, head);

	/* Trigger the softirq only if the list was previously empty. */
	if (head->next == &cp->list)
		raise_softirq_irqoff(softirq);
}
#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
static void remote_softirq_receive(void *data)
{
	struct call_single_data *cp = data;
	unsigned long flags;
	int softirq;

	softirq = cp->priv;

	local_irq_save(flags);
	__local_trigger(cp, softirq);
	local_irq_restore(flags);
}

static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
	if (cpu_online(cpu)) {
		cp->func = remote_softirq_receive;
		cp->info = cp;
		cp->flags = 0;
		cp->priv = softirq;

		__smp_call_function_single(cpu, cp, 0);
		return 0;
	}

	return 1;
}
#else /* CONFIG_USE_GENERIC_SMP_HELPERS */
static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
	return 1;
}
#endif /* CONFIG_USE_GENERIC_SMP_HELPERS */
/**
 * __send_remote_softirq - try to schedule softirq work on a remote cpu
 * @cp: private SMP call function data area
 * @cpu: the remote cpu
 * @this_cpu: the currently executing cpu
 * @softirq: the softirq for the work
 *
 * Attempt to schedule softirq work on a remote cpu.  If this cannot be
 * done, the work is instead queued up on the local cpu.
 *
 * Interrupts must be disabled.
 */
void __send_remote_softirq(struct call_single_data *cp, int cpu, int this_cpu, int softirq)
{
	if (cpu == this_cpu || __try_remote_softirq(cp, cpu, softirq))
		__local_trigger(cp, softirq);
}
EXPORT_SYMBOL(__send_remote_softirq);
/**
 * send_remote_softirq - try to schedule softirq work on a remote cpu
 * @cp: private SMP call function data area
 * @cpu: the remote cpu
 * @softirq: the softirq for the work
 *
 * Like __send_remote_softirq except that disabling interrupts and
 * computing the current cpu is done for the caller.
 */
void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
	unsigned long flags;
	int this_cpu;

	local_irq_save(flags);
	this_cpu = smp_processor_id();
	__send_remote_softirq(cp, cpu, this_cpu, softirq);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(send_remote_softirq);
static int __cpuinit remote_softirq_cpu_notify(struct notifier_block *self,
					       unsigned long action, void *hcpu)
{
	/*
	 * If a CPU goes away, splice its entries to the current CPU
	 * and trigger a run of the softirq
	 */
	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
		int cpu = (unsigned long) hcpu;
		int i;

		local_irq_disable();
		for (i = 0; i < NR_SOFTIRQS; i++) {
			struct list_head *head = &per_cpu(softirq_work_list[i], cpu);
			struct list_head *local_head;

			if (list_empty(head))
				continue;

			local_head = &__get_cpu_var(softirq_work_list[i]);
			list_splice_init(head, local_head);
			raise_softirq_irqoff(i);
		}
		local_irq_enable();
	}

	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata remote_softirq_cpu_notifier = {
	.notifier_call	= remote_softirq_cpu_notify,
};
void __init softirq_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		int i;

		per_cpu(tasklet_vec, cpu).tail =
			&per_cpu(tasklet_vec, cpu).head;
		per_cpu(tasklet_hi_vec, cpu).tail =
			&per_cpu(tasklet_hi_vec, cpu).head;
		for (i = 0; i < NR_SOFTIRQS; i++)
			INIT_LIST_HEAD(&per_cpu(softirq_work_list[i], cpu));
	}

	register_hotcpu_notifier(&remote_softirq_cpu_notifier);

	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}
static int ksoftirqd(void * __bind_cpu)
{
	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {
		preempt_disable();
		if (!local_softirq_pending()) {
			preempt_enable_no_resched();
			schedule();
			preempt_disable();
		}

		__set_current_state(TASK_RUNNING);

		while (local_softirq_pending()) {
			/* Preempt disable stops cpu going offline.
			   If already offline, we'll be on wrong CPU:
			   don't process */
			if (cpu_is_offline((long)__bind_cpu))
				goto wait_to_die;
			do_softirq();
			preempt_enable_no_resched();
			cond_resched();
			preempt_disable();
			rcu_qsctr_inc((long)__bind_cpu);
		}
		preempt_enable();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;

wait_to_die:
	preempt_enable();
	/* Wait for kthread_stop */
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}
#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which can already be
 * scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
	struct tasklet_struct **i;

	BUG_ON(cpu_online(cpu));
	BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

	if (!test_bit(TASKLET_STATE_SCHED, &t->state))
		return;

	/* CPU is dead, so no lock needed. */
	for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
		if (*i == t) {
			*i = t->next;
			/* If this was the tail element, move the tail ptr */
			if (*i == NULL)
				per_cpu(tasklet_vec, cpu).tail = i;
			return;
		}
	}
	BUG();
}
static void takeover_tasklets(unsigned int cpu)
{
	/* CPU is dead, so no lock needed. */
	local_irq_disable();

	/* Find end, append list for that CPU. */
	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
		*(__get_cpu_var(tasklet_vec).tail) = per_cpu(tasklet_vec, cpu).head;
		__get_cpu_var(tasklet_vec).tail = per_cpu(tasklet_vec, cpu).tail;
		per_cpu(tasklet_vec, cpu).head = NULL;
		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
	}
	raise_softirq_irqoff(TASKLET_SOFTIRQ);

	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
		*__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).head;
		__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).tail;
		per_cpu(tasklet_hi_vec, cpu).head = NULL;
		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
	}
	raise_softirq_irqoff(HI_SOFTIRQ);

	local_irq_enable();
}
#endif /* CONFIG_HOTPLUG_CPU */
static int __cpuinit cpu_callback(struct notifier_block *nfb,
				  unsigned long action,
				  void *hcpu)
{
	int hotcpu = (unsigned long)hcpu;
	struct task_struct *p;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		p = kthread_create(ksoftirqd, hcpu, "ksoftirqd/%d", hotcpu);
		if (IS_ERR(p)) {
			printk("ksoftirqd for %i failed\n", hotcpu);
			return NOTIFY_BAD;
		}
		kthread_bind(p, hotcpu);
		per_cpu(ksoftirqd, hotcpu) = p;
		break;
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		wake_up_process(per_cpu(ksoftirqd, hotcpu));
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		if (!per_cpu(ksoftirqd, hotcpu))
			break;
		/* Unbind so it can run.  Fall thru. */
		kthread_bind(per_cpu(ksoftirqd, hotcpu),
			     cpumask_any(cpu_online_mask));
	case CPU_DEAD:
	case CPU_DEAD_FROZEN: {
		struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

		p = per_cpu(ksoftirqd, hotcpu);
		per_cpu(ksoftirqd, hotcpu) = NULL;
		sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
		kthread_stop(p);
		takeover_tasklets(hotcpu);
		break;
	}
#endif /* CONFIG_HOTPLUG_CPU */
	}
	return NOTIFY_OK;
}
static struct notifier_block __cpuinitdata cpu_nfb = {
	.notifier_call = cpu_callback
};

static __init int spawn_ksoftirqd(void)
{
	void *cpu = (void *)(long)smp_processor_id();
	int err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);

	BUG_ON(err == NOTIFY_BAD);
	cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
	register_cpu_notifier(&cpu_nfb);
	return 0;
}
early_initcall(spawn_ksoftirqd);
/*
 * Call a function on all processors
 */
int on_each_cpu(void (*func) (void *info), void *info, int wait)
{
	int ret = 0;

	preempt_disable();
	ret = smp_call_function(func, info, wait);
	local_irq_disable();
	func(info);
	local_irq_enable();
	preempt_enable();
	return ret;
}
EXPORT_SYMBOL(on_each_cpu);
/*
 * [ These __weak aliases are kept in a separate compilation unit, so that
 *   GCC does not inline them incorrectly. ]
 */

int __init __weak early_irq_init(void)
{
	return 0;
}

int __init __weak arch_probe_nr_irqs(void)
{
	return 0;
}

int __init __weak arch_early_irq_init(void)
{
	return 0;
}

int __weak arch_init_chip_data(struct irq_desc *desc, int node)
{
	return 0;
}