/*
 *	linux/kernel/softirq.c
 *
 *	Copyright (C) 1992 Linus Torvalds
 *
 *	Distribute under GPLv2.
 *
 *	Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 *
 *	Remote softirq infrastructure is by Jens Axboe.
 */

#include <linux/module.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/tick.h>
#include <trace/irq.h>

#include <asm/irq.h>
/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if softirq is serialized, only local cpu is marked for
     execution. Hence, we get something sort of weak cpu binding.
     Though it is still not clear, will it result in better locality
     or will not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized wrt itself.
 */

#ifndef __ARCH_IRQ_STAT
irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
EXPORT_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

char *softirq_to_name[NR_SOFTIRQS] = {
	"HI_SOFTIRQ", "TIMER_SOFTIRQ", "NET_TX_SOFTIRQ", "NET_RX_SOFTIRQ",
	"BLOCK_SOFTIRQ", "TASKLET_SOFTIRQ", "SCHED_SOFTIRQ", "HRTIMER_SOFTIRQ",
	"RCU_SOFTIRQ"
};

/*
 * We cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so let the scheduler balance
 * the softirq load for us.
 */
static inline void wakeup_softirqd(void)
{
	/* Interrupts are disabled: no need to stop preemption */
	struct task_struct *tsk = __get_cpu_var(ksoftirqd);

	if (tsk && tsk->state != TASK_RUNNING)
		wake_up_process(tsk);
}

/*
 * This one is for softirq.c-internal use,
 * where hardirqs are disabled legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS
static void __local_bh_disable(unsigned long ip)
{
	unsigned long flags;

	WARN_ON_ONCE(in_irq());

	raw_local_irq_save(flags);
	/*
	 * The preempt tracer hooks into add_preempt_count and will break
	 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
	 * is set and before current->softirq_enabled is cleared.
	 * We must manually increment preempt_count here and manually
	 * call the trace_preempt_off later.
	 */
	preempt_count() += SOFTIRQ_OFFSET;
	/*
	 * Were softirqs turned off above:
	 */
	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_off(ip);
	raw_local_irq_restore(flags);

	if (preempt_count() == SOFTIRQ_OFFSET)
		trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
}
#else /* !CONFIG_TRACE_IRQFLAGS */
static inline void __local_bh_disable(unsigned long ip)
{
	add_preempt_count(SOFTIRQ_OFFSET);
	barrier();
}
#endif /* CONFIG_TRACE_IRQFLAGS */

void local_bh_disable(void)
{
	__local_bh_disable((unsigned long)__builtin_return_address(0));
}

EXPORT_SYMBOL(local_bh_disable);

/*
 * Special-case - softirqs can safely be enabled in
 * cond_resched_softirq(), or by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
	WARN_ON_ONCE(in_irq());
	WARN_ON_ONCE(!irqs_disabled());

	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_on((unsigned long)__builtin_return_address(0));
	sub_preempt_count(SOFTIRQ_OFFSET);
}

EXPORT_SYMBOL(_local_bh_enable);

static inline void _local_bh_enable_ip(unsigned long ip)
{
	WARN_ON_ONCE(in_irq() || irqs_disabled());
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_disable();
#endif
	/*
	 * Are softirqs going to be turned on now:
	 */
	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_on(ip);
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
	 */
	sub_preempt_count(SOFTIRQ_OFFSET - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending()))
		do_softirq();

	dec_preempt_count();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_enable();
#endif
	preempt_check_resched();
}

void local_bh_enable(void)
{
	_local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
EXPORT_SYMBOL(local_bh_enable);

void local_bh_enable_ip(unsigned long ip)
{
	_local_bh_enable_ip(ip);
}
EXPORT_SYMBOL(local_bh_enable_ip);

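/*
 * Illustrative sketch, not part of the original file: the usual reason to
 * call the local_bh_{disable,enable}() pair above is to fence off data that
 * is shared with a softirq or tasklet handler on this cpu.  "example_state"
 * and "example_update" are hypothetical names.
 */
struct example_state {
	unsigned long hits;
};

static inline void example_update(struct example_state *s)
{
	local_bh_disable();	/* softirq handlers cannot run on this cpu now */
	s->hits++;
	local_bh_enable();	/* may immediately run softirqs that became pending */
}
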
/*
 * We restart softirq processing MAX_SOFTIRQ_RESTART times,
 * and we fall back to softirqd after that.
 *
 * This number has been established via experimentation.
 * The two things to balance are latency and fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_RESTART 10

DEFINE_TRACE(softirq_entry);
DEFINE_TRACE(softirq_exit);

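/*
 * Illustrative sketch, not part of the original file: a tracer could attach
 * a probe to the softirq_entry/softirq_exit tracepoints defined above.  The
 * probe and helper names are hypothetical; the register_trace_softirq_entry()
 * helper is the one generated by the DECLARE_TRACE() in <trace/irq.h>.
 */
static void probe_softirq_entry(struct softirq_action *h,
				struct softirq_action *vec)
{
	/* h - vec is the softirq number; see softirq_to_name[] above */
}

static inline int example_attach_softirq_probe(void)
{
	return register_trace_softirq_entry(probe_softirq_entry);
}
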
asmlinkage void __do_softirq(void)
{
	struct softirq_action *h;
	__u32 pending;
	int max_restart = MAX_SOFTIRQ_RESTART;
	int cpu;

	pending = local_softirq_pending();
	account_system_vtime(current);

	__local_bh_disable((unsigned long)__builtin_return_address(0));
	lockdep_softirq_enter();

	cpu = smp_processor_id();
restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);

	local_irq_enable();

	h = softirq_vec;

	do {
		if (pending & 1) {
			int prev_count = preempt_count();

			trace_softirq_entry(h, softirq_vec);
			h->action(h);
			trace_softirq_exit(h, softirq_vec);
			if (unlikely(prev_count != preempt_count())) {
				printk(KERN_ERR "huh, entered softirq %td %s %p "
				       "with preempt_count %08x, "
				       "exited with %08x?\n", h - softirq_vec,
				       softirq_to_name[h - softirq_vec],
				       h->action, prev_count, preempt_count());
				preempt_count() = prev_count;
			}

			rcu_bh_qsctr_inc(cpu);
		}
		h++;
		pending >>= 1;
	} while (pending);

	local_irq_disable();

	pending = local_softirq_pending();
	if (pending && --max_restart)
		goto restart;

	if (pending)
		wakeup_softirqd();

	lockdep_softirq_exit();

	account_system_vtime(current);
	_local_bh_enable();
}

#ifndef __ARCH_HAS_DO_SOFTIRQ

asmlinkage void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	pending = local_softirq_pending();

	if (pending)
		__do_softirq();

	local_irq_restore(flags);
}

#endif

/*
 * Enter an interrupt context.
 */
void irq_enter(void)
{
	int cpu = smp_processor_id();

	rcu_irq_enter();
	if (idle_cpu(cpu) && !in_interrupt()) {
		__irq_enter();
		tick_check_idle(cpu);
	} else
		__irq_enter();
}

#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
# define invoke_softirq()	__do_softirq()
#else
# define invoke_softirq()	do_softirq()
#endif

/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
	account_system_vtime(current);
	trace_hardirq_exit();
	sub_preempt_count(IRQ_EXIT_OFFSET);
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();

#ifdef CONFIG_NO_HZ
	/* Make sure that timer wheel updates are propagated */
	rcu_irq_exit();
	if (idle_cpu(smp_processor_id()) && !in_interrupt() && !need_resched())
		tick_nohz_stop_sched_tick(0);
#endif
	preempt_enable_no_resched();
}

/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
	__raise_softirq_irqoff(nr);

	/*
	 * If we're in an interrupt or softirq, we're done
	 * (this also catches softirq-disabled code). We will
	 * actually run the softirq once we return from
	 * the irq or softirq.
	 *
	 * Otherwise we wake up ksoftirqd to make sure we
	 * schedule the softirq soon.
	 */
	if (!in_interrupt())
		wakeup_softirqd();
}

void raise_softirq(unsigned int nr)
{
	unsigned long flags;

	local_irq_save(flags);
	raise_softirq_irqoff(nr);
	local_irq_restore(flags);
}

void open_softirq(int nr, void (*action)(struct softirq_action *))
{
	softirq_vec[nr].action = action;
}

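/*
 * Illustrative sketch, not part of the original file: the register / raise
 * pairing that open_softirq() and raise_softirq() are meant for, as used by
 * the tasklet code later in this file.  Softirq numbers are not allocated at
 * run time; "nr" must be one of the slots from the fixed enum in
 * <linux/interrupt.h> and "action" its handler.
 */
static inline void example_register_and_kick(int nr,
					     void (*action)(struct softirq_action *))
{
	open_softirq(nr, action);	/* once, at subsystem init time */
	raise_softirq(nr);		/* mark the softirq pending on this cpu */
}
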
/* Tasklets */
struct tasklet_head
{
	struct tasklet_struct *head;
	struct tasklet_struct **tail;
};

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);

void __tasklet_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = NULL;
	*__get_cpu_var(tasklet_vec).tail = t;
	__get_cpu_var(tasklet_vec).tail = &(t->next);
	raise_softirq_irqoff(TASKLET_SOFTIRQ);
	local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_schedule);

void __tasklet_hi_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = NULL;
	*__get_cpu_var(tasklet_hi_vec).tail = t;
	__get_cpu_var(tasklet_hi_vec).tail = &(t->next);
	raise_softirq_irqoff(HI_SOFTIRQ);
	local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_hi_schedule);

static void tasklet_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __get_cpu_var(tasklet_vec).head;
	__get_cpu_var(tasklet_vec).head = NULL;
	__get_cpu_var(tasklet_vec).tail = &__get_cpu_var(tasklet_vec).head;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*__get_cpu_var(tasklet_vec).tail = t;
		__get_cpu_var(tasklet_vec).tail = &(t->next);
		__raise_softirq_irqoff(TASKLET_SOFTIRQ);
		local_irq_enable();
	}
}

static void tasklet_hi_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __get_cpu_var(tasklet_hi_vec).head;
	__get_cpu_var(tasklet_hi_vec).head = NULL;
	__get_cpu_var(tasklet_hi_vec).tail = &__get_cpu_var(tasklet_hi_vec).head;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*__get_cpu_var(tasklet_hi_vec).tail = t;
		__get_cpu_var(tasklet_hi_vec).tail = &(t->next);
		__raise_softirq_irqoff(HI_SOFTIRQ);
		local_irq_enable();
	}
}

void tasklet_init(struct tasklet_struct *t,
		  void (*func)(unsigned long), unsigned long data)
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->func = func;
	t->data = data;
}

EXPORT_SYMBOL(tasklet_init);

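/*
 * Illustrative sketch, not part of the original file: typical driver-side use
 * of the tasklet API implemented here.  The names below are hypothetical; the
 * only real requirements are that the handler runs in softirq context and
 * that tasklet_kill() is called before the tasklet's data goes away.
 */
struct example_dev {
	struct tasklet_struct rx_tasklet;
	/* ... device state ... */
};

static void example_rx_tasklet(unsigned long data)
{
	struct example_dev *dev = (struct example_dev *)data;

	/* deferred work runs here, with hardirqs enabled */
	(void)dev;
}

static inline void example_dev_setup(struct example_dev *dev)
{
	tasklet_init(&dev->rx_tasklet, example_rx_tasklet, (unsigned long)dev);
}

static inline void example_dev_irq(struct example_dev *dev)
{
	/* from the hard interrupt handler: defer the bulk of the work */
	tasklet_schedule(&dev->rx_tasklet);
}

static inline void example_dev_teardown(struct example_dev *dev)
{
	tasklet_kill(&dev->rx_tasklet);
}
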
void tasklet_kill(struct tasklet_struct *t)
{
	if (in_interrupt())
		printk("Attempt to kill tasklet from interrupt\n");

	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
		do {
			yield();
		} while (test_bit(TASKLET_STATE_SCHED, &t->state));
	}
	tasklet_unlock_wait(t);
	clear_bit(TASKLET_STATE_SCHED, &t->state);
}

EXPORT_SYMBOL(tasklet_kill);

DEFINE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
EXPORT_PER_CPU_SYMBOL(softirq_work_list);

static void __local_trigger(struct call_single_data *cp, int softirq)
{
	struct list_head *head = &__get_cpu_var(softirq_work_list[softirq]);

	list_add_tail(&cp->list, head);

	/* Trigger the softirq only if the list was previously empty. */
	if (head->next == &cp->list)
		raise_softirq_irqoff(softirq);
}

#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
static void remote_softirq_receive(void *data)
{
	struct call_single_data *cp = data;
	unsigned long flags;
	int softirq;

	softirq = cp->priv;

	local_irq_save(flags);
	__local_trigger(cp, softirq);
	local_irq_restore(flags);
}

static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
	if (cpu_online(cpu)) {
		cp->func = remote_softirq_receive;
		cp->info = cp;
		cp->flags = 0;
		cp->priv = softirq;

		__smp_call_function_single(cpu, cp);
		return 0;
	}

	return 1;
}
#else /* CONFIG_USE_GENERIC_SMP_HELPERS */
static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
	return 1;
}
#endif

/**
 * __send_remote_softirq - try to schedule softirq work on a remote cpu
 * @cp: private SMP call function data area
 * @cpu: the remote cpu
 * @this_cpu: the currently executing cpu
 * @softirq: the softirq for the work
 *
 * Attempt to schedule softirq work on a remote cpu.  If this cannot be
 * done, the work is instead queued up on the local cpu.
 *
 * Interrupts must be disabled.
 */
void __send_remote_softirq(struct call_single_data *cp, int cpu, int this_cpu, int softirq)
{
	if (cpu == this_cpu || __try_remote_softirq(cp, cpu, softirq))
		__local_trigger(cp, softirq);
}
EXPORT_SYMBOL(__send_remote_softirq);

/**
 * send_remote_softirq - try to schedule softirq work on a remote cpu
 * @cp: private SMP call function data area
 * @cpu: the remote cpu
 * @softirq: the softirq for the work
 *
 * Like __send_remote_softirq except that disabling interrupts and
 * computing the current cpu is done for the caller.
 */
void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
	unsigned long flags;
	int this_cpu;

	local_irq_save(flags);
	this_cpu = smp_processor_id();
	__send_remote_softirq(cp, cpu, this_cpu, softirq);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(send_remote_softirq);

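/*
 * Illustrative sketch, not part of the original file: how a (hypothetical)
 * subsystem could hand one unit of work to a chosen cpu with the helpers
 * above.  The request only needs to embed a call_single_data; the softirq
 * action for "softirq" is then expected to drain this cpu's
 * softirq_work_list[softirq] and container_of() its way back from cp->list
 * to the request.
 */
struct example_remote_req {
	struct call_single_data csd;
	/* ... request payload ... */
};

static inline void example_complete_on(struct example_remote_req *req,
				       int cpu, int softirq)
{
	/* Runs on @cpu if it is online, otherwise falls back to this cpu. */
	send_remote_softirq(&req->csd, cpu, softirq);
}
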
static int __cpuinit remote_softirq_cpu_notify(struct notifier_block *self,
					       unsigned long action, void *hcpu)
{
	/*
	 * If a CPU goes away, splice its entries to the current CPU
	 * and trigger a run of the softirq
	 */
	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
		int cpu = (unsigned long) hcpu;
		int i;

		local_irq_disable();
		for (i = 0; i < NR_SOFTIRQS; i++) {
			struct list_head *head = &per_cpu(softirq_work_list[i], cpu);
			struct list_head *local_head;

			if (list_empty(head))
				continue;

			local_head = &__get_cpu_var(softirq_work_list[i]);
			list_splice_init(head, local_head);
			raise_softirq_irqoff(i);
		}
		local_irq_enable();
	}

	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata remote_softirq_cpu_notifier = {
	.notifier_call	= remote_softirq_cpu_notify,
};

void __init softirq_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		int i;

		per_cpu(tasklet_vec, cpu).tail =
			&per_cpu(tasklet_vec, cpu).head;
		per_cpu(tasklet_hi_vec, cpu).tail =
			&per_cpu(tasklet_hi_vec, cpu).head;
		for (i = 0; i < NR_SOFTIRQS; i++)
			INIT_LIST_HEAD(&per_cpu(softirq_work_list[i], cpu));
	}

	register_hotcpu_notifier(&remote_softirq_cpu_notifier);

	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}

static int ksoftirqd(void * __bind_cpu)
{
	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {
		preempt_disable();
		if (!local_softirq_pending()) {
			preempt_enable_no_resched();
			schedule();
			preempt_disable();
		}

		__set_current_state(TASK_RUNNING);

		while (local_softirq_pending()) {
			/* Preempt disable stops cpu going offline.
			   If already offline, we'll be on wrong CPU:
			   don't process */
			if (cpu_is_offline((long)__bind_cpu))
				goto wait_to_die;
			do_softirq();
			preempt_enable_no_resched();
			cond_resched();
			preempt_disable();
			rcu_qsctr_inc((long)__bind_cpu);
		}
		preempt_enable();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;

wait_to_die:
	preempt_enable();
	/* Wait for kthread_stop */
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which can already be
 * scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
	struct tasklet_struct **i;

	BUG_ON(cpu_online(cpu));
	BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

	if (!test_bit(TASKLET_STATE_SCHED, &t->state))
		return;

	/* CPU is dead, so no lock needed. */
	for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
		if (*i == t) {
			*i = t->next;
			/* If this was the tail element, move the tail ptr */
			if (*i == NULL)
				per_cpu(tasklet_vec, cpu).tail = i;
			return;
		}
	}
	BUG();
}

static void takeover_tasklets(unsigned int cpu)
{
	/* CPU is dead, so no lock needed. */
	local_irq_disable();

	/* Find end, append list for that CPU. */
	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
		*(__get_cpu_var(tasklet_vec).tail) = per_cpu(tasklet_vec, cpu).head;
		__get_cpu_var(tasklet_vec).tail = per_cpu(tasklet_vec, cpu).tail;
		per_cpu(tasklet_vec, cpu).head = NULL;
		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
	}
	raise_softirq_irqoff(TASKLET_SOFTIRQ);

	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
		*__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).head;
		__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).tail;
		per_cpu(tasklet_hi_vec, cpu).head = NULL;
		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
	}
	raise_softirq_irqoff(HI_SOFTIRQ);

	local_irq_enable();
}
#endif /* CONFIG_HOTPLUG_CPU */

static int __cpuinit cpu_callback(struct notifier_block *nfb,
				  unsigned long action,
				  void *hcpu)
{
	int hotcpu = (unsigned long)hcpu;
	struct task_struct *p;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		p = kthread_create(ksoftirqd, hcpu, "ksoftirqd/%d", hotcpu);
		if (IS_ERR(p)) {
			printk("ksoftirqd for %i failed\n", hotcpu);
			return NOTIFY_BAD;
		}
		kthread_bind(p, hotcpu);
		per_cpu(ksoftirqd, hotcpu) = p;
		break;
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		wake_up_process(per_cpu(ksoftirqd, hotcpu));
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		if (!per_cpu(ksoftirqd, hotcpu))
			break;
		/* Unbind so it can run.  Fall thru. */
		kthread_bind(per_cpu(ksoftirqd, hotcpu),
			     cpumask_any(cpu_online_mask));
	case CPU_DEAD:
	case CPU_DEAD_FROZEN: {
		struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

		p = per_cpu(ksoftirqd, hotcpu);
		per_cpu(ksoftirqd, hotcpu) = NULL;
		sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
		kthread_stop(p);
		takeover_tasklets(hotcpu);
		break;
	}
#endif /* CONFIG_HOTPLUG_CPU */
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata cpu_nfb = {
	.notifier_call = cpu_callback
};

static __init int spawn_ksoftirqd(void)
{
	void *cpu = (void *)(long)smp_processor_id();
	int err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);

	BUG_ON(err == NOTIFY_BAD);
	cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
	register_cpu_notifier(&cpu_nfb);
	return 0;
}
early_initcall(spawn_ksoftirqd);

#ifdef CONFIG_SMP
/*
 * Call a function on all processors
 */
int on_each_cpu(void (*func) (void *info), void *info, int wait)
{
	int ret = 0;

	preempt_disable();
	ret = smp_call_function(func, info, wait);
	local_irq_disable();
	func(info);
	local_irq_enable();
	preempt_enable();
	return ret;
}
EXPORT_SYMBOL(on_each_cpu);
#endif

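/*
 * Illustrative sketch, not part of the original file: a typical caller of
 * on_each_cpu() above.  "example_bump" and "example_count_cpus" are
 * hypothetical; the callback runs with interrupts disabled on every online
 * cpu (including the caller's), so it must be short and must not sleep.
 */
static void example_bump(void *info)
{
	atomic_inc((atomic_t *)info);
}

static inline int example_count_cpus(atomic_t *counter)
{
	/* wait == 1: return only after every cpu has run the callback */
	return on_each_cpu(example_bump, counter, 1);
}
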
/*
 * [ These __weak aliases are kept in a separate compilation unit, so that
 *   GCC does not inline them incorrectly. ]
 */

int __init __weak early_irq_init(void)
{
	return 0;
}

int __init __weak arch_probe_nr_irqs(void)
{
	return 0;
}

int __init __weak arch_early_irq_init(void)
{
	return 0;
}

int __weak arch_init_chip_data(struct irq_desc *desc, int cpu)
{
	return 0;
}