/*
 *	linux/kernel/softirq.c
 *
 *	Copyright (C) 1992 Linus Torvalds
 *
 *	Distribute under GPLv2.
 *
 *	Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 *
 *	Remote softirq infrastructure is by Jens Axboe.
 */
#include <linux/module.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/tick.h>

#include <asm/irq.h>
/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if softirq is serialized, only local cpu is marked for
     execution. Hence, we get something sort of weak cpu binding.
     Though it is still not clear whether this will result in better
     locality or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized wrt itself.
 */
#ifndef __ARCH_IRQ_STAT
irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
EXPORT_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
/*
 * We cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so let the scheduler balance
 * the softirq load for us.
 */
static inline void wakeup_softirqd(void)
{
	/* Interrupts are disabled: no need to stop preemption */
	struct task_struct *tsk = __get_cpu_var(ksoftirqd);

	if (tsk && tsk->state != TASK_RUNNING)
		wake_up_process(tsk);
}
/*
 * This one is for softirq.c-internal use,
 * where hardirqs are disabled legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS
static void __local_bh_disable(unsigned long ip)
{
	unsigned long flags;

	WARN_ON_ONCE(in_irq());

	raw_local_irq_save(flags);
	/*
	 * The preempt tracer hooks into add_preempt_count and will break
	 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
	 * is set and before current->softirq_enabled is cleared.
	 * We must manually increment preempt_count here and manually
	 * call the trace_preempt_off later.
	 */
	preempt_count() += SOFTIRQ_OFFSET;
	/*
	 * Were softirqs turned off above:
	 */
	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_off(ip);
	raw_local_irq_restore(flags);

	if (preempt_count() == SOFTIRQ_OFFSET)
		trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
}
#else /* !CONFIG_TRACE_IRQFLAGS */
static inline void __local_bh_disable(unsigned long ip)
{
	add_preempt_count(SOFTIRQ_OFFSET);
	barrier();
}
#endif /* CONFIG_TRACE_IRQFLAGS */
void local_bh_disable(void)
{
	__local_bh_disable((unsigned long)__builtin_return_address(0));
}

EXPORT_SYMBOL(local_bh_disable);

/*
 * Special-case - softirqs can safely be enabled in
 * cond_resched_softirq(), or by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
	WARN_ON_ONCE(in_irq());
	WARN_ON_ONCE(!irqs_disabled());

	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_on((unsigned long)__builtin_return_address(0));
	sub_preempt_count(SOFTIRQ_OFFSET);
}

EXPORT_SYMBOL(_local_bh_enable);

static inline void _local_bh_enable_ip(unsigned long ip)
{
	WARN_ON_ONCE(in_irq() || irqs_disabled());
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_disable();
#endif
	/*
	 * Are softirqs going to be turned on now:
	 */
	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_on(ip);
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
	 */
	sub_preempt_count(SOFTIRQ_OFFSET - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending()))
		do_softirq();

	dec_preempt_count();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_enable();
#endif
	preempt_check_resched();
}

void local_bh_enable(void)
{
	_local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
EXPORT_SYMBOL(local_bh_enable);

void local_bh_enable_ip(unsigned long ip)
{
	_local_bh_enable_ip(ip);
}
EXPORT_SYMBOL(local_bh_enable_ip);
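/*
 * Illustrative sketch (not part of this file): callers bracket a section
 * that must not race with softirq handlers on this CPU with
 * local_bh_disable()/local_bh_enable().  The names my_dev and my_list
 * below are hypothetical.
 *
 *	local_bh_disable();
 *	list_add(&my_dev->node, &my_list);	// safe from local softirqs
 *	local_bh_enable();			// pending softirqs may run here
 */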
/*
 * We restart softirq processing MAX_SOFTIRQ_RESTART times,
 * and we fall back to softirqd after that.
 *
 * This number has been established via experimentation.
 * The two things to balance are latency and fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_RESTART 10
asmlinkage void __do_softirq(void)
{
	struct softirq_action *h;
	__u32 pending;
	int max_restart = MAX_SOFTIRQ_RESTART;
	int cpu;

	pending = local_softirq_pending();
	account_system_vtime(current);

	__local_bh_disable((unsigned long)__builtin_return_address(0));
	trace_softirq_enter();

	cpu = smp_processor_id();
restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);

	local_irq_enable();

	h = softirq_vec;

	do {
		if (pending & 1) {
			int prev_count = preempt_count();

			h->action(h);

			if (unlikely(prev_count != preempt_count())) {
				printk(KERN_ERR "huh, entered softirq %td %p "
				       "with preempt_count %08x, "
				       "exited with %08x?\n", h - softirq_vec,
				       h->action, prev_count, preempt_count());
				preempt_count() = prev_count;
			}

			rcu_bh_qsctr_inc(cpu);
		}
		h++;
		pending >>= 1;
	} while (pending);

	local_irq_disable();

	pending = local_softirq_pending();
	if (pending && --max_restart)
		goto restart;

	if (pending)
		wakeup_softirqd();

	trace_softirq_exit();

	account_system_vtime(current);
	_local_bh_enable();
}
#ifndef __ARCH_HAS_DO_SOFTIRQ

asmlinkage void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	pending = local_softirq_pending();

	if (pending)
		__do_softirq();

	local_irq_restore(flags);
}

#endif
/*
 * Enter an interrupt context.
 */
void irq_enter(void)
{
	int cpu = smp_processor_id();

	rcu_irq_enter();
	if (idle_cpu(cpu) && !in_interrupt()) {
		__irq_enter();
		tick_check_idle(cpu);
	} else
		__irq_enter();
}

#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
# define invoke_softirq()	__do_softirq()
#else
# define invoke_softirq()	do_softirq()
#endif
/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
	account_system_vtime(current);
	trace_hardirq_exit();
	sub_preempt_count(IRQ_EXIT_OFFSET);
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();

#ifdef CONFIG_NO_HZ
	/* Make sure that timer wheel updates are propagated */
	rcu_irq_exit();
	if (idle_cpu(smp_processor_id()) && !in_interrupt() && !need_resched())
		tick_nohz_stop_sched_tick(0);
#endif
	preempt_enable_no_resched();
}
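/*
 * Illustrative sketch (not part of this file): architecture interrupt
 * entry code is expected to bracket handler invocation with
 * irq_enter()/irq_exit(), roughly as below.  my_arch_do_IRQ() and
 * handle_my_irq() are hypothetical names.
 *
 *	void my_arch_do_IRQ(unsigned int irq)
 *	{
 *		irq_enter();
 *		handle_my_irq(irq);	// run handlers in hardirq context
 *		irq_exit();		// may invoke softirqs on the way out
 *	}
 */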
/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
	__raise_softirq_irqoff(nr);

	/*
	 * If we're in an interrupt or softirq, we're done
	 * (this also catches softirq-disabled code). We will
	 * actually run the softirq once we return from
	 * the irq or softirq.
	 *
	 * Otherwise we wake up ksoftirqd to make sure we
	 * schedule the softirq soon.
	 */
	if (!in_interrupt())
		wakeup_softirqd();
}

void raise_softirq(unsigned int nr)
{
	unsigned long flags;

	local_irq_save(flags);
	raise_softirq_irqoff(nr);
	local_irq_restore(flags);
}
void open_softirq(int nr, void (*action)(struct softirq_action *))
{
	softirq_vec[nr].action = action;
}
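/*
 * Illustrative sketch (not part of this file): a subsystem registers its
 * handler once at init time and raises the softirq when there is work.
 * MY_SOFTIRQ and my_softirq_action() are hypothetical; real users pick
 * one of the fixed entries of the softirq enum (e.g. NET_RX_SOFTIRQ).
 *
 *	static void my_softirq_action(struct softirq_action *h)
 *	{
 *		// drain per-cpu work queued by the producer
 *	}
 *
 *	open_softirq(MY_SOFTIRQ, my_softirq_action);	// at init time
 *	...
 *	raise_softirq(MY_SOFTIRQ);			// when work is queued
 */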
/* Tasklets */
struct tasklet_head
{
	struct tasklet_struct *head;
	struct tasklet_struct **tail;
};

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);
void __tasklet_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = NULL;
	*__get_cpu_var(tasklet_vec).tail = t;
	__get_cpu_var(tasklet_vec).tail = &(t->next);
	raise_softirq_irqoff(TASKLET_SOFTIRQ);
	local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_schedule);

void __tasklet_hi_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = NULL;
	*__get_cpu_var(tasklet_hi_vec).tail = t;
	__get_cpu_var(tasklet_hi_vec).tail = &(t->next);
	raise_softirq_irqoff(HI_SOFTIRQ);
	local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_hi_schedule);
static void tasklet_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __get_cpu_var(tasklet_vec).head;
	__get_cpu_var(tasklet_vec).head = NULL;
	__get_cpu_var(tasklet_vec).tail = &__get_cpu_var(tasklet_vec).head;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*__get_cpu_var(tasklet_vec).tail = t;
		__get_cpu_var(tasklet_vec).tail = &(t->next);
		__raise_softirq_irqoff(TASKLET_SOFTIRQ);
		local_irq_enable();
	}
}
static void tasklet_hi_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __get_cpu_var(tasklet_hi_vec).head;
	__get_cpu_var(tasklet_hi_vec).head = NULL;
	__get_cpu_var(tasklet_hi_vec).tail = &__get_cpu_var(tasklet_hi_vec).head;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*__get_cpu_var(tasklet_hi_vec).tail = t;
		__get_cpu_var(tasklet_hi_vec).tail = &(t->next);
		__raise_softirq_irqoff(HI_SOFTIRQ);
		local_irq_enable();
	}
}
void tasklet_init(struct tasklet_struct *t,
		  void (*func)(unsigned long), unsigned long data)
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->func = func;
	t->data = data;
}

EXPORT_SYMBOL(tasklet_init);
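/*
 * Illustrative sketch (not part of this file): the usual tasklet life
 * cycle.  my_tasklet and my_tasklet_func() are hypothetical names.
 *
 *	static void my_tasklet_func(unsigned long data)
 *	{
 *		// runs in softirq context, never on two CPUs at once
 *	}
 *	static struct tasklet_struct my_tasklet;
 *
 *	tasklet_init(&my_tasklet, my_tasklet_func, 0);	// setup
 *	tasklet_schedule(&my_tasklet);			// e.g. from an irq handler
 *	tasklet_kill(&my_tasklet);			// before freeing the object
 */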
void tasklet_kill(struct tasklet_struct *t)
{
	if (in_interrupt())
		printk("Attempt to kill tasklet from interrupt\n");

	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
		do {
			yield();
		} while (test_bit(TASKLET_STATE_SCHED, &t->state));
	}
	tasklet_unlock_wait(t);
	clear_bit(TASKLET_STATE_SCHED, &t->state);
}

EXPORT_SYMBOL(tasklet_kill);
DEFINE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
EXPORT_PER_CPU_SYMBOL(softirq_work_list);

static void __local_trigger(struct call_single_data *cp, int softirq)
{
	struct list_head *head = &__get_cpu_var(softirq_work_list[softirq]);

	list_add_tail(&cp->list, head);

	/* Trigger the softirq only if the list was previously empty. */
	if (head->next == &cp->list)
		raise_softirq_irqoff(softirq);
}
#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
static void remote_softirq_receive(void *data)
{
	struct call_single_data *cp = data;
	unsigned long flags;
	int softirq;

	softirq = cp->priv;

	local_irq_save(flags);
	__local_trigger(cp, softirq);
	local_irq_restore(flags);
}

static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
	if (cpu_online(cpu)) {
		cp->func = remote_softirq_receive;
		cp->info = cp;
		cp->flags = 0;
		cp->priv = softirq;

		__smp_call_function_single(cpu, cp);
		return 0;
	}
	return 1;
}
#else /* CONFIG_USE_GENERIC_SMP_HELPERS */
static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
	return 1;
}
#endif
/**
 * __send_remote_softirq - try to schedule softirq work on a remote cpu
 * @cp: private SMP call function data area
 * @cpu: the remote cpu
 * @this_cpu: the currently executing cpu
 * @softirq: the softirq for the work
 *
 * Attempt to schedule softirq work on a remote cpu.  If this cannot be
 * done, the work is instead queued up on the local cpu.
 *
 * Interrupts must be disabled.
 */
void __send_remote_softirq(struct call_single_data *cp, int cpu, int this_cpu, int softirq)
{
	if (cpu == this_cpu || __try_remote_softirq(cp, cpu, softirq))
		__local_trigger(cp, softirq);
}
EXPORT_SYMBOL(__send_remote_softirq);
/**
 * send_remote_softirq - try to schedule softirq work on a remote cpu
 * @cp: private SMP call function data area
 * @cpu: the remote cpu
 * @softirq: the softirq for the work
 *
 * Like __send_remote_softirq except that disabling interrupts and
 * computing the current cpu is done for the caller.
 */
void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
	unsigned long flags;
	int this_cpu;

	local_irq_save(flags);
	this_cpu = smp_processor_id();
	__send_remote_softirq(cp, cpu, this_cpu, softirq);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(send_remote_softirq);
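/*
 * Illustrative sketch (not part of this file): handing completion-style
 * work to another CPU's softirq.  The struct my_work wrapper, target_cpu
 * and the choice of BLOCK_SOFTIRQ below are hypothetical; the embedded
 * call_single_data must stay valid until the softirq has run.
 *
 *	struct my_work {
 *		struct call_single_data csd;
 *		// ... caller-private fields ...
 *	};
 *
 *	send_remote_softirq(&work->csd, target_cpu, BLOCK_SOFTIRQ);
 */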
static int __cpuinit remote_softirq_cpu_notify(struct notifier_block *self,
					       unsigned long action, void *hcpu)
{
	/*
	 * If a CPU goes away, splice its entries to the current CPU
	 * and trigger a run of the softirq
	 */
	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
		int cpu = (unsigned long) hcpu;
		int i;

		local_irq_disable();
		for (i = 0; i < NR_SOFTIRQS; i++) {
			struct list_head *head = &per_cpu(softirq_work_list[i], cpu);
			struct list_head *local_head;

			if (list_empty(head))
				continue;

			local_head = &__get_cpu_var(softirq_work_list[i]);
			list_splice_init(head, local_head);
			raise_softirq_irqoff(i);
		}
		local_irq_enable();
	}

	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata remote_softirq_cpu_notifier = {
	.notifier_call	= remote_softirq_cpu_notify,
};
void __init softirq_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		int i;

		per_cpu(tasklet_vec, cpu).tail =
			&per_cpu(tasklet_vec, cpu).head;
		per_cpu(tasklet_hi_vec, cpu).tail =
			&per_cpu(tasklet_hi_vec, cpu).head;
		for (i = 0; i < NR_SOFTIRQS; i++)
			INIT_LIST_HEAD(&per_cpu(softirq_work_list[i], cpu));
	}

	register_hotcpu_notifier(&remote_softirq_cpu_notifier);

	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}
static int ksoftirqd(void * __bind_cpu)
{
	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {
		preempt_disable();
		if (!local_softirq_pending()) {
			preempt_enable_no_resched();
			schedule();
			preempt_disable();
		}

		__set_current_state(TASK_RUNNING);

		while (local_softirq_pending()) {
			/* Preempt disable stops cpu going offline.
			   If already offline, we'll be on wrong CPU:
			   don't process */
			if (cpu_is_offline((long)__bind_cpu))
				goto wait_to_die;
			do_softirq();
			preempt_enable_no_resched();
			cond_resched();
			preempt_disable();
		}
		preempt_enable();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;

wait_to_die:
	preempt_enable();
	/* Wait for kthread_stop */
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}
#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which can already be
 * scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
	struct tasklet_struct **i;

	BUG_ON(cpu_online(cpu));
	BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

	if (!test_bit(TASKLET_STATE_SCHED, &t->state))
		return;

	/* CPU is dead, so no lock needed. */
	for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
		if (*i == t) {
			*i = t->next;
			/* If this was the tail element, move the tail ptr */
			if (*i == NULL)
				per_cpu(tasklet_vec, cpu).tail = i;
			return;
		}
	}
	BUG();
}
static void takeover_tasklets(unsigned int cpu)
{
	/* CPU is dead, so no lock needed. */
	local_irq_disable();

	/* Find end, append list for that CPU. */
	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
		*(__get_cpu_var(tasklet_vec).tail) = per_cpu(tasklet_vec, cpu).head;
		__get_cpu_var(tasklet_vec).tail = per_cpu(tasklet_vec, cpu).tail;
		per_cpu(tasklet_vec, cpu).head = NULL;
		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
	}
	raise_softirq_irqoff(TASKLET_SOFTIRQ);

	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
		*__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).head;
		__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).tail;
		per_cpu(tasklet_hi_vec, cpu).head = NULL;
		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
	}
	raise_softirq_irqoff(HI_SOFTIRQ);

	local_irq_enable();
}
#endif /* CONFIG_HOTPLUG_CPU */
static int __cpuinit cpu_callback(struct notifier_block *nfb,
				  unsigned long action,
				  void *hcpu)
{
	int hotcpu = (unsigned long)hcpu;
	struct task_struct *p;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		p = kthread_create(ksoftirqd, hcpu, "ksoftirqd/%d", hotcpu);
		if (IS_ERR(p)) {
			printk("ksoftirqd for %i failed\n", hotcpu);
			return NOTIFY_BAD;
		}
		kthread_bind(p, hotcpu);
		per_cpu(ksoftirqd, hotcpu) = p;
		break;
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		wake_up_process(per_cpu(ksoftirqd, hotcpu));
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		if (!per_cpu(ksoftirqd, hotcpu))
			break;
		/* Unbind so it can run.  Fall thru. */
		kthread_bind(per_cpu(ksoftirqd, hotcpu),
			     cpumask_any(cpu_online_mask));
	case CPU_DEAD:
	case CPU_DEAD_FROZEN: {
		struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

		p = per_cpu(ksoftirqd, hotcpu);
		per_cpu(ksoftirqd, hotcpu) = NULL;
		sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
		kthread_stop(p);
		takeover_tasklets(hotcpu);
		break;
	}
#endif /* CONFIG_HOTPLUG_CPU */
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata cpu_nfb = {
	.notifier_call = cpu_callback
};
static __init int spawn_ksoftirqd(void)
{
	void *cpu = (void *)(long)smp_processor_id();
	int err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);

	BUG_ON(err == NOTIFY_BAD);
	cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
	register_cpu_notifier(&cpu_nfb);
	return 0;
}
early_initcall(spawn_ksoftirqd);
#ifdef CONFIG_SMP
/*
 * Call a function on all processors
 */
int on_each_cpu(void (*func) (void *info), void *info, int wait)
{
	int ret = 0;

	preempt_disable();
	ret = smp_call_function(func, info, wait);
	local_irq_disable();
	func(info);
	local_irq_enable();
	preempt_enable();
	return ret;
}
EXPORT_SYMBOL(on_each_cpu);
#endif
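/*
 * Illustrative sketch (not part of this file): running a short function
 * on every online CPU and waiting for completion.  flush_my_percpu_cache()
 * is a hypothetical callback; it runs with interrupts disabled, so it
 * must not sleep.
 *
 *	static void flush_my_percpu_cache(void *info)
 *	{
 *		// operate on this CPU's private data
 *	}
 *
 *	on_each_cpu(flush_my_percpu_cache, NULL, 1);	// wait == 1
 */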
/*
 * [ These __weak aliases are kept in a separate compilation unit, so that
 *   GCC does not inline them incorrectly. ]
 */

int __init __weak early_irq_init(void)
{
	return 0;
}

int __init __weak arch_early_irq_init(void)
{
	return 0;
}

int __weak arch_init_chip_data(struct irq_desc *desc, int cpu)
{
	return 0;
}