/*
 *	linux/kernel/softirq.c
 *
 *	Copyright (C) 1992 Linus Torvalds
 *
 *	Distribute under GPLv2.
 *
 *	Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 *
 *	Remote softirq infrastructure is by Jens Axboe.
 */
#include <linux/module.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/tick.h>

#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>
/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if softirq is serialized, only local cpu is marked for
     execution. Hence, we get something sort of weak cpu binding.
     Though it is still not clear whether it will result in better
     locality or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized wrt itself.
 */
#ifndef __ARCH_IRQ_STAT
irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
EXPORT_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

char *softirq_to_name[NR_SOFTIRQS] = {
	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
	"TASKLET", "SCHED", "HRTIMER", "RCU"
};
/*
 * We cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so let the scheduler balance
 * the softirq load for us.
 */
void wakeup_softirqd(void)
{
	/* Interrupts are disabled: no need to stop preemption */
	struct task_struct *tsk = __get_cpu_var(ksoftirqd);

	if (tsk && tsk->state != TASK_RUNNING)
		wake_up_process(tsk);
}
/*
 * preempt_count and SOFTIRQ_OFFSET usage:
 * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
 *   softirq processing.
 * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
 *   on local_bh_disable or local_bh_enable.
 * This lets us distinguish between whether we are currently processing
 * softirq and whether we just have bh disabled.
 */
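
/*
 * Illustrative sketch (not part of the original file): telling the two
 * states above apart from code.  Assumes the in_softirq() and
 * in_serving_softirq() helpers from <linux/hardirq.h>; pr_info() is
 * used only for demonstration.
 */
#if 0	/* example only -- not compiled */
static void example_report_bh_state(void)
{
	if (in_serving_softirq())	/* softirq_count() & SOFTIRQ_OFFSET */
		pr_info("inside a softirq handler\n");
	else if (in_softirq())		/* only bh disabled via local_bh_disable() */
		pr_info("bottom halves disabled, no handler running\n");
}
#endif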
/*
 * This one is for softirq.c-internal use,
 * where hardirqs are disabled legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS
static void __local_bh_disable(unsigned long ip, unsigned int cnt)
{
	unsigned long flags;

	WARN_ON_ONCE(in_irq());

	raw_local_irq_save(flags);
	/*
	 * The preempt tracer hooks into add_preempt_count and will break
	 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
	 * is set and before current->softirq_enabled is cleared.
	 * We must manually increment preempt_count here and manually
	 * call the trace_preempt_off later.
	 */
	preempt_count() += cnt;
	/*
	 * Were softirqs turned off above:
	 */
	if (softirq_count() == cnt)
		trace_softirqs_off(ip);
	raw_local_irq_restore(flags);

	if (preempt_count() == cnt)
		trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
}
#else /* !CONFIG_TRACE_IRQFLAGS */
static inline void __local_bh_disable(unsigned long ip, unsigned int cnt)
{
	add_preempt_count(cnt);
	barrier();
}
#endif /* CONFIG_TRACE_IRQFLAGS */
void local_bh_disable(void)
{
	__local_bh_disable((unsigned long)__builtin_return_address(0),
				SOFTIRQ_DISABLE_OFFSET);
}

EXPORT_SYMBOL(local_bh_disable);
static void __local_bh_enable(unsigned int cnt)
{
	WARN_ON_ONCE(in_irq());
	WARN_ON_ONCE(!irqs_disabled());

	if (softirq_count() == cnt)
		trace_softirqs_on((unsigned long)__builtin_return_address(0));
	sub_preempt_count(cnt);
}

/*
 * Special-case - softirqs can safely be enabled in
 * cond_resched_softirq(), or by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
	__local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
}

EXPORT_SYMBOL(_local_bh_enable);
static inline void _local_bh_enable_ip(unsigned long ip)
{
	WARN_ON_ONCE(in_irq() || irqs_disabled());
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_disable();
#endif
	/*
	 * Are softirqs going to be turned on now:
	 */
	if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
		trace_softirqs_on(ip);
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
	 */
	sub_preempt_count(SOFTIRQ_DISABLE_OFFSET - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending()))
		do_softirq();

	dec_preempt_count();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_enable();
#endif
	preempt_check_resched();
}

void local_bh_enable(void)
{
	_local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
EXPORT_SYMBOL(local_bh_enable);

void local_bh_enable_ip(unsigned long ip)
{
	_local_bh_enable_ip(ip);
}
EXPORT_SYMBOL(local_bh_enable_ip);
/*
 * We restart softirq processing MAX_SOFTIRQ_RESTART times,
 * and we fall back to softirqd after that.
 *
 * This number has been established via experimentation.
 * The two things to balance are latency and fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_RESTART 10
asmlinkage void __do_softirq(void)
{
	struct softirq_action *h;
	__u32 pending;
	int max_restart = MAX_SOFTIRQ_RESTART;
	int cpu;

	pending = local_softirq_pending();
	account_system_vtime(current);

	__local_bh_disable((unsigned long)__builtin_return_address(0),
				SOFTIRQ_OFFSET);
	lockdep_softirq_enter();

	cpu = smp_processor_id();
restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);

	local_irq_enable();

	h = softirq_vec;

	do {
		if (pending & 1) {
			int prev_count = preempt_count();
			kstat_incr_softirqs_this_cpu(h - softirq_vec);

			trace_softirq_entry(h, softirq_vec);
			h->action(h);
			trace_softirq_exit(h, softirq_vec);
			if (unlikely(prev_count != preempt_count())) {
				printk(KERN_ERR "huh, entered softirq %td %s %p "
				       "with preempt_count %08x, "
				       "exited with %08x?\n", h - softirq_vec,
				       softirq_to_name[h - softirq_vec],
				       h->action, prev_count, preempt_count());
				preempt_count() = prev_count;
			}

			rcu_bh_qs(cpu);
		}
		h++;
		pending >>= 1;
	} while (pending);

	local_irq_disable();

	pending = local_softirq_pending();
	if (pending && --max_restart)
		goto restart;

	if (pending)
		wakeup_softirqd();

	lockdep_softirq_exit();

	account_system_vtime(current);
	__local_bh_enable(SOFTIRQ_OFFSET);
}
#ifndef __ARCH_HAS_DO_SOFTIRQ

asmlinkage void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	pending = local_softirq_pending();

	if (pending)
		__do_softirq();

	local_irq_restore(flags);
}

#endif
/*
 * Enter an interrupt context.
 */
void irq_enter(void)
{
	int cpu = smp_processor_id();

	rcu_irq_enter();
	if (idle_cpu(cpu) && !in_interrupt()) {
		/*
		 * Prevent raise_softirq from needlessly waking up ksoftirqd
		 * here, as softirq will be serviced on return from interrupt.
		 */
		local_bh_disable();
		tick_check_idle(cpu);
		_local_bh_enable();
	}

	__irq_enter();
}
#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
# define invoke_softirq()	__do_softirq()
#else
# define invoke_softirq()	do_softirq()
#endif
/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
	account_system_vtime(current);
	trace_hardirq_exit();
	sub_preempt_count(IRQ_EXIT_OFFSET);
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();

	rcu_irq_exit();
#ifdef CONFIG_NO_HZ
	/* Make sure that timer wheel updates are propagated */
	if (idle_cpu(smp_processor_id()) && !in_interrupt() && !need_resched())
		tick_nohz_stop_sched_tick(0);
#endif
	preempt_enable_no_resched();
}
/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
	__raise_softirq_irqoff(nr);

	/*
	 * If we're in an interrupt or softirq, we're done
	 * (this also catches softirq-disabled code). We will
	 * actually run the softirq once we return from
	 * the irq or softirq.
	 *
	 * Otherwise we wake up ksoftirqd to make sure we
	 * schedule the softirq soon.
	 */
	if (!in_interrupt())
		wakeup_softirqd();
}

void raise_softirq(unsigned int nr)
{
	unsigned long flags;

	local_irq_save(flags);
	raise_softirq_irqoff(nr);
	local_irq_restore(flags);
}
void open_softirq(int nr, void (*action)(struct softirq_action *))
{
	softirq_vec[nr].action = action;
}
/*
 * Tasklets
 */
struct tasklet_head
{
	struct tasklet_struct *head;
	struct tasklet_struct **tail;
};

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);
void __tasklet_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = NULL;
	*__get_cpu_var(tasklet_vec).tail = t;
	__get_cpu_var(tasklet_vec).tail = &(t->next);
	raise_softirq_irqoff(TASKLET_SOFTIRQ);
	local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_schedule);
void __tasklet_hi_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = NULL;
	*__get_cpu_var(tasklet_hi_vec).tail = t;
	__get_cpu_var(tasklet_hi_vec).tail = &(t->next);
	raise_softirq_irqoff(HI_SOFTIRQ);
	local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_hi_schedule);
void __tasklet_hi_schedule_first(struct tasklet_struct *t)
{
	BUG_ON(!irqs_disabled());

	t->next = __get_cpu_var(tasklet_hi_vec).head;
	__get_cpu_var(tasklet_hi_vec).head = t;
	__raise_softirq_irqoff(HI_SOFTIRQ);
}

EXPORT_SYMBOL(__tasklet_hi_schedule_first);
static void tasklet_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __get_cpu_var(tasklet_vec).head;
	__get_cpu_var(tasklet_vec).head = NULL;
	__get_cpu_var(tasklet_vec).tail = &__get_cpu_var(tasklet_vec).head;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*__get_cpu_var(tasklet_vec).tail = t;
		__get_cpu_var(tasklet_vec).tail = &(t->next);
		__raise_softirq_irqoff(TASKLET_SOFTIRQ);
		local_irq_enable();
	}
}
static void tasklet_hi_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __get_cpu_var(tasklet_hi_vec).head;
	__get_cpu_var(tasklet_hi_vec).head = NULL;
	__get_cpu_var(tasklet_hi_vec).tail = &__get_cpu_var(tasklet_hi_vec).head;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*__get_cpu_var(tasklet_hi_vec).tail = t;
		__get_cpu_var(tasklet_hi_vec).tail = &(t->next);
		__raise_softirq_irqoff(HI_SOFTIRQ);
		local_irq_enable();
	}
}
void tasklet_init(struct tasklet_struct *t,
		  void (*func)(unsigned long), unsigned long data)
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->func = func;
	t->data = data;
}

EXPORT_SYMBOL(tasklet_init);
void tasklet_kill(struct tasklet_struct *t)
{
	if (in_interrupt())
		printk("Attempt to kill tasklet from interrupt\n");

	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
		do {
			yield();
		} while (test_bit(TASKLET_STATE_SCHED, &t->state));
	}
	tasklet_unlock_wait(t);
	clear_bit(TASKLET_STATE_SCHED, &t->state);
}

EXPORT_SYMBOL(tasklet_kill);
/*
 * tasklet_hrtimer
 */

/*
 * The trampoline is called when the hrtimer expires. If this is
 * called from the hrtimer interrupt then we schedule the tasklet as
 * the timer callback function expects to run in softirq context. If
 * it's called in softirq context anyway (i.e. high resolution timers
 * disabled) then the hrtimer callback is called right away.
 */
static enum hrtimer_restart __hrtimer_tasklet_trampoline(struct hrtimer *timer)
{
	struct tasklet_hrtimer *ttimer =
		container_of(timer, struct tasklet_hrtimer, timer);

	if (hrtimer_is_hres_active(timer)) {
		tasklet_hi_schedule(&ttimer->tasklet);
		return HRTIMER_NORESTART;
	}
	return ttimer->function(timer);
}
/*
 * Helper function which calls the hrtimer callback from
 * tasklet/softirq context
 */
static void __tasklet_hrtimer_trampoline(unsigned long data)
{
	struct tasklet_hrtimer *ttimer = (void *)data;
	enum hrtimer_restart restart;

	restart = ttimer->function(&ttimer->timer);
	if (restart != HRTIMER_NORESTART)
		hrtimer_restart(&ttimer->timer);
}
/**
 * tasklet_hrtimer_init - Init a tasklet/hrtimer combo for softirq callbacks
 * @ttimer:	 tasklet_hrtimer which is initialized
 * @function:	 hrtimer callback function which gets called from softirq context
 * @which_clock: clock id (CLOCK_MONOTONIC/CLOCK_REALTIME)
 * @mode:	 hrtimer mode (HRTIMER_MODE_ABS/HRTIMER_MODE_REL)
 */
void tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
			  enum hrtimer_restart (*function)(struct hrtimer *),
			  clockid_t which_clock, enum hrtimer_mode mode)
{
	hrtimer_init(&ttimer->timer, which_clock, mode);
	ttimer->timer.function = __hrtimer_tasklet_trampoline;
	tasklet_init(&ttimer->tasklet, __tasklet_hrtimer_trampoline,
		     (unsigned long)ttimer);
	ttimer->function = function;
}
EXPORT_SYMBOL_GPL(tasklet_hrtimer_init);
/*
 * Remote softirq bits
 */

DEFINE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
EXPORT_PER_CPU_SYMBOL(softirq_work_list);
static void __local_trigger(struct call_single_data *cp, int softirq)
{
	struct list_head *head = &__get_cpu_var(softirq_work_list[softirq]);

	list_add_tail(&cp->list, head);

	/* Trigger the softirq only if the list was previously empty. */
	if (head->next == &cp->list)
		raise_softirq_irqoff(softirq);
}
#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
static void remote_softirq_receive(void *data)
{
	struct call_single_data *cp = data;
	unsigned long flags;
	int softirq;

	softirq = cp->priv;

	local_irq_save(flags);
	__local_trigger(cp, softirq);
	local_irq_restore(flags);
}

static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
	if (cpu_online(cpu)) {
		cp->func = remote_softirq_receive;
		cp->info = cp;
		cp->flags = 0;
		cp->priv = softirq;

		__smp_call_function_single(cpu, cp, 0);
		return 0;
	}

	return 1;
}
623 static int __try_remote_softirq(struct call_single_data
*cp
, int cpu
, int softirq
)
/**
 * __send_remote_softirq - try to schedule softirq work on a remote cpu
 * @cp: private SMP call function data area
 * @cpu: the remote cpu
 * @this_cpu: the currently executing cpu
 * @softirq: the softirq for the work
 *
 * Attempt to schedule softirq work on a remote cpu.  If this cannot be
 * done, the work is instead queued up on the local cpu.
 *
 * Interrupts must be disabled.
 */
void __send_remote_softirq(struct call_single_data *cp, int cpu, int this_cpu, int softirq)
{
	if (cpu == this_cpu || __try_remote_softirq(cp, cpu, softirq))
		__local_trigger(cp, softirq);
}
EXPORT_SYMBOL(__send_remote_softirq);
/**
 * send_remote_softirq - try to schedule softirq work on a remote cpu
 * @cp: private SMP call function data area
 * @cpu: the remote cpu
 * @softirq: the softirq for the work
 *
 * Like __send_remote_softirq except that disabling interrupts and
 * computing the current cpu is done for the caller.
 */
void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
	unsigned long flags;
	int this_cpu;

	local_irq_save(flags);
	this_cpu = smp_processor_id();
	__send_remote_softirq(cp, cpu, this_cpu, softirq);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(send_remote_softirq);
static int __cpuinit remote_softirq_cpu_notify(struct notifier_block *self,
					       unsigned long action, void *hcpu)
{
	/*
	 * If a CPU goes away, splice its entries to the current CPU
	 * and trigger a run of the softirq
	 */
	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
		int cpu = (unsigned long) hcpu;
		int i;

		local_irq_disable();
		for (i = 0; i < NR_SOFTIRQS; i++) {
			struct list_head *head = &per_cpu(softirq_work_list[i], cpu);
			struct list_head *local_head;

			if (list_empty(head))
				continue;

			local_head = &__get_cpu_var(softirq_work_list[i]);
			list_splice_init(head, local_head);
			raise_softirq_irqoff(i);
		}
		local_irq_enable();
	}

	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata remote_softirq_cpu_notifier = {
	.notifier_call	= remote_softirq_cpu_notify,
};
void __init softirq_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		int i;

		per_cpu(tasklet_vec, cpu).tail =
			&per_cpu(tasklet_vec, cpu).head;
		per_cpu(tasklet_hi_vec, cpu).tail =
			&per_cpu(tasklet_hi_vec, cpu).head;
		for (i = 0; i < NR_SOFTIRQS; i++)
			INIT_LIST_HEAD(&per_cpu(softirq_work_list[i], cpu));
	}

	register_hotcpu_notifier(&remote_softirq_cpu_notifier);

	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}
static int ksoftirqd(void * __bind_cpu)
{
	set_current_state(TASK_INTERRUPTIBLE);

	current->flags |= PF_KSOFTIRQD;
	while (!kthread_should_stop()) {
		preempt_disable();
		if (!local_softirq_pending()) {
			preempt_enable_no_resched();
			schedule();
			preempt_disable();
		}

		__set_current_state(TASK_RUNNING);

		while (local_softirq_pending()) {
			/* Preempt disable stops cpu going offline.
			   If already offline, we'll be on wrong CPU:
			   don't process */
			if (cpu_is_offline((long)__bind_cpu))
				goto wait_to_die;
			do_softirq();
			preempt_enable_no_resched();
			cond_resched();
			preempt_disable();
			rcu_sched_qs((long)__bind_cpu);
		}
		preempt_enable();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;

wait_to_die:
	preempt_enable();
	/* Wait for kthread_stop */
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}
#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which can already be
 * scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
	struct tasklet_struct **i;

	BUG_ON(cpu_online(cpu));
	BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

	if (!test_bit(TASKLET_STATE_SCHED, &t->state))
		return;

	/* CPU is dead, so no lock needed. */
	for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
		if (*i == t) {
			*i = t->next;
			/* If this was the tail element, move the tail ptr */
			if (*i == NULL)
				per_cpu(tasklet_vec, cpu).tail = i;
			return;
		}
	}
	BUG();
}
static void takeover_tasklets(unsigned int cpu)
{
	/* CPU is dead, so no lock needed. */
	local_irq_disable();

	/* Find end, append list for that CPU. */
	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
		*(__get_cpu_var(tasklet_vec).tail) = per_cpu(tasklet_vec, cpu).head;
		__get_cpu_var(tasklet_vec).tail = per_cpu(tasklet_vec, cpu).tail;
		per_cpu(tasklet_vec, cpu).head = NULL;
		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
	}
	raise_softirq_irqoff(TASKLET_SOFTIRQ);

	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
		*__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).head;
		__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).tail;
		per_cpu(tasklet_hi_vec, cpu).head = NULL;
		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
	}
	raise_softirq_irqoff(HI_SOFTIRQ);

	local_irq_enable();
}
#endif /* CONFIG_HOTPLUG_CPU */
static int __cpuinit cpu_callback(struct notifier_block *nfb,
				  unsigned long action,
				  void *hcpu)
{
	int hotcpu = (unsigned long)hcpu;
	struct task_struct *p;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		p = kthread_create(ksoftirqd, hcpu, "ksoftirqd/%d", hotcpu);
		if (IS_ERR(p)) {
			printk("ksoftirqd for %i failed\n", hotcpu);
			return NOTIFY_BAD;
		}
		kthread_bind(p, hotcpu);
		per_cpu(ksoftirqd, hotcpu) = p;
		break;
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		wake_up_process(per_cpu(ksoftirqd, hotcpu));
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		if (!per_cpu(ksoftirqd, hotcpu))
			break;
		/* Unbind so it can run.  Fall thru. */
		kthread_bind(per_cpu(ksoftirqd, hotcpu),
			     cpumask_any(cpu_online_mask));
	case CPU_DEAD:
	case CPU_DEAD_FROZEN: {
		struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

		p = per_cpu(ksoftirqd, hotcpu);
		per_cpu(ksoftirqd, hotcpu) = NULL;
		sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
		kthread_stop(p);
		takeover_tasklets(hotcpu);
		break;
	}
#endif /* CONFIG_HOTPLUG_CPU */
	}
	return NOTIFY_OK;
}
static struct notifier_block __cpuinitdata cpu_nfb = {
	.notifier_call = cpu_callback
};

static __init int spawn_ksoftirqd(void)
{
	void *cpu = (void *)(long)smp_processor_id();
	int err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);

	BUG_ON(err == NOTIFY_BAD);
	cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
	register_cpu_notifier(&cpu_nfb);
	return 0;
}
early_initcall(spawn_ksoftirqd);
#ifdef CONFIG_SMP
/*
 * Call a function on all processors
 */
int on_each_cpu(void (*func) (void *info), void *info, int wait)
{
	int ret = 0;

	preempt_disable();
	ret = smp_call_function(func, info, wait);
	local_irq_disable();
	func(info);
	local_irq_enable();
	preempt_enable();
	return ret;
}
EXPORT_SYMBOL(on_each_cpu);
#endif
/*
 * [ These __weak aliases are kept in a separate compilation unit, so that
 *   GCC does not inline them incorrectly. ]
 */

int __init __weak early_irq_init(void)
{
	return 0;
}

int __init __weak arch_probe_nr_irqs(void)
{
	return 0;
}

int __init __weak arch_early_irq_init(void)
{
	return 0;
}

int __weak arch_init_chip_data(struct irq_desc *desc, int node)
{
	return 0;
}